diff --git a/.forgejo/bin/check_ver.sh b/.forgejo/bin/check_ver.sh
deleted file mode 100755
index 66c7fd0..0000000
--- a/.forgejo/bin/check_ver.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-# expects the following env variables:
-# downstream: downstream repo
-
-repo=${downstream/*\/}
-
-curl --silent $downstream/x86_64/APKINDEX.tar.gz | tar -O -zx APKINDEX > APKINDEX
-
-owned_by_you=$(awk -v RS= -v ORS="\n\n" '/m:Antoine Martin \(ayakael\) /' APKINDEX | awk -F ':' '{if($1=="o"){print $2}}' | sort | uniq)
-
-echo "Found $(printf '%s\n' $owned_by_you | wc -l ) packages owned by you"
-
-rm -f out_of_date not_in_anitya
-
-for pkg in $owned_by_you; do
-	upstream_version=$(curl --fail -X GET -sS -H 'Content-Type: application/json' "https://release-monitoring.org/api/v2/packages/?name=$pkg&distribution=Alpine" | jq -r '.items.[].stable_version')
-	downstream_version=$(sed -n "/^P:$pkg$/,/^$/p" APKINDEX | awk -F ':' '{if($1=="V"){print $2}}' | sort -V | tail -n 1)
-	downstream_version=${downstream_version/-*}
-
-	# special case for forgejo-aneksajo:
-	upstream_version=${upstream_version/-git-annex/_git}
-
-	if [ -z "$upstream_version" ]; then
-		echo "$pkg not in anitya"
-		echo "$pkg" >> not_in_anitya
-	elif [ "$downstream_version" != "$(printf '%s\n' $upstream_version $downstream_version | sort -V | head -n 1)" ]; then
-		echo "$pkg higher downstream"
-		continue
-	elif [ "$upstream_version" != "$downstream_version" ]; then
-		echo "$pkg upstream version $upstream_version does not match downstream version $downstream_version"
-		echo "$pkg $downstream_version $upstream_version $repo" >> out_of_date
-	fi
-done
diff --git a/.forgejo/bin/create_issue.sh b/.forgejo/bin/create_issue.sh
deleted file mode 100755
index d162758..0000000
--- a/.forgejo/bin/create_issue.sh
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/bin/bash
-
-# expects:
-# env variable FORGEJO_TOKEN
-# file out_of_date
-
-IFS='
-'
-repo=${downstream/*\/}
-
-does_it_exist() {
-	name=$1
-	downstream_version=$2
-	upstream_version=$3
-	repo=$4
-
-	query="$repo/$name: upgrade to $upstream_version"
-	query="$(echo $query | sed 's| |%20|g' | sed 's|:|%3A|g' | sed 's|/|%2F|g' )"
-
-	result="$(curl --silent -X 'GET' \
-		"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues?state=open&q=$query&type=issues" \
-		-H 'accept: application/json' \
-		-H "authorization: Basic $FORGEJO_TOKEN"
-	)"
-
-	if [ "$result" == "[]" ]; then
-		return 1
-	fi
-}
-
-is_it_old() {
-	name=$1
-	downstream_version=$2
-	upstream_version=$3
-	repo=$4
-
-	query="$repo/$name: upgrade to"
-	query="$(echo $query | sed 's| |%20|g' | sed 's|:|%3A|g' | sed 's|/|%2F|g' )"
-
-	result="$(curl --silent -X 'GET' \
-		"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues?state=open&q=$query&type=issues" \
-		-H 'accept: application/json' \
-		-H "authorization: Basic $FORGEJO_TOKEN"
-	)"
-
-	result_title="$(echo $result | jq -r '.[].title' )"
-	result_id="$(echo $result | jq -r '.[].number' )"
-	result_upstream_version="$(echo $result_title | awk '{print $4}')"
-
-	if [ "$upstream_version" != "$result_upstream_version" ]; then
-		echo $result_id
-	else
-		echo 0
-	fi
-}
-
-update_title() {
-	name=$1
-	downstream_version=$2
-	upstream_version=$3
-	repo=$4
-	id=$5
-
-	result=$(curl --silent -X 'PATCH' \
-		"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues/$id" \
-		-H 'accept: application/json' \
-		-H "authorization: Basic $FORGEJO_TOKEN" \
-		-H 'Content-Type: application/json' \
-		-d "{
-			\"title\": \"$repo/$name: upgrade to $upstream_version\"
-		}"
-	)
-
-	return 0
-}
-
-create_issue() {
-	name=$1
-	downstream_version=$2
-	upstream_version=$3
-	repo=$4
-
-	result=$(curl --silent -X 'POST' \
-		"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues" \
-		-H 'accept: application/json' \
-		-H "authorization: Basic $FORGEJO_TOKEN" \
-		-H 'Content-Type: application/json' \
-		-d "{
-			\"title\": \"$repo/$name: upgrade to $upstream_version\",
-			\"labels\": [
-				$LABEL_NUMBER
-			]
-		}")
-
-	return 0
-}
-
-if [ -f out_of_date ]; then
-	out_of_date="$(cat out_of_date)"
-
-	echo "Detected $(wc -l out_of_date) out-of-date packages, creating issues"
-
-	for pkg in $out_of_date; do
-		name="$(echo $pkg | awk '{print $1}')"
-		downstream_version="$(echo $pkg | awk '{print $2}')"
-		upstream_version="$(echo $pkg | awk '{print $3}')"
-		repo="$(echo $pkg | awk '{print $4}')"
-
-		if does_it_exist $name $downstream_version $upstream_version $repo; then
-			echo "Issue for $repo/$name already exists"
-			continue
-		fi
-
-		id=$(is_it_old $name $downstream_version $upstream_version $repo)
-
-		if [ "$id" != "0" ] && [ -n "$id" ]; then
-			echo "Issue for $repo/$name needs updating"
-			update_title $name $downstream_version $upstream_version $repo $id
-			continue
-		fi
-
-		echo "Creating issue for $repo/$name"
-		create_issue $name $downstream_version $upstream_version $repo
-	done
-fi
-
-if [ -f not_in_anitya ]; then
-	query="Add missing $repo packages to anitya"
-	query="$(echo $query | sed 's| |%20|g')"
-
-	result="$(curl --silent -X 'GET' \
-		"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues?state=open&q=$query&type=issues" \
-		-H 'accept: application/json' \
-		-H "authorization: Basic $FORGEJO_TOKEN"
-	)"
-
-	if [ "$result" == "[]" ]; then
-		echo "Creating anitya issue"
-		result=$(curl --silent -X 'POST' \
-			"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues" \
-			-H 'accept: application/json' \
-			-H "authorization: Basic $FORGEJO_TOKEN" \
-			-H 'Content-Type: application/json' \
-			-d "{
-				\"title\": \"Add missing $repo packages to anitya\",
-				\"body\": \"- [ ] $(sed '{:q;N;s/\n/\\n- [ ] /g;t q}' not_in_anitya)\",
-				\"labels\": [
-					$LABEL_NUMBER
-				]
-			}")
-
-	else
-		echo "Updating anitya issue"
-		result_id="$(echo $result | jq -r '.[].number' )"
-		result=$(curl --silent -X 'PATCH' \
-			"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues/$result_id" \
-			-H 'accept: application/json' \
-			-H "authorization: Basic $FORGEJO_TOKEN" \
-			-H 'Content-Type: application/json' \
-			-d "{
-				\"body\": \"- [ ] $(sed '{:q;N;s/\n/\\n- [ ] /g;t q}' not_in_anitya)\"
-			}"
-		)
-	fi
-fi
diff --git a/.forgejo/bin/deploy.sh b/.forgejo/bin/deploy.sh
index daf2496..a2fd7d4 100755
--- a/.forgejo/bin/deploy.sh
+++ b/.forgejo/bin/deploy.sh
@@ -3,7 +3,7 @@
 # shellcheck disable=SC3040
 set -eu -o pipefail
 
-readonly REPOS="backports user"
+readonly REPOS="backports ilot"
 readonly BASEBRANCH=$GITHUB_BASE_REF
 readonly TARGET_REPO=$CI_ALPINE_REPO
 
@@ -14,12 +14,18 @@ for apk in $apkgs; do
 	arch=$(echo $apk | awk -F '/' '{print $3}')
 	name=$(echo $apk | awk -F '/' '{print $4}')
 
-	echo "Sending $name of arch $arch to $TARGET_REPO/$BASEBRANCH/$branch"
-	return=$(curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN --upload-file $apk $TARGET_REPO/$BASEBRANCH/$branch 2>&1)
-	echo $return
-	if [ "$return" == "package file already exists" ]; then
-		echo "Package already exists, refreshing..."
-		curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN -X DELETE $TARGET_REPO/$BASEBRANCH/$branch/$arch/$name
-		curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN --upload-file $apk $TARGET_REPO/$BASEBRANCH/$branch
+	if [ "$(curl -s $GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/pulls/$GITHUB_EVENT_NUMBER | jq .draft)" == "true" ]; then
+		# if draft, send to -testing branch
+		branch="$branch-testing"
 	fi
+
+	# always clear out package before deploying
+	for delarch in x86_64 aarch64 armv7 armhf s390x ppc64le riscv64 loongarch64 x86; do
+		curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN -X DELETE $TARGET_REPO/$BASEBRANCH/$branch/$delarch/$name > /dev/null 2>&1
+	done
+
+	echo "Sending $name of arch $arch to $TARGET_REPO/$BASEBRANCH/$branch"
+	curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN --upload-file $apk $TARGET_REPO/$BASEBRANCH/$branch
+done
+
diff --git a/.forgejo/workflows/build-aarch64.yaml b/.forgejo/workflows/build-aarch64.yaml
index 0364014..8e19c74 100644
--- a/.forgejo/workflows/build-aarch64.yaml
+++ b/.forgejo/workflows/build-aarch64.yaml
@@ -19,8 +19,7 @@ jobs:
     steps:
       - name: Environment setup
         run: |
-          doas apk add nodejs git patch curl net-tools
-          doas hostname host.docker.internal
+          doas apk add nodejs git patch curl
           cd /etc/apk/keys
           doas curl -JO https://forge.ilot.io/api/packages/ilot/alpine/key
       - name: Repo pull
diff --git a/.forgejo/workflows/build-x86_64.yaml b/.forgejo/workflows/build-x86_64.yaml
index c805199..9a7dac2 100644
--- a/.forgejo/workflows/build-x86_64.yaml
+++ b/.forgejo/workflows/build-x86_64.yaml
@@ -19,8 +19,7 @@ jobs:
     steps:
      - name: Environment setup
        run: |
-          doas apk add nodejs git patch curl net-tools
-          doas hostname host.docker.internal
+          doas apk add nodejs git patch curl
          cd /etc/apk/keys
          doas curl -JO https://forge.ilot.io/api/packages/ilot/alpine/key
      - name: Repo pull
diff --git a/.forgejo/workflows/check-ilot.yml b/.forgejo/workflows/check-ilot.yml
deleted file mode 100644
index 6a3e2a7..0000000
--- a/.forgejo/workflows/check-ilot.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-on:
-  workflow_dispatch:
-
-  schedule:
-    - cron: '0 5 * * *'
-
-jobs:
-  check-user:
-    name: Check user repo
-    runs-on: x86_64
-    container:
-      image: alpine:latest
-    env:
-      downstream: https://forge.ilot.io/api/packages/ilot/alpine/v3.21/ilot
-      FORGEJO_TOKEN: ${{ secrets.forgejo_token }}
-      LABEL_NUMBER: 8
-    steps:
-      - name: Environment setup
-        run: apk add grep coreutils gawk curl wget bash nodejs git jq sed
-      - name: Get scripts
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 1
-      - name: Check out-of-date packages
-        run: ${{ github.workspace }}/.forgejo/bin/check_ver.sh
-      - name: Create issues
-        run: ${{ github.workspace }}/.forgejo/bin/create_issue.sh
diff --git a/README.md b/README.md
index 24fdd2a..ae24788 100644
--- a/README.md
+++ b/README.md
@@ -1,43 +1,44 @@
-# iports
-Upstream: https://forge.ilot.io/ilot/iports
+# user-aports
+Upstream: https://lab.ilot.io/ayakael/user-aports
 
 ## Description
 
 This repository contains aports that are not yet merged in the official Alpine
 Linux repository or don’t adhere to Alpine polices. Packages are automatically
-built using CI. Once built, they are deployed to a Forgejo-backed Alpine
-repository.
+built using GitLab CI on my own GitLab instance. Once built, they are deployed
+to a git-lfs repository, making them available to apk.
+
+Branches are matched to Alpine releases.
 
-Branches are matched to Alpine latest released.
 ## Repositories
 
-You can browse all the repositories at https://forge.ilot.io/ilot/iports/packages
+You can browse all the repositories at https://lab.ilot.io/ayakael/repo-apk.
 Affixed to each repository description is the appropriate link for use in
 `/etc/apk/repositories`.
 
 #### Backports
 
 ```
-https://forge.ilot.io/api/packages/ilot/alpine/v3.20/backports
+https://lab.ilot.io/ayakael/repo-apk/-/raw/edge/backports
 ```
 
 Aports from the official Alpine repositories backported from edge.
 
-#### Ilot
+#### User
 
 ```
-https://forge.ilot.io/api/packages/ilot/alpine/v3.20/backports
+https://lab.ilot.io/ayakael/repo-apk/-/raw/edge/user
 ```
 
 Aports that have yet to be (or may never be) upstreamed to the official
-aports and that are used by ilot coop.
+aports.
 
 ## How to use
 
-Add security key of the apk repository to your /etc/apk/keys:
+Add security key of the repo-apk repository to your /etc/apk/keys:
 
 ```shell
 cd /etc/apk/keys
-curl -JO https://forge.ilot.io/api/packages/ilot/alpine/key
+wget https://lab.ilot.io/ayakael/repo-apk/-/raw/edge/antoine.martin@protonmail.com-5b3109ad.rsa.pub
 ```
 
 Add repositories that you want to use (see above) to `/etc/apk/repositories`.
@@ -51,10 +52,10 @@ they will work for you.
 ## Contribution & bug reports
 
 If you wish to contribute to this aports collection, or wish to report a bug,
-you can do so on Forge here:
-https://forge.ilot.io/ilot/iports/issues
+you can do so on Alpine's GitLab instance here:
+https://gitlab.alpinelinux.org/ayakael/user-aports
 
-For packages that are in backports, bug reports and merge requests
+For packages that are in testing/community, bug reports and merge requests
 should be done on Alpine's aports repo instance:
 https://gitlab.alpinelinux.org/alpine/aports
 
diff --git a/backports/forgejo-runner/APKBUILD b/backports/forgejo-runner/APKBUILD
new file mode 100644
index 0000000..1005964
--- /dev/null
+++ b/backports/forgejo-runner/APKBUILD
@@ -0,0 +1,47 @@
+# Contributor: Patrycja Rosa
+# Maintainer: Patrycja Rosa
+pkgname=forgejo-runner
+pkgver=3.5.0
+pkgrel=2
+pkgdesc="CI/CD job runner for Forgejo"
+url="https://code.forgejo.org/forgejo/runner"
+arch="all"
+license="MIT"
+makedepends="go"
+install="$pkgname.pre-install $pkgname.pre-upgrade"
+subpackages="$pkgname-openrc"
+source="$pkgname-$pkgver.tar.gz::https://code.forgejo.org/forgejo/runner/archive/v$pkgver.tar.gz
+
+	forgejo-runner.logrotate
+	forgejo-runner.initd
+	forgejo-runner.confd
+	"
+builddir="$srcdir/runner"
+options="!check" # tests require running forgejo
+
+build() {
+	go build \
+		-o forgejo-runner \
+		-ldflags "-X gitea.com/gitea/act_runner/internal/pkg/ver.version=$pkgver"
+	./forgejo-runner generate-config > config.example.yaml
+}
+
+check() {
+	go test ./...
+}
+
+package() {
+	install -Dm755 forgejo-runner -t "$pkgdir"/usr/bin/
+	install -Dm644 config.example.yaml -t "$pkgdir"/etc/forgejo-runner/
+
+	install -Dm755 "$srcdir"/forgejo-runner.initd "$pkgdir"/etc/init.d/forgejo-runner
+	install -Dm644 "$srcdir"/forgejo-runner.confd "$pkgdir"/etc/conf.d/forgejo-runner
+	install -Dm644 "$srcdir"/forgejo-runner.logrotate "$pkgdir"/etc/logrotate.d/forgejo-runner
+}
+
+sha512sums="
+e78968a5f9b6e797fb759a5c8cbf46a5c2fef2083dabc88599c9017729faface963576c63a948b0add424cb267902e864fb1a1b619202660296976d93e670713  forgejo-runner-3.5.0.tar.gz
+a3c7238b0c63053325d31e09277edd88690ef5260854517f82d9042d6173fb5d24ebfe36e1d7363673dd8801972638a6e69b6af8ad43debb6057515c73655236  forgejo-runner.logrotate
+bb0c6fbe90109c77f9ef9cb0d35d20b8033be0e4b7a60839b596aa5528dfa24309ec894d8c04066bf8fb30143e63a5fd8cc6fc89aac364422b583e0f840e2da6  forgejo-runner.initd
+e11eab27f88f1181112389befa7de3aa0bac7c26841861918707ede53335535425c805e6682e25704e9c8a6aecba3dc13e20900a99df1183762b012b62f26d5f  forgejo-runner.confd
+"
diff --git a/backports/forgejo-runner/forgejo-runner.confd b/backports/forgejo-runner/forgejo-runner.confd
new file mode 100644
index 0000000..874e695
--- /dev/null
+++ b/backports/forgejo-runner/forgejo-runner.confd
@@ -0,0 +1,17 @@
+# Configuration for /etc/init.d/forgejo-runner
+
+# Path to the config file (--config).
+#cfgfile="/etc/forgejo-runner/config.yaml"
+
+# Path to the working directory (--working-directory).
+#datadir="/var/lib/forgejo-runner"
+
+# Path to the log file where stdout/stderr will be redirected.
+# Leave empty/commented out to use syslog instead.
+#output_log="/var/log/forgejo-runner.log"
+
+# You may change this to root, e.g. to run jobs in LXC
+#command_user="forgejo-runner"
+
+# Comment out to run without process supervisor.
+supervisor=supervise-daemon
diff --git a/backports/forgejo-runner/forgejo-runner.initd b/backports/forgejo-runner/forgejo-runner.initd
new file mode 100644
index 0000000..c54acdd
--- /dev/null
+++ b/backports/forgejo-runner/forgejo-runner.initd
@@ -0,0 +1,38 @@
+#!/sbin/openrc-run
+
+description="Forgejo CI Runner"
+name="Forgejo Runner"
+
+: ${cfgfile:="/etc/forgejo-runner/config.yaml"}
+: ${datadir:="/var/lib/forgejo-runner"}
+: ${command_user:="forgejo-runner"}
+
+command="/usr/bin/forgejo-runner"
+command_args="daemon --config $cfgfile"
+command_background="yes"
+directory="$datadir"
+pidfile="/run/$RC_SVCNAME.pid"
+
+depend() {
+	need net
+	use dns logger
+}
+
+start_pre() {
+	checkpath -d -o "$command_user" /etc/forgejo-runner
+	checkpath -d -o "$command_user" "$datadir"
+
+	if ! [ -e "$cfgfile" ]; then
+		eerror "Config file $cfgfile doesn't exist."
+		eerror "You can generate it with: forgejo-runner generate-config,"
+		eerror "or use the auto-generated one in /etc/forgejo-runner/config.example.yaml"
+		return 1
+	fi
+
+	if [ "$error_log" ]; then
+		output_log="$error_log"
+	else
+		output_logger="logger -t '${RC_SVCNAME}' -p daemon.info"
+		error_logger="logger -t '${RC_SVCNAME}' -p daemon.error"
+	fi
+}
diff --git a/backports/forgejo-runner/forgejo-runner.logrotate b/backports/forgejo-runner/forgejo-runner.logrotate
new file mode 100644
index 0000000..1a0539e
--- /dev/null
+++ b/backports/forgejo-runner/forgejo-runner.logrotate
@@ -0,0 +1,5 @@
+/var/log/forgejo-runner.log {
+	copytruncate
+	missingok
+	notifempty
+}
diff --git a/backports/forgejo-runner/forgejo-runner.pre-install b/backports/forgejo-runner/forgejo-runner.pre-install
new file mode 100644
index 0000000..5ce27be
--- /dev/null
+++ b/backports/forgejo-runner/forgejo-runner.pre-install
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+addgroup -S forgejo-runner 2>/dev/null
+adduser -S -D -H -h /var/lib/forgejo-runner -s /sbin/nologin -G forgejo-runner -g forgejo-runner forgejo-runner 2>/dev/null
+
+cat >&2 <
diff --git a/ilot/authentik/APKBUILD b/ilot/authentik/APKBUILD
--- a/ilot/authentik/APKBUILD
+++ b/ilot/authentik/APKBUILD
@@ -1,7 +1,7 @@
 # Contributor: Antoine Martin (ayakael)
 # Maintainer: Antoine Martin (ayakael)
 pkgname=authentik
-pkgver=2024.10.5
+pkgver=2024.4.4
 pkgrel=0
 pkgdesc="An open-source Identity Provider focused on flexibility and versatility"
 url="https://github.com/goauthentik/authentik"
@@ -10,112 +10,151 @@ url="https://github.com/goauthentik/authentik"
 # ppc64le: not supported by Rollup build
 arch="aarch64 x86_64"
 license="MIT"
-# following depends aren't direct dependencies, but are needed:
-# py3-asn1crypto, py3-cbor2, py3-email-validator, py3-websockets
-# py3-openssl, py3-uvloop, py3-httptools
 depends="
-	bash
 	libcap-setcap
 	nginx
 	postgresql
 	procps
 	pwgen
+	py3-aiohttp
+	py3-aiosignal
+	py3-amqp
+	py3-anyio
+	py3-asgiref
+	py3-asn1
 	py3-asn1crypto
+	py3-async-timeout
+	py3-attrs
+	py3-autobahn
+	py3-automat
+	py3-bcrypt
+	py3-billiard
+	py3-cachetools
 	py3-cbor2
 	py3-celery
+	py3-certifi
 	py3-cffi
 	py3-channels
 	py3-channels_redis
+	py3-charset-normalizer
+	py3-click
+	py3-click-didyoumean
+	py3-click-plugins
+	py3-click-repl
+	py3-codespell
+	py3-colorama
+	py3-constantly
+	py3-cparser
 	py3-cryptography
 	py3-dacite
 	py3-daphne
+	py3-dateutil
 	py3-deepmerge
 	py3-defusedxml
-	py3-docker-py
+	py3-deprecated
+	py3-dnspython
 	py3-django
-	py3-django-countries
-	py3-django-cte
 	py3-django-filter
 	py3-django-guardian
 	py3-django-model-utils
+	py3-django-otp
 	py3-django-prometheus
-	py3-django-pglock
 	py3-django-redis
-	py3-django-rest-framework~3.14.0
+	py3-django-rest-framework~=3.14.0
 	py3-django-rest-framework-guardian
 	py3-django-storages
 	py3-django-tenants
+	py3-docker-py
+	py3-dotenv
 	py3-dumb-init
-	py3-duo-client
-	py3-drf-orjson-renderer
+	py3-duo_client
 	py3-drf-spectacular
 	py3-email-validator
+	py3-facebook-sdk
 	py3-fido2
 	py3-flower
+	py3-frozenlist
 	py3-geoip2
-	py3-google-api-python-client
+	py3-google-auth
 	py3-gunicorn
+	py3-h11
 	py3-httptools
+	py3-humanize
+	py3-hyperlink
+	py3-idna
+	py3-incremental
+	py3-inflection
+	py3-jsonschema
 	py3-jsonpatch
 	py3-jwt
-	py3-jwcrypto
-	py3-kadmin
+	py3-kombu
 	py3-kubernetes
 	py3-ldap3
 	py3-lxml
 	py3-maxminddb
 	py3-msgpack
-	py3-msgraph-sdk
+	py3-multidict
+	py3-oauthlib
 	py3-opencontainers
 	py3-openssl
+	py3-packaging
 	py3-paramiko
+	py3-parsing
+	py3-prometheus-client
+	py3-prompt_toolkit
 	py3-psycopg
 	py3-psycopg-c
-	py3-pydantic
 	py3-pydantic-scim
-	py3-pyrad
-	py3-python-gssapi
+	py3-pynacl
+	py3-pyrsistent
+	py3-python-jwt
+	py3-redis
+	py3-requests
 	py3-requests-oauthlib
+	py3-rsa
 	py3-scim2-filter-parser
 	py3-setproctitle
 	py3-sentry-sdk
 	py3-service_identity
+	py3-setuptools
 	py3-six
 	py3-sniffio
 	py3-sqlparse
 	py3-structlog
 	py3-swagger-spec-validator
+	py3-tornado
 	py3-twilio
+	py3-twisted
+	py3-txaio
 	py3-tenant-schemas-celery
+	py3-typing-extensions
+	py3-tz
 	py3-ua-parser
-	py3-unidecode
+	py3-uritemplate
 	py3-urllib3-secure-extra
 	py3-uvloop
+	py3-vine
 	py3-watchdog
+	py3-watchfiles
+	py3-wcwidth
 	py3-webauthn
+	py3-websocket-client
 	py3-websockets
+	py3-wrapt
 	py3-wsproto
 	py3-xmlsec
 	py3-yaml
+	py3-yarl
+	py3-zope-interface
 	py3-zxcvbn
-	valkey
+	redis
 	uvicorn
 "
-makedepends="go npm py3-packaging"
-checkdepends="
-	py3-pip
-	py3-coverage
-	py3-codespell
-	py3-colorama
-	py3-pytest
-	py3-pytest-django
-	py3-pytest-randomly
-	py3-pytest-timeout
-	py3-freezegun
-	py3-boto3
-	py3-requests-mock
-	py3-k5test
-	"
+makedepends="go npm"
+# checkdepends scooped up by poetry due to number
+checkdepends="poetry py3-coverage"
+# tests disabled for now
+options="!check"
 install="$pkgname.post-install $pkgname.post-upgrade $pkgname.pre-install"
 source="
 	$pkgname-$pkgver.tar.gz::https://github.com/goauthentik/authentik/archive/refs/tags/version/$pkgver.tar.gz
@@ -126,10 +165,9 @@ source="
 	authentik-manage.sh
 	fix-ak-bash.patch
 	root-settings-csrf_trusted_origins.patch
-	go-downgrade-1.22.patch
 	"
 builddir="$srcdir/"authentik-version-$pkgver
-subpackages="$pkgname-openrc $pkgname-doc $pkgname-pyc"
+subpackages="$pkgname-openrc $pkgname-doc"
 pkgusers="authentik"
 pkggroups="authentik"
@@ -159,131 +197,57 @@ build() {
 	npm run build
 }
 
-# test failure neutralized due to:
-# relation authentik_core_user_pb_groups_id_seq does not exist
-
-check() {
-	msg "Setting up test environments"
-	export POSTGRES_DB=authentik
-	export POSTGRES_USER=authentik
-	export POSTGRES_PASSWORD="EK-5jnKfjrGRm<77"
-	export AUTHENTIK_POSTGRESQL__TEST__NAME=authentik
-
-	rm -Rf "$srcdir"/tmp
-	initdb -D "$srcdir"/tmp
-	postgres -D "$srcdir"/tmp --unix-socket-directories="$srcdir" > "$srcdir"/tmp/psql.log 2>&1 &
-	valkey-server > "$srcdir"/tmp/valkey.log 2>&1 &
-	trap "pkill valkey-server; pkill postgres" EXIT
-	sleep 5
-	psql -h "$srcdir" -d postgres -c "CREATE ROLE $POSTGRES_USER PASSWORD '$POSTGRES_PASSWORD' INHERIT LOGIN;"
-	psql -h "$srcdir" -d postgres -c "CREATE DATABASE $POSTGRES_DB OWNER $POSTGRES_USER ENCODING 'UTF-8';"
-	psql -h "$srcdir" -d postgres -c "CREATE DATABASE test_$POSTGRES_DB OWNER $POSTGRES_USER ENCODING 'UTF-8';"
-
-	# .github/actions/setup/action.yml: Generate config + csrf
-	python3 -c "
-from authentik.lib.generators import generate_id
-from yaml import safe_dump
-
-with open(\"local.env.yml\", \"w\") as _config:
-    safe_dump(
-        {
-            \"log_level\": \"debug\",
-            \"secret_key\": generate_id(),
-            \"csrf\": { \"trusted_origins\": ['https://*']},
-        },
-        _config,
-        default_flow_style=False,
-    )
-"
-	python -m lifecycle.migrate
-
-	# no selenium package
-	pip install selenium drf_jsonschema_serializer pdoc --break-system-packages
-
-	msg "Starting tests"
-	make test || true
-
-	# TODO: Fix go-tests
-	# make go-test
-
-	pkill valkey-server
-	pkill postgres
-}
-
 package() {
 	msg "Packaging $pkgname"
-	local prefix="/usr/share/webapps"
-	local destdir="$pkgdir"$prefix/authentik
+	mkdir -p "$pkgdir"/usr/share/webapps/authentik/web
+	mkdir -p "$pkgdir"/usr/share/webapps/authentik/website
+	mkdir -p "$pkgdir"/var/lib/authentik
+	mkdir -p "$pkgdir"/usr/share/doc
+	mkdir -p "$pkgdir"/usr/bin
+	cp -dr "$builddir"/authentik "$pkgdir"/usr/share/webapps/authentik
+	cp -dr "$builddir"/web/dist "$pkgdir"/usr/share/webapps/authentik/web/dist
+	cp -dr "$builddir"/web/authentik "$pkgdir"/usr/share/webapps/authentik/web/authentik
+	cp -dr "$builddir"/website/build "$pkgdir"/usr/share/doc/authentik
+	cp -dr "$builddir"/tests "$pkgdir"/usr/share/webapps/authentik/tests
+	cp -dr "$builddir"/lifecycle "$pkgdir"/usr/share/webapps/authentik/lifecycle
+	cp -dr "$builddir"/locale "$pkgdir"/usr/share/webapps/authentik/locale
+	cp -dr "$builddir"/blueprints "$pkgdir"/var/lib/authentik/blueprints
+	install -Dm755 "$builddir"/manage.py "$pkgdir"/usr/share/webapps/authentik/manage.py
+	install -Dm755 "$builddir"/server "$pkgdir"/usr/share/webapps/authentik/server
+	ln -s "/etc/authentik/config.yml" "$pkgdir"/usr/share/webapps/authentik/local.env.yml
 
-	# authentik install
-	install -d -m755 \
-		"$destdir" \
-		"$destdir"/web \
-		"$pkgdir"/usr/bin \
-		"$pkgdir"/usr/share/doc \
-		"$pkgdir"/var/lib/authentik
+	install -Dm755 "$builddir"/proxy "$pkgdir"/usr/bin/authentik-proxy
+	install -Dm755 "$builddir"/ldap "$pkgdir"/usr/bin/authentik-ldap
+	install -Dm755 "$builddir"/radius "$pkgdir"/usr/bin/authentik-radius
 
-	cp -rl authentik lifecycle locale tests \
-		"$destdir"/
-
-	cp -rl blueprints \
-		"$pkgdir"/var/lib/authentik/
-
-	cp -rl web/dist web/authentik \
-		"$destdir"/web/
-
-	install -m755 -t "$destdir" \
-		"$builddir"/server \
-		"$builddir"/ldap \
-		"$builddir"/radius \
-		"$builddir"/proxy \
-		"$builddir"/manage.py
-
-	cp -rl website/build/ "$pkgdir"/usr/share/doc/authentik/
-
-	# symbolic bin links to usr/bin
-	for i in server proxy ldap radius; do
-		ln -s $prefix/authentik/$i "$pkgdir"/usr/bin/authentik-$i
-	done
-
-	# openrc install
-	for i in $pkgname $pkgname-worker $pkgname-ldap; do
-		install -Dm755 "$srcdir"/$i.openrc "$pkgdir"/etc/init.d/$i
-	done
-
-	# config file setup
+	install -Dm755 "$srcdir"/$pkgname.openrc \
+		"$pkgdir"/etc/init.d/$pkgname
+	install -Dm755 "$srcdir"/$pkgname-worker.openrc \
+		"$pkgdir"/etc/init.d/$pkgname-worker
+	install -Dm755 "$srcdir"/$pkgname-ldap.openrc \
+		"$pkgdir"/etc/init.d/$pkgname-ldap
+	install -Dm640 "$srcdir"/$pkgname-ldap.conf \
+		"$pkgdir"/etc/conf.d/$pkgname-ldap
 	install -Dm640 "$builddir"/authentik/lib/default.yml \
 		"$pkgdir"/etc/authentik/config.yml
-	ln -s "/etc/authentik/config.yml" "$pkgdir"/usr/share/webapps/authentik/local.env.yml
 	chown root:www-data "$pkgdir"/etc/authentik/config.yml
+	mv "$pkgdir"/usr/share/webapps/authentik/web/dist/custom.css "$pkgdir"/etc/authentik/custom.css
+	ln -s "/etc/authentik/custom.css" "$pkgdir"/usr/share/webapps/authentik/web/dist/custom.css
+	chown root:www-data "$pkgdir"/etc/authentik/custom.css
+
 	sed -i 's|cert_discovery_dir.*|cert_discovery_dir: /var/lib/authentik/certs|' "$pkgdir"/etc/authentik/config.yml
 	sed -i 's|blueprints_dir.*|blueprints_dir: /var/lib/authentik/blueprints|' "$pkgdir"/etc/authentik/config.yml
 	sed -i 's|template_dir.*|template_dir: /var/lib/authentik/templates|' "$pkgdir"/etc/authentik/config.yml
 	printf "\ncsrf:\n  trusted_origins: ['auth.example.com']" >> "$pkgdir"/etc/authentik/config.yml
 	printf "\nsecret_key: '@@SECRET_KEY@@'" >> "$pkgdir"/etc/authentik/config.yml
 
-	# custom css location change
-	mv "$pkgdir"/usr/share/webapps/authentik/web/dist/custom.css "$pkgdir"/etc/authentik/custom.css
-	ln -s "/etc/authentik/custom.css" "$pkgdir"/usr/share/webapps/authentik/web/dist/custom.css
-	chown root:www-data "$pkgdir"/etc/authentik/custom.css
-
 	# Install wrapper script to /usr/bin.
install -m755 -D "$srcdir"/authentik-manage.sh "$pkgdir"/usr/bin/authentik-manage } -pyc() { - default_pyc - - cd "$pkgdir" - # shellcheck disable=SC3003 - local IFS=$'\n' - # shellcheck disable=SC2046 - amove $(find usr/share/webapps/authentik -type d -name __pycache__) -} - sha512sums=" -f6e04ac1d1ac3a46e6d0f89548c0c2748f2214c551157e65f9071721dfdccac53c98b1664ecd1bc70650b4fceec47c5a5ab805da34e82ccc86d6a64087441702 authentik-2024.10.5.tar.gz +22c8ff16b93b9fcb84478b6476dd4f6413719037affc7756f20ba1dc3afff1fbaae2f1fc89d7b3a9c4372fcc856009d8a4ef5eb7854855e4528523fb456a2491 authentik-2024.4.4.tar.gz 4defb4fe3a4230f4aa517fbecd5e5b8bcef2a64e1b40615660ae9eec33597310a09df5e126f4d39ce7764bd1716c0a7040637699135c103cbc1879593c6c06f1 authentik.openrc 6cb03b9b69df39bb4539fe05c966536314d766b2e9307a92d87070ba5f5b7e7ab70f1b5ee1ab3c0c50c23454f9c5a4caec29e63fdf411bbb7a124ad687569b89 authentik-worker.openrc 351e6920d987861f8bf0d7ab2f942db716a8dbdad1f690ac662a6ef29ac0fd46cf817cf557de08f1c024703503d36bc8b46f0d9eb1ecaeb399dce4c3bb527d17 authentik-ldap.openrc @@ -291,5 +255,4 @@ f6e04ac1d1ac3a46e6d0f89548c0c2748f2214c551157e65f9071721dfdccac53c98b1664ecd1bc7 f1a3cb215b6210fa7d857a452a9f2bc4dc0520e49b9fa7027547cff093d740a7e2548f1bf1f8831f7d5ccb80c8e523ee0c8bafcc4dc42d2788725f2137d21bee authentik-manage.sh 3e47db684a3f353dcecdb7bab8836b9d5198766735d77f676a51d952141a0cf9903fcb92e6306c48d2522d7a1f3028b37247fdc1dc74d4d6e043da7eb4f36d49 fix-ak-bash.patch 5c60e54b6a7829d611af66f5cb8184a002b5ae927efbd024c054a7c176fcb9efcfbe5685279ffcf0390b0f0abb3bb03e02782c6867c2b38d1ad2d508aae83fa0 root-settings-csrf_trusted_origins.patch -badff70b19aad79cf16046bd46cb62db25c2a8b85b2673ce7c44c42eb60d42f6fcb1b9a7a7236c00f24803b25d3c66a4d64423f7ce14a59763b8415db292a5b9 go-downgrade-1.22.patch " diff --git a/ilot/authentik/go-downgrade-1.22.patch b/ilot/authentik/go-downgrade-1.22.patch deleted file mode 100644 index eeae018..0000000 --- a/ilot/authentik/go-downgrade-1.22.patch +++ /dev/null @@ -1,38 +0,0 @@ -diff --git a/go.mod.orig b/go.mod -index 65490a2..13a611e 100644 ---- a/go.mod.orig -+++ b/go.mod -@@ -1,8 +1,6 @@ - module goauthentik.io - --go 1.23 -- --toolchain go1.23.0 -+go 1.22.2 - - require ( - beryju.io/ldap v0.1.0 -@@ -16,7 +14,7 @@ require ( - github.com/gorilla/handlers v1.5.2 - github.com/gorilla/mux v1.8.1 - github.com/gorilla/securecookie v1.1.2 -- github.com/gorilla/sessions v1.4.0 -+ github.com/gorilla/sessions v1.3.0 - github.com/gorilla/websocket v1.5.3 - github.com/jellydator/ttlcache/v3 v3.2.1 - github.com/mitchellh/mapstructure v1.5.0 -diff --git a/go.sum.orig b/go.sum -index 94edf9c..856c2ee 100644 ---- a/go.sum.orig -+++ b/go.sum -@@ -175,8 +175,8 @@ github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+ - github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= - github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= - github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= --github.com/gorilla/sessions v1.4.0 h1:kpIYOp/oi6MG/p5PgxApU8srsSw9tuFbt46Lt7auzqQ= --github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik= -+github.com/gorilla/sessions v1.3.0 h1:XYlkq7KcpOB2ZhHBPv5WpjMIxrQosiZanfoy1HLZFzg= -+github.com/gorilla/sessions v1.3.0/go.mod h1:ePLdVu+jbEgHH+KWw8I1z2wqd0BAdAQh/8LRvBeoNcQ= - github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= - github.com/gorilla/websocket v1.5.3 
h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= - github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= diff --git a/ilot/certbot-dns-gandi/APKBUILD b/ilot/certbot-dns-gandi/APKBUILD deleted file mode 100644 index d6845b5..0000000 --- a/ilot/certbot-dns-gandi/APKBUILD +++ /dev/null @@ -1,40 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=certbot-dns-gandi -pkgdesc="gandi DNS authenticator plugin for certbot" -pkgver=1.5.0 -pkgrel=0 -arch="noarch" -url="https://github.com/obynio/certbot-plugin-gandi" -license="MIT" -depends="certbot" -makedepends=" - py3-setuptools - py3-gpep517 - py3-wheel -" -subpackages="$pkgname-pyc" -options="!check" # No test suite -source=" - $pkgname-$pkgver.tar.gz::https://github.com/obynio/certbot-plugin-gandi/archive/refs/tags/$pkgver.tar.gz - gandi.ini -" -builddir="$srcdir"/certbot-plugin-gandi-$pkgver - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl - mkdir -p "$pkgdir"/etc/letsencrypt/gandi - install -m 0600 "$srcdir"/gandi.ini "$pkgdir"/etc/letsencrypt/gandi/example.ini -} - -sha512sums=" -0688baec8e6de429eed12f9d85b28f47384a5bd8cd01615d94e55e38fdaf35c01707ee1ef1ec3e9196c1de06df7087798f3f5a19f07bd446f1d3fd2442b2d702 certbot-dns-gandi-1.5.0.tar.gz -7bdfd769c8a7256a8c2d171f1c8fa4c16bea7c1abcd3442603face90834efb5f9c0d9aec54f57fc83421588c0349acbc3554d4987cb7498a7e833481b01dd712 gandi.ini -" diff --git a/ilot/certbot-dns-gandi/gandi.ini b/ilot/certbot-dns-gandi/gandi.ini deleted file mode 100644 index f1d20c3..0000000 --- a/ilot/certbot-dns-gandi/gandi.ini +++ /dev/null @@ -1,6 +0,0 @@ -# Uncomment following line as needed: -# Live DNS v5 api key -#dns_gandi_api_key=APIKEY - -# Optional organization id, remove it if not used -#dns_gandi_sharing_id=SHARINGID diff --git a/ilot/codeberg-pages-server/APKBUILD b/ilot/codeberg-pages-server/APKBUILD index 0fd0f1f..4478fdd 100644 --- a/ilot/codeberg-pages-server/APKBUILD +++ b/ilot/codeberg-pages-server/APKBUILD @@ -1,7 +1,7 @@ # Contributor: Antoine Martin (ayakael) # Maintainer: Antoine Martin (ayakael) pkgname=codeberg-pages-server -pkgver=6.2 +pkgver=5.1 pkgrel=0 pkgdesc="The Codeberg Pages Server – with custom domain support, per-repo pages using the "pages" branch, caching and more." 
url="https://codeberg.org/Codeberg/pages-server" @@ -14,6 +14,7 @@ options="!check" source=" $pkgname-$pkgver.tar.gz::https://codeberg.org/Codeberg/pages-server/archive/v$pkgver.tar.gz codeberg-pages-server.openrc + upgrade-go-sqlite3-to-1.14.19.patch " builddir="$srcdir/"pages-server subpackages="$pkgname-openrc" @@ -37,6 +38,7 @@ package() { } sha512sums=" -d48e10262e94eb2e36696646e3431da066d2f820e037ab713f4446dd72c2e3895c9bf153fcbf702e05b21ec5750aa15ed9b71e2fb383f9357aeeef61073a721a codeberg-pages-server-6.2.tar.gz +55a1dd5ed0f1cb2aaad1066eca8bfbd1d537169ed3712c748163ebff64edc45d05ac1f6f062433e232e2638a790232438282f96dd7410eb4cbaff7208f5f2427 codeberg-pages-server-5.1.tar.gz 4defb4fe3a4230f4aa517fbecd5e5b8bcef2a64e1b40615660ae9eec33597310a09df5e126f4d39ce7764bd1716c0a7040637699135c103cbc1879593c6c06f1 codeberg-pages-server.openrc +895f1c8d22fcf1d5491a6fe0ce5d93201f83b6dd5fc81b24016b609988fb6c66fdde75bb3830f385a5c83d96366ca3a5f4f9524f52058b6c5dfd8b80d14bac5b upgrade-go-sqlite3-to-1.14.19.patch " diff --git a/ilot/codeberg-pages-server/upgrade-go-sqlite3-to-1.14.19.patch b/ilot/codeberg-pages-server/upgrade-go-sqlite3-to-1.14.19.patch new file mode 100644 index 0000000..fabb214 --- /dev/null +++ b/ilot/codeberg-pages-server/upgrade-go-sqlite3-to-1.14.19.patch @@ -0,0 +1,26 @@ +diff --git a/go.mod.orig b/go.mod +index eba292e..00310e5 100644 +--- a/go.mod.orig ++++ b/go.mod +@@ -11,7 +11,7 @@ require ( + github.com/go-sql-driver/mysql v1.6.0 + github.com/joho/godotenv v1.4.0 + github.com/lib/pq v1.10.7 +- github.com/mattn/go-sqlite3 v1.14.16 ++ github.com/mattn/go-sqlite3 v1.14.19 + github.com/microcosm-cc/bluemonday v1.0.26 + github.com/reugn/equalizer v0.0.0-20210216135016-a959c509d7ad + github.com/rs/zerolog v1.27.0 +diff --git a/go.sum.orig b/go.sum +index 7ea8b78..19145ea 100644 +--- a/go.sum.orig ++++ b/go.sum +@@ -479,6 +479,8 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m + github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= + github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= + github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= ++github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI= ++github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= + github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= + github.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0= + github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= diff --git a/ilot/forgejo-aneksajo/APKBUILD b/ilot/forgejo-aneksajo/APKBUILD index efb6091..ca50a59 100644 --- a/ilot/forgejo-aneksajo/APKBUILD +++ b/ilot/forgejo-aneksajo/APKBUILD @@ -4,14 +4,14 @@ # Contributor: Patrycja Rosa # Maintainer: Antoine Martin (ayakael) pkgname=forgejo-aneksajo -pkgver=9.0.3_git0 -_gittag=v${pkgver/_git/-git-annex} +pkgver=8.0.1 +_gittag=v$pkgver-git-annex0 pkgrel=0 pkgdesc="Self-hosted Git service written in Go with git-annex support" url="https://forgejo.org" # riscv64: builds fail https://codeberg.org/forgejo/forgejo/issues/3025 arch="all !riscv64" -license="GPL-3.0-or-later" +license="MIT" depends="git git-lfs gnupg" makedepends="go nodejs npm" checkdepends="bash openssh openssh-keygen sqlite tzdata" @@ -55,7 +55,7 @@ build() { # XXX: LARGEFILE64 export CGO_CFLAGS="$CFLAGS -O2 
-D_LARGEFILE64_SOURCE" export TAGS="bindata sqlite sqlite_unlock_notify" - export GITEA_VERSION="${pkgver/_git/-git-annex}" + export GITEA_VERSION="$pkgver" export EXTRA_GOFLAGS="$GOFLAGS" export CGO_LDFLAGS="$LDFLAGS" unset LDFLAGS @@ -106,7 +106,7 @@ package() { } sha512sums=" -2c2493c0011e83994c12c11859c2153d855a2265d234a671d2ce855e4f45b8e1b7d7f257e9c7ffa6284b844e0068a6184ef39b88800a1d79f399ce11c7cb23b7 forgejo-aneksajo-v9.0.3-git-annex0.tar.gz +d8e273d369c934eec7ff84795cd0d896cda53bc1a2d17f610dd8476ff92dc50c4a24c4598366ef8aac3be52ddef6630489043183085334376c30bc5d4d5f15c2 forgejo-aneksajo-v8.0.1-git-annex0.tar.gz eb93a9f6c8f204de5c813f58727015f53f9feaab546589e016c60743131559f04fc1518f487b6d2a0e7fa8fab6d4a67cd0cd9713a7ccd9dec767a8c1ddebe129 forgejo-aneksajo.initd b537b41b6b3a945274a6028800f39787b48c318425a37cf5d40ace0d1b305444fd07f17b4acafcd31a629bedd7d008b0bb3e30f82ffeb3d7e7e947bdbe0ff4f3 forgejo-aneksajo.ini " diff --git a/ilot/freescout/APKBUILD b/ilot/freescout/APKBUILD index 0093ab0..d083ae2 100644 --- a/ilot/freescout/APKBUILD +++ b/ilot/freescout/APKBUILD @@ -1,15 +1,15 @@ # Maintainer: Antoine Martin (ayakael) # Contributor: Antoine Martin (ayakael) pkgname=freescout -pkgver=1.8.160 -pkgrel=0 +pkgver=1.8.139 +pkgrel=1 pkgdesc="Free self-hosted help desk & shared mailbox" arch="noarch" url="freescout.net" license="AGPL-3.0" _php=php83 _php_mods="-fpm -mbstring -xml -imap -zip -gd -curl -intl -tokenizer -pdo_pgsql -openssl -session -iconv -fileinfo -dom -pcntl" -depends="$_php ${_php_mods//-/$_php-} nginx postgresql pwgen bash" +depends="$_php ${_php_mods//-/$_php-} nginx postgresql pwgen" makedepends="composer pcre" install="$pkgname.post-install $pkgname.post-upgrade $pkgname.pre-install" source=" @@ -17,7 +17,6 @@ source=" freescout.nginx freescout-manage.sh rename-client-to-membre-fr-en.patch - fix-laravel-log-viewer.patch " pkgusers="freescout" pkggroups="freescout" @@ -76,9 +75,8 @@ package() { install -m755 -D "$srcdir"/freescout-manage.sh "$pkgdir"/usr/bin/freescout-manage } sha512sums=" -8441385a36d9ee5b542936f34e7700e86e1595d9a16b07afeac42bf48409ba0ecd1c542bc82b48afb0bb9201c7219bd146fe9455491ba40116dc66953b994488 freescout-1.8.160.tar.gz +11d81fa670bd67a7db9f5bff3a067a1d1cf3c812a34c805a3fc83edc978ded3accc8334581eca1e73cf0ad95f8e289278add57de096528728e2989135b3057a3 freescout-1.8.139.tar.gz e4af6c85dc12f694bef2a02e4664e31ed50b2c109914d7ffad5001c2bbd764ef25b17ecaa59ff55ef41bccf17169bf910d1a08888364bdedd0ecc54d310e661f freescout.nginx 7ce9b3ee3a979db44f5e6d7daa69431e04a5281f364ae7be23e5a0a0547f96abc858d2a8010346be2fb99bd2355fb529e7030ed20d54f310249e61ed5db4d0ba freescout-manage.sh -0cba00b7d945ce84f72a2812d40028a073a5278856f610e46dbfe0ac78deff6bf5eba7643635fa4bc64d070c4d49eb47d24ea0a05ba1e6ea76690bfd77906366 rename-client-to-membre-fr-en.patch -2c651db6adac6d53597ba36965d0c65e005293f9b030e6be167853e4089384920524737aa947c5066877ee8caefb46741ccba797f653e7c2678556063540d261 fix-laravel-log-viewer.patch +3416da98d71aea5a7093913ea34e783e21ff05dca90bdc5ff3d00c548db5889f6d0ec98441cd65ab9f590be5cd59fdd0d7f1c98b5deef7bb3adbc8db435ec9bf rename-client-to-membre-fr-en.patch " diff --git a/ilot/freescout/fix-laravel-log-viewer.patch b/ilot/freescout/fix-laravel-log-viewer.patch deleted file mode 100644 index 8f29a36..0000000 --- a/ilot/freescout/fix-laravel-log-viewer.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/vendor/composer/installed.json.orig b/vendor/composer/installed.json -index 0b826f5..9d14ec8 100644 ---- a/vendor/composer/installed.json.orig -+++ 
-@@ -4494,7 +4494,7 @@
-             "installation-source": "dist",
-             "autoload": {
-                 "classmap": [
--                    "src/controllers"
-+                    "src/"
-                 ],
-                 "psr-0": {
-                     "Rap2hpoutre\\LaravelLogViewer\\": "src/"
diff --git a/ilot/freescout/rename-client-to-membre-fr-en.patch b/ilot/freescout/rename-client-to-membre-fr-en.patch
index 90e75b8..097e503 100644
--- a/ilot/freescout/rename-client-to-membre-fr-en.patch
+++ b/ilot/freescout/rename-client-to-membre-fr-en.patch
@@ -38,7 +38,7 @@ index 00000000..82d26052
 +}
 \ No newline at end of file
 diff --git a/resources/lang/fr.json.orig b/resources/lang/fr.json
-index 6264973..8a7037e 100644
+index ff8d9d4..98d158f 100644
 --- a/resources/lang/fr.json.orig
 +++ b/resources/lang/fr.json
 @@ -26,8 +26,8 @@
@@ -201,8 +201,8 @@ index 6264973..8a7037e 100644
 -    "This number is not visible to customers. It is only used to track conversations within :app_name": "Ce numéro n'est pas visible pour les clients. Il est uniquement utilisé pour suivre les conversations dans :app_name",
 +    "This number is not visible to customers. It is only used to track conversations within :app_name": "Ce numéro n'est pas visible pour les membres. Il est uniquement utilisé pour suivre les conversations dans :app_name",
      "This password is incorrect.": "Ce mot de passe est incorrect.",
---    "This reply will go to the customer. :%switch_start%Switch to a note:%switch_end% if you are replying to :user_name.": "Cette réponse ira au client. :%switch_start%Passez à une note:%switch_end% si vous répondez à :user_name.",
--+    "This reply will go to the customer. :%switch_start%Switch to a note:%switch_end% if you are replying to :user_name.": "Cette réponse ira au membre. :%switch_start%Passez à une note:%switch_end% si vous répondez à :user_name.",
+-    "This reply will go to the customer. :%switch_start%Switch to a note:switch_end if you are replying to :user_name.": "Cette réponse ira au client. :%switch_start%Passez à une note:switch_end si vous répondez à :user_name.",
++    "This reply will go to the customer. :%switch_start%Switch to a note:switch_end if you are replying to :user_name.": "Cette réponse ira au membre. :%switch_start%Passez à une note:switch_end si vous répondez à :user_name.",
      "This setting gives you control over what page loads after you perform an action (send a reply, add a note, change conversation status or assignee).": "Ce paramètre vous permet de contrôler la page qui se charge après avoir effectué une action (envoyer une réponse, ajouter une note, etc.).",
 -    "This text will be added to the beginning of each email reply sent to a customer.": "Ce texte sera ajouté au début de chaque réponse par e-mail envoyée à un client.",
 +    "This text will be added to the beginning of each email reply sent to a customer.": "Ce texte sera ajouté au début de chaque réponse par e-mail envoyée à un membre.",
diff --git a/ilot/listmonk/APKBUILD b/ilot/listmonk/APKBUILD
index 1bf9721..0ad6acd 100644
--- a/ilot/listmonk/APKBUILD
+++ b/ilot/listmonk/APKBUILD
@@ -1,8 +1,8 @@
 # Contributor: Antoine Martin (ayakael)
 # Maintainer: Antoine Martin (ayakael)
 pkgname=listmonk
-pkgver=4.1.0
-pkgrel=0
+pkgver=3.0.0
+pkgrel=1
 pkgdesc='Self-hosted newsletter and mailing list manager with a modern dashboard'
 arch="all"
 url=https://listmonk.app
@@ -10,7 +10,6 @@ license="AGPL3"
 depends="
 	libcap-setcap
 	postgresql
-	postgresql-contrib
 	procps
 "
 makedepends="go npm nodejs yarn"
@@ -53,7 +52,6 @@ package() {
 	install -Dm644 -t "$pkgdir"/usr/share/webapps/listmonk/ \
 		schema.sql \
 		queries.sql \
-		permissions.json \
 		config.toml.sample
 	install -Dm755 listmonk "$pkgdir"/usr/share/webapps/listmonk/
 	install -Dm644 -t "$pkgdir"/usr/share/webapps/listmonk/frontend/dist/ \
@@ -67,7 +65,7 @@ package() {
 	ln -s /etc/listmonk/config.toml "$pkgdir"/usr/share/webapps/listmonk/config.toml
 }
 sha512sums="
-936b33d6de1d69ee4e7f768810116ac997c516754aace0371089bc8106bebee944197864afc11b7bc5725afa9a4f195d6629957bfcdd37c847e3780aa34558ec  listmonk-4.1.0.tar.gz
+afd0ea1d4d2b2753c3043526590cf09c45a541a2d818f5d1581644ffd10818326fd553a3b04bca59494860a7bb6e96364b08afd33d337a9fc5c71bedd1a5ee6c  listmonk-3.0.0.tar.gz
 939450af4b23708e3d23a5a88fad4c24b957090bdd21351a6dd520959e52e45e5fcac117a3eafa280d9506616dae39ad3943589571f008cac5abe1ffd8062424  listmonk.sh
 8e9c0b1f335c295fb741418246eb17c7566e5e4200a284c6483433e8ddbf5250aa692435211cf062ad1dfcdce3fae9148def28f03f2492d33fe5e66cbeebd4bd  listmonk.openrc
 "
diff --git a/ilot/listmonk/listmonk.post-install b/ilot/listmonk/listmonk.post-install
index 3e25f91..fe3cc8d 100644
--- a/ilot/listmonk/listmonk.post-install
+++ b/ilot/listmonk/listmonk.post-install
@@ -10,12 +10,6 @@ if [ "${0##*.}" = 'post-upgrade' ]; then
 	*
 	* listmonk --upgrade
 	*
-	* If upgrading from v3.0.0, please first set the following env variables:
-	*
-	* export LISTMONK_ADMIN_USER=your-admin-user
-	* export LISTMONK_ADMIN_PASSWORD=your-admin-password
-	* listmonk --upgrade
-	*
 	EOF
 else
 	cat >&2 <<-EOF
diff --git a/ilot/loomio/APKBUILD b/ilot/loomio/APKBUILD
index 1381afd..d0f99c6 100644
--- a/ilot/loomio/APKBUILD
+++ b/ilot/loomio/APKBUILD
@@ -7,7 +7,7 @@ _gittag=v$pkgver
 pkgrel=1
 pkgdesc="A collaborative decision making tool"
 url="https://github.com/loomio/loomio"
-# failing build
+# build failure
 #arch="x86_64"
 license="MIT"
 depends="
diff --git a/ilot/peertube/APKBUILD b/ilot/peertube/APKBUILD
index 809936b..1c240ed 100644
--- a/ilot/peertube/APKBUILD
+++ b/ilot/peertube/APKBUILD
@@ -4,8 +4,8 @@ pkgname=peertube
 pkgver=6.0.2
 pkgrel=1
 pkgdesc="ActivityPub-federated video streaming platform using P2P directly in your web browser"
-# failing build
-# arch="x86_64"
+# build failure
+#arch="x86_64"
 url="https://joinpeertube.org/"
 license="AGPL"
depends=" diff --git a/ilot/py3-azure-core/APKBUILD b/ilot/py3-azure-core/APKBUILD deleted file mode 100644 index 6e76144..0000000 --- a/ilot/py3-azure-core/APKBUILD +++ /dev/null @@ -1,39 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-azure-core -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=azure-core -pkgver=1.32.0 -pkgrel=0 -pkgdesc="Microsoft Azure Core Library for Python" -url="https://pypi.python.org/project/microsoft-kiota-authentication-azure" -arch="noarch" -license="MIT" -depends="py3-aiohttp py3-requests" -checkdepends="py3-pytest-asyncio py3-trio" -makedepends="py3-setuptools py3-gpep517 py3-wheel py3-flit" -options="!check" #todo -source="$pkgname-$pkgver.tar.gz::https://github.com/Azure/azure-sdk-for-python/archive/refs/tags/azure-core_$pkgver.tar.gz" -builddir="$srcdir"/azure-sdk-for-python-azure-core_$pkgver/sdk/core/azure-core -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -d258a2ca3bc2c9514dec91bf2dbb19c0ee4c0c0bec73a4301b47fb43be768be836f32621b70a8cdb0e39f1491a522191a82a00f318ee7c901e8861a62439e934 py3-azure-core-1.32.0.tar.gz -" diff --git a/ilot/py3-azure-identity/APKBUILD b/ilot/py3-azure-identity/APKBUILD deleted file mode 100644 index 9341e11..0000000 --- a/ilot/py3-azure-identity/APKBUILD +++ /dev/null @@ -1,44 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-azure-identity -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=azure-identity -pkgver=1.19.0 -pkgrel=0 -pkgdesc="Microsoft Azure Identity Library for Python" -url="https://pypi.org/project/azure-identity/" -arch="noarch" -license="MIT" -depends=" - py3-azure-core - py3-cryptography - py3-msal-extensions - py3-typing-extensions -" -checkdepends="py3-pytest" -makedepends="py3-setuptools py3-gpep517 py3-wheel py3-flit" -options="!check" #todo -source="$pkgname-$pkgver.tar.gz::https://github.com/Azure/azure-sdk-for-python/archive/refs/tags/azure-identity_$pkgver.tar.gz" -builddir="$srcdir"/azure-sdk-for-python-azure-identity_$pkgver/sdk/identity/azure-identity -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -090aed812a7a72c649ded2574dc0a05dd7d9db41675e3d86921ab0555f8af7c83999cb879a2f2e0984880874b3b6dfead6b8de0563d8a99d81775715640a9e01 py3-azure-identity-1.19.0.tar.gz -" diff --git a/ilot/py3-django-countries/APKBUILD b/ilot/py3-django-countries/APKBUILD deleted file mode 100644 index 67e36b5..0000000 --- a/ilot/py3-django-countries/APKBUILD +++ /dev/null @@ -1,40 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-django-countries -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=django-countries -pkgver=7.6.1 -pkgrel=0 -pkgdesc="Provides a country field for Django models." 
-url="https://pypi.python.org/project/django-countries" -arch="noarch" -license="MIT" -depends="py3-django py3-asgiref py3-typing-extensions" -# missing py3-graphene -checkdepends="py3-pytest-django py3-pytest-cov py3-django-rest-framework" -makedepends="py3-setuptools py3-gpep517 py3-wheel" -source="$pkgname-$pkgver.tar.gz::https://github.com/SmileyChris/django-countries/archive/refs/tags/v$pkgver.tar.gz" -options="!check" # TODO -builddir="$srcdir/$_pkgreal-$pkgver" -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -53c7db02244aad196c141d1d04db5087c802d69d12de25e86fe0b2abdfb4ce9ed6ec84b6344c423dc6e7d2e57c2bb14a5324739c7cead54ec7d261e7e3fe6112 py3-django-countries-7.6.1.tar.gz -" diff --git a/ilot/py3-django-rest-framework/APKBUILD b/ilot/py3-django-rest-framework/APKBUILD index 82a1497..69f9f63 100644 --- a/ilot/py3-django-rest-framework/APKBUILD +++ b/ilot/py3-django-rest-framework/APKBUILD @@ -4,7 +4,7 @@ pkgname=py3-django-rest-framework _pkgname=django-rest-framework pkgver=3.14.0 -pkgrel=1 +pkgrel=2 pkgdesc="Web APIs for Django" url="https://github.com/encode/django-rest-framework" arch="noarch" diff --git a/ilot/py3-django-tenant-schemas/APKBUILD b/ilot/py3-django-tenant-schemas/APKBUILD deleted file mode 100644 index b309c54..0000000 --- a/ilot/py3-django-tenant-schemas/APKBUILD +++ /dev/null @@ -1,48 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-django-tenant-schemas -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=django-tenant-schemas -pkgver=1.12.0 -pkgrel=0 -pkgdesc="Tenant support for Django using PostgreSQL schemas." 
-url="https://pypi.python.org/project/django-tenant-schemas" -arch="noarch" -license="MIT" -depends=" - py3-django - py3-ordered-set - py3-six - py3-psycopg2 - " -checkdepends="py3-pytest" -makedepends="py3-setuptools py3-setuptools_scm py3-gpep517 py3-wheel" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/bernardopires/django-tenant-schemas/archive/refs/tags/v$pkgver.tar.gz - " -options="!check" # requires pg -builddir="$srcdir/$_pkgreal-$pkgver" -subpackages="$pkgname-pyc" - -build() { - export SETUPTOOLS_SCM_PRETEND_VERSION=$pkgver - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - cd tenant_schemas - DJANGO_SETTINGS_MODULE=tests.settings ../.testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -758f68dc834d4c0074097b166d742a7d63c86b6426ad67d3ce2f56983d417666bf05ae9c46b3ee89a04dee2d888892463651355d26eda7c265ebee8971992319 py3-django-tenant-schemas-1.12.0.tar.gz -" diff --git a/ilot/py3-django-tenants/997_update-from-pgclone-schema.patch b/ilot/py3-django-tenants/997_update-from-pgclone-schema.patch new file mode 100644 index 0000000..b2999d2 --- /dev/null +++ b/ilot/py3-django-tenants/997_update-from-pgclone-schema.patch @@ -0,0 +1,3823 @@ +From 07e14a3442d080bd4e873dc74e441296b8291ae2 Mon Sep 17 00:00:00 2001 +From: Marc 'risson' Schmitt +Date: Thu, 16 Nov 2023 13:26:16 +0100 +Subject: [PATCH 1/3] clone: update from pg-clone-schema + +Signed-off-by: Marc 'risson' Schmitt +--- + django_tenants/clone.py | 3407 ++++++++++++++++++++++++++++++++++----- + 1 file changed, 2977 insertions(+), 430 deletions(-) + +diff --git a/django_tenants/clone.py b/django_tenants/clone.py +index 426e81b8..3afce109 100644 +--- a/django_tenants/clone.py ++++ b/django_tenants/clone.py +@@ -6,24 +6,592 @@ + from django_tenants.utils import schema_exists + + CLONE_SCHEMA_FUNCTION = r""" +--- https://github.com/denishpatel/pg-clone-schema/ rev 0d3b522 ++-- https://github.com/denishpatel/pg-clone-schema/ rev 073922e + -- https://github.com/tomturner/django-tenants/issues/322 + +--- Function: clone_schema(text, text, boolean, boolean) ++do $$ ++<> ++DECLARE ++ cnt int; ++BEGIN ++ DROP TYPE IF EXISTS public.cloneparms CASCADE; ++ CREATE TYPE public.cloneparms AS ENUM ('DATA', 'NODATA','DDLONLY','NOOWNER','NOACL','VERBOSE','DEBUG','FILECOPY'); ++ -- END IF; ++end first_block $$; ++ ++ ++-- select * from public.get_insert_stmt_ddl('clone1','sample','address'); ++CREATE OR REPLACE FUNCTION public.get_insert_stmt_ddl( ++ source_schema text, ++ target_schema text, ++ atable text, ++ bTextCast boolean default False ++) ++RETURNS text ++LANGUAGE plpgsql VOLATILE ++AS ++$$ ++ DECLARE ++ -- the ddl we're building ++ v_insert_ddl text := ''; ++ v_cols text := ''; ++ v_cols_sel text := ''; ++ v_cnt int := 0; ++ v_colrec record; ++ v_schema text; ++ BEGIN ++ FOR v_colrec IN ++ SELECT c.column_name, c.data_type, c.udt_name, c.udt_schema, c.character_maximum_length, c.is_nullable, c.column_default, c.numeric_precision, c.numeric_scale, c.is_identity, c.identity_generation, c.is_generated ++ FROM information_schema.columns c WHERE (table_schema, table_name) = (source_schema, atable) ORDER BY ordinal_position ++ LOOP ++ IF v_colrec.udt_schema = 'public' THEN ++ v_schema = 'public'; ++ ELSE ++ v_schema = target_schema; ++ END IF; ++ ++ v_cnt = v_cnt + 1; ++ IF v_colrec.is_identity = 'YES' OR 
++        -- skip
++        continue;
++      END IF;
++
++      IF v_colrec.data_type = 'USER-DEFINED' THEN
++        IF v_cols = '' THEN
++          v_cols = v_colrec.column_name;
++          IF bTextCast THEN
++            -- v_cols_sel = v_colrec.column_name || '::text::' || v_schema || '.' || v_colrec.udt_name;
++            IF v_schema = 'public' THEN
++              v_cols_sel = v_colrec.column_name || '::' || v_schema || '.' || v_colrec.udt_name;
++            ELSE
++              v_cols_sel = v_colrec.column_name || '::text::' || v_colrec.udt_name;
++            END IF;
++          ELSE
++            v_cols_sel = v_colrec.column_name || '::' || v_schema || '.' || v_colrec.udt_name;
++          END IF;
++        ELSE
++          v_cols = v_cols || ', ' || v_colrec.column_name;
++          IF bTextCast THEN
++            -- v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::text::' || v_schema || '.' || v_colrec.udt_name;
++            IF v_schema = 'public' THEN
++              v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::' || v_schema || '.' || v_colrec.udt_name;
++            ELSE
++              v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::text::' || v_colrec.udt_name;
++            END IF;
++          ELSE
++            v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::' || v_schema || '.' || v_colrec.udt_name;
++          END IF;
++        END IF;
++      ELSE
++        IF v_cols = '' THEN
++          v_cols = v_colrec.column_name;
++          v_cols_sel = v_colrec.column_name;
++        ELSE
++          v_cols = v_cols || ', ' || v_colrec.column_name;
++          v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name;
++        END IF;
++      END IF;
++    END LOOP;
++
++    -- put it all together and return the insert statement
++    -- INSERT INTO clone1.address2 (id2, id3, addr) SELECT id2::text::clone1.udt_myint, id3::text::clone1.udt_myint, addr FROM sample.address;
++    v_insert_ddl = 'INSERT INTO ' || target_schema || '.' || atable || ' (' || v_cols || ') ' || 'SELECT ' || v_cols_sel || ' FROM ' || source_schema || '.' || atable || ';';
++    RETURN v_insert_ddl;
++  END;
++$$;
++
++
++CREATE OR REPLACE FUNCTION public.get_table_ddl_complex(
++  src_schema text,
++  dst_schema text,
++  in_table text,
++  sq_server_version_num integer
++)
++RETURNS text
++LANGUAGE plpgsql VOLATILE
++AS
++$$
++  DECLARE
++    v_table_ddl text;
++    v_buffer1 text;
++    v_buffer2 text;
++
++  BEGIN
++    IF sq_server_version_num < 110000 THEN
++      SELECT 'CREATE TABLE '
++        || quote_ident(dst_schema)
++        || '.'
++        || pc.relname
++        || E'(\n'
++        || string_agg(
++          pa.attname
++            || ' '
++            || pg_catalog.format_type(pa.atttypid, pa.atttypmod)
++            || coalesce(
++              ' DEFAULT '
++              || (
++                SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid)
++                FROM pg_catalog.pg_attrdef d
++                WHERE d.adrelid = pa.attrelid
++                AND d.adnum = pa.attnum
++                AND pa.atthasdef
++              ),
++              ''
++            )
++            || ' '
++            || CASE pa.attnotnull
++              WHEN TRUE THEN 'NOT NULL'
++              ELSE 'NULL'
++            END,
++          E',\n'
++        )
++        || coalesce(
++          (
++            SELECT
++              E',\n'
++              || string_agg(
++                'CONSTRAINT '
++                || pc1.conname
++                || ' '
++                || pg_get_constraintdef(pc1.oid),
++                E',\n'
++                ORDER BY pc1.conindid
++              )
++            FROM pg_constraint pc1
++            --Issue#103: do not return FKEYS for partitions since we assume it is implied by the one done on the parent table, otherwise error for trying to define it again.
++ WHERE pc1.conrelid = pa.attrelid ++ ), ++ '' ++ ) ++ INTO v_buffer1 ++ FROM pg_catalog.pg_attribute pa ++ JOIN pg_catalog.pg_class pc ON pc.oid = pa.attrelid ++ AND pc.relname = quote_ident(in_table) ++ JOIN pg_catalog.pg_namespace pn ON pn.oid = pc.relnamespace ++ AND pn.nspname = quote_ident(src_schema) ++ WHERE pa.attnum > 0 ++ AND NOT pa.attisdropped ++ GROUP BY pn.nspname, pc.relname, pa.attrelid; ++ ++ ELSE ++ SELECT 'CREATE TABLE ' ++ || quote_ident(dst_schema) ++ || '.' ++ || pc.relname ++ || E'(\n' ++ || string_agg( ++ pa.attname ++ || ' ' ++ || pg_catalog.format_type(pa.atttypid, pa.atttypmod) ++ || coalesce( ++ ' DEFAULT ' ++ || ( ++ SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid) ++ FROM pg_catalog.pg_attrdef d ++ WHERE d.adrelid = pa.attrelid ++ AND d.adnum = pa.attnum ++ AND pa.atthasdef ++ ), ++ '' ++ ) ++ || ' ' ++ || CASE pa.attnotnull ++ WHEN TRUE THEN 'NOT NULL' ++ ELSE 'NULL' ++ END, ++ E',\n' ++ ) ++ || coalesce( ++ ( ++ SELECT ++ E',\n' ++ || string_agg( ++ 'CONSTRAINT ' ++ || pc1.conname ++ || ' ' ++ || pg_get_constraintdef(pc1.oid), ++ E',\n' ++ ORDER BY pc1.conindid ++ ) ++ FROM pg_constraint pc1 ++ --Issue#103: do not return FKEYS for partitions since we assume it is implied by the one done on the parent table, otherwise error for trying to define it again. ++ WHERE pc1.conrelid = pa.attrelid AND pc1.conparentid = 0 ++ ), ++ '' ++ ) ++ INTO v_buffer1 ++ FROM pg_catalog.pg_attribute pa ++ JOIN pg_catalog.pg_class pc ON pc.oid = pa.attrelid ++ AND pc.relname = quote_ident(in_table) ++ JOIN pg_catalog.pg_namespace pn ON pn.oid = pc.relnamespace ++ AND pn.nspname = quote_ident(src_schema) ++ WHERE pa.attnum > 0 ++ AND NOT pa.attisdropped ++ GROUP BY pn.nspname, pc.relname, pa.attrelid; ++ END IF; ++ ++ -- append partition keyword to it ++ SELECT pg_catalog.pg_get_partkeydef(c.oid::pg_catalog.oid) into v_buffer2 ++ FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace ++ WHERE c.relname = quote_ident(in_table) COLLATE pg_catalog.default AND n.nspname = quote_ident(src_schema) COLLATE pg_catalog.default; ++ ++ v_table_ddl := v_buffer1 || ') PARTITION BY ' || v_buffer2 || ';'; ++ ++ RETURN v_table_ddl; ++ END; ++$$; ++ ++ ++-- SELECT * FROM public.get_table_ddl('sample', 'address', True); ++CREATE OR REPLACE FUNCTION public.get_table_ddl( ++ in_schema varchar, ++ in_table varchar, ++ bfkeys boolean ++) ++RETURNS text ++LANGUAGE plpgsql VOLATILE ++AS ++$$ ++ DECLARE ++ -- the ddl we're building ++ v_table_ddl text; ++ ++ -- data about the target table ++ v_table_oid int; ++ ++ -- records for looping ++ v_colrec record; ++ v_constraintrec record; ++ v_indexrec record; ++ v_primary boolean := False; ++ v_constraint_name text; ++ v_src_path_old text := ''; ++ v_src_path_new text := ''; ++ v_dummy text; ++ v_partbound text; ++ v_pgversion int; ++ v_parent text := ''; ++ v_relopts text := ''; ++ v_tablespace text; ++ v_partition_key text := ''; ++ v_temp text; ++ bPartitioned bool := False; ++ bInheritance bool := False; ++ bRelispartition bool; ++ constraintarr text[] := '{{}}'; ++ constraintelement text; ++ bSkip boolean; ++ ++ BEGIN ++ SELECT c.oid, ( ++ SELECT setting ++ FROM pg_settings ++ WHERE name = 'server_version_num') INTO v_table_oid, v_pgversion ++ FROM pg_catalog.pg_class c ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace ++ WHERE c.relkind IN ('r', 'p') ++ AND c.relname = in_table ++ AND n.nspname = in_schema; ++ IF (v_table_oid IS NULL) THEN ++ RAISE EXCEPTION 'table does not exist'; ++ END IF; ++ ++ -- 
get user-defined tablespaces if applicable ++ SELECT TABLESPACE INTO v_temp ++ FROM pg_tables ++ WHERE schemaname = in_schema ++ AND tablename = in_table ++ AND TABLESPACE IS NOT NULL; ++ -- Issue#99 Fix: simple coding error! ++ -- IF v_tablespace IS NULL THEN ++ IF v_temp IS NULL THEN ++ v_tablespace := 'TABLESPACE pg_default'; ++ ELSE ++ v_tablespace := 'TABLESPACE ' || v_temp; ++ END IF; ++ -- also see if there are any SET commands for this table, i.e., autovacuum_enabled=off, fillfactor=70 ++ WITH relopts AS ( ++ SELECT unnest(c.reloptions) relopts ++ FROM pg_class c, pg_namespace n ++ WHERE n.nspname = in_schema ++ AND n.oid = c.relnamespace ++ AND c.relname = in_table ++ ) ++ SELECT string_agg(r.relopts, ', ') AS relopts INTO v_temp ++ FROM relopts r; ++ IF v_temp IS NULL THEN ++ v_relopts := ''; ++ ELSE ++ v_relopts := ' WITH (' || v_temp || ')'; ++ END IF; ++ ++ -- Issue#61 FIX: set search_path = public before we do anything to force explicit schema qualification but don't forget to set it back before exiting... ++ SELECT setting INTO v_src_path_old FROM pg_settings WHERE name = 'search_path'; ++ ++ SELECT REPLACE(REPLACE(setting, '"$user"', '$user'), '$user', '"$user"') INTO v_src_path_old ++ FROM pg_settings ++ WHERE name = 'search_path'; ++ -- RAISE INFO 'DEBUG tableddl: saving old search_path: ***%***', v_src_path_old; ++ EXECUTE 'SET search_path = "public"'; ++ SELECT setting INTO v_src_path_new FROM pg_settings WHERE name = 'search_path'; ++ ++ -- grab the oid of the table; https://www.postgresql.org/docs/8.3/catalog-pg-class.html ++ SELECT c.oid INTO v_table_oid ++ FROM pg_catalog.pg_class c ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace ++ WHERE 1 = 1 ++ AND c.relkind = 'r' ++ AND c.relname = in_table ++ AND n.nspname = in_schema; ++ ++ IF (v_table_oid IS NULL) THEN ++ -- Don't give up yet. It might be a partitioned table ++ SELECT c.oid INTO v_table_oid ++ FROM pg_catalog.pg_class c ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace ++ WHERE 1 = 1 ++ AND c.relkind = 'p' ++ AND c.relname = in_table ++ AND n.nspname = in_schema; ++ ++ IF (v_table_oid IS NULL) THEN ++ RAISE EXCEPTION 'table does not exist'; ++ END IF; ++ bPartitioned := True; ++ END IF; ++ IF v_pgversion < 100000 THEN ++ SELECT c2.relname parent INTO v_parent ++ FROM pg_class c1, pg_namespace n, pg_inherits i, pg_class c2 ++ WHERE n.nspname = in_schema ++ AND n.oid = c1.relnamespace ++ AND c1.relname = in_table ++ AND c1.oid = i.inhrelid ++ AND i.inhparent = c2.oid ++ AND c1.relkind = 'r'; ++ ++ IF (v_parent IS NOT NULL) THEN ++ bPartitioned := True; ++ bInheritance := True; ++ END IF; ++ ELSE ++ SELECT c2.relname parent, c1.relispartition, pg_get_expr(c1.relpartbound, c1.oid, TRUE) INTO v_parent, bRelispartition, v_partbound ++ FROM pg_class c1, pg_namespace n, pg_inherits i, pg_class c2 ++ WHERE n.nspname = in_schema ++ AND n.oid = c1.relnamespace ++ AND c1.relname = in_table ++ AND c1.oid = i.inhrelid ++ AND i.inhparent = c2.oid ++ AND c1.relkind = 'r'; ++ ++ IF (v_parent IS NOT NULL) THEN ++ bPartitioned := True; ++ IF bRelispartition THEN ++ bInheritance := False; ++ ELSE ++ bInheritance := True; ++ END IF; ++ END IF; ++ END IF; ++ -- RAISE NOTICE 'version=% schema=% parent=% relopts=% tablespace=% partitioned=% inherited=% relispartition=%',v_pgversion, in_schema, v_parent, v_relopts, v_tablespace, bPartitioned, bInheritance, bRelispartition; ++ ++ -- start the create definition ++ v_table_ddl := 'CREATE TABLE ' || in_schema || '.'
|| in_table || ' (' || E'\n'; ++ ++ -- define all of the columns in the table; https://stackoverflow.com/a/8153081/3068233 ++ FOR v_colrec IN ++ SELECT c.column_name, c.data_type, c.udt_name, c.udt_schema, c.character_maximum_length, c.is_nullable, c.column_default, c.numeric_precision, c.numeric_scale, c.is_identity, c.identity_generation ++ FROM information_schema.columns c ++ WHERE (table_schema, table_name) = (in_schema, in_table) ++ ORDER BY ordinal_position ++ LOOP ++ v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column ++ || v_colrec.column_name || ' ' ++ -- FIX #82, FIX #100 as well by adding 'citext' to the list ++ -- FIX #105 by overriding the previous fixes (#82, #100), which presumed "public" was always the schema for extensions. It could be a custom schema. ++ -- so assume udt_schema for all USER-DEFINED datatypes ++ -- || CASE WHEN v_colrec.udt_name in ('geometry', 'box2d', 'box2df', 'box3d', 'geography', 'geometry_dump', 'gidx', 'spheroid', 'valid_detail','citext') ++ -- THEN v_colrec.udt_name ++ || CASE WHEN v_colrec.data_type = 'USER-DEFINED' ++ -- THEN in_schema || '.' || v_colrec.udt_name ELSE v_colrec.data_type END ++ THEN v_colrec.udt_schema || '.' || v_colrec.udt_name ELSE v_colrec.data_type END ++ || CASE WHEN v_colrec.is_identity = 'YES' ++ THEN ++ CASE WHEN v_colrec.identity_generation = 'ALWAYS' ++ THEN ' GENERATED ALWAYS AS IDENTITY' ELSE ' GENERATED BY DEFAULT AS IDENTITY' END ELSE '' END ++ || CASE WHEN v_colrec.character_maximum_length IS NOT NULL ++ THEN ('(' || v_colrec.character_maximum_length || ')') ++ WHEN v_colrec.numeric_precision > 0 AND v_colrec.numeric_scale > 0 ++ THEN '(' || v_colrec.numeric_precision || ',' || v_colrec.numeric_scale || ')' ++ ELSE '' END || ' ' ++ || CASE WHEN v_colrec.is_nullable = 'NO' ++ THEN 'NOT NULL' ELSE 'NULL' END ++ || CASE WHEN v_colrec.column_default IS NOT null ++ THEN (' DEFAULT ' || v_colrec.column_default) ELSE '' END ++ || ',' || E'\n'; ++ END LOOP; ++ -- define all the constraints in the table; https://www.postgresql.org/docs/9.1/catalog-pg-constraint.html && https://dba.stackexchange.com/a/214877/75296 ++ -- Issue#103: do not get foreign keys for partitions since they are defined on the parent and this will cause an "already exists" error otherwise ++ -- Also conparentid is not in V10, so bypass since we do not have FKEYS in partitioned tables in V10 ++ IF v_pgversion < 110000 THEN ++ FOR v_constraintrec IN ++ SELECT ++ con.conname as constraint_name, ++ con.contype as constraint_type, ++ CASE ++ WHEN con.contype = 'p' THEN 1 -- primary key constraint ++ WHEN con.contype = 'u' THEN 2 -- unique constraint ++ WHEN con.contype = 'f' THEN 3 -- foreign key constraint ++ WHEN con.contype = 'c' THEN 4 ++ ELSE 5 ++ END as type_rank, ++ pg_get_constraintdef(con.oid) as constraint_definition ++ FROM pg_catalog.pg_constraint con ++ JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid ++ JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace ++ WHERE nsp.nspname = in_schema ++ AND rel.relname = in_table ++ ORDER BY type_rank ++ LOOP ++ -- Issue#85 fix ++ -- constraintarr := constraintarr || v_constraintrec.constraint_name; ++ constraintarr := constraintarr || v_constraintrec.constraint_name::text; ++ IF v_constraintrec.type_rank = 1 THEN ++ v_primary := True; ++ v_constraint_name := v_constraintrec.constraint_name; ++ END IF; ++ IF NOT bfkeys AND v_constraintrec.constraint_type = 'f' THEN ++ continue; ++ END IF; ++ v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to
indent the column ++ || 'CONSTRAINT' || ' ' ++ || v_constraintrec.constraint_name || ' ' ++ || v_constraintrec.constraint_definition ++ || ',' || E'\n'; ++ END LOOP; ++ ELSE ++ FOR v_constraintrec IN ++ SELECT ++ con.conname as constraint_name, ++ con.contype as constraint_type, ++ CASE ++ WHEN con.contype = 'p' THEN 1 -- primary key constraint ++ WHEN con.contype = 'u' THEN 2 -- unique constraint ++ WHEN con.contype = 'f' THEN 3 -- foreign key constraint ++ WHEN con.contype = 'c' THEN 4 ++ ELSE 5 ++ END as type_rank, ++ pg_get_constraintdef(con.oid) as constraint_definition ++ FROM pg_catalog.pg_constraint con ++ JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid ++ JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace ++ WHERE nsp.nspname = in_schema ++ AND rel.relname = in_table ++ -- Issue#103: do not get partitioned tables ++ AND con.conparentid = 0 ++ ORDER BY type_rank ++ LOOP ++ -- Issue#85 fix ++ -- constraintarr := constraintarr || v_constraintrec.constraint_name; ++ constraintarr := constraintarr || v_constraintrec.constraint_name::text; ++ IF v_constraintrec.type_rank = 1 THEN ++ v_primary := True; ++ v_constraint_name := v_constraintrec.constraint_name; ++ END IF; ++ IF NOT bfkeys AND v_constraintrec.constraint_type = 'f' THEN ++ continue; ++ END IF; ++ v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column ++ || 'CONSTRAINT' || ' ' ++ || v_constraintrec.constraint_name || ' ' ++ || v_constraintrec.constraint_definition ++ || ',' || E'\n'; ++ END LOOP; ++ END IF; ++ ++ -- drop the last comma before ending the create statement ++ v_table_ddl = substr(v_table_ddl, 0, length(v_table_ddl) - 1) || E'\n'; ++ -- end the create table def but add inherits clause if valid ++ IF bPartitioned and bInheritance THEN ++ v_table_ddl := v_table_ddl || ') INHERITS (' || in_schema || '.' || v_parent || ') ' || v_relopts || ' ' || v_tablespace || ';' || E'\n'; ++ ELSIF v_pgversion >= 100000 AND bPartitioned and NOT bInheritance THEN ++ -- See if this is a partitioned table (pg_class.relkind = 'p') and add the partitioned key ++ SELECT pg_get_partkeydef (c1.oid) AS partition_key INTO v_partition_key ++ FROM pg_class c1 ++ JOIN pg_namespace n ON (n.oid = c1.relnamespace) ++ LEFT JOIN pg_partitioned_table p ON (c1.oid = p.partrelid) ++ WHERE n.nspname = in_schema ++ AND n.oid = c1.relnamespace ++ AND c1.relname = in_table ++ AND c1.relkind = 'p'; ++ END IF; ++ IF v_partition_key IS NOT NULL AND v_partition_key <> '' THEN ++ -- add partition clause ++ -- NOTE: cannot specify default tablespace for partitioned relations ++ v_table_ddl := v_table_ddl || ') PARTITION BY ' || v_partition_key || ';' || E'\n'; ++ ELSIF bPartitioned AND not bInheritance THEN ++ IF v_relopts <> '' THEN ++ v_table_ddl := 'CREATE TABLE ' || in_schema || '.' || in_table || ' PARTITION OF ' || in_schema || '.' || v_parent || ' ' || v_partbound || v_relopts || ' ' || v_tablespace || '; ' || E'\n'; ++ ELSE ++ v_table_ddl := 'CREATE TABLE ' || in_schema || '.' || in_table || ' PARTITION OF ' || in_schema || '.' 
|| v_parent || ' ' || v_partbound || ' ' || v_tablespace || '; ' || E'\n'; ++ END IF; ++ ELSIF bPartitioned and bInheritance THEN ++ -- we already did this above ++ v_table_ddl := v_table_ddl; ++ ELSIF v_relopts <> '' THEN ++ v_table_ddl := v_table_ddl || ') ' || v_relopts || ' ' || v_tablespace || ';' || E'\n'; ++ ELSE ++ v_table_ddl := v_table_ddl || ') ' || v_tablespace || ';' || E'\n'; ++ END IF; ++ -- suffix create statement with all of the indexes on the table ++ FOR v_indexrec IN ++ SELECT indexdef, indexname ++ FROM pg_indexes ++ WHERE (schemaname, tablename) = (in_schema, in_table) ++ LOOP ++ -- Issue#83 fix: loop through constraints and skip ones already defined ++ bSkip = False; ++ FOREACH constraintelement IN ARRAY constraintarr ++ LOOP ++ IF constraintelement = v_indexrec.indexname THEN ++ bSkip = True; ++ EXIT; ++ END IF; ++ END LOOP; ++ if bSkip THEN CONTINUE; END IF; ++ v_table_ddl := v_table_ddl ++ || v_indexrec.indexdef ++ || ';' || E'\n'; ++ END LOOP; ++ ++ -- reset search_path back to what it was ++ IF v_src_path_old = '' THEN ++ SELECT set_config('search_path', '', false) into v_dummy; ++ ELSE ++ EXECUTE 'SET search_path = ' || v_src_path_old; ++ END IF; ++ -- RAISE NOTICE 'DEBUG tableddl: reset search_path back to ***%***', v_src_path_old; ++ ++ -- return the ddl ++ RETURN v_table_ddl; ++ END; ++$$; + +--- DROP FUNCTION clone_schema(text, text, boolean, boolean); + ++-- Function: clone_schema(text, text, boolean, boolean, boolean) ++-- DROP FUNCTION clone_schema(text, text, boolean, boolean, boolean); ++-- DROP FUNCTION IF EXISTS public.clone_schema(text, text, boolean, boolean); ++ ++DROP FUNCTION IF EXISTS public.clone_schema(text, text, cloneparms[]); + CREATE OR REPLACE FUNCTION public.clone_schema( + source_schema text, + dest_schema text, +- include_recs boolean, +- ddl_only boolean) ++ VARIADIC arr public.cloneparms[] DEFAULT '{{}}':: public.cloneparms[]) + RETURNS void AS + $BODY$ + + -- This function will clone all sequences, tables, data, views & functions from any existing schema to a new one + -- SAMPLE CALL: +--- SELECT clone_schema('public', 'new_schema', True, False); ++-- SELECT clone_schema('sample', 'sample_clone2'); + + DECLARE + src_oid oid; +@@ -32,20 +600,37 @@ + object text; + buffer text; + buffer2 text; ++ buffer3 text; + srctbl text; ++ aname text; + default_ text; + column_ text; + qry text; + ix_old_name text; + ix_new_name text; ++ relpersist text; ++ udt_name text; ++ udt_schema text; ++ bRelispart bool; ++ bChild bool; ++ relknd text; ++ data_type text; ++ ocomment text; ++ adef text; + dest_qry text; + v_def text; ++ part_range text; + src_path_old text; ++ src_path_new text; + aclstr text; ++ -- issue#80 initialize arrays properly ++ tblarray text[] := '{{}}'; ++ tblarray2 text[] := '{{}}'; ++ tblarray3 text[] := '{{}}'; ++ tblelement text; + grantor text; + grantee text; + privs text; +- records_count bigint; + seqval bigint; + sq_last_value bigint; + sq_max_value bigint; +@@ -53,16 +638,28 @@ + sq_increment_by bigint; + sq_min_value bigint; + sq_cache_value bigint; +- sq_is_called boolean; ++ sq_is_called boolean := True; + sq_is_cycled boolean; ++ is_prokind boolean; ++ abool boolean; + sq_data_type text; + sq_cycled char(10); ++ sq_owned text; ++ sq_version text; ++ sq_server_version text; ++ sq_server_version_num integer; ++ bWindows boolean; + arec RECORD; + cnt integer; ++ cnt1 integer; + cnt2 integer; +- seq_cnt integer; ++ cnt3 integer; ++ cnt4 integer; + pos integer; ++ tblscopied integer := 0; ++ l_child integer; + action 
text := 'N/A'; ++ tblname text; + v_ret text; + v_diag1 text; + v_diag2 text; +@@ -70,48 +667,209 @@ + v_diag4 text; + v_diag5 text; + v_diag6 text; ++ v_dummy text; ++ spath text; ++ spath_tmp text; ++ -- issue#86 fix ++ isGenerated text; ++ ++ -- issue#91 fix ++ tblowner text; ++ func_owner text; ++ func_name text; ++ func_args text; ++ func_argno integer; ++ view_owner text; ++ ++ -- issue#92 ++ calleruser text; ++ ++ -- issue#94 ++ bData boolean := False; ++ bDDLOnly boolean := False; ++ bVerbose boolean := False; ++ bDebug boolean := False; ++ bNoACL boolean := False; ++ bNoOwner boolean := False; ++ arglen integer; ++ vargs text; ++ avarg public.cloneparms; ++ ++ -- issue#98 ++ mvarray text[] := '{{}}'; ++ mvscopied integer := 0; ++ ++ -- issue#99 tablespaces ++ tblspace text; ++ ++ -- issue#101 ++ bFileCopy boolean := False; ++ ++ t timestamptz := clock_timestamp(); ++ r timestamptz; ++ s timestamptz; ++ lastsql text := ''; ++ v_version text := '1.19 September 07, 2023'; + + BEGIN ++ -- Make sure NOTICE are shown ++ SET client_min_messages = 'notice'; ++ RAISE NOTICE 'clone_schema version %', v_version; ++ ++ IF 'DEBUG' = ANY ($3) THEN bDebug = True; END IF; ++ IF 'VERBOSE' = ANY ($3) THEN bVerbose = True; END IF; ++ ++ -- IF bVerbose THEN RAISE NOTICE 'START: %',clock_timestamp() - t; END IF; ++ ++ arglen := array_length($3, 1); ++ IF arglen IS NULL THEN ++ -- nothing to do, so defaults are assumed ++ NULL; ++ ELSE ++ -- loop thru args ++ -- IF 'NO_TRIGGERS' = ANY ($3) ++ -- select array_to_string($3, ',', '***') INTO vargs; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: arguments=%', $3; END IF; ++ FOREACH avarg IN ARRAY $3 LOOP ++ IF bDebug THEN RAISE NOTICE 'DEBUG: arg=%', avarg; END IF; ++ IF avarg = 'DATA' THEN ++ bData = True; ++ ELSEIF avarg = 'NODATA' THEN ++ -- already set to that by default ++ bData = False; ++ ELSEIF avarg = 'DDLONLY' THEN ++ bDDLOnly = True; ++ ELSEIF avarg = 'NOACL' THEN ++ bNoACL = True; ++ ELSEIF avarg = 'NOOWNER' THEN ++ bNoOwner = True; ++ -- issue#101 fix ++ ELSEIF avarg = 'FILECOPY' THEN ++ bFileCopy = True; ++ END IF; ++ END LOOP; ++ IF bData and bDDLOnly THEN ++ RAISE WARNING 'You can only specify DDLONLY or DATA, but not both.'; ++ RETURN; ++ END IF; ++ END IF; ++ ++ -- Get server version info to handle certain things differently based on the version. ++ SELECT setting INTO sq_server_version ++ FROM pg_settings ++ WHERE name = 'server_version'; ++ SELECT version() INTO sq_version; ++ ++ IF POSITION('compiled by Visual C++' IN sq_version) > 0 THEN ++ bWindows = True; ++ RAISE NOTICE 'Windows: %', sq_version; ++ ELSE ++ bWindows = False; ++ RAISE NOTICE 'Linux: %', sq_version; ++ END IF; ++ SELECT setting INTO sq_server_version_num ++ FROM pg_settings ++ WHERE name = 'server_version_num'; ++ ++ IF sq_server_version_num < 100000 THEN ++ IF sq_server_version_num > 90600 THEN ++ RAISE WARNING 'Server Version:% Number:% PG Versions older than v10 are not supported. Will try however for PG 9.6...', sq_server_version, sq_server_version_num; ++ ELSE ++ RAISE WARNING 'Server Version:% Number:% PG Versions older than v10 are not supported. 
You need to be at minimum version 9.6 to at least try', sq_server_version, sq_server_version_num; ++ RETURN; ++ END IF; ++ END IF; + + -- Check that source_schema exists + SELECT oid INTO src_oid +- FROM pg_namespace +- WHERE nspname = quote_ident(source_schema); ++ FROM pg_namespace ++ WHERE nspname = quote_ident(source_schema); ++ + IF NOT FOUND + THEN +- RAISE NOTICE 'source schema % does not exist!', source_schema; ++ RAISE NOTICE ' source schema % does not exist!', source_schema; + RETURN ; + END IF; + ++ -- Check for case-sensitive target schemas and reject them for now. ++ SELECT lower(dest_schema) = dest_schema INTO abool; ++ IF not abool THEN ++ RAISE NOTICE 'Case-sensitive target schemas are not supported at this time.'; ++ RETURN; ++ END IF; ++ + -- Check that dest_schema does not yet exist + PERFORM nspname +- FROM pg_namespace +- WHERE nspname = quote_ident(dest_schema); ++ FROM pg_namespace ++ WHERE nspname = quote_ident(dest_schema); ++ + IF FOUND + THEN +- RAISE NOTICE 'dest schema % already exists!', dest_schema; ++ RAISE NOTICE ' dest schema % already exists!', dest_schema; + RETURN ; + END IF; +- IF ddl_only and include_recs THEN ++ IF bDDLOnly and bData THEN + RAISE WARNING 'You cannot specify to clone data and generate ddl at the same time.'; + RETURN ; + END IF; + ++ -- Issue#92 ++ SELECT current_user into calleruser; ++ + -- Set the search_path to source schema. Before exiting set it back to what it was before. +- SELECT setting INTO src_path_old FROM pg_settings WHERE name='search_path'; ++ -- In order to avoid issues with the special schema name "$user" that may be ++ -- returned unquoted by some applications, we ensure it remains double quoted. ++ -- MJV FIX: #47 ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name='search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path=%', v_dummy; END IF; ++ ++ SELECT REPLACE(REPLACE(setting, '"$user"', '$user'), '$user', '"$user"') INTO src_path_old ++ FROM pg_settings WHERE name = 'search_path'; ++ ++ IF bDebug THEN RAISE NOTICE 'DEBUG: src_path_old=%', src_path_old; END IF; ++ + EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; +- -- RAISE NOTICE 'Using source search_path=%', buffer; ++ SELECT setting INTO src_path_new FROM pg_settings WHERE name='search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: new search_path=%', src_path_new; END IF; + + -- Validate required types exist. If not, create them. 
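++ -- Note: the count queries below merely check whether types named 'obj_type' and 'perm_type' are already
++ -- visible on the search_path; when a count is 0, a matching enum type is created right after. A rough
++ -- standalone sketch of one such check, for illustration only:
++ -- SELECT count(*) FROM pg_catalog.pg_type t WHERE pg_catalog.pg_type_is_visible(t.oid) AND pg_catalog.format_type(t.oid, NULL) = 'obj_type';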
+- select a.objtypecnt, b.permtypecnt INTO cnt, cnt2 FROM +- (SELECT count(*) as objtypecnt FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace +- WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) +- AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid) +- AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema' AND pg_catalog.pg_type_is_visible(t.oid) AND pg_catalog.format_type(t.oid, NULL) = 'obj_type') a, +- (SELECT count(*) as permtypecnt FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace +- WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) +- AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid) +- AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema' AND pg_catalog.pg_type_is_visible(t.oid) AND pg_catalog.format_type(t.oid, NULL) = 'perm_type') b; ++ SELECT a.objtypecnt, b.permtypecnt INTO cnt, cnt2 ++ FROM ( ++ SELECT count(*) AS objtypecnt ++ FROM pg_catalog.pg_type t ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ WHERE (t.typrelid = 0 ++ OR ( ++ SELECT c.relkind = 'c' ++ FROM pg_catalog.pg_class c ++ WHERE c.oid = t.typrelid)) ++ AND NOT EXISTS ( ++ SELECT 1 ++ FROM pg_catalog.pg_type el ++ WHERE el.oid = t.typelem ++ AND el.typarray = t.oid) ++ AND n.nspname <> 'pg_catalog' ++ AND n.nspname <> 'information_schema' ++ AND pg_catalog.pg_type_is_visible(t.oid) ++ AND pg_catalog.format_type(t.oid, NULL) = 'obj_type') a, ( ++ SELECT count(*) AS permtypecnt ++ FROM pg_catalog.pg_type t ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ WHERE (t.typrelid = 0 ++ OR ( ++ SELECT c.relkind = 'c' ++ FROM pg_catalog.pg_class c ++ WHERE c.oid = t.typrelid)) ++ AND NOT EXISTS ( ++ SELECT 1 ++ FROM pg_catalog.pg_type el ++ WHERE el.oid = t.typelem ++ AND el.typarray = t.oid) ++ AND n.nspname <> 'pg_catalog' ++ AND n.nspname <> 'information_schema' ++ AND pg_catalog.pg_type_is_visible(t.oid) ++ AND pg_catalog.format_type(t.oid, NULL) = 'perm_type') b; ++ + IF cnt = 0 THEN + CREATE TYPE obj_type AS ENUM ('TABLE','VIEW','COLUMN','SEQUENCE','FUNCTION','SCHEMA','DATABASE'); + END IF; +@@ -119,53 +877,148 @@ + CREATE TYPE perm_type AS ENUM ('SELECT','INSERT','UPDATE','DELETE','TRUNCATE','REFERENCES','TRIGGER','USAGE','CREATE','EXECUTE','CONNECT','TEMPORARY'); + END IF; + +- IF ddl_only THEN +- RAISE NOTICE 'Only generating DDL, not actually creating anything...'; ++ -- Issue#95 ++ SELECT pg_catalog.pg_get_userbyid(nspowner) INTO buffer FROM pg_namespace WHERE nspname = quote_ident(source_schema); ++ ++ IF bDDLOnly THEN ++ RAISE NOTICE ' Only generating DDL, not actually creating anything...'; ++ -- issue#95 ++ IF bNoOwner THEN ++ RAISE INFO 'CREATE SCHEMA %;', quote_ident(dest_schema); ++ ELSE ++ RAISE INFO 'CREATE SCHEMA % AUTHORIZATION %;', quote_ident(dest_schema), buffer; ++ END IF; ++ RAISE NOTICE 'SET search_path=%;', quote_ident(dest_schema); ++ ELSE ++ -- issue#95 ++ IF bNoOwner THEN ++ EXECUTE 'CREATE SCHEMA ' || quote_ident(dest_schema) ; ++ ELSE ++ EXECUTE 'CREATE SCHEMA ' || quote_ident(dest_schema) || ' AUTHORIZATION ' || buffer; ++ END IF; + END IF; + +- IF ddl_only THEN +- RAISE NOTICE '%', 'CREATE SCHEMA ' || quote_ident(dest_schema); ++ -- Do system table validations for subsequent system table queries ++ -- Issue#65 Fix ++ SELECT count(*) into cnt ++ 
FROM pg_attribute ++ WHERE attrelid = 'pg_proc'::regclass AND attname = 'prokind'; ++ ++ IF cnt = 0 THEN ++ is_prokind = False; + ELSE +- EXECUTE 'CREATE SCHEMA ' || quote_ident(dest_schema) ; ++ is_prokind = True; + END IF; + + -- MV: Create Collations + action := 'Collations'; + cnt := 0; +- FOR arec IN +- SELECT n.nspname as schemaname, a.rolname as ownername , c.collname, c.collprovider, c.collcollate as locale, +- 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || CASE WHEN c.collprovider = 'i' THEN 'icu' WHEN c.collprovider = 'c' THEN 'libc' ELSE '' END || ', locale = ''' || c.collcollate || ''');' as COLL_DDL +- FROM pg_collation c JOIN pg_namespace n ON (c.collnamespace = n.oid) JOIN pg_roles a ON (c.collowner = a.oid) WHERE n.nspname = quote_ident(source_schema) order by c.collname +- LOOP +- BEGIN +- cnt := cnt + 1; +- IF ddl_only THEN +- RAISE INFO '%', arec.coll_ddl; +- ELSE +- EXECUTE arec.coll_ddl; +- END IF; +- END; +- END LOOP; ++ -- Issue#96 Handle differently based on PG Versions (PG15 rely on colliculocale, not collcolocate) ++ -- perhaps use this logic instead: COALESCE(c.collcollate, c.colliculocale) AS lc_collate, COALESCE(c.collctype, c.colliculocale) AS lc_type ++ IF sq_server_version_num > 150000 THEN ++ FOR arec IN ++ SELECT n.nspname AS schemaname, a.rolname AS ownername, c.collname, c.collprovider, c.collcollate AS locale, ++ 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || ++ CASE WHEN c.collprovider = 'i' THEN 'icu' WHEN c.collprovider = 'c' THEN 'libc' ELSE '' END || ++ ', locale = ''' || c.colliculocale || ''');' AS COLL_DDL ++ FROM pg_collation c ++ JOIN pg_namespace n ON (c.collnamespace = n.oid) ++ JOIN pg_roles a ON (c.collowner = a.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ ORDER BY c.collname ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.coll_ddl; ++ ELSE ++ EXECUTE arec.coll_ddl; ++ END IF; ++ END; ++ END LOOP; ++ ELSIF sq_server_version_num > 100000 THEN ++ FOR arec IN ++ SELECT n.nspname AS schemaname, a.rolname AS ownername, c.collname, c.collprovider, c.collcollate AS locale, ++ 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || ++ CASE WHEN c.collprovider = 'i' THEN 'icu' WHEN c.collprovider = 'c' THEN 'libc' ELSE '' END || ++ ', locale = ''' || c.collcollate || ''');' AS COLL_DDL ++ FROM pg_collation c ++ JOIN pg_namespace n ON (c.collnamespace = n.oid) ++ JOIN pg_roles a ON (c.collowner = a.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ ORDER BY c.collname ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.coll_ddl; ++ ELSE ++ EXECUTE arec.coll_ddl; ++ END IF; ++ END; ++ END LOOP; ++ ELSE ++ -- handle 9.6 that is missing some columns in pg_collation ++ FOR arec IN ++ SELECT n.nspname AS schemaname, a.rolname AS ownername, c.collname, c.collcollate AS locale, ++ 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || ++ ', locale = ''' || c.collcollate || ''');' AS COLL_DDL ++ FROM pg_collation c ++ JOIN pg_namespace n ON (c.collnamespace = n.oid) ++ JOIN pg_roles a ON (c.collowner = a.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ ORDER BY c.collname ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.coll_ddl; ++ ELSE ++ EXECUTE arec.coll_ddl; ++ END IF; ++ END; ++ END LOOP; ++ END IF; + RAISE NOTICE ' COLLATIONS cloned: %', LPAD(cnt::text, 5, ' '); + + -- MV: 
Create Domains + action := 'Domains'; + cnt := 0; + FOR arec IN +- SELECT n.nspname as "Schema", t.typname as "Name", pg_catalog.format_type(t.typbasetype, t.typtypmod) as "Type", +- (SELECT c.collname FROM pg_catalog.pg_collation c, pg_catalog.pg_type bt WHERE c.oid = t.typcollation AND +- bt.oid = t.typbasetype AND t.typcollation <> bt.typcollation) as "Collation", +- CASE WHEN t.typnotnull THEN 'not null' END as "Nullable", t.typdefault as "Default", +- pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM pg_catalog.pg_constraint r WHERE t.oid = r.contypid), ' ') as "Check", +- 'CREATE DOMAIN ' || quote_ident(dest_schema) || '.' || t.typname || ' AS ' || pg_catalog.format_type(t.typbasetype, t.typtypmod) || +- CASE WHEN t.typnotnull IS NOT NULL THEN ' NOT NULL ' ELSE ' ' END || CASE WHEN t.typdefault IS NOT NULL THEN 'DEFAULT ' || t.typdefault || ' ' ELSE ' ' END || +- pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM pg_catalog.pg_constraint r WHERE t.oid = r.contypid), ' ') || ';' AS DOM_DDL +- FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace +- WHERE t.typtype = 'd' AND n.nspname = quote_ident(source_schema) AND pg_catalog.pg_type_is_visible(t.oid) ORDER BY 1, 2 ++ SELECT n.nspname AS "Schema", t.typname AS "Name", pg_catalog.format_type(t.typbasetype, t.typtypmod) AS "Type", ( ++ SELECT c.collname ++ FROM pg_catalog.pg_collation c, pg_catalog.pg_type bt ++ WHERE c.oid = t.typcollation ++ AND bt.oid = t.typbasetype ++ AND t.typcollation <> bt.typcollation) AS "Collation", CASE WHEN t.typnotnull THEN ++ 'not null' ++ END AS "Nullable", t.typdefault AS "Default", pg_catalog.array_to_string(ARRAY ( ++ SELECT pg_catalog.pg_get_constraintdef(r.oid, TRUE) ++ FROM pg_catalog.pg_constraint r ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on t.typename ++ WHERE t.oid = r.contypid), ' ') AS "Check", 'CREATE DOMAIN ' || quote_ident(dest_schema) || '.' || quote_ident(t.typname) || ' AS ' || pg_catalog.format_type(t.typbasetype, t.typtypmod) || ++ CASE WHEN t.typnotnull IS NOT NULL THEN ++ ' NOT NULL ' ++ ELSE ++ ' ' ++ END || CASE WHEN t.typdefault IS NOT NULL THEN ++ 'DEFAULT ' || t.typdefault || ' ' ++ ELSE ++ ' ' ++ END || pg_catalog.array_to_string(ARRAY ( ++ SELECT pg_catalog.pg_get_constraintdef(r.oid, TRUE) ++ FROM pg_catalog.pg_constraint r ++ WHERE t.oid = r.contypid), ' ') || ';' AS DOM_DDL ++ FROM pg_catalog.pg_type t ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ WHERE t.typtype = 'd' ++ AND n.nspname = quote_ident(source_schema) ++ AND pg_catalog.pg_type_is_visible(t.oid) ++ ORDER BY 1, 2 + LOOP + BEGIN + cnt := cnt + 1; +- IF ddl_only THEN ++ IF bDDLOnly THEN + RAISE INFO '%', arec.dom_ddl; + ELSE + EXECUTE arec.dom_ddl; +@@ -177,36 +1030,70 @@ + -- MV: Create types + action := 'Types'; + cnt := 0; ++ lastsql = ''; + FOR arec IN +- SELECT c.relkind, n.nspname AS schemaname, t.typname AS typname, t.typcategory, CASE WHEN t.typcategory='C' THEN +- 'CREATE TYPE ' || quote_ident(dest_schema) || '.' || t.typname || ' AS (' || array_to_string(array_agg(a.attname || ' ' || pg_catalog.format_type(a.atttypid, a.atttypmod) ORDER BY c.relname, a.attnum),', ') || ');' +- WHEN t.typcategory='E' THEN +- 'CREATE TYPE ' || quote_ident(dest_schema) || '.' 
|| t.typname || ' AS ENUM (' || REPLACE(quote_literal(array_to_string(array_agg(e.enumlabel ORDER BY e.enumsortorder),',')), ',', ''',''') || ');' +- ELSE '' END AS type_ddl FROM pg_type t JOIN pg_namespace n ON (n.oid = t.typnamespace) +- LEFT JOIN pg_enum e ON (t.oid = e.enumtypid) +- LEFT JOIN pg_class c ON (c.reltype = t.oid) LEFT JOIN pg_attribute a ON (a.attrelid = c.oid) +- WHERE n.nspname = quote_ident(source_schema) and (c.relkind IS NULL or c.relkind = 'c') and t.typcategory in ('C', 'E') group by 1,2,3,4 order by n.nspname, t.typcategory, t.typname ++ -- Fixed Issue#108:enclose double-quote roles with special characters for setting "OWNER TO" ++ -- SELECT c.relkind, n.nspname AS schemaname, t.typname AS typname, t.typcategory, pg_catalog.pg_get_userbyid(t.typowner) AS owner, CASE WHEN t.typcategory = 'C' THEN ++ SELECT c.relkind, n.nspname AS schemaname, t.typname AS typname, t.typcategory, '"' || pg_catalog.pg_get_userbyid(t.typowner) || '"' AS owner, CASE WHEN t.typcategory = 'C' THEN ++ 'CREATE TYPE ' || quote_ident(dest_schema) || '.' || t.typname || ' AS (' || array_to_string(array_agg(a.attname || ' ' || pg_catalog.format_type(a.atttypid, a.atttypmod) ++ ORDER BY c.relname, a.attnum), ', ') || ');' ++ WHEN t.typcategory = 'E' THEN ++ 'CREATE TYPE ' || quote_ident(dest_schema) || '.' || t.typname || ' AS ENUM (' || REPLACE(quote_literal(array_to_string(array_agg(e.enumlabel ORDER BY e.enumsortorder), ',')), ',', ''',''') || ');' ++ ELSE ++ '' ++ END AS type_ddl ++ FROM pg_type t ++ JOIN pg_namespace n ON (n.oid = t.typnamespace) ++ LEFT JOIN pg_enum e ON (t.oid = e.enumtypid) ++ LEFT JOIN pg_class c ON (c.reltype = t.oid) ++ LEFT JOIN pg_attribute a ON (a.attrelid = c.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ AND (c.relkind IS NULL ++ OR c.relkind = 'c') ++ AND t.typcategory IN ('C', 'E') ++ GROUP BY 1, 2, 3, 4, 5 ++ ORDER BY n.nspname, t.typcategory, t.typname ++ + LOOP + BEGIN + cnt := cnt + 1; + -- Keep composite and enum types in separate branches for fine tuning later if needed. + IF arec.typcategory = 'E' THEN +- -- RAISE NOTICE '%', arec.type_ddl; +- IF ddl_only THEN +- RAISE INFO '%', arec.type_ddl; +- ELSE +- EXECUTE arec.type_ddl; +- END IF; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.type_ddl; ++ ++ --issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TYPE % OWNER TO %;', quote_ident(dest_schema) || '.' || arec.typname, arec.owner; ++ END IF; ++ ELSE ++ EXECUTE arec.type_ddl; + +- ELSEIF arec.typcategory = 'C' THEN +- -- RAISE NOTICE '%', arec.type_ddl; +- IF ddl_only THEN ++ --issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ EXECUTE 'ALTER TYPE ' || quote_ident(dest_schema) || '.' || arec.typname || ' OWNER TO ' || arec.owner; ++ END IF; ++ END IF; ++ ELSIF arec.typcategory = 'C' THEN ++ IF bDDLOnly THEN + RAISE INFO '%', arec.type_ddl; ++ --issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TYPE % OWNER TO %;', quote_ident(dest_schema) || '.' || arec.typname, arec.owner; ++ END IF; + ELSE + EXECUTE arec.type_ddl; ++ --issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ EXECUTE 'ALTER TYPE ' || quote_ident(dest_schema) || '.' 
|| arec.typname || ' OWNER TO ' || arec.owner; ++ END IF; + END IF; + ELSE +- RAISE NOTICE 'Unhandled type:%-%', arec.typcategory, arec.typname; ++ RAISE NOTICE ' Unhandled type:%-%', arec.typcategory, arec.typname; + END IF; + END; + END LOOP; +@@ -214,82 +1101,361 @@ + + -- Create sequences + action := 'Sequences'; +- seq_cnt := 0; +- -- TODO: Find a way to make this sequence's owner is the correct table. +- FOR object IN +- SELECT sequence_name::text +- FROM information_schema.sequences +- WHERE sequence_schema = quote_ident(source_schema) ++ ++ cnt := 0; ++ -- fix#63 get from pg_sequences not information_schema ++ -- fix#63 take 2: get it from information_schema.sequences since we need to treat IDENTITY columns differently. ++ -- fix#95 get owner as well by joining to pg_sequences ++ -- fix#106 we can get owner info with pg_class, pg_user/pg_group, and information_schema.sequences, so we can avoid the hit to pg_sequences which is not available in 9.6 ++ FOR object, buffer IN ++ -- Fixed Issue#108: ++ -- SELECT s1.sequence_name::text, s2.sequenceowner FROM information_schema.sequences s1 JOIN pg_sequences s2 ON (s1.sequence_schema = s2.schemaname AND s1.sequence_name = s2.sequencename) AND s1.sequence_schema = quote_ident(source_schema) ++ -- SELECT s.sequence_name::text, '"' || u.usename || '"' as owner FROM information_schema.sequences s JOIN pg_class c ON (s.sequence_name = c.relname AND s.sequence_schema = c.relnamespace::regnamespace::text) JOIN pg_user u ON (c.relowner = u.usesysid) ++ -- WHERE c.relkind = 'S' AND s.sequence_schema = quote_ident(source_schema) ++ -- UNION SELECT s.sequence_name::text, g.groname as owner FROM information_schema.sequences s JOIN pg_class c ON (s.sequence_name = c.relname AND s.sequence_schema = c.relnamespace::regnamespace::text) JOIN pg_group g ON (c.relowner = g.grosysid) ++ -- WHERE c.relkind = 'S' AND s.sequence_schema = quote_ident(source_schema) ++ SELECT sequencename::text, sequenceowner FROM pg_catalog.pg_sequences WHERE schemaname = quote_ident(source_schema) + LOOP +- seq_cnt := seq_cnt + 1; +- IF ddl_only THEN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ -- issue#95 + RAISE INFO '%', 'CREATE SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ';'; ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO '%', 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ' OWNER TO ' || buffer || ';'; ++ END IF; + ELSE + EXECUTE 'CREATE SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object); ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ EXECUTE 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ' OWNER TO ' || buffer; ++ END IF; + END IF; + srctbl := quote_ident(source_schema) || '.' || quote_ident(object); + +- EXECUTE 'SELECT last_value, is_called +- FROM ' || quote_ident(source_schema) || '.' || quote_ident(object) || ';' +- INTO sq_last_value, sq_is_called; +- +- EXECUTE 'SELECT max_value, start_value, increment_by, min_value, cache_size, cycle, data_type +- FROM pg_catalog.pg_sequences WHERE schemaname='|| quote_literal(source_schema) || ' AND sequencename=' || quote_literal(object) || ';' +- INTO sq_max_value, sq_start_value, sq_increment_by, sq_min_value, sq_cache_value, sq_is_cycled, sq_data_type ; ++ IF sq_server_version_num < 100000 THEN ++ EXECUTE 'SELECT last_value, is_called FROM ' || quote_ident(source_schema) || '.' 
|| quote_ident(object) || ';' INTO sq_last_value, sq_is_called; ++ EXECUTE 'SELECT maximum_value, start_value, increment, minimum_value, 1 cache_size, cycle_option, data_type ++ FROM information_schema.sequences WHERE sequence_schema='|| quote_literal(source_schema) || ' AND sequence_name=' || quote_literal(object) || ';' ++ INTO sq_max_value, sq_start_value, sq_increment_by, sq_min_value, sq_cache_value, sq_is_cycled, sq_data_type; ++ IF sq_is_cycled ++ THEN ++ sq_cycled := 'CYCLE'; ++ ELSE ++ sq_cycled := 'NO CYCLE'; ++ END IF; + +- IF sq_is_cycled +- THEN +- sq_cycled := 'CYCLE'; ++ qry := 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) ++ || ' INCREMENT BY ' || sq_increment_by ++ || ' MINVALUE ' || sq_min_value ++ || ' MAXVALUE ' || sq_max_value ++ -- will update current sequence value after this ++ || ' START WITH ' || sq_start_value ++ || ' RESTART ' || sq_min_value ++ || ' CACHE ' || sq_cache_value ++ || ' ' || sq_cycled || ' ;' ; + ELSE +- sq_cycled := 'NO CYCLE'; +- END IF; ++ EXECUTE 'SELECT max_value, start_value, increment_by, min_value, cache_size, cycle, data_type, COALESCE(last_value, 1) ++ FROM pg_catalog.pg_sequences WHERE schemaname='|| quote_literal(source_schema) || ' AND sequencename=' || quote_literal(object) || ';' ++ INTO sq_max_value, sq_start_value, sq_increment_by, sq_min_value, sq_cache_value, sq_is_cycled, sq_data_type, sq_last_value; ++ IF sq_is_cycled ++ THEN ++ sq_cycled := 'CYCLE'; ++ ELSE ++ sq_cycled := 'NO CYCLE'; ++ END IF; + +- qry := 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) +- || ' AS ' || sq_data_type +- || ' INCREMENT BY ' || sq_increment_by +- || ' MINVALUE ' || sq_min_value +- || ' MAXVALUE ' || sq_max_value +- || ' START WITH ' || sq_start_value +- || ' RESTART ' || sq_min_value +- || ' CACHE ' || sq_cache_value +- || ' ' || sq_cycled || ' ;' ; ++ qry := 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) ++ || ' AS ' || sq_data_type ++ || ' INCREMENT BY ' || sq_increment_by ++ || ' MINVALUE ' || sq_min_value ++ || ' MAXVALUE ' || sq_max_value ++ -- will update current sequence value after this ++ || ' START WITH ' || sq_start_value ++ || ' RESTART ' || sq_min_value ++ || ' CACHE ' || sq_cache_value ++ || ' ' || sq_cycled || ' ;' ; ++ END IF; + +- IF ddl_only THEN ++ IF bDDLOnly THEN + RAISE INFO '%', qry; + ELSE + EXECUTE qry; + END IF; + + buffer := quote_ident(dest_schema) || '.' 
|| quote_ident(object); +- IF include_recs THEN ++ IF bData THEN + EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; + ELSE +- if ddl_only THEN +- RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; ++ if bDDLOnly THEN ++ -- fix#63 ++ -- RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; ++ RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; + ELSE +- EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; ++ -- fix#63 ++ -- EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; ++ EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; + END IF; + + END IF; + END LOOP; +- RAISE NOTICE ' SEQUENCES cloned: %', LPAD(seq_cnt::text, 5, ' '); ++ RAISE NOTICE ' SEQUENCES cloned: %', LPAD(cnt::text, 5, ' '); ++ + +--- Create tables ++ -- Create tables including partitioned ones (parent/children) and unlogged ones. Order by is critical since child partition range logic is dependent on it. + action := 'Tables'; +- cnt := 0; +- FOR object IN +- SELECT TABLE_NAME::text +- FROM information_schema.tables +- WHERE table_schema = quote_ident(source_schema) +- AND table_type = 'BASE TABLE' ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name='search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path=%', v_dummy; END IF; + ++ cnt := 0; ++ -- Issue#61 FIX: use set_config for empty string ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: setting search_path to empty string:%', v_dummy; END IF; ++ -- Fix#86 add isgenerated to column list ++ -- Fix#91 add tblowner for setting the table ownership to that of the source ++ -- Fix#99 added join to pg_tablespace ++ ++ -- Handle PG versions greater than last major/minor version of PG 9.6.24 ++ IF sq_server_version_num > 90624 THEN ++ FOR tblname, relpersist, bRelispart, relknd, data_type, udt_name, udt_schema, ocomment, l_child, isGenerated, tblowner, tblspace IN ++ -- 2021-03-08 MJV #39 fix: change sql to get indicator of user-defined columns to issue warnings ++ -- select c.relname, c.relpersistence, c.relispartition, c.relkind ++ -- FROM pg_class c, pg_namespace n where n.oid = c.relnamespace and n.nspname = quote_ident(source_schema) and c.relkind in ('r','p') and ++ -- order by c.relkind desc, c.relname ++ --Fix#65 add another left join to distinguish child tables by inheritance ++ -- Fix#86 add is_generated to column select ++ -- Fix#91 add tblowner to the select ++ -- Fix#105 need a different kinda distinct to avoid retrieving a table twice in the case of a table with multiple USER-DEFINED datatypes using DISTINCT ON instead of just DISTINCT ++ --SELECT DISTINCT c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ -- COALESCE(co.is_generated, ''), pg_catalog.pg_get_userbyid(c.relowner) as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace ++ -- fixed #108 by enclosing owner in double quotes to avoid errors for bad characters like #.@... 
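++ -- Note: DISTINCT ON is PostgreSQL-specific: it keeps only the first row of each set of rows sharing the
++ -- listed expressions, with "first" decided by ORDER BY; here (per Fix#105 above) it stops a table from
++ -- being listed once per USER-DEFINED column. Self-contained illustration, unrelated to this patch:
++ -- SELECT DISTINCT ON (tablename) tablename, indexname FROM pg_indexes ORDER BY tablename, indexname;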
++ -- SELECT DISTINCT ON (c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type) c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ SELECT DISTINCT ON (c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type) c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ COALESCE(co.is_generated, ''), '"' || pg_catalog.pg_get_userbyid(c.relowner) || '"' as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace ++ FROM pg_class c ++ JOIN pg_namespace n ON (n.oid = c.relnamespace ++ AND n.nspname = quote_ident(source_schema) ++ AND c.relkind IN ('r', 'p')) ++ LEFT JOIN information_schema.columns co ON (co.table_schema = n.nspname ++ AND co.table_name = c.relname ++ AND (co.data_type = 'USER-DEFINED' OR co.is_generated = 'ALWAYS')) ++ LEFT JOIN pg_inherits i ON (c.oid = i.inhrelid) ++ -- issue#99 added join ++ LEFT JOIN pg_tablespace ts ON (c.reltablespace = ts.oid) ++ ORDER BY c.relkind DESC, c.relname + LOOP + cnt := cnt + 1; +- buffer := quote_ident(dest_schema) || '.' || quote_ident(object); +- IF ddl_only THEN +- RAISE INFO '%', 'CREATE TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(object) || ' INCLUDING ALL)'; ++ lastsql = ''; ++ IF l_child IS NULL THEN ++ bChild := False; + ELSE +- EXECUTE 'CREATE TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(object) || ' INCLUDING ALL)'; ++ bChild := True; ++ END IF; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: TABLE START --> table=% bRelispart=% relkind=% bChild=%',tblname, bRelispart, relknd, bChild; END IF; ++ ++ IF data_type = 'USER-DEFINED' THEN ++ -- RAISE NOTICE ' Table (%) has column(s) with user-defined types so using get_table_ddl() instead of CREATE TABLE LIKE construct.',tblname; ++ cnt :=cnt; ++ END IF; ++ buffer := quote_ident(dest_schema) || '.' || quote_ident(tblname); ++ buffer2 := ''; ++ IF relpersist = 'u' THEN ++ buffer2 := 'UNLOGGED '; ++ END IF; ++ IF relknd = 'r' THEN ++ IF bDDLOnly THEN ++ IF data_type = 'USER-DEFINED' THEN ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ RAISE INFO '%', buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; ++ ELSE ++ IF NOT bChild THEN ++ RAISE INFO '%', 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL);'; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; ++ ++ -- issue#99 ++ IF tblspace <> 'pg_default' THEN ++ -- replace with user-defined tablespace ++ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; ++ RAISE INFO 'ALTER TABLE IF EXISTS % SET TABLESPACE %;', quote_ident(dest_schema) || '.' 
|| tblname, tblspace; ++ END IF; ++ ELSE ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ RAISE INFO '%', buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; ++ END IF; ++ END IF; ++ ELSE ++ IF data_type = 'USER-DEFINED' THEN ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef01:%', buffer3; END IF; ++ -- #82: Table def should be fully qualified with target schema, ++ -- so just make search path = public to handle extension types that should reside in public schema ++ v_dummy = 'public'; ++ SELECT set_config('search_path', v_dummy, false) into v_dummy; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ELSE ++ IF (NOT bChild OR bRelispart) THEN ++ buffer3 := 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL)'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef02:%', buffer3; END IF; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ++ -- issue#99 ++ IF tblspace <> 'pg_default' THEN ++ -- replace with user-defined tablespace ++ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' SET TABLESPACE ' || tblspace; ++ EXECUTE buffer3; ++ END IF; ++ ++ ELSE ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ -- set client_min_messages higher to avoid messages like this: ++ -- NOTICE: merging column "city_id" with inherited definition ++ set client_min_messages = 'WARNING'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef03:%', buffer3; END IF; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' 
|| tblname || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ++ -- reset it back, only get these for inheritance-based tables ++ set client_min_messages = 'notice'; ++ END IF; ++ END IF; ++ -- Add table comment. ++ IF ocomment IS NOT NULL THEN ++ EXECUTE 'COMMENT ON TABLE ' || buffer || ' IS ' || quote_literal(ocomment); ++ END IF; ++ END IF; ++ ELSIF relknd = 'p' THEN ++ -- define parent table and assume child tables have already been created based on top level sort order. ++ -- Issue #103 Put the complex query into its own function, get_table_ddl_complex() ++ SELECT * INTO qry FROM public.get_table_ddl_complex(source_schema, dest_schema, tblname, sq_server_version_num); ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 - %', buffer; END IF; ++ ++ -- consider replacing complicated query above with this simple call to get_table_ddl()... ++ -- SELECT * INTO qry FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ -- qry := REPLACE(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || quote_ident(tblname), tblowner; ++ END IF; ++ ELSE ++ -- Issue#103: we need to always set search_path priority to target schema when we execute DDL ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 context: old search path=% new search path=% current search path=%', src_path_old, src_path_new, v_dummy; END IF; ++ SELECT setting INTO spath_tmp FROM pg_settings WHERE name = 'search_path'; ++ IF spath_tmp <> dest_schema THEN ++ -- change it to target schema and don't forget to change it back after we execute the DDL ++ spath = 'SET search_path = "' || dest_schema || '"'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: changing search_path --> %', spath; END IF; ++ EXECUTE spath; ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed to %', v_dummy; END IF; ++ END IF; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04:%', qry; END IF; ++ EXECUTE qry; ++ ++ -- Issue#103 ++ -- Set search path back to what it was ++ spath = 'SET search_path = "' || spath_tmp || '"'; ++ EXECUTE spath; ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed back to %', v_dummy; END IF; ++ ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ++ END IF; ++ -- loop for child tables and alter them to attach to parent for specific partition method. ++ -- Issue#103 fix: only loop for the table we are currently processing, tblname! ++ FOR aname, part_range, object IN ++ SELECT quote_ident(dest_schema) || '.' || c1.relname as tablename, pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) as partrange, quote_ident(dest_schema) || '.' || c2.relname as object ++ FROM pg_catalog.pg_class c1, pg_namespace n, pg_catalog.pg_inherits i, pg_class c2 ++ WHERE n.nspname = quote_ident(source_schema) AND c1.relnamespace = n.oid AND c1.relkind = 'r' ++ -- Issue#103: added this condition to only work on current partitioned table. 
The problem was regression testing previously only worked on one partition table clone case ++ AND c2.relname = tblname AND ++ c1.relispartition AND c1.oid=i.inhrelid AND i.inhparent = c2.oid AND c2.relnamespace = n.oid ORDER BY pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) = 'DEFAULT', ++ c1.oid::pg_catalog.regclass::pg_catalog.text ++ LOOP ++ qry := 'ALTER TABLE ONLY ' || object || ' ATTACH PARTITION ' || aname || ' ' || part_range || ';'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: %',qry; END IF; ++ -- issue#91, not sure if we need to do this for child tables ++ -- issue#95 we dont set ownership here ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ IF NOT bNoOwner THEN ++ NULL; ++ END IF; ++ ELSE ++ EXECUTE qry; ++ IF NOT bNoOwner THEN ++ NULL; ++ END IF; ++ END IF; ++ END LOOP; + END IF; + + -- INCLUDING ALL creates new index names, we restore them to the old name. +@@ -300,171 +1466,975 @@ + WHERE old.schemaname = source_schema + AND new.schemaname = dest_schema + AND old.tablename = new.tablename +- AND old.tablename = object ++ AND old.tablename = tblname + AND old.indexname <> new.indexname + AND regexp_replace(old.indexdef, E'.*USING','') = regexp_replace(new.indexdef, E'.*USING','') +- ORDER BY old.indexname, new.indexname ++ ORDER BY old.indexdef, new.indexdef + LOOP +- IF ddl_only THEN ++ IF bDDLOnly THEN + RAISE INFO '%', 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; + ELSE +- EXECUTE 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; ++ -- The SELECT query above may return duplicate names when a column is ++ -- indexed twice the same manner with 2 different names. Therefore, to ++ -- avoid a 'relation "xxx" already exists' we test if the index name ++ -- is in use or free. Skipping existing index will fallback on unused ++ -- ones and every duplicate will be mapped to distinct old names. ++ IF NOT EXISTS ( ++ SELECT TRUE ++ FROM pg_indexes ++ WHERE schemaname = dest_schema ++ AND tablename = tblname ++ AND indexname = quote_ident(ix_old_name)) ++ AND EXISTS ( ++ SELECT TRUE ++ FROM pg_indexes ++ WHERE schemaname = dest_schema ++ AND tablename = tblname ++ AND indexname = quote_ident(ix_new_name)) ++ THEN ++ EXECUTE 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; ++ END IF; + END IF; + END LOOP; + +- records_count := 0; +- IF include_recs +- THEN ++ lastsql = ''; ++ IF bData THEN + -- Insert records from source table +- RAISE NOTICE 'Populating cloned table, %', buffer; +- EXECUTE 'INSERT INTO ' || buffer || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(object) || ';'; +- +- -- restart the counter for PK's internal identity sequence +- EXECUTE 'SELECT count(*) FROM ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ';' INTO records_count; +- FOR column_ IN +- SELECT column_name::text +- FROM information_schema.columns +- WHERE +- table_schema = dest_schema AND +- table_name = object AND +- is_identity = 'YES' +- LOOP +- EXECUTE 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ' ALTER COLUMN ' || quote_ident(column_) || ' RESTART WITH ' || records_count + 1 || ';'; +- END LOOP; ++ ++ -- 2021-03-03 MJV FIX ++ buffer := quote_ident(dest_schema) || '.' 
|| quote_ident(tblname); ++ ++ -- 2020/06/18 - Issue #31 fix: add "OVERRIDING SYSTEM VALUE" for IDENTITY columns marked as GENERATED ALWAYS. ++ select count(*) into cnt2 from pg_class c, pg_attribute a, pg_namespace n ++ where a.attrelid = c.oid and c.relname = quote_ident(tblname) and n.oid = c.relnamespace and n.nspname = quote_ident(source_schema) and a.attidentity = 'a'; ++ buffer3 := ''; ++ IF cnt2 > 0 THEN ++ buffer3 := ' OVERRIDING SYSTEM VALUE'; ++ END IF; ++ -- BUG for inserting rows from tables with user-defined columns ++ -- INSERT INTO sample_clone.address OVERRIDING SYSTEM VALUE SELECT * FROM sample.address; ++ -- ERROR: column "id2" is of type sample_clone.udt_myint but expression is of type udt_myint ++ ++ -- Issue#86 fix: ++ -- IF data_type = 'USER-DEFINED' THEN ++ IF bDebug THEN RAISE NOTICE 'DEBUG: includerecs branch table=% data_type=% isgenerated=% buffer3=%', tblname, data_type, isGenerated, buffer3; END IF; ++ IF data_type = 'USER-DEFINED' OR isGenerated = 'ALWAYS' THEN ++ ++ -- RAISE WARNING 'Bypassing copying rows for table (%) with user-defined data types. You must copy them manually.', tblname; ++ -- won't work --> INSERT INTO clone1.address (id2, id3, addr) SELECT cast(id2 as clone1.udt_myint), cast(id3 as clone1.udt_myint), addr FROM sample.address; ++ -- Issue#101 --> INSERT INTO clone1.address2 (id2, id3, addr) SELECT id2::text::clone1.udt_myint, id3::text::clone1.udt_myint, addr FROM sample.address; ++ ++ -- Issue#79 implementation follows ++ -- COPY sample.statuses(id, s) TO '/tmp/statuses.txt' WITH DELIMITER AS ','; ++ -- COPY sample_clone1.statuses FROM '/tmp/statuses.txt' (DELIMITER ',', NULL ''); ++ -- Issue#101 fix: use text cast to get around the problem. ++ IF bFileCopy THEN ++ IF bWindows THEN ++ buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''C:\WINDOWS\TEMP\cloneschema.tmp'' WITH DELIMITER AS '','';'; ++ tblarray2 := tblarray2 || buffer2; ++ -- Issue #81 reformat COPY command for upload ++ -- buffer2:= 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL '''');'; ++ buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);'; ++ tblarray2 := tblarray2 || buffer2; ++ ELSE ++ buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''/tmp/cloneschema.tmp'' WITH DELIMITER AS '','';'; ++ tblarray2 := tblarray2 || buffer2; ++ -- Issue #81 reformat COPY command for upload ++ -- buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL '''');'; ++ -- works--> COPY sample.timestamptbl2 FROM '/tmp/cloneschema.tmp' WITH (DELIMITER ',', NULL '\N', FORMAT CSV) ; ++ buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' 
|| quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);'; ++ tblarray2 := tblarray2 || buffer2; ++ END IF; ++ ELSE ++ -- Issue#101: assume direct copy with text cast, add to separate array ++ SELECT * INTO buffer3 FROM public.get_insert_stmt_ddl(quote_ident(source_schema), quote_ident(dest_schema), quote_ident(tblname), True); ++ tblarray3 := tblarray3 || buffer3; ++ END IF; ++ ELSE ++ -- bypass child tables since we populate them when we populate the parents ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tblname=% bRelispart=% relknd=% l_child=% bChild=%', tblname, bRelispart, relknd, l_child, bChild; END IF; ++ IF NOT bRelispart AND NOT bChild THEN ++ -- Issue#75: Must defer population of tables until child tables have been added to parents ++ -- Issue#101 Offer alternative of copy to/from file. Although originally intended for tables with UDTs, it is now expanded to handle all cases for performance improvement perhaps for large tables. ++ -- Issue#106 buffer3 shouldn't be in the mix ++ -- revisited: buffer3 should be in play for PG versions that handle IDENTITIES ++ buffer2 := 'INSERT INTO ' || buffer || buffer3 || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ';'; ++ -- buffer2 := 'INSERT INTO ' || buffer || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ';'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: buffer2=%',buffer2; END IF; ++ IF bFileCopy THEN ++ tblarray2:= tblarray2 || buffer2; ++ ELSE ++ tblarray := tblarray || buffer2; ++ END IF; ++ END IF; ++ END IF; + END IF; + +- SET search_path = ''; ++ -- Issue#61 FIX: use set_config for empty string ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ + FOR column_, default_ IN + SELECT column_name::text, +- REPLACE(column_default::text, source_schema, dest_schema) +- FROM information_schema.COLUMNS +- WHERE table_schema = source_schema +- AND TABLE_NAME = object +- AND column_default LIKE 'nextval(%' || quote_ident(source_schema) || '%::regclass)' ++ REPLACE(column_default::text, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') ++ FROM information_schema.COLUMNS ++ WHERE table_schema = source_schema ++ AND TABLE_NAME = tblname ++ AND column_default LIKE 'nextval(%' || quote_ident(source_schema) || '%::regclass)' + LOOP +- IF ddl_only THEN ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on column name ++ buffer2 = 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || quote_ident(column_) || ' SET DEFAULT ' || default_ || ';'; ++ IF bDDLOnly THEN + -- May need to come back and revisit this since previous sql will not return anything since no schema as created! +- RAISE INFO '%', 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || column_ || ' SET DEFAULT ' || default_ || ';'; ++ RAISE INFO '%', buffer2; + ELSE +- EXECUTE 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || column_ || ' SET DEFAULT ' || default_; ++ EXECUTE buffer2; + END IF; + END LOOP; +- EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; + ++ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; + END LOOP; +- RAISE NOTICE ' TABLES cloned: %', LPAD(cnt::text, 5, ' '); +- +- -- add FK constraint +- action := 'FK Constraints'; +- cnt := 0; +- SET search_path = ''; +- FOR qry IN +- SELECT 'ALTER TABLE ' || quote_ident(dest_schema) || '.' 
|| quote_ident(rn.relname) +- || ' ADD CONSTRAINT ' || quote_ident(ct.conname) || ' ' || REPLACE(pg_get_constraintdef(ct.oid), 'REFERENCES ' ||quote_ident(source_schema), 'REFERENCES ' || quote_ident(dest_schema)) || ';' +- FROM pg_constraint ct +- JOIN pg_class rn ON rn.oid = ct.conrelid +- WHERE connamespace = src_oid +- AND rn.relkind = 'r' +- AND ct.contype = 'f' ++ ELSE ++ -- Handle 9.6 versions 90600 ++ FOR tblname, relpersist, relknd, data_type, udt_name, udt_schema, ocomment, l_child, isGenerated, tblowner, tblspace IN ++ -- 2021-03-08 MJV #39 fix: change sql to get indicator of user-defined columns to issue warnings ++ -- select c.relname, c.relpersistence, c.relispartition, c.relkind ++ -- FROM pg_class c, pg_namespace n where n.oid = c.relnamespace and n.nspname = quote_ident(source_schema) and c.relkind in ('r','p') and ++ -- order by c.relkind desc, c.relname ++ --Fix#65 add another left join to distinguish child tables by inheritance ++ -- Fix#86 add is_generated to column select ++ -- Fix#91 add tblowner to the select ++ -- Fix#105 need a different kinda distinct to avoid retrieving a table twice in the case of a table with multiple USER-DEFINED datatypes using DISTINCT ON instead of just DISTINCT ++ -- Fixed Issue#108: double quote roles to avoid problems with special characters in OWNER TO statements ++ --SELECT DISTINCT c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ -- COALESCE(co.is_generated, ''), pg_catalog.pg_get_userbyid(c.relowner) as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace ++ -- SELECT DISTINCT ON (c.relname, c.relpersistence, c.relkind, co.data_type) c.relname, c.relpersistence, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ -- COALESCE(co.is_generated, ''), pg_catalog.pg_get_userbyid(c.relowner) as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace ++ SELECT DISTINCT ON (c.relname, c.relpersistence, c.relkind, co.data_type) c.relname, c.relpersistence, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, ++ COALESCE(co.is_generated, ''), '"' || pg_catalog.pg_get_userbyid(c.relowner) || '"' as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace ++ FROM pg_class c ++ JOIN pg_namespace n ON (n.oid = c.relnamespace ++ AND n.nspname = quote_ident(source_schema) ++ AND c.relkind IN ('r', 'p')) ++ LEFT JOIN information_schema.columns co ON (co.table_schema = n.nspname ++ AND co.table_name = c.relname ++ AND (co.data_type = 'USER-DEFINED' OR co.is_generated = 'ALWAYS')) ++ LEFT JOIN pg_inherits i ON (c.oid = i.inhrelid) ++ -- issue#99 added join ++ LEFT JOIN pg_tablespace ts ON (c.reltablespace = ts.oid) ++ ORDER BY c.relkind DESC, c.relname + LOOP + cnt := cnt + 1; +- IF ddl_only THEN +- RAISE INFO '%', qry; ++ IF l_child IS NULL THEN ++ bChild := False; + ELSE +- EXECUTE qry; ++ bChild := True; + END IF; +- END LOOP; +- EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; +- RAISE NOTICE ' FKEYS cloned: %', LPAD(cnt::text, 5, ' '); +- +--- Create views +- action := 'Views'; +- cnt := 0; +- FOR object IN +- SELECT table_name::text, +- view_definition +- FROM information_schema.views +- WHERE table_schema = quote_ident(source_schema) +- +- LOOP +- cnt := cnt + 1; +- buffer := quote_ident(dest_schema) || '.' 
|| quote_ident(object); +- SELECT view_definition INTO v_def +- FROM information_schema.views +- WHERE table_schema = quote_ident(source_schema) +- AND table_name = quote_ident(object); ++ IF bDebug THEN RAISE NOTICE 'DEBUG: TABLE START --> table=% bRelispart=NA relkind=% bChild=%',tblname, relknd, bChild; END IF; + +- IF ddl_only THEN +- RAISE INFO '%', 'CREATE OR REPLACE VIEW ' || buffer || ' AS ' || v_def || ';' ; +- ELSE +- EXECUTE 'CREATE OR REPLACE VIEW ' || buffer || ' AS ' || v_def || ';' ; ++ IF data_type = 'USER-DEFINED' THEN ++ -- RAISE NOTICE ' Table (%) has column(s) with user-defined types so using get_table_ddl() instead of CREATE TABLE LIKE construct.',tblname; ++ cnt :=cnt; + END IF; +- END LOOP; +- RAISE NOTICE ' VIEWS cloned: %', LPAD(cnt::text, 5, ' '); +- +- -- Create Materialized views +- action := 'Mat. Views'; +- cnt := 0; +- FOR object IN +- SELECT matviewname::text, +- definition +- FROM pg_catalog.pg_matviews +- WHERE schemaname = quote_ident(source_schema) +- +- LOOP +- cnt := cnt + 1; +- buffer := dest_schema || '.' || quote_ident(object); +- SELECT replace(definition,';','') INTO v_def +- FROM pg_catalog.pg_matviews +- WHERE schemaname = quote_ident(source_schema) +- AND matviewname = quote_ident(object); +- +- IF include_recs THEN +- EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || v_def || ';' ; +- ELSE +- IF ddl_only THEN +- RAISE INFO '%', 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || v_def || ' WITH NO DATA;' ; +- ELSE +- EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || v_def || ' WITH NO DATA;' ; +- END IF; ++ buffer := quote_ident(dest_schema) || '.' || quote_ident(tblname); ++ buffer2 := ''; ++ IF relpersist = 'u' THEN ++ buffer2 := 'UNLOGGED '; ++ END IF; ++ IF relknd = 'r' THEN ++ IF bDDLOnly THEN ++ IF data_type = 'USER-DEFINED' THEN ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ RAISE INFO '%', buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; ++ ELSE ++ IF NOT bChild THEN ++ RAISE INFO '%', 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL);'; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; + +- END IF; ++ -- issue#99 ++ IF tblspace <> 'pg_default' THEN ++ -- replace with user-defined tablespace ++ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; ++ RAISE INFO 'ALTER TABLE IF EXISTS % SET TABLESPACE %;', quote_ident(dest_schema) || '.' 
|| tblname, tblspace; ++ END IF; ++ ELSE ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ RAISE INFO '%', buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; ++ END IF; ++ END IF; ++ END IF; ++ ELSE ++ IF data_type = 'USER-DEFINED' THEN ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef01:%', buffer3; END IF; ++ -- #82: Table def should be fully qualified with target schema, ++ -- so just make search path = public to handle extension types that should reside in public schema ++ v_dummy = 'public'; ++ SELECT set_config('search_path', v_dummy, false) into v_dummy; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ELSE ++ IF (NOT bChild) THEN ++ buffer3 := 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL)'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef02:%', buffer3; END IF; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ++ -- issue#99 ++ IF tblspace <> 'pg_default' THEN ++ -- replace with user-defined tablespace ++ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' SET TABLESPACE ' || tblspace; ++ EXECUTE buffer3; ++ END IF; ++ ++ ELSE ++ -- FIXED #65, #67 ++ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); ++ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ ++ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ -- set client_min_messages higher to avoid messages like this: ++ -- NOTICE: merging column "city_id" with inherited definition ++ set client_min_messages = 'WARNING'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef03:%', buffer3; END IF; ++ EXECUTE buffer3; ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' 
|| tblname || ' OWNER TO ' || tblowner; ++ lastsql = buffer3; ++ EXECUTE buffer3; ++ END IF; ++ ++ -- reset it back, only get these for inheritance-based tables ++ set client_min_messages = 'notice'; ++ END IF; ++ END IF; ++ -- Add table comment. ++ IF ocomment IS NOT NULL THEN ++ EXECUTE 'COMMENT ON TABLE ' || buffer || ' IS ' || quote_literal(ocomment); ++ END IF; ++ END IF; ++ ELSIF relknd = 'p' THEN ++ -- define parent table and assume child tables have already been created based on top level sort order. ++ -- Issue #103 Put the complex query into its own function, get_table_ddl_complex() ++ SELECT * INTO qry FROM public.get_table_ddl_complex(source_schema, dest_schema, tblname, sq_server_version_num); ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 - %', buffer; END IF; ++ ++ -- consider replacing complicated query above with this simple call to get_table_ddl()... ++ -- SELECT * INTO qry FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); ++ -- qry := REPLACE(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || quote_ident(tblname), tblowner; ++ END IF; ++ ELSE ++ -- Issue#103: we need to always set search_path priority to target schema when we execute DDL ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 context: old search path=% new search path=% current search path=%', src_path_old, src_path_new, v_dummy; END IF; ++ SELECT setting INTO spath_tmp FROM pg_settings WHERE name = 'search_path'; ++ IF spath_tmp <> dest_schema THEN ++ -- change it to target schema and don't forget to change it back after we execute the DDL ++ spath = 'SET search_path = "' || dest_schema || '"'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: changing search_path --> %', spath; END IF; ++ EXECUTE spath; ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed to %', v_dummy; END IF; ++ END IF; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04:%', qry; END IF; ++ EXECUTE qry; ++ ++ -- Issue#103 ++ -- Set search path back to what it was ++ spath = 'SET search_path = "' || spath_tmp || '"'; ++ EXECUTE spath; ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed back to %', v_dummy; END IF; ++ ++ -- issue#91 fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; ++ EXECUTE buffer3; ++ END IF; ++ ++ END IF; ++ -- loop for child tables and alter them to attach to parent for specific partition method. ++ -- Issue#103 fix: only loop for the table we are currently processing, tblname! ++ FOR aname, part_range, object IN ++ SELECT quote_ident(dest_schema) || '.' || c1.relname as tablename, pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) as partrange, quote_ident(dest_schema) || '.' || c2.relname as object ++ FROM pg_catalog.pg_class c1, pg_namespace n, pg_catalog.pg_inherits i, pg_class c2 ++ WHERE n.nspname = quote_ident(source_schema) AND c1.relnamespace = n.oid AND c1.relkind = 'r' ++ -- Issue#103: added this condition to only work on current partitioned table. 
The problem was that regression testing previously covered only the single-partition-table clone case.
++          AND c2.relname = tblname AND
++          c1.relispartition AND c1.oid=i.inhrelid AND i.inhparent = c2.oid AND c2.relnamespace = n.oid ORDER BY pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) = 'DEFAULT',
++          c1.oid::pg_catalog.regclass::pg_catalog.text
++        LOOP
++          qry := 'ALTER TABLE ONLY ' || object || ' ATTACH PARTITION ' || aname || ' ' || part_range || ';';
++          IF bDebug THEN RAISE NOTICE 'DEBUG: %', qry; END IF;
++          -- issue#91, not sure if we need to do this for child tables
++          -- issue#95 we don't set ownership here
++          IF bDDLOnly THEN
++            RAISE INFO '%', qry;
++            IF NOT bNoOwner THEN
++              NULL;
++            END IF;
++          ELSE
++            EXECUTE qry;
++            IF NOT bNoOwner THEN
++              NULL;
++            END IF;
++          END IF;
++        END LOOP;
++      END IF;
++
++      -- INCLUDING ALL creates new index names, we restore them to the old name.
++      -- There should be no conflicts since they live in different schemas
++      FOR ix_old_name, ix_new_name IN
++        SELECT old.indexname, new.indexname
++        FROM pg_indexes old, pg_indexes new
++        WHERE old.schemaname = source_schema
++          AND new.schemaname = dest_schema
++          AND old.tablename = new.tablename
++          AND old.tablename = tblname
++          AND old.indexname <> new.indexname
++          AND regexp_replace(old.indexdef, E'.*USING','') = regexp_replace(new.indexdef, E'.*USING','')
++        ORDER BY old.indexdef, new.indexdef
++      LOOP
++        lastsql = '';
++        IF bDDLOnly THEN
++          RAISE INFO '%', 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';';
++        ELSE
++          -- The SELECT query above may return duplicate names when a column is
++          -- indexed twice in the same manner under 2 different names. Therefore, to
++          -- avoid a 'relation "xxx" already exists' error we test whether the index name
++          -- is in use or free. Skipping existing indexes falls back on unused
++          -- ones, so every duplicate is mapped to a distinct old name.
++          IF NOT EXISTS (
++            SELECT TRUE
++            FROM pg_indexes
++            WHERE schemaname = dest_schema
++              AND tablename = tblname
++              AND indexname = quote_ident(ix_old_name))
++            AND EXISTS (
++            SELECT TRUE
++            FROM pg_indexes
++            WHERE schemaname = dest_schema
++              AND tablename = tblname
++              AND indexname = quote_ident(ix_new_name))
++          THEN
++            EXECUTE 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';';
++          END IF;
++        END IF;
++      END LOOP;
+
++      IF bData THEN
++        -- Insert records from source table
++
++        -- 2021-03-03 MJV FIX
++        buffer := quote_ident(dest_schema) || '.' || quote_ident(tblname);
++
++        -- Issue#86 fix:
++        -- IF data_type = 'USER-DEFINED' THEN
++        IF bDebug THEN RAISE NOTICE 'DEBUG: includerecs branch table=% data_type=% isgenerated=%', tblname, data_type, isGenerated; END IF;
++        IF data_type = 'USER-DEFINED' OR isGenerated = 'ALWAYS' THEN
++
++          -- RAISE WARNING 'Bypassing copying rows for table (%) with user-defined data types.
You must copy them manually.', tblname; ++ -- won't work --> INSERT INTO clone1.address (id2, id3, addr) SELECT cast(id2 as clone1.udt_myint), cast(id3 as clone1.udt_myint), addr FROM sample.address; ++ -- Issue#101 --> INSERT INTO clone1.address2 (id2, id3, addr) SELECT id2::text::clone1.udt_myint, id3::text::clone1.udt_myint, addr FROM sample.address; ++ ++ -- Issue#79 implementation follows ++ -- COPY sample.statuses(id, s) TO '/tmp/statuses.txt' WITH DELIMITER AS ','; ++ -- COPY sample_clone1.statuses FROM '/tmp/statuses.txt' (DELIMITER ',', NULL ''); ++ -- Issue#101 fix: use text cast to get around the problem. ++ IF bFileCopy THEN ++ IF bWindows THEN ++ buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''C:\WINDOWS\TEMP\cloneschema.tmp'' WITH DELIMITER AS '','';'; ++ tblarray2 := tblarray2 || buffer2; ++ -- Issue #81 reformat COPY command for upload ++ -- buffer2:= 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL '''');'; ++ buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);'; ++ tblarray2 := tblarray2 || buffer2; ++ ELSE ++ buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''/tmp/cloneschema.tmp'' WITH DELIMITER AS '','';'; ++ tblarray2 := tblarray2 || buffer2; ++ -- Issue #81 reformat COPY command for upload ++ -- buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL '''');'; ++ -- works--> COPY sample.timestamptbl2 FROM '/tmp/cloneschema.tmp' WITH (DELIMITER ',', NULL '\N', FORMAT CSV) ; ++ buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);'; ++ tblarray2 := tblarray2 || buffer2; ++ END IF; ++ ELSE ++ -- Issue#101: assume direct copy with text cast, add to separate array ++ SELECT * INTO buffer3 FROM public.get_insert_stmt_ddl(quote_ident(source_schema), quote_ident(dest_schema), quote_ident(tblname), True); ++ tblarray3 := tblarray3 || buffer3; ++ END IF; ++ ELSE ++ -- bypass child tables since we populate them when we populate the parents ++ IF bDebug THEN RAISE NOTICE 'DEBUG: tblname=% bRelispart=NA relknd=% l_child=% bChild=%', tblname, relknd, l_child, bChild; END IF; ++ ++ IF NOT bChild THEN ++ -- Issue#75: Must defer population of tables until child tables have been added to parents ++ -- Issue#101 Offer alternative of copy to/from file. Although originally intended for tables with UDTs, it is now expanded to handle all cases for performance improvement perhaps for large tables. ++ -- buffer2 := 'INSERT INTO ' || buffer || buffer3 || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ';'; ++ buffer2 := 'INSERT INTO ' || buffer || ' SELECT * FROM ' || quote_ident(source_schema) || '.' 
|| quote_ident(tblname) || ';';
++            IF bDebug THEN RAISE NOTICE 'DEBUG: buffer2=%', buffer2; END IF;
++            IF bFileCopy THEN
++              tblarray2 := tblarray2 || buffer2;
++            ELSE
++              tblarray := tblarray || buffer2;
++            END IF;
++          END IF;
++        END IF;
++      END IF;
++
++      -- Issue#61 FIX: use set_config for empty string
++      -- SET search_path = '';
++      SELECT set_config('search_path', '', false) into v_dummy;
++
++      FOR column_, default_ IN
++        SELECT column_name::text,
++          REPLACE(column_default::text, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.')
++        FROM information_schema.COLUMNS
++        WHERE table_schema = source_schema
++          AND TABLE_NAME = tblname
++          AND column_default LIKE 'nextval(%' || quote_ident(source_schema) || '%::regclass)'
++      LOOP
++        -- Issue#78 FIX: handle case-sensitive names with quote_ident() on column name
++        buffer2 = 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || quote_ident(column_) || ' SET DEFAULT ' || default_ || ';';
++        IF bDDLOnly THEN
++          -- May need to come back and revisit this, since the previous sql will not return anything because no schema was created!
++          RAISE INFO '%', buffer2;
++        ELSE
++          EXECUTE buffer2;
++        END IF;
+      END LOOP;
+-      RAISE NOTICE ' MAT VIEWS cloned: %', LPAD(cnt::text, 5, ' ');
+
+--- Create functions
+- action := 'Functions';
++      EXECUTE 'SET search_path = ' || quote_ident(source_schema) ;
++    END LOOP;
++    END IF;
++    -- end of 90600 branch
++
++    RAISE NOTICE ' TABLES cloned: %', LPAD(cnt::text, 5, ' ');
++
++    SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path';
++    IF bDebug THEN RAISE NOTICE 'DEBUG: search_path=%', v_dummy; END IF;
++
++    -- Assigning sequences to table columns.
++    action := 'Sequences assigning';
++    cnt := 0;
++    FOR object IN
++      SELECT sequence_name::text
++      FROM information_schema.sequences
++      WHERE sequence_schema = quote_ident(source_schema)
++    LOOP
++      cnt := cnt + 1;
++      srctbl := quote_ident(source_schema) || '.' || quote_ident(object);
++
++      -- Get the owning column, inspired by Sadique Ali's post at:
++      -- https://sadique.io/blog/2019/05/07/viewing-sequence-ownership-information-in-postgres/
++      -- Fixed via pull request #109
++      SELECT ' OWNED BY '
++        || quote_ident(dest_schema)
++        || '.'
++        || quote_ident(dc.relname)
++        || '.'
++        || quote_ident(a.attname)
++      INTO sq_owned
++      FROM pg_class AS c
++        JOIN pg_namespace n ON c.relnamespace = n.oid
++        JOIN pg_depend AS d ON c.relfilenode = d.objid
++        JOIN pg_class AS dc ON (
++          d.refobjid = dc.relfilenode
++          AND dc.relnamespace = n.oid
++        )
++        JOIN pg_attribute AS a ON (
++          a.attnum = d.refobjsubid
++          AND a.attrelid = d.refobjid
++        )
++      WHERE n.nspname = quote_ident(source_schema)
++        AND c.relkind = 'S'
++        AND c.relname = object;
++
++      IF sq_owned IS NOT NULL THEN
++        qry := 'ALTER SEQUENCE '
++          || quote_ident(dest_schema)
++          || '.'
++          || quote_ident(object)
++          || sq_owned
++          || ';';
++
++        IF bDDLOnly THEN
++          IF bDebug THEN RAISE NOTICE 'DEBUG: %', qry; END IF;
++          RAISE INFO '%', qry;
++        ELSE
++          EXECUTE qry;
++        END IF;
++
++      END IF;
++
++    END LOOP;
++    RAISE NOTICE ' SEQUENCES set: %', LPAD(cnt::text, 5, ' ');
++
++    -- Update IDENTITY sequences to the last value, bypass 9.6 versions
++    IF sq_server_version_num > 90624 THEN
++      action := 'Identity updating';
++      cnt := 0;
++      FOR object, sq_last_value IN
++        SELECT sequencename::text, COALESCE(last_value, -999) from pg_sequences where schemaname = quote_ident(source_schema)
++        AND NOT EXISTS
++        (select 1 from information_schema.sequences where sequence_schema = quote_ident(source_schema) and sequence_name = sequencename)
++      LOOP
++        IF sq_last_value = -999 THEN
++          continue;
++        END IF;
++        cnt := cnt + 1;
++        buffer := quote_ident(dest_schema) || '.' || quote_ident(object);
++        IF bData THEN
++          EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ;
++        ELSE
++          IF bDDLOnly THEN
++            -- fix#63
++            RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ;
++          ELSE
++            -- fix#63
++            EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ;
++          END IF;
++        END IF;
++      END LOOP;
++      -- Fixed Issue#107: set lpad from 2 to 5
++      RAISE NOTICE ' IDENTITIES set: %', LPAD(cnt::text, 5, ' ');
++    ELSE
++      -- Fixed Issue#107: set lpad from 2 to 5
++      RAISE NOTICE ' IDENTITIES set: %', LPAD('-1'::text, 5, ' ');
++    END IF;
++
++    -- Issue#78 forces us to defer FKeys until the end since we previously did row copies before FKeys
++    -- add FK constraint
++    -- action := 'FK Constraints';
++
++    -- Issue#62: Add comments on indexes, and then removed them from here and reworked later below.
++
++    -- Issue 90: moved functions to here, before views or MVs that might use them
++    -- Create functions
++    action := 'Functions';
++    cnt := 0;
++    -- MJV FIX per issue# 34
++    -- SET search_path = '';
++    EXECUTE 'SET search_path = ' || quote_ident(source_schema) ;
++
++    -- Fixed Issue#65
++    -- Fixed Issue#97
++    -- FOR func_oid IN SELECT oid FROM pg_proc WHERE pronamespace = src_oid AND prokind != 'a'
++    IF is_prokind THEN
++    FOR func_oid, func_owner, func_name, func_args, func_argno, buffer3 IN
++      SELECT p.oid, pg_catalog.pg_get_userbyid(p.proowner), p.proname, oidvectortypes(p.proargtypes), p.pronargs,
++        CASE WHEN prokind = 'p' THEN 'PROCEDURE' WHEN prokind = 'f' THEN 'FUNCTION' ELSE '' END
++      FROM pg_proc p WHERE p.pronamespace = src_oid AND p.prokind != 'a'
++    LOOP
++      cnt := cnt + 1;
++      SELECT pg_get_functiondef(func_oid)
++      INTO qry;
++
++      SELECT replace(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO dest_qry;
++      IF bDDLOnly THEN
++        RAISE INFO '%;', dest_qry;
++        -- Issue#91 Fix
++        -- issue#95
++        IF NOT bNoOwner THEN
++          IF func_argno = 0 THEN
++            -- Fixed Issue#108: double-quote roles in case they have special characters
++            RAISE INFO 'ALTER % %() OWNER TO %', buffer3, quote_ident(dest_schema) || '.' || quote_ident(func_name), '"' || func_owner || '";';
++          ELSE
++            -- Fixed Issue#108: double-quote roles in case they have special characters
++            RAISE INFO 'ALTER % % OWNER TO %', buffer3, quote_ident(dest_schema) || '.'
|| quote_ident(func_name) || '(' || func_args || ')', '"' || func_owner || '";'; ++ END IF; ++ END IF; ++ ELSE ++ IF bDebug THEN RAISE NOTICE 'DEBUG: %', dest_qry; END IF; ++ EXECUTE dest_qry; ++ ++ -- Issue#91 Fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ IF func_argno = 0 THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ dest_qry = 'ALTER ' || buffer3 || ' ' || quote_ident(dest_schema) || '.' || quote_ident(func_name) || '() OWNER TO ' || '"' || func_owner || '";'; ++ ELSE ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ dest_qry = 'ALTER ' || buffer3 || ' ' || quote_ident(dest_schema) || '.' || quote_ident(func_name) || '(' || func_args || ') OWNER TO ' || '"' || func_owner || '";'; ++ END IF; ++ END IF; ++ EXECUTE dest_qry; ++ END IF; ++ END LOOP; ++ ELSE ++ FOR func_oid IN SELECT oid ++ FROM pg_proc ++ WHERE pronamespace = src_oid AND not proisagg ++ LOOP ++ cnt := cnt + 1; ++ SELECT pg_get_functiondef(func_oid) INTO qry; ++ SELECT replace(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO dest_qry; ++ IF bDDLOnly THEN ++ RAISE INFO '%;', dest_qry; ++ ELSE ++ EXECUTE dest_qry; ++ END IF; ++ END LOOP; ++ END IF; ++ ++ -- Create aggregate functions. ++ -- Fixed Issue#65 ++ -- FOR func_oid IN SELECT oid FROM pg_proc WHERE pronamespace = src_oid AND prokind = 'a' ++ IF is_prokind THEN ++ FOR func_oid IN ++ SELECT oid ++ FROM pg_proc ++ WHERE pronamespace = src_oid AND prokind = 'a' ++ LOOP ++ cnt := cnt + 1; ++ SELECT ++ 'CREATE AGGREGATE ' ++ || dest_schema ++ || '.' ++ || p.proname ++ || '(' ++ -- || format_type(a.aggtranstype, NULL) ++ -- Issue#65 Fixes for specific datatype mappings ++ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8' ++ WHEN format_type(a.aggtranstype, NULL) = 'anyarray' THEN 'anyelement' ++ ELSE format_type(a.aggtranstype, NULL) END ++ || ') (sfunc = ' ++ || regexp_replace(a.aggtransfn::text, '(^|\W)' || quote_ident(source_schema) || '\.', '\1' || quote_ident(dest_schema) || '.') ++ || ', stype = ' ++ -- || format_type(a.aggtranstype, NULL) ++ -- Issue#65 Fixes for specific datatype mappings ++ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8[]' ELSE format_type(a.aggtranstype, NULL) END ++ || CASE ++ WHEN op.oprname IS NULL THEN '' ++ ELSE ', sortop = ' || op.oprname ++ END ++ || CASE ++ WHEN a.agginitval IS NULL THEN '' ++ ELSE ', initcond = ''' || a.agginitval || '''' ++ END ++ || ')' ++ INTO dest_qry ++ FROM pg_proc p ++ JOIN pg_aggregate a ON a.aggfnoid = p.oid ++ LEFT JOIN pg_operator op ON op.oid = a.aggsortop ++ WHERE p.oid = func_oid; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%;', dest_qry; ++ ELSE ++ EXECUTE dest_qry; ++ END IF; ++ ++ END LOOP; ++ RAISE NOTICE ' FUNCTIONS cloned: %', LPAD(cnt::text, 5, ' '); ++ + ELSE +- EXECUTE dest_qry; ++ FOR func_oid IN SELECT oid FROM pg_proc WHERE pronamespace = src_oid AND proisagg ++ LOOP ++ cnt := cnt + 1; ++ SELECT ++ 'CREATE AGGREGATE ' ++ || dest_schema ++ || '.' 
++ || p.proname ++ || '(' ++ -- || format_type(a.aggtranstype, NULL) ++ -- Issue#65 Fixes for specific datatype mappings ++ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8' ++ WHEN format_type(a.aggtranstype, NULL) = 'anyarray' THEN 'anyelement' ++ ELSE format_type(a.aggtranstype, NULL) END ++ || ') (sfunc = ' ++ || regexp_replace(a.aggtransfn::text, '(^|\W)' || quote_ident(source_schema) || '\.', '\1' || quote_ident(dest_schema) || '.') ++ || ', stype = ' ++ -- || format_type(a.aggtranstype, NULL) ++ -- Issue#65 Fixes for specific datatype mappings ++ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8[]' ELSE format_type(a.aggtranstype, NULL) END ++ || CASE ++ WHEN op.oprname IS NULL THEN '' ++ ELSE ', sortop = ' || op.oprname ++ END ++ || CASE ++ WHEN a.agginitval IS NULL THEN '' ++ ELSE ', initcond = ''' || a.agginitval || '''' ++ END ++ || ')' ++ INTO dest_qry ++ FROM pg_proc p ++ JOIN pg_aggregate a ON a.aggfnoid = p.oid ++ LEFT JOIN pg_operator op ON op.oid = a.aggsortop ++ WHERE p.oid = func_oid; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%;', dest_qry; ++ ELSE ++ EXECUTE dest_qry; ++ END IF; ++ ++ END LOOP; ++ RAISE NOTICE ' FUNCTIONS cloned: %', LPAD(cnt::text, 5, ' '); + END IF; + ++ -- Create views ++ action := 'Views'; ++ ++ -- Issue#61 FIX: use set_config for empty string ++ -- MJV FIX #43: also had to reset search_path from source schema to empty. ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) ++ INTO v_dummy; ++ ++ cnt := 0; ++ --FOR object IN ++ -- SELECT table_name::text, view_definition ++ -- FROM information_schema.views ++ -- WHERE table_schema = quote_ident(source_schema) ++ ++ -- Issue#73 replace loop query to handle dependencies ++ -- Issue#91 get view_owner ++ FOR srctbl, aname, view_owner, object IN ++ WITH RECURSIVE views AS ( ++ SELECT n.nspname as schemaname, v.relname as tablename, v.oid::regclass AS viewname, ++ v.relkind = 'm' AS is_materialized, pg_catalog.pg_get_userbyid(v.relowner) as owner, ++ 1 AS level ++ FROM pg_depend AS d ++ JOIN pg_rewrite AS r ++ ON r.oid = d.objid ++ JOIN pg_class AS v ++ ON v.oid = r.ev_class ++ JOIN pg_namespace n ++ ON n.oid = v.relnamespace ++ -- WHERE v.relkind IN ('v', 'm') ++ WHERE v.relkind IN ('v') ++ AND d.classid = 'pg_rewrite'::regclass ++ AND d.refclassid = 'pg_class'::regclass ++ AND d.deptype = 'n' ++ UNION ++ -- add the views that depend on these ++ SELECT n.nspname as schemaname, v.relname as tablename, v.oid::regclass AS viewname, ++ v.relkind = 'm', pg_catalog.pg_get_userbyid(v.relowner) as owner, ++ views.level + 1 ++ FROM views ++ JOIN pg_depend AS d ++ ON d.refobjid = views.viewname ++ JOIN pg_rewrite AS r ++ ON r.oid = d.objid ++ JOIN pg_class AS v ++ ON v.oid = r.ev_class ++ JOIN pg_namespace n ++ ON n.oid = v.relnamespace ++ -- WHERE v.relkind IN ('v', 'm') ++ WHERE v.relkind IN ('v') ++ AND d.classid = 'pg_rewrite'::regclass ++ AND d.refclassid = 'pg_class'::regclass ++ AND d.deptype = 'n' ++ AND v.oid <> views.viewname ++ ) ++ SELECT tablename, viewname, owner, format('CREATE OR REPLACE%s VIEW %s AS%s', ++ CASE WHEN is_materialized ++ THEN ' MATERIALIZED' ++ ELSE '' ++ END, ++ viewname, ++ pg_get_viewdef(viewname)) ++ FROM views ++ WHERE schemaname = quote_ident(source_schema) ++ GROUP BY schemaname, tablename, viewname, owner, is_materialized ++ ORDER BY max(level), schemaname, tablename ++ LOOP ++ cnt := cnt + 1; ++ -- Issue#73 replace logic based on new loop sql ++ buffer := quote_ident(dest_schema) || '.' 
|| quote_ident(aname); ++ -- MJV FIX: #43 ++ -- SELECT view_definition INTO v_def ++ -- SELECT REPLACE(view_definition, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO v_def ++ -- FROM information_schema.views ++ -- WHERE table_schema = quote_ident(source_schema) ++ -- AND table_name = quote_ident(object); ++ SELECT REPLACE(object, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO v_def; ++ -- NOTE: definition already includes the closing statement semicolon ++ SELECT REPLACE(aname, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO buffer3; ++ IF bDDLOnly THEN ++ RAISE INFO '%', v_def; ++ -- Issue#91 Fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ -- RAISE INFO 'ALTER TABLE % OWNER TO %', buffer3, view_owner || ';'; ++ RAISE INFO 'ALTER TABLE % OWNER TO %', buffer3, '"' ||view_owner || '";'; ++ END IF; ++ ELSE ++ -- EXECUTE 'CREATE OR REPLACE VIEW ' || buffer || ' AS ' || v_def; ++ EXECUTE v_def; ++ -- Issue#73: commented out comment logic for views since we do it elsewhere now. ++ -- Issue#91 Fix ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ v_def = 'ALTER TABLE ' || buffer3 || ' OWNER TO ' || '"' || view_owner || '";'; ++ EXECUTE v_def; ++ END IF; ++ END IF; + END LOOP; +- RAISE NOTICE ' FUNCTIONS cloned: %', LPAD(cnt::text, 5, ' '); ++ RAISE NOTICE ' VIEWS cloned: %', LPAD(cnt::text, 5, ' '); ++ ++ -- Create Materialized views ++ action := 'Mat. Views'; ++ cnt := 0; ++ -- Issue#91 get view_owner ++ FOR object, view_owner, v_def IN ++ SELECT matviewname::text, '"' || matviewowner::text || '"', replace(definition,';','') FROM pg_catalog.pg_matviews WHERE schemaname = quote_ident(source_schema) ++ LOOP ++ cnt := cnt + 1; ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on target schema and object ++ buffer := quote_ident(dest_schema) || '.' || quote_ident(object); ++ ++ -- MJV FIX: #72 remove source schema in MV def ++ SELECT REPLACE(v_def, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO buffer2; ++ ++ IF bData THEN ++ -- issue#98 defer creation until after regular tables are populated. Also defer the ownership as well. 
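++          -- For illustration only (hypothetical "sample" -> "clone1" clone, made-up MV and
++          -- owner names; not actual output): the deferred statements queued in mvarray at
++          -- this point look like:
++          --   CREATE MATERIALIZED VIEW clone1.mv_orders AS SELECT ... WITH DATA;
++          --   ALTER MATERIALIZED VIEW clone1.mv_orders OWNER TO "mv_owner";
++          -- They are executed only after the regular tables have been populated, so the MV
++          -- is not materialized against still-empty base tables.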
++ -- EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH DATA;' ; ++ buffer3 = 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH DATA;'; ++ mvarray := mvarray || buffer3; ++ ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- buffer3 = 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ; ++ -- EXECUTE buffer3; ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ; ++ mvarray := mvarray || buffer3; ++ END IF; ++ ELSE ++ IF bDDLOnly THEN ++ RAISE INFO '%', 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH NO DATA;' ; ++ -- Issue#91 ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ RAISE INFO '%', 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ; ++ END IF; ++ ELSE ++ EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH NO DATA;' ; ++ -- Issue#91 ++ -- issue#95 ++ IF NOT bNoOwner THEN ++ -- Fixed Issue#108: double-quote roles in case they have special characters ++ buffer3 = 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ; ++ EXECUTE buffer3; ++ END IF; ++ END IF; ++ END IF; ++ SELECT coalesce(obj_description(oid), '') into adef from pg_class where relkind = 'm' and relname = object; ++ IF adef <> '' THEN ++ IF bDDLOnly THEN ++ RAISE INFO '%', 'COMMENT ON MATERIALIZED VIEW ' || quote_ident(dest_schema) || '.' || object || ' IS ''' || adef || ''';'; ++ ELSE ++ -- Issue#$98: also defer if copy rows is on since we defer MVIEWS in that case ++ IF bData THEN ++ buffer3 = 'COMMENT ON MATERIALIZED VIEW ' || quote_ident(dest_schema) || '.' || object || ' IS ''' || adef || ''';'; ++ mvarray = mvarray || buffer3; ++ ELSE ++ EXECUTE 'COMMENT ON MATERIALIZED VIEW ' || quote_ident(dest_schema) || '.' || object || ' IS ''' || adef || ''';'; ++ END IF; ++ ++ END IF; ++ END IF; ++ ++ FOR aname, adef IN ++ SELECT indexname, replace(indexdef, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') as newdef FROM pg_indexes where schemaname = quote_ident(source_schema) and tablename = object order by indexname ++ LOOP ++ IF bDDLOnly THEN ++ RAISE INFO '%', adef || ';'; ++ ELSE ++ EXECUTE adef || ';'; ++ END IF; ++ END LOOP; ++ ++ END LOOP; ++ RAISE NOTICE ' MAT VIEWS cloned: %', LPAD(cnt::text, 5, ' '); ++ ++ -- Issue 90 Move create functions to before views + + -- MV: Create Triggers ++ ++ -- MJV FIX: #38 ++ -- EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; ++ ++ -- Issue#61 FIX: use set_config for empty string ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ + action := 'Triggers'; + cnt := 0; + FOR arec IN +- SELECT trigger_schema, trigger_name, event_object_table, action_order, action_condition, action_statement, action_orientation, action_timing, array_to_string(array_agg(event_manipulation::text), ' OR '), +- 'CREATE TRIGGER ' || trigger_name || ' ' || action_timing || ' ' || array_to_string(array_agg(event_manipulation::text), ' OR ') || ' ON ' || quote_ident(dest_schema) || '.' 
|| event_object_table || +- ' FOR EACH ' || action_orientation || ' ' || action_statement || ';' as TRIG_DDL +- FROM information_schema.triggers where trigger_schema = quote_ident(source_schema) GROUP BY 1,2,3,4,5,6,7,8 ++ -- 2021-03-09 MJV FIX: #40 fixed sql to get the def using pg_get_triggerdef() sql ++ SELECT n.nspname, c.relname, t.tgname, p.proname, REPLACE(pg_get_triggerdef(t.oid), quote_ident(source_schema), quote_ident(dest_schema)) || ';' AS trig_ddl ++ FROM pg_trigger t, pg_class c, pg_namespace n, pg_proc p ++ WHERE n.nspname = quote_ident(source_schema) ++ AND n.oid = c.relnamespace ++ AND c.relkind in ('r','p') ++ AND n.oid = p.pronamespace ++ AND c.oid = t.tgrelid ++ AND p.oid = t.tgfoid ++ ORDER BY c.relname, t.tgname + LOOP + BEGIN + cnt := cnt + 1; +- IF ddl_only THEN ++ IF bDDLOnly THEN + RAISE INFO '%', arec.trig_ddl; + ELSE + EXECUTE arec.trig_ddl; +@@ -474,55 +2444,383 @@ + END LOOP; + RAISE NOTICE ' TRIGGERS cloned: %', LPAD(cnt::text, 5, ' '); + +- -- --------------------- +- -- MV: Permissions: Defaults +- -- --------------------- +- action := 'PRIVS: Defaults'; ++ ++ -- MV: Create Rules ++ -- Fixes Issue#59 Implement Rules ++ action := 'Rules'; + cnt := 0; + FOR arec IN +- SELECT pg_catalog.pg_get_userbyid(d.defaclrole) AS "owner", n.nspname AS schema, +- CASE d.defaclobjtype WHEN 'r' THEN 'table' WHEN 'S' THEN 'sequence' WHEN 'f' THEN 'function' WHEN 'T' THEN 'type' WHEN 'n' THEN 'schema' END AS atype, +- d.defaclacl as defaclacl, pg_catalog.array_to_string(d.defaclacl, ',') as defaclstr +- FROM pg_catalog.pg_default_acl d LEFT JOIN pg_catalog.pg_namespace n ON (n.oid = d.defaclnamespace) WHERE n.nspname IS NOT NULL and n.nspname = quote_ident(source_schema) ORDER BY 3, 2, 1 ++ SELECT regexp_replace(definition, E'[\\n\\r]+', ' ', 'g' ) as definition ++ FROM pg_rules ++ WHERE schemaname = quote_ident(source_schema) + LOOP +- BEGIN +- -- RAISE NOTICE 'owner=% type=% defaclacl=% defaclstr=%', arec.owner, arec.atype, arec.defaclacl, arec.defaclstr; ++ cnt := cnt + 1; ++ buffer := REPLACE(arec.definition, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ END LOOP; ++ RAISE NOTICE ' RULES cloned: %', LPAD(cnt::text, 5, ' '); ++ ++ ++ -- MV: Create Policies ++ -- Fixes Issue#66 Implement Security policies for RLS ++ action := 'Policies'; ++ cnt := 0; ++ -- #106 Handle 9.6 which doesn't have "permissive" ++ IF sq_server_version_num > 90624 THEN ++ FOR arec IN ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on policy, tablename ++ SELECT schemaname as schemaname, tablename as tablename, 'CREATE POLICY ' || policyname || ' ON ' || quote_ident(dest_schema) || '.' 
|| quote_ident(tablename) || ' AS ' || permissive || ' FOR ' || cmd || ' TO ' ++ || array_to_string(roles, ',', '*') || ' USING (' || regexp_replace(qual, E'[\\n\\r]+', ' ', 'g' ) || ')' ++ || CASE WHEN with_check IS NOT NULL THEN ' WITH CHECK (' ELSE '' END || coalesce(with_check, '') || CASE WHEN with_check IS NOT NULL THEN ');' ELSE ';' END as definition ++ FROM pg_policies ++ WHERE schemaname = quote_ident(source_schema) ++ ORDER BY policyname ++ LOOP ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.definition; ++ ELSE ++ EXECUTE arec.definition; ++ END IF; ++ ++ -- Issue#76: Enable row security if indicated ++ SELECT c.relrowsecurity INTO abool FROM pg_class c, pg_namespace n where n.nspname = quote_ident(arec.schemaname) AND n.oid = c.relnamespace AND c.relname = quote_ident(arec.tablename) and c.relkind = 'r'; ++ IF abool THEN ++ buffer = 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || arec.tablename || ' ENABLE ROW LEVEL SECURITY;'; ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ END IF; ++ END LOOP; ++ ELSE ++ -- handle 9.6 versions ++ FOR arec IN ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on policy, tablename ++ SELECT schemaname as schemaname, tablename as tablename, 'CREATE POLICY ' || policyname || ' ON ' || quote_ident(dest_schema) || '.' || quote_ident(tablename) || ' FOR ' || cmd || ' TO ' ++ || array_to_string(roles, ',', '*') || ' USING (' || regexp_replace(qual, E'[\\n\\r]+', ' ', 'g' ) || ')' ++ || CASE WHEN with_check IS NOT NULL THEN ' WITH CHECK (' ELSE '' END || coalesce(with_check, '') || CASE WHEN with_check IS NOT NULL THEN ');' ELSE ';' END as definition ++ FROM pg_policies ++ WHERE schemaname = quote_ident(source_schema) ++ ORDER BY policyname ++ LOOP ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.definition; ++ ELSE ++ EXECUTE arec.definition; ++ END IF; ++ ++ -- Issue#76: Enable row security if indicated ++ SELECT c.relrowsecurity INTO abool FROM pg_class c, pg_namespace n where n.nspname = quote_ident(arec.schemaname) AND n.oid = c.relnamespace AND c.relname = quote_ident(arec.tablename) and c.relkind = 'r'; ++ IF abool THEN ++ buffer = 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || arec.tablename || ' ENABLE ROW LEVEL SECURITY;'; ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ END IF; ++ END LOOP; ++ END IF; ++ RAISE NOTICE ' POLICIES cloned: %', LPAD(cnt::text, 5, ' '); ++ ++ ++ -- MJV Fixed #62 for comments (PASS 1) ++ action := 'Comments1'; ++ cnt := 0; ++ FOR qry IN ++ -- Issue#74 Fix: Change schema from source to target. Also, do not include comments on foreign tables since we do not clone foreign tables at this time. ++ SELECT 'COMMENT ON ' || CASE WHEN c.relkind in ('r','p') AND a.attname IS NULL THEN 'TABLE ' WHEN c.relkind in ('r','p') AND ++ a.attname IS NOT NULL THEN 'COLUMN ' WHEN c.relkind = 'f' THEN 'FOREIGN TABLE ' WHEN c.relkind = 'm' THEN 'MATERIALIZED VIEW ' WHEN c.relkind = 'v' THEN 'VIEW ' ++ WHEN c.relkind = 'i' THEN 'INDEX ' WHEN c.relkind = 'S' THEN 'SEQUENCE ' ELSE 'XX' END || quote_ident(dest_schema) || '.' || CASE WHEN c.relkind in ('r','p') AND ++ -- Issue#78: handle case-sensitive names with quote_ident() ++ a.attname IS NOT NULL THEN quote_ident(c.relname) || '.' 
|| a.attname ELSE quote_ident(c.relname) END || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_class c ++ JOIN pg_namespace n ON (n.oid = c.relnamespace) ++ LEFT JOIN pg_description d ON (c.oid = d.objoid) ++ LEFT JOIN pg_attribute a ON (c.oid = a.attrelid ++ AND a.attnum > 0 and a.attnum = d.objsubid) ++ WHERE c.relkind <> 'f' AND d.description IS NOT NULL AND n.nspname = quote_ident(source_schema) ++ ORDER BY ddl ++ LOOP ++ cnt := cnt + 1; ++ ++ -- BAD : "COMMENT ON SEQUENCE sample_clone2.CaseSensitive_ID_seq IS 'just a comment on CaseSensitive sequence';" ++ -- GOOD: "COMMENT ON SEQUENCE "CaseSensitive_ID_seq" IS 'just a comment on CaseSensitive sequence';" ++ ++ -- Issue#98 For MVs we create comments when we create the MVs ++ IF substring(qry,1,28) = 'COMMENT ON MATERIALIZED VIEW' THEN ++ IF bDebug THEN RAISE NOTICE 'DEBUG: deferring comments on MVs'; END IF; ++ cnt = cnt - 1; ++ continue; ++ END IF; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ ELSE ++ EXECUTE qry; ++ END IF; ++ END LOOP; ++ RAISE NOTICE ' COMMENTS(1) cloned: %', LPAD(cnt::text, 5, ' '); ++ ++ -- MJV Fixed #62 for comments (PASS 2) ++ action := 'Comments2'; ++ cnt2 := 0; ++ IF is_prokind THEN ++ FOR qry IN ++ -- Issue#74 Fix: Change schema from source to target. ++ SELECT 'COMMENT ON SCHEMA ' || quote_ident(dest_schema) || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ from pg_namespace n, pg_description d where d.objoid = n.oid and n.nspname = quote_ident(source_schema) ++ UNION ++ -- Issue#74 Fix: need to replace source schema inline ++ -- SELECT 'COMMENT ON TYPE ' || pg_catalog.format_type(t.oid, NULL) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl ++ SELECT 'COMMENT ON TYPE ' || REPLACE(pg_catalog.format_type(t.oid, NULL), quote_ident(source_schema), quote_ident(dest_schema)) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl ++ FROM pg_catalog.pg_type t ++ JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) ++ AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid) ++ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default ++ AND pg_catalog.obj_description(t.oid, 'pg_type') IS NOT NULL and t.typtype = 'c' ++ UNION ++ -- Issue#78: handle case-sensitive names with quote_ident() ++ SELECT 'COMMENT ON COLLATION ' || quote_ident(dest_schema) || '.' || quote_ident(c.collname) || ' IS ''' || pg_catalog.obj_description(c.oid, 'pg_collation') || ''';' as ddl ++ FROM pg_catalog.pg_collation c, pg_catalog.pg_namespace n ++ WHERE n.oid = c.collnamespace AND c.collencoding IN (-1, pg_catalog.pg_char_to_encoding(pg_catalog.getdatabaseencoding())) ++ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default AND pg_catalog.obj_description(c.oid, 'pg_collation') IS NOT NULL ++ UNION ++ SELECT 'COMMENT ON ' || CASE WHEN p.prokind = 'f' THEN 'FUNCTION ' WHEN p.prokind = 'p' THEN 'PROCEDURE ' WHEN p.prokind = 'a' THEN 'AGGREGATE ' END || ++ quote_ident(dest_schema) || '.' 
|| p.proname || ' (' || oidvectortypes(p.proargtypes) || ')' ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_catalog.pg_namespace n ++ JOIN pg_catalog.pg_proc p ON p.pronamespace = n.oid ++ JOIN pg_description d ON (d.objoid = p.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ UNION ++ SELECT 'COMMENT ON POLICY ' || p1.policyname || ' ON ' || quote_ident(dest_schema) || '.' || p1.tablename || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_policies p1, pg_policy p2, pg_class c, pg_namespace n, pg_description d ++ WHERE p1.schemaname = n.nspname AND p1.tablename = c.relname AND n.oid = c.relnamespace ++ AND c.relkind in ('r','p') AND p1.policyname = p2.polname AND d.objoid = p2.oid AND p1.schemaname = quote_ident(source_schema) ++ UNION ++ SELECT 'COMMENT ON DOMAIN ' || quote_ident(dest_schema) || '.' || t.typname || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_catalog.pg_type t ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ JOIN pg_catalog.pg_description d ON d.classoid = t.tableoid AND d.objoid = t.oid AND d.objsubid = 0 ++ WHERE t.typtype = 'd' AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default ++ ORDER BY 1 ++ LOOP ++ cnt2 := cnt2 + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ ELSE ++ EXECUTE qry; ++ END IF; ++ END LOOP; ++ ELSE -- must be v 10 or less ++ FOR qry IN ++ -- Issue#74 Fix: Change schema from source to target. ++ SELECT 'COMMENT ON SCHEMA ' || quote_ident(dest_schema) || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ from pg_namespace n, pg_description d where d.objoid = n.oid and n.nspname = quote_ident(source_schema) ++ UNION ++ -- Issue#74 Fix: need to replace source schema inline ++ -- SELECT 'COMMENT ON TYPE ' || pg_catalog.format_type(t.oid, NULL) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl ++ SELECT 'COMMENT ON TYPE ' || REPLACE(pg_catalog.format_type(t.oid, NULL), quote_ident(source_schema), quote_ident(dest_schema)) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl ++ FROM pg_catalog.pg_type t ++ JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' ++ FROM pg_catalog.pg_class c ++ WHERE c.oid = t.typrelid)) ++ AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el ++ WHERE el.oid = t.typelem AND el.typarray = t.oid) ++ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default ++ AND pg_catalog.obj_description(t.oid, 'pg_type') IS NOT NULL and t.typtype = 'c' ++ UNION ++ -- FIX Issue#87 by adding double quotes around collation name ++ SELECT 'COMMENT ON COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" IS ''' || pg_catalog.obj_description(c.oid, 'pg_collation') || ''';' as ddl ++ FROM pg_catalog.pg_collation c, pg_catalog.pg_namespace n ++ WHERE n.oid = c.collnamespace AND c.collencoding IN (-1, pg_catalog.pg_char_to_encoding(pg_catalog.getdatabaseencoding())) ++ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default AND pg_catalog.obj_description(c.oid, 'pg_collation') IS NOT NULL ++ UNION ++ SELECT 'COMMENT ON ' || CASE WHEN proisagg THEN 'AGGREGATE ' ELSE 'FUNCTION ' END || ++ quote_ident(dest_schema) || '.' 
|| p.proname || ' (' || oidvectortypes(p.proargtypes) || ')' ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_catalog.pg_namespace n ++ JOIN pg_catalog.pg_proc p ON p.pronamespace = n.oid ++ JOIN pg_description d ON (d.objoid = p.oid) ++ WHERE n.nspname = quote_ident(source_schema) ++ UNION ++ SELECT 'COMMENT ON POLICY ' || p1.policyname || ' ON ' || quote_ident(dest_schema) || '.' || p1.tablename || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_policies p1, pg_policy p2, pg_class c, pg_namespace n, pg_description d ++ WHERE p1.schemaname = n.nspname AND p1.tablename = c.relname AND n.oid = c.relnamespace ++ AND c.relkind in ('r','p') AND p1.policyname = p2.polname AND d.objoid = p2.oid AND p1.schemaname = quote_ident(source_schema) ++ UNION ++ SELECT 'COMMENT ON DOMAIN ' || quote_ident(dest_schema) || '.' || t.typname || ++ -- Issue#74 Fix ++ -- ' IS ''' || d.description || ''';' as ddl ++ ' IS ' || quote_literal(d.description) || ';' as ddl ++ FROM pg_catalog.pg_type t ++ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ++ JOIN pg_catalog.pg_description d ON d.classoid = t.tableoid AND d.objoid = t.oid AND d.objsubid = 0 ++ WHERE t.typtype = 'd' AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default ++ ORDER BY 1 ++ LOOP ++ cnt2 := cnt2 + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ ELSE ++ EXECUTE qry; ++ END IF; ++ END LOOP; ++ END IF; ++ RAISE NOTICE ' COMMENTS(2) cloned: %', LPAD(cnt2::text, 5, ' '); + +- FOREACH aclstr IN ARRAY arec.defaclacl +- LOOP +- cnt := cnt + 1; +- -- RAISE NOTICE 'aclstr=%', aclstr; +- -- break up into grantor, grantee, and privs, mydb_update=rwU/mydb_owner +- SELECT split_part(aclstr, '=',1) INTO grantee; +- SELECT split_part(aclstr, '=',2) INTO grantor; +- SELECT split_part(grantor, '/',1) INTO privs; +- SELECT split_part(grantor, '/',2) INTO grantor; +- -- RAISE NOTICE 'grantor=% grantee=% privs=%', grantor, grantee, privs; +- +- IF arec.atype = 'function' THEN +- -- Just having execute is enough to grant all apparently. +- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON FUNCTIONS TO "' || grantee || '";'; +- IF ddl_only THEN +- RAISE INFO '%', buffer; +- ELSE +- EXECUTE buffer; +- END IF; + +- ELSIF arec.atype = 'sequence' THEN +- IF POSITION('r' IN privs) > 0 AND POSITION('w' IN privs) > 0 AND POSITION('U' IN privs) > 0 THEN +- -- arU is enough for all privs +- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON SEQUENCES TO "' || grantee || '";'; +- IF ddl_only THEN ++ -- Issue#95 bypass if No ACL specified. 
++ IF NOT bNoACL THEN ++ -- --------------------- ++ -- MV: Permissions: Defaults ++ -- --------------------- ++ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; ++ action := 'PRIVS: Defaults'; ++ cnt := 0; ++ FOR arec IN ++ SELECT pg_catalog.pg_get_userbyid(d.defaclrole) AS "owner", n.nspname AS schema, ++ CASE d.defaclobjtype WHEN 'r' THEN 'table' WHEN 'S' THEN 'sequence' WHEN 'f' THEN 'function' WHEN 'T' THEN 'type' WHEN 'n' THEN 'schema' END AS atype, ++ d.defaclacl as defaclacl, pg_catalog.array_to_string(d.defaclacl, ',') as defaclstr ++ FROM pg_catalog.pg_default_acl d LEFT JOIN pg_catalog.pg_namespace n ON (n.oid = d.defaclnamespace) ++ WHERE n.nspname IS NOT NULL AND n.nspname = quote_ident(source_schema) ++ ORDER BY 3, 2, 1 ++ LOOP ++ BEGIN ++ -- RAISE NOTICE ' owner=% type=% defaclacl=% defaclstr=%', arec.owner, arec.atype, arec.defaclacl, arec.defaclstr; ++ ++ FOREACH aclstr IN ARRAY arec.defaclacl ++ LOOP ++ cnt := cnt + 1; ++ -- RAISE NOTICE ' aclstr=%', aclstr; ++ -- break up into grantor, grantee, and privs, mydb_update=rwU/mydb_owner ++ SELECT split_part(aclstr, '=',1) INTO grantee; ++ SELECT split_part(aclstr, '=',2) INTO grantor; ++ SELECT split_part(grantor, '/',1) INTO privs; ++ SELECT split_part(grantor, '/',2) INTO grantor; ++ -- RAISE NOTICE ' grantor=% grantee=% privs=%', grantor, grantee, privs; ++ ++ IF arec.atype = 'function' THEN ++ -- Just having execute is enough to grant all apparently. ++ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON FUNCTIONS TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ -- set role = cm_stage_ro_grp; ++ -- ALTER DEFAULT PRIVILEGES FOR ROLE cm_stage_ro_grp IN SCHEMA cm_stage GRANT REFERENCES, TRIGGER ON TABLES TO cm_stage_ro_grp; ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; ++ END IF; ++ ++ IF bDDLOnly THEN + RAISE INFO '%', buffer; + ELSE + EXECUTE buffer; + END IF; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ ++ ELSIF arec.atype = 'sequence' THEN ++ IF POSITION('r' IN privs) > 0 AND POSITION('w' IN privs) > 0 AND POSITION('U' IN privs) > 0 THEN ++ -- arU is enough for all privs ++ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON SEQUENCES TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; ++ END IF; + +- ELSE +- -- have to specify each priv individually ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ ++ ELSE ++ -- have to specify each priv individually ++ buffer2 := ''; ++ IF POSITION('r' IN privs) > 0 THEN ++ buffer2 := 'SELECT'; ++ END IF; ++ IF POSITION('w' IN privs) > 0 THEN ++ IF buffer2 = '' THEN ++ buffer2 := 'UPDATE'; ++ ELSE ++ buffer2 := buffer2 || ', UPDATE'; ++ END IF; ++ END IF; ++ IF POSITION('U' IN privs) > 0 THEN ++ IF buffer2 = '' THEN ++ buffer2 := 'USAGE'; ++ ELSE ++ buffer2 := buffer2 || ', USAGE'; ++ END IF; ++ END IF; ++ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON SEQUENCES TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; ++ END IF; ++ ++ IF 
bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ select current_user into buffer; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ END IF; ++ ++ ELSIF arec.atype = 'table' THEN ++ -- do each priv individually, jeeeesh! + buffer2 := ''; ++ IF POSITION('a' IN privs) > 0 THEN ++ buffer2 := 'INSERT'; ++ END IF; + IF POSITION('r' IN privs) > 0 THEN +- buffer2 := 'SELECT'; ++ IF buffer2 = '' THEN ++ buffer2 := 'SELECT'; ++ ELSE ++ buffer2 := buffer2 || ', SELECT'; ++ END IF; + END IF; + IF POSITION('w' IN privs) > 0 THEN + IF buffer2 = '' THEN +@@ -531,181 +2829,431 @@ + buffer2 := buffer2 || ', UPDATE'; + END IF; + END IF; +- IF POSITION('U' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'USAGE'; ++ IF POSITION('d' IN privs) > 0 THEN ++ IF buffer2 = '' THEN ++ buffer2 := 'DELETE'; + ELSE +- buffer2 := buffer2 || ', USAGE'; ++ buffer2 := buffer2 || ', DELETE'; + END IF; + END IF; +- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON SEQUENCES TO "' || grantee || '";'; +- IF ddl_only THEN +- RAISE INFO '%', buffer; +- ELSE +- EXECUTE buffer; +- END IF; +- +- END IF; +- ELSIF arec.atype = 'table' THEN +- -- do each priv individually, jeeeesh! +- buffer2 := ''; +- IF POSITION('a' IN privs) > 0 THEN +- buffer2 := 'INSERT'; +- END IF; +- IF POSITION('r' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'SELECT'; +- ELSE +- buffer2 := buffer2 || ', SELECT'; ++ IF POSITION('t' IN privs) > 0 THEN ++ IF buffer2 = '' THEN ++ buffer2 := 'TRIGGER'; ++ ELSE ++ buffer2 := buffer2 || ', TRIGGER'; ++ END IF; + END IF; +- END IF; +- IF POSITION('w' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'UPDATE'; +- ELSE +- buffer2 := buffer2 || ', UPDATE'; ++ IF POSITION('T' IN privs) > 0 THEN ++ IF buffer2 = '' THEN ++ buffer2 := 'TRUNCATE'; ++ ELSE ++ buffer2 := buffer2 || ', TRUNCATE'; ++ END IF; + END IF; +- END IF; +- IF POSITION('d' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'DELETE'; +- ELSE +- buffer2 := buffer2 || ', DELETE'; ++ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON TABLES TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; + END IF; +- END IF; +- IF POSITION('t' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'TRIGGER'; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; + ELSE +- buffer2 := buffer2 || ', TRIGGER'; ++ EXECUTE buffer; + END IF; +- END IF; +- IF POSITION('T' IN privs) > 0 THEN +- IF buffer2 = '' THEN +- buffer2 := 'TRUNCATE'; ++ select current_user into buffer; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ ++ ELSIF arec.atype = 'type' THEN ++ IF POSITION('r' IN privs) > 0 AND POSITION('w' IN privs) > 0 AND POSITION('U' IN privs) > 0 THEN ++ -- arU is enough for all privs ++ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON TYPES TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; ++ END IF; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ ++ ELSIF POSITION('U' IN privs) THEN ++ buffer := 'ALTER DEFAULT 
PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT USAGE ON TYPES TO "' || grantee || '";'; ++ ++ -- Issue#92 Fix ++ IF grantor = grantee THEN ++ -- append set role to statement ++ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; ++ END IF; ++ ++ IF bDDLOnly THEN ++ RAISE INFO '%', buffer; ++ ELSE ++ EXECUTE buffer; ++ END IF; ++ -- Issue#92 Fix: ++ EXECUTE 'SET ROLE = ' || calleruser; ++ + ELSE +- buffer2 := buffer2 || ', TRUNCATE'; +- END IF; ++ RAISE WARNING 'Unhandled TYPE Privs:: type=% privs=% owner=% defaclacl=% defaclstr=% grantor=% grantee=% ', arec.atype, privs, arec.owner, arec.defaclacl, arec.defaclstr, grantor, grantee; + END IF; +- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON TABLES TO "' || grantee || '";'; +- IF ddl_only THEN +- RAISE INFO '%', buffer; +- ELSE +- EXECUTE buffer; +- END IF; +- + ELSE +- RAISE WARNING 'Doing nothing for type=% privs=%', arec.atype, privs; ++ RAISE WARNING 'Unhandled Privs:: type=% privs=% owner=% defaclacl=% defaclstr=% grantor=% grantee=% ', arec.atype, privs, arec.owner, arec.defaclacl, arec.defaclstr, grantor, grantee; + END IF; +- END LOOP; +- END; +- END LOOP; ++ END LOOP; ++ END; ++ END LOOP; + +- RAISE NOTICE ' DFLT PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ RAISE NOTICE ' DFLT PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ END IF; -- NO ACL BRANCH + +- -- MV: PRIVS: schema +- -- crunchy data extension, check_access +- -- SELECT role_path, base_role, as_role, objtype, schemaname, objname, array_to_string(array_agg(privname),',') as privs FROM all_access() +- -- WHERE base_role != CURRENT_USER and objtype = 'schema' and schemaname = 'public' group by 1,2,3,4,5,6; ++ -- Issue#95 bypass if No ACL specified ++ IF NOT bNoACL THEN ++ -- MV: PRIVS: schema ++ -- crunchy data extension, check_access ++ -- SELECT role_path, base_role, as_role, objtype, schemaname, objname, array_to_string(array_agg(privname),',') as privs FROM all_access() ++ -- WHERE base_role != CURRENT_USER and objtype = 'schema' and schemaname = 'public' group by 1,2,3,4,5,6; + +- action := 'PRIVS: Schema'; +- cnt := 0; +- FOR arec IN +- SELECT 'GRANT ' || p.perm::perm_type || ' ON SCHEMA ' || quote_ident(dest_schema) || ' TO "' || r.rolname || '";' as schema_ddl +- FROM pg_catalog.pg_namespace AS n CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES ('USAGE'), ('CREATE')) AS p(perm) +- WHERE n.nspname = quote_ident(source_schema) AND NOT r.rolsuper AND has_schema_privilege(r.oid, n.oid, p.perm) order by r.rolname, p.perm::perm_type +- LOOP +- BEGIN +- cnt := cnt + 1; +- IF ddl_only THEN +- RAISE INFO '%', arec.schema_ddl; +- ELSE +- EXECUTE arec.schema_ddl; +- END IF; ++ action := 'PRIVS: Schema'; ++ cnt := 0; ++ FOR arec IN ++ SELECT 'GRANT ' || p.perm::perm_type || ' ON SCHEMA ' || quote_ident(dest_schema) || ' TO "' || r.rolname || '";' as schema_ddl ++ FROM pg_catalog.pg_namespace AS n ++ CROSS JOIN pg_catalog.pg_roles AS r ++ CROSS JOIN (VALUES ('USAGE'), ('CREATE')) AS p(perm) ++ WHERE n.nspname = quote_ident(source_schema) AND NOT r.rolsuper AND has_schema_privilege(r.oid, n.oid, p.perm) ++ ORDER BY r.rolname, p.perm::perm_type ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.schema_ddl; ++ ELSE ++ EXECUTE arec.schema_ddl; ++ END IF; + +- END; +- END LOOP; +- RAISE NOTICE 'SCHEMA PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ END; ++ END LOOP; ++ RAISE NOTICE 'SCHEMA PRIVS cloned: %', LPAD(cnt::text, 5, 
' '); ++ END IF; -- NO ACL BRANCH + +- -- MV: PRIVS: sequences +- action := 'PRIVS: Sequences'; +- cnt := 0; +- FOR arec IN +- SELECT 'GRANT ' || p.perm::perm_type || ' ON ' || quote_ident(dest_schema) || '.' || t.relname::text || ' TO "' || r.rolname || '";' as seq_ddl +- FROM pg_catalog.pg_class AS t CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES ('SELECT'), ('USAGE'), ('UPDATE')) AS p(perm) +- WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind = 'S' AND NOT r.rolsuper AND has_sequence_privilege(r.oid, t.oid, p.perm) +- LOOP +- BEGIN +- cnt := cnt + 1; +- IF ddl_only OR seq_cnt = 0 THEN +- RAISE INFO '%', arec.seq_ddl; +- ELSE +- EXECUTE arec.seq_ddl; +- END IF; ++ -- Issue#95 bypass if No ACL specified ++ IF NOT bNoACL THEN ++ -- MV: PRIVS: sequences ++ action := 'PRIVS: Sequences'; ++ cnt := 0; ++ FOR arec IN ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on t.relname ++ SELECT 'GRANT ' || p.perm::perm_type || ' ON ' || quote_ident(dest_schema) || '.' || quote_ident(t.relname::text) || ' TO "' || r.rolname || '";' as seq_ddl ++ FROM pg_catalog.pg_class AS t ++ CROSS JOIN pg_catalog.pg_roles AS r ++ CROSS JOIN (VALUES ('SELECT'), ('USAGE'), ('UPDATE')) AS p(perm) ++ WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind = 'S' AND NOT r.rolsuper AND has_sequence_privilege(r.oid, t.oid, p.perm) ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ -- IF bDebug THEN RAISE NOTICE 'DEBUG: ddl=%', arec.seq_ddl; END IF; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.seq_ddl; ++ ELSE ++ EXECUTE arec.seq_ddl; ++ END IF; ++ END; ++ END LOOP; ++ RAISE NOTICE ' SEQ. PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ END IF; -- NO ACL BRANCH + +- END; +- END LOOP; +- RAISE NOTICE ' SEQ. PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ -- Issue#95 bypass if No ACL specified ++ IF NOT bNoACL THEN ++ -- MV: PRIVS: functions ++ action := 'PRIVS: Functions/Procedures'; ++ cnt := 0; + +- -- MV: PRIVS: functions +- action := 'PRIVS: Functions'; +- cnt := 0; +- FOR arec IN +- SELECT 'GRANT EXECUTE ON FUNCTION ' || quote_ident(dest_schema) || '.' || regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', '') || ' TO "' || r.rolname || '";' as func_ddl +- FROM pg_catalog.pg_proc f CROSS JOIN pg_catalog.pg_roles AS r WHERE f.pronamespace::regnamespace::name = quote_ident(source_schema) AND NOT r.rolsuper AND has_function_privilege(r.oid, f.oid, 'EXECUTE') +- order by regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', '') +- LOOP +- BEGIN +- cnt := cnt + 1; +- IF ddl_only THEN +- RAISE INFO '%', arec.func_ddl; +- ELSE +- EXECUTE arec.func_ddl; ++ -- Issue#61 FIX: use set_config for empty string ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ ++ -- RAISE NOTICE ' source_schema=% dest_schema=%',source_schema, dest_schema; ++ FOR arec IN ++ -- 2021-03-05 MJV FIX: issue#35: caused exception in some functions with parameters and gave privileges to other users that should not have gotten them. ++ -- SELECT 'GRANT EXECUTE ON FUNCTION ' || quote_ident(dest_schema) || '.' 
|| replace(regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', ''), source_schema, dest_schema) || ' TO "' || r.rolname || '";' as func_ddl ++ -- FROM pg_catalog.pg_proc f CROSS JOIN pg_catalog.pg_roles AS r WHERE f.pronamespace::regnamespace::name = quote_ident(source_schema) AND NOT r.rolsuper AND has_function_privilege(r.oid, f.oid, 'EXECUTE') ++ -- order by regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', '') ++ ++ -- 2021-03-05 MJV FIX: issue#37: defaults cause problems, use system function that returns args WITHOUT DEFAULTS ++ -- COALESCE(r.routine_type, 'FUNCTION'): for aggregate functions, information_schema.routines contains NULL as routine_type value. ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on rp.routine_name ++ SELECT 'GRANT ' || rp.privilege_type || ' ON ' || COALESCE(r.routine_type, 'FUNCTION') || ' ' || quote_ident(dest_schema) || '.' || quote_ident(rp.routine_name) || ' (' || pg_get_function_identity_arguments(p.oid) || ') TO ' || string_agg(distinct rp.grantee, ',') || ';' as func_dcl ++ FROM information_schema.routine_privileges rp, information_schema.routines r, pg_proc p, pg_namespace n ++ WHERE rp.routine_schema = quote_ident(source_schema) ++ AND rp.is_grantable = 'YES' ++ AND rp.routine_schema = r.routine_schema ++ AND rp.routine_name = r.routine_name ++ AND rp.routine_schema = n.nspname ++ AND n.oid = p.pronamespace ++ AND p.proname = r.routine_name ++ GROUP BY rp.privilege_type, r.routine_type, rp.routine_name, pg_get_function_identity_arguments(p.oid) ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.func_dcl; ++ ELSE ++ EXECUTE arec.func_dcl; ++ END IF; ++ END; ++ END LOOP; ++ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; ++ RAISE NOTICE ' FUNC PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ END IF; -- NO ACL BRANCH ++ ++ -- Issue#95 bypass if No ACL specified ++ IF NOT bNoACL THEN ++ -- MV: PRIVS: tables ++ action := 'PRIVS: Tables'; ++ -- regular, partitioned, and foreign tables plus view and materialized view permissions. Ignored for now: implement foreign table defs. ++ cnt := 0; ++ FOR arec IN ++ -- SELECT 'GRANT ' || p.perm::perm_type || CASE WHEN t.relkind in ('r', 'p', 'f') THEN ' ON TABLE ' WHEN t.relkind in ('v', 'm') THEN ' ON ' END || quote_ident(dest_schema) || '.' || t.relname::text || ' TO "' || r.rolname || '";' as tbl_ddl, ++ -- has_table_privilege(r.oid, t.oid, p.perm) AS granted, t.relkind ++ -- FROM pg_catalog.pg_class AS t CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES (TEXT 'SELECT'), ('INSERT'), ('UPDATE'), ('DELETE'), ('TRUNCATE'), ('REFERENCES'), ('TRIGGER')) AS p(perm) ++ -- WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind in ('r', 'p', 'f', 'v', 'm') AND NOT r.rolsuper AND has_table_privilege(r.oid, t.oid, p.perm) order by t.relname::text, t.relkind ++ -- 2021-03-05 MJV FIX: Fixed Issue#36 for tables ++ SELECT c.relkind, 'GRANT ' || tb.privilege_type || CASE WHEN c.relkind in ('r', 'p') THEN ' ON TABLE ' WHEN c.relkind in ('v', 'm') THEN ' ON ' END || ++ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on t.relname ++ -- Issue#108 FIX: enclose double-quote grantees with special characters ++ -- quote_ident(dest_schema) || '.' || quote_ident(tb.table_name) || ' TO ' || string_agg(tb.grantee, ',') || ';' as tbl_dcl ++ quote_ident(dest_schema) || '.' 
|| quote_ident(tb.table_name) || ' TO ' || string_agg('"' || tb.grantee || '"', ',') || ';' as tbl_dcl ++ FROM information_schema.table_privileges tb, pg_class c, pg_namespace n ++ WHERE tb.table_schema = quote_ident(source_schema) AND tb.table_name = c.relname AND c.relkind in ('r', 'p', 'v', 'm') ++ AND c.relnamespace = n.oid AND n.nspname = quote_ident(source_schema) ++ GROUP BY c.relkind, tb.privilege_type, tb.table_schema, tb.table_name ++ LOOP ++ BEGIN ++ cnt := cnt + 1; ++ -- IF bDebug THEN RAISE NOTICE 'DEBUG: ddl=%', arec.tbl_dcl; END IF; ++ -- Issue#46. Fixed reference to invalid record name (tbl_ddl --> tbl_dcl). ++ IF arec.relkind = 'f' THEN ++ RAISE WARNING 'Foreign tables are not currently implemented, so skipping privs for them. ddl=%', arec.tbl_dcl; ++ ELSE ++ IF bDDLOnly THEN ++ RAISE INFO '%', arec.tbl_dcl; ++ ELSE ++ EXECUTE arec.tbl_dcl; ++ END IF; + END IF; ++ END; ++ END LOOP; ++ RAISE NOTICE ' TABLE PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ END IF; -- NO ACL BRANCH ++ ++ -- LOOP for regular tables and populate them if specified ++ -- Issue#75 moved from big table loop above to here. ++ IF bData THEN ++ r = clock_timestamp(); ++ -- IF bVerbose THEN RAISE NOTICE 'START: copy rows %',clock_timestamp() - t; END IF; ++ IF bVerbose THEN RAISE NOTICE 'Copying rows...'; END IF; ++ ++ EXECUTE 'SET search_path = ' || quote_ident(dest_schema) ; ++ action := 'Copy Rows'; ++ FOREACH tblelement IN ARRAY tblarray ++ LOOP ++ s = clock_timestamp(); ++ IF bDebug THEN RAISE NOTICE 'DEBUG1: no UDTs %', tblelement; END IF; ++ EXECUTE tblelement; ++ GET DIAGNOSTICS cnt = ROW_COUNT; ++ buffer = substring(tblelement, 13); ++ SELECT POSITION(' OVERRIDING SYSTEM VALUE SELECT ' IN buffer) INTO cnt2; ++ IF cnt2 = 0 THEN ++ SELECT POSITION(' SELECT ' IN buffer) INTO cnt2; ++ buffer = substring(buffer,1, cnt2); ++ ELSE ++ buffer = substring(buffer,1, cnt2); ++ END IF; ++ SELECT RPAD(buffer, 35, ' ') INTO buffer; ++ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'Populated cloned table, % Rows Copied: % seconds: %', buffer, LPAD(cnt::text, 10, ' '), LPAD(cnt2::text, 5, ' '); END IF; ++ tblscopied := tblscopied + 1; ++ END LOOP; + +- END; +- END LOOP; +- RAISE NOTICE ' FUNC PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ -- Issue#79 implementation ++ -- Do same for tables with user-defined elements using copy to file method ++ FOREACH tblelement IN ARRAY tblarray2 ++ LOOP ++ s = clock_timestamp(); ++ IF bDebug THEN RAISE NOTICE 'DEBUG2: UDTs %', tblelement; END IF; ++ EXECUTE tblelement; ++ GET DIAGNOSTICS cnt = ROW_COUNT; ++ ++ -- STATEMENT LOOKS LIKE THIS: ++ -- INSERT INTO sample11.warehouses SELECT * FROM sample.warehouses; ++ -- INSERT INTO sample11.person OVERRIDING SYSTEM VALUE SELECT * FROM sample.person; ++ -- COPY sample.address TO '/tmp/cloneschema.tmp' WITH DELIMITER AS ',';\ ++ buffer = TRIM(tblelement::text); ++ -- RAISE NOTICE 'element=%', buffer; ++ cnt1 = POSITION('INSERT INTO' IN buffer); ++ cnt2 = POSITION('COPY ' IN buffer); ++ IF cnt1 > 0 THEN ++ buffer = substring(buffer, 12); ++ ELSIF cnt2 > 0 THEN ++ buffer = substring(buffer, 5); ++ ELSE ++ RAISE EXCEPTION 'Programming Error for parsing tblarray2.'; ++ END IF; ++ ++ -- RAISE NOTICE 'buffer1=%', buffer; ++ cnt1 = POSITION(' OVERRIDING ' IN buffer); ++ cnt2 = POSITION('SELECT * FROM ' IN buffer); ++ cnt3 = POSITION(' FROM ' IN buffer); ++ cnt4 = POSITION(' TO ' IN buffer); ++ IF cnt1 > 0 THEN ++ buffer = substring(buffer, 1, cnt1-2); ++ ELSIF cnt2 > 0 THEN ++ 
buffer = substring(buffer, 1, cnt2-2); ++ ELSIF cnt3 > 0 THEN ++ buffer = substring(buffer, 1, cnt3-1); ++ ELSIF cnt4 > 0 THEN ++ -- skip the COPY TO statements ++ continue; ++ ELSE ++ RAISE EXCEPTION 'Programming Error for parsing tblarray2.'; ++ END IF; ++ -- RAISE NOTICE 'buffer2=%', buffer; ++ ++ SELECT RPAD(buffer, 35, ' ') INTO buffer; ++ -- RAISE NOTICE 'buffer3=%', buffer; ++ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'Populated cloned table, % Rows Copied: % seconds: %', buffer, LPAD(cnt::text, 10, ' '), LPAD(cnt2::text, 5, ' '); END IF; ++ tblscopied := tblscopied + 1; ++ END LOOP; + +- -- MV: PRIVS: tables +- action := 'PRIVS: Tables'; +- -- regular, partitioned, and foreign tables plus view and materialized view permissions. TODO: implement foreign table defs. ++ -- Issue#101 ++ -- Do same for tables with user-defined elements using direct method with text cast ++ FOREACH tblelement IN ARRAY tblarray3 ++ LOOP ++ s = clock_timestamp(); ++ IF bDebug THEN RAISE NOTICE 'DEBUG3: UDTs %', tblelement; END IF; ++ EXECUTE tblelement; ++ GET DIAGNOSTICS cnt = ROW_COUNT; ++ cnt2 = POSITION(' (' IN tblelement::text); ++ IF cnt2 > 0 THEN ++ buffer = substring(tblelement, 1, cnt2); ++ buffer = substring(buffer, 6); ++ SELECT RPAD(buffer, 35, ' ') INTO buffer; ++ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'Populated cloned table, % Rows Copied: % seconds: %', buffer, LPAD(cnt::text, 10, ' '), LPAD(cnt2::text, 5, ' '); END IF; ++ tblscopied := tblscopied + 1; ++ END IF; ++ END LOOP; ++ ++ -- Issue#98 MVs deferred until now ++ FOREACH tblelement IN ARRAY mvarray ++ LOOP ++ s = clock_timestamp(); ++ EXECUTE tblelement; ++ -- get diagnostics for MV creates or refreshes does not work, always returns 1 ++ GET DIAGNOSTICS cnt = ROW_COUNT; ++ buffer = substring(tblelement, 25); ++ cnt2 = POSITION(' AS ' IN buffer); ++ IF cnt2 > 0 THEN ++ buffer = substring(buffer, 1, cnt2); ++ SELECT RPAD(buffer, 36, ' ') INTO buffer; ++ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'Populated Mat. View, % Rows Inserted: ? seconds: %', buffer, LPAD(cnt2::text, 5, ' '); END IF; ++ mvscopied := mvscopied + 1; ++ END IF; ++ END LOOP; ++ ++ cnt := cast(extract(epoch from (clock_timestamp() - r)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'Copy rows duration: % seconds',cnt; END IF; ++ END IF; ++ RAISE NOTICE ' TABLES copied: %', LPAD(tblscopied::text, 5, ' '); ++ RAISE NOTICE ' MATVIEWS refreshed: %', LPAD(mvscopied::text, 5, ' '); ++ ++ ++ -- Issue#78 forces us to defer FKeys until the end since we previously did row copies before FKeys ++ -- add FK constraint ++ action := 'FK Constraints'; + cnt := 0; +- FOR arec IN +- SELECT 'GRANT ' || p.perm::perm_type || CASE WHEN t.relkind in ('r', 'p', 'f') THEN ' ON TABLE ' WHEN t.relkind in ('v', 'm') THEN ' ON ' END || quote_ident(dest_schema) || '.' 
|| t.relname::text || ' TO "' || r.rolname || '";' as tbl_ddl, +- has_table_privilege(r.oid, t.oid, p.perm) AS granted, t.relkind +- FROM pg_catalog.pg_class AS t CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES (TEXT 'SELECT'), ('INSERT'), ('UPDATE'), ('DELETE'), ('TRUNCATE'), ('REFERENCES'), ('TRIGGER')) AS p(perm) +- WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind in ('r', 'p', 'f', 'v', 'm') AND NOT r.rolsuper AND has_table_privilege(r.oid, t.oid, p.perm) order by t.relname::text, t.relkind +- LOOP +- BEGIN +- cnt := cnt + 1; +- -- RAISE NOTICE 'ddl=%', arec.tbl_ddl; +- IF arec.relkind = 'f' THEN +- RAISE WARNING 'Foreign tables are not currently implemented, so skipping privs for them. ddl=%', arec.tbl_ddl; +- ELSE +- IF ddl_only THEN +- RAISE INFO '%', arec.tbl_ddl; +- ELSE +- EXECUTE arec.tbl_ddl; +- END IF; + +- END IF; +- END; ++ -- Issue#61 FIX: use set_config for empty string ++ -- SET search_path = ''; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ ++ FOR qry IN ++ SELECT 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || quote_ident(rn.relname) ++ || ' ADD CONSTRAINT ' || quote_ident(ct.conname) || ' ' || REPLACE(pg_get_constraintdef(ct.oid), 'REFERENCES ' || quote_ident(source_schema) || '.', 'REFERENCES ' ++ || quote_ident(dest_schema) || '.') || ';' ++ FROM pg_constraint ct ++ JOIN pg_class rn ON rn.oid = ct.conrelid ++ -- Issue#103 needed to add this left join ++ LEFT JOIN pg_inherits i ON (rn.oid = i.inhrelid) ++ WHERE connamespace = src_oid ++ AND rn.relkind = 'r' ++ AND ct.contype = 'f' ++ -- Issue#103 fix: needed to also add this null check ++ AND i.inhrelid is null ++ LOOP ++ cnt := cnt + 1; ++ IF bDDLOnly THEN ++ RAISE INFO '%', qry; ++ ELSE ++ IF bDebug THEN RAISE NOTICE 'DEBUG: adding FKEY constraint: %', qry; END IF; ++ EXECUTE qry; ++ END IF; + END LOOP; +- RAISE NOTICE ' TABLE PRIVS cloned: %', LPAD(cnt::text, 5, ' '); ++ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; ++ RAISE NOTICE ' FKEYS cloned: %', LPAD(cnt::text, 5, ' '); + +- -- Set the search_path back to what it was before +- EXECUTE 'SET search_path = ' || src_path_old; ++ ++ IF src_path_old = '' OR src_path_old = '""' THEN ++ -- RAISE NOTICE 'Restoring old search_path to empty string'; ++ SELECT set_config('search_path', '', false) into v_dummy; ++ ELSE ++ -- RAISE NOTICE 'Restoring old search_path to:%', src_path_old; ++ EXECUTE 'SET search_path = ' || src_path_old; ++ END IF; ++ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; ++ IF bDebug THEN RAISE NOTICE 'DEBUG: setting search_path back to what it was: %', v_dummy; END IF; ++ cnt := cast(extract(epoch from (clock_timestamp() - t)) as numeric(18,3)); ++ IF bVerbose THEN RAISE NOTICE 'clone_schema duration: % seconds',cnt; END IF; + + EXCEPTION + WHEN others THEN + BEGIN + GET STACKED DIAGNOSTICS v_diag1 = MESSAGE_TEXT, v_diag2 = PG_EXCEPTION_DETAIL, v_diag3 = PG_EXCEPTION_HINT, v_diag4 = RETURNED_SQLSTATE, v_diag5 = PG_CONTEXT, v_diag6 = PG_EXCEPTION_CONTEXT; +- -- v_ret := 'line=' || v_diag6 || '. '|| v_diag4 || '. ' || v_diag1 || ' .' || v_diag2 || ' .' || v_diag3; +- v_ret := 'line=' || v_diag6 || '. '|| v_diag4 || '. ' || v_diag1; +- RAISE EXCEPTION 'Action: % Diagnostics: %',action, v_ret; +- -- Set the search_path back to what it was before +- EXECUTE 'SET search_path = ' || src_path_old; ++ v_ret := 'line=' || v_diag6 || '. '|| v_diag4 || '. 
' || v_diag1; ++ -- Issue#101: added version to exception output ++ -- RAISE NOTICE 'v_diag1=% v_diag2=% v_diag3=% v_diag4=% v_diag5=% v_diag6=%', v_diag1, v_diag2, v_diag3, v_diag4, v_diag5, v_diag6; ++ buffer2 = ''; ++ IF action = 'Copy Rows' AND v_diag4 = '42704' THEN ++ -- Issue#105 Help user to fix the problem. ++ buffer2 = 'It appears you have a USER-DEFINED column type mismatch. Try running clone_schema with the FILECOPY option. '; ++ END IF; ++ IF lastsql <> '' THEN ++ buffer = v_ret || E'\n'|| buffer2 || E'\n'|| lastsql; ++ ELSE ++ buffer = v_ret || E'\n'|| buffer2; ++ END IF; ++ RAISE EXCEPTION 'Version: % Action: % Diagnostics: %',v_version, action, buffer; ++ ++ IF src_path_old = '' THEN ++ -- RAISE NOTICE 'setting old search_path to empty string'; ++ SELECT set_config('search_path', '', false); ++ ELSE ++ -- RAISE NOTICE 'setting old search_path to:%', src_path_old; ++ EXECUTE 'SET search_path = ' || src_path_old; ++ END IF; ++ + RETURN; + END; + +@@ -713,14 +3261,14 @@ + END; + + $BODY$ +- LANGUAGE plpgsql VOLATILE +- COST 100; +-ALTER FUNCTION public.clone_schema(text, text, boolean, boolean) OWNER TO "{db_user}"; +-""" ++ LANGUAGE plpgsql VOLATILE COST 100; + ++ALTER FUNCTION public.clone_schema(text, text, cloneparms[]) OWNER TO "{db_user}"; ++-- REVOKE ALL PRIVILEGES ON FUNCTION clone_schema(text, text, cloneparms[]) FROM public; ++""" # noqa + +-class CloneSchema: + ++class CloneSchema: + def _create_clone_schema_function(self): + """ + Creates a postgres function `clone_schema` that copies a schema and its +@@ -752,9 +3300,8 @@ def clone_schema(self, base_schema_name, new_schema_name, set_connection=True): + if schema_exists(new_schema_name): + raise ValidationError("New schema name already exists") + +- sql = 'SELECT clone_schema(%(base_schema)s, %(new_schema)s, true, false)' ++ sql = "SELECT clone_schema(%(base_schema)s, %(new_schema)s, 'DATA')" + cursor.execute( +- sql, +- {'base_schema': base_schema_name, 'new_schema': new_schema_name} ++ sql, {"base_schema": base_schema_name, "new_schema": new_schema_name} + ) + cursor.close() + +From c49b4a1c254ebe713259515a4c8373a9b19dd000 Mon Sep 17 00:00:00 2001 +From: Marc 'risson' Schmitt +Date: Thu, 16 Nov 2023 13:32:06 +0100 +Subject: [PATCH 2/3] clone: allow setting up the clone mode (DATA, NODATA) + +Signed-off-by: Marc 'risson' Schmitt +--- + django_tenants/clone.py | 13 ++++++++++--- + django_tenants/models.py | 11 ++++++++++- + 2 files changed, 20 insertions(+), 4 deletions(-) + +diff --git a/django_tenants/clone.py b/django_tenants/clone.py +index 3afce109..6fa52c04 100644 +--- a/django_tenants/clone.py ++++ b/django_tenants/clone.py +@@ -3281,7 +3281,9 @@ def _create_clone_schema_function(self): + cursor.execute(CLONE_SCHEMA_FUNCTION.format(db_user=db_user)) + cursor.close() + +- def clone_schema(self, base_schema_name, new_schema_name, set_connection=True): ++ def clone_schema( ++ self, base_schema_name, new_schema_name, clone_mode="DATA", set_connection=True ++ ): + """ + Creates a new schema `new_schema_name` as a clone of an existing schema + `old_schema_name`. 
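Editor's note: the clone_schema() signature change just above and the TenantMixin.clone_mode attribute introduced further below combine into the following usage. This is a minimal sketch only — ExampleTenant and the schema names are invented; the import paths simply follow the file paths in this patch:

# A hedged usage sketch, assuming a normal django-tenants project;
# ExampleTenant and the schema names are hypothetical.
from django_tenants.clone import CloneSchema
from django_tenants.models import TenantMixin

class ExampleTenant(TenantMixin):
    # "NODATA" copies only the schema structure when this tenant is
    # created from TENANT_BASE_SCHEMA; the default, "DATA", copies rows too.
    clone_mode = "NODATA"

# The same choice can be made for a one-off clone:
CloneSchema().clone_schema("base_tenant", "tenant_copy", clone_mode="NODATA")

In both cases the string ends up as the third argument of the SELECT clone_schema(...) statement patched in below.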
+@@ -3300,8 +3302,13 @@ def clone_schema(self, base_schema_name, new_schema_name, set_connection=True): + if schema_exists(new_schema_name): + raise ValidationError("New schema name already exists") + +- sql = "SELECT clone_schema(%(base_schema)s, %(new_schema)s, 'DATA')" ++ sql = "SELECT clone_schema(%(base_schema)s, %(new_schema)s, %(clone_mode)s)" + cursor.execute( +- sql, {"base_schema": base_schema_name, "new_schema": new_schema_name} ++ sql, ++ { ++ "base_schema": base_schema_name, ++ "new_schema": new_schema_name, ++ "clone_mode": clone_mode, ++ }, + ) + cursor.close() +diff --git a/django_tenants/models.py b/django_tenants/models.py +index 0d1812d8..655e1994 100644 +--- a/django_tenants/models.py ++++ b/django_tenants/models.py +@@ -29,6 +29,13 @@ class TenantMixin(models.Model): + to be automatically created upon save. + """ + ++ clone_mode = "DATA" ++ """ ++ One of "DATA", "NODATA". ++ When using TENANT_BASE_SCHEMA, controls whether only the database ++ structure will be copied, or if data will be copied along with it. ++ """ ++ + schema_name = models.CharField(max_length=63, unique=True, db_index=True, + validators=[_check_schema_name]) + +@@ -184,7 +191,9 @@ def create_schema(self, check_if_exists=False, sync_schema=True, + # copy tables and data from provided model schema + base_schema = get_tenant_base_schema() + clone_schema = CloneSchema() +- clone_schema.clone_schema(base_schema, self.schema_name) ++ clone_schema.clone_schema( ++ base_schema, self.schema_name, self.clone_mode ++ ) + + call_command('migrate_schemas', + tenant=True, + +From 218fbcd3bfa555b20c6fb904e5fcf307d69f18af Mon Sep 17 00:00:00 2001 +From: Marc 'risson' Schmitt +Date: Thu, 16 Nov 2023 13:32:54 +0100 +Subject: [PATCH 3/3] clone: always (re-)create the clone_schema function + +Signed-off-by: Marc 'risson' Schmitt +--- + django_tenants/clone.py | 10 +++------- + 1 file changed, 3 insertions(+), 7 deletions(-) + +diff --git a/django_tenants/clone.py b/django_tenants/clone.py +index 6fa52c04..63fb8e22 100644 +--- a/django_tenants/clone.py ++++ b/django_tenants/clone.py +@@ -1,7 +1,6 @@ + from django.conf import settings + from django.core.exceptions import ValidationError + from django.db import connection, transaction +-from django.db.utils import ProgrammingError + + from django_tenants.utils import schema_exists + +@@ -3292,12 +3291,9 @@ def clone_schema( + connection.set_schema_to_public() + cursor = connection.cursor() + +- # check if the clone_schema function already exists in the db +- try: +- cursor.execute("SELECT 'clone_schema'::regproc") +- except ProgrammingError: +- self._create_clone_schema_function() +- transaction.commit() ++ # create or update the clone_schema function in the db ++ self._create_clone_schema_function() ++ transaction.commit() + + if schema_exists(new_schema_name): + raise ValidationError("New schema name already exists") diff --git a/ilot/py3-django-tenants/APKBUILD b/ilot/py3-django-tenants/APKBUILD new file mode 100644 index 0000000..0183781 --- /dev/null +++ b/ilot/py3-django-tenants/APKBUILD @@ -0,0 +1,43 @@ +# Contributor: Antoine Martin (ayakael) +# Maintainer: Antoine Martin (ayakael) +pkgname=py3-django-tenants +#_pkgreal is used by apkbuild-pypi to find modules at PyPI +_pkgreal=django-tenants +pkgver=3.6.1 +pkgrel=1 +pkgdesc="Tenant support for Django using PostgreSQL schemas." 
+url="https://pypi.python.org/project/django-tenants" +arch="noarch" +license="KIT" +depends="py3-django py3-psycopg py3-gunicorn py3-coverage" +checkdepends="python3-dev py3-pytest" +makedepends="py3-setuptools py3-gpep517 py3-wheel" +source=" + $pkgname-$pkgver.tar.gz::https://codeload.github.com/django-tenants/django-tenants/tar.gz/refs/tags/v$pkgver + 997_update-from-pgclone-schema.patch + " +builddir="$srcdir/$_pkgreal-$pkgver" +options="!check" # Requires setting up test database +subpackages="$pkgname-pyc" + +build() { + gpep517 build-wheel \ + --wheel-dir .dist \ + --output-fd 3 3>&1 >&2 +} + +check() { + python3 -m venv --clear --without-pip --system-site-packages .testenv + .testenv/bin/python3 -m installer .dist/*.whl + DJANGO_SETTINGS_MODULE=tests.settings .testenv/bin/python3 -m pytest -v +} + +package() { + python3 -m installer -d "$pkgdir" \ + .dist/*.whl +} + +sha512sums=" +b18afce81ccc89e49fcc4ebe85d90be602415ca898c1660a4e71e2bef6a3ed2e8c724e94b61d8c6f48f3fb19eb2a87d6a6f5bbf449b3e2f661f87e4b5638eafb py3-django-tenants-3.6.1.tar.gz +f2424bb188db2e3c7d13c15e5bdf0959c6f794e68dbc677c8b876d4faa321f78aded5565539f1bfd97583c6df0fcc19ec05abe203b08407e4446dd7194756825 997_update-from-pgclone-schema.patch +" diff --git a/ilot/py3-kadmin/APKBUILD b/ilot/py3-kadmin/APKBUILD deleted file mode 100644 index 894a945..0000000 --- a/ilot/py3-kadmin/APKBUILD +++ /dev/null @@ -1,40 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-kadmin -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=kadmin -pkgver=0.2.0 -pkgrel=0 -pkgdesc="Python module for kerberos admin (kadm5)" -url="https://github.com/authentik-community/python-kadmin" -arch="all" -license="MIT" -checkdepends="py3-pytest py3-k5test" -makedepends="py3-setuptools py3-gpep517 py3-wheel poetry python3-dev" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/authentik-community/python-kadmin/archive/refs/tags/v$pkgver.tar.gz - fix-int-conversion-error.patch" -builddir="$srcdir"/python-kadmin-$pkgver -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 test/tests.py -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -b405e914cb296f2bfe4f78d2791329804a0db02816182517b59ed1452a21d51dafe303609fddafbbeea57128bba4bcdfcd9b363f193ae0402cc52cf1b3b9020e py3-kadmin-0.2.0.tar.gz -e17223f8597d51ea099f5d4483dd72545b7d64ad76895553a6b7112416536aae93a59a2fd7aea044420495ab8146db7290abd826b268b2d6e518442c3c85c506 fix-int-conversion-error.patch -" diff --git a/ilot/py3-kadmin/fix-int-conversion-error.patch b/ilot/py3-kadmin/fix-int-conversion-error.patch deleted file mode 100644 index 445b76b..0000000 --- a/ilot/py3-kadmin/fix-int-conversion-error.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/src/PyKAdminPolicyObject.c.orig b/src/PyKAdminPolicyObject.c -index 0bf3ee8..68387c4 100644 ---- a/src/PyKAdminPolicyObject.c.orig -+++ b/src/PyKAdminPolicyObject.c -@@ -120,7 +120,7 @@ PyTypeObject PyKAdminPolicyObject_Type = { - sizeof(PyKAdminPolicyObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - (destructor)PyKAdminPolicyObject_dealloc, /*tp_dealloc*/ -- KAdminPolicyObject_print, /*tp_print*/ -+ 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - 0, /*tp_compare*/ diff --git a/ilot/py3-microsoft-kiota-abstractions/APKBUILD 
b/ilot/py3-microsoft-kiota-abstractions/APKBUILD deleted file mode 100644 index 24d7e1e..0000000 --- a/ilot/py3-microsoft-kiota-abstractions/APKBUILD +++ /dev/null @@ -1,44 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-microsoft-kiota-abstractions -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=microsoft-kiota-abstractions -pkgver=1.6.8 -pkgrel=0 -pkgdesc="Abstractions library for Kiota generated Python clients" -url="https://pypi.python.org/project/microsoft-kiota-abstractions" -arch="noarch" -license="MIT" -depends=" - py3-std-uritemplate<2.0.0 - py3-opentelemetry-sdk - py3-importlib-metadata - " -checkdepends="py3-pytest py3-pytest-asyncio" -makedepends="poetry py3-gpep517 py3-wheel py3-flit" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-python/archive/refs/tags/microsoft-kiota-abstractions-v$pkgver.tar.gz - " -builddir="$srcdir/kiota-python-microsoft-kiota-abstractions-v$pkgver/packages/abstractions" -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -55341b1ff3fb1a516ceb84817db991d6e6aa83b01326f64cf21690dee1ab84e9c9c4f7162f9f71ec1261b4e0380b73b13284128bd786b80da29faf968720b355 py3-microsoft-kiota-abstractions-1.6.8.tar.gz -" diff --git a/ilot/py3-microsoft-kiota-authentication-azure/APKBUILD b/ilot/py3-microsoft-kiota-authentication-azure/APKBUILD deleted file mode 100644 index c84acdc..0000000 --- a/ilot/py3-microsoft-kiota-authentication-azure/APKBUILD +++ /dev/null @@ -1,45 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-microsoft-kiota-authentication-azure -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=microsoft-kiota-authentication-azure -pkgver=1.6.8 -pkgrel=0 -pkgdesc="Authentication provider for Kiota using Azure Identity" -url="https://pypi.python.org/project/microsoft-kiota-authentication-azure" -arch="noarch" -license="MIT" -depends=" - py3-azure-core - py3-microsoft-kiota-abstractions - py3-importlib-metadata - " -checkdepends="py3-pytest" -makedepends="poetry py3-gpep517 py3-wheel py3-flit" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-python/archive/refs/tags/microsoft-kiota-authentication-azure-v$pkgver.tar.gz - " -options="!check" # TODO -builddir="$srcdir/kiota-python-microsoft-kiota-authentication-azure-v$pkgver/packages/authentication/azure" -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -d661d379f036b45bf356e349e28d3478f4a10b351dfde2d1b11a429c0f2160cde9696990cc18d72a224cfd3cc4c90bdc2e6f07d9e4763bd126cd9f66a09b9bec py3-microsoft-kiota-authentication-azure-1.6.8.tar.gz -" diff --git a/ilot/py3-microsoft-kiota-http/APKBUILD b/ilot/py3-microsoft-kiota-http/APKBUILD deleted file mode 100644 index bebb592..0000000 --- a/ilot/py3-microsoft-kiota-http/APKBUILD +++ /dev/null @@ -1,44 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# 
Maintainer: Antoine Martin (ayakael) -pkgname=py3-microsoft-kiota-http -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=microsoft-kiota-http -pkgver=1.6.8 -pkgrel=0 -pkgdesc="Kiota http request adapter implementation for httpx library" -url="https://pypi.python.org/project/microsoft-kiota-http" -arch="noarch" -license="MIT" -depends=" - py3-microsoft-kiota-abstractions - py3-httpx - " -checkdepends="py3-pytest" -makedepends="poetry py3-gpep517 py3-wheel py3-flit" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-python/archive/refs/tags/microsoft-kiota-http-v$pkgver.tar.gz - " -options="!check" # TODO -builddir="$srcdir/kiota-python-microsoft-kiota-http-v$pkgver/packages/http/httpx" -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -c453c89d31cc062f2d8be4a28bda0666dbde6b5a8e42855892cda72e5d104e6bb5516db01d9feb7f619b8fa77237c9e3badd24b29326f627f95b69210835321d py3-microsoft-kiota-http-1.6.8.tar.gz -" diff --git a/ilot/py3-microsoft-kiota-serialization-form/APKBUILD b/ilot/py3-microsoft-kiota-serialization-form/APKBUILD deleted file mode 100644 index fccfd62..0000000 --- a/ilot/py3-microsoft-kiota-serialization-form/APKBUILD +++ /dev/null @@ -1,43 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-microsoft-kiota-serialization-form -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=microsoft-kiota-serialization-form -pkgver=1.6.8 -pkgrel=0 -pkgdesc="Kiota Form encoded serialization implementation for Python" -url="https://pypi.python.org/project/microsoft-kiota-serialization-form" -arch="noarch" -license="MIT" -depends=" - py3-microsoft-kiota-abstractions - py3-pendulum - " -checkdepends="py3-pytest" -makedepends="poetry py3-gpep517 py3-wheel py3-flit" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-python/archive/refs/tags/microsoft-kiota-serialization-form-v$pkgver.tar.gz - " -builddir="$srcdir/kiota-python-microsoft-kiota-serialization-form-v$pkgver/packages/serialization/form" -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -0e4fabe18980612ca3f55fd7350148d2393da3f35dc79cd4fe56b01f50bc2af147bde5e294580d83b97b4a549d77e6581ece8ddb19ea09ee92fd6cbfead0d3db py3-microsoft-kiota-serialization-form-1.6.8.tar.gz -" diff --git a/ilot/py3-microsoft-kiota-serialization-json/APKBUILD b/ilot/py3-microsoft-kiota-serialization-json/APKBUILD deleted file mode 100644 index f59d827..0000000 --- a/ilot/py3-microsoft-kiota-serialization-json/APKBUILD +++ /dev/null @@ -1,44 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-microsoft-kiota-serialization-json -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=microsoft-kiota-serialization-json -pkgver=1.6.8 -pkgrel=0 -pkgdesc="JSON serialization implementation for Kiota clients in Python" 
-url="https://pypi.python.org/project/microsoft-kiota-serialization-json" -arch="noarch" -license="MIT" -depends=" - py3-microsoft-kiota-abstractions - py3-pendulum - " -checkdepends="py3-pytest" -makedepends="poetry py3-gpep517 py3-wheel py3-flit" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-python/archive/refs/tags/microsoft-kiota-serialization-json-v$pkgver.tar.gz - " -options="!check" # TODO -builddir="$srcdir/kiota-python-microsoft-kiota-serialization-json-v$pkgver/packages/serialization/json" -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -42b8e1d2bfb175e52876314a598647de7b70acb8140cefbfb20d0f8de241bbb03a1cfe6c7108a56047f2a8e3f8f781a23fe54d5612d68a5966340279ff0eb8bc py3-microsoft-kiota-serialization-json-1.6.8.tar.gz -" diff --git a/ilot/py3-microsoft-kiota-serialization-multipart/APKBUILD b/ilot/py3-microsoft-kiota-serialization-multipart/APKBUILD deleted file mode 100644 index c0da7ff..0000000 --- a/ilot/py3-microsoft-kiota-serialization-multipart/APKBUILD +++ /dev/null @@ -1,40 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-microsoft-kiota-serialization-multipart -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=microsoft-kiota-serialization-multipart -pkgver=1.6.8 -pkgrel=0 -pkgdesc="Multipart serialization implementation for python based kiota clients" -url="https://pypi.python.org/project/microsoft-kiota-serialization-multipart" -arch="noarch" -license="MIT" -depends="py3-microsoft-kiota-abstractions py3-microsoft-kiota-serialization-json" -checkdepends="py3-pytest" -makedepends="poetry py3-gpep517 py3-wheel py3-flit" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-python/archive/refs/tags/microsoft-kiota-serialization-multipart-v$pkgver.tar.gz - " -builddir="$srcdir/kiota-python-microsoft-kiota-serialization-multipart-v$pkgver/packages/serialization/multipart" -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -d6d6d36fe55f4aa595d380e43f93f3de7674633edba676aec16fc26254a12e4f700427fedf1bedfddde30a7f708c93ccbbe586bb0e6950748a2debe609bf44c1 py3-microsoft-kiota-serialization-multipart-1.6.8.tar.gz -" diff --git a/ilot/py3-microsoft-kiota-serialization-text/APKBUILD b/ilot/py3-microsoft-kiota-serialization-text/APKBUILD deleted file mode 100644 index 3c38b26..0000000 --- a/ilot/py3-microsoft-kiota-serialization-text/APKBUILD +++ /dev/null @@ -1,43 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-microsoft-kiota-serialization-text -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=microsoft-kiota-serialization-text -pkgver=1.6.8 -pkgrel=0 -pkgdesc="Text serialization implementation for Kiota generated clients in Python" -url="https://pypi.python.org/project/microsoft-kiota-abstractions" -arch="noarch" -license="MIT" -depends=" - 
py3-microsoft-kiota-abstractions - py3-dateutil - " -checkdepends="py3-pytest" -makedepends="poetry py3-gpep517 py3-wheel py3-flit" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/microsoft/kiota-python/archive/refs/tags/microsoft-kiota-serialization-text-v$pkgver.tar.gz - " -builddir="$srcdir/kiota-python-microsoft-kiota-serialization-text-v$pkgver/packages/serialization/text" -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -55dbc87253819f496e2f25de2bf24b170761f335117da414bb35c6db9008e9ca8c6fd13d5e429914c322a850a57858d9abdee7dc209ad55e469182995290d568 py3-microsoft-kiota-serialization-text-1.6.8.tar.gz -" diff --git a/ilot/py3-msal-extensions/APKBUILD b/ilot/py3-msal-extensions/APKBUILD deleted file mode 100644 index a2e26c4..0000000 --- a/ilot/py3-msal-extensions/APKBUILD +++ /dev/null @@ -1,42 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-msal-extensions -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=msal-extensions -pkgver=1.2.0 -pkgrel=0 -pkgdesc="Microsoft Authentication Library extensions (MSAL EX) provides a persistence API " -url="https://pypi.org/project/msal-extensions" -arch="noarch" -license="MIT" -depends=" - py3-msal - py3-portalocker -" -checkdepends="py3-pytest" -makedepends="py3-setuptools py3-gpep517 py3-wheel" -options="!check" #todo -source="$pkgname-$pkgver.tar.gz::https://github.com/AzureAD/microsoft-authentication-extensions-for-python/archive/refs/tags/$pkgver.tar.gz" -builddir="$srcdir"/microsoft-authentication-extensions-for-python-$pkgver -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -847a87e2f7a7b71d47fb758bd3445666b2a9f1f2034c575f8a78ba687e1c5faa682b89ea78906d4afa1350bca608cd9452c7ad244c7ec456145c15c49ad46fb2 py3-msal-extensions-1.2.0.tar.gz -" diff --git a/ilot/py3-msal/APKBUILD b/ilot/py3-msal/APKBUILD deleted file mode 100644 index 02b267a..0000000 --- a/ilot/py3-msal/APKBUILD +++ /dev/null @@ -1,43 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-msal -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=msal -pkgver=1.31.1 -pkgrel=0 -pkgdesc="Microsoft Authentication Library (MSAL) for Python" -url="https://pypi.org/project/msal" -arch="noarch" -license="MIT" -depends=" - py3-requests - py3-cryptography - py3-jwt -" -checkdepends="py3-pytest" -makedepends="py3-setuptools py3-gpep517 py3-wheel" -options="!check" #todo -source="$pkgname-$pkgver.tar.gz::https://github.com/AzureAD/microsoft-authentication-library-for-python/archive/refs/tags/$pkgver.tar.gz" -builddir="$srcdir"/microsoft-authentication-library-for-python-$pkgver -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer 
.dist/*.whl - .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -f75541337f09ba29d4de13206346ad7793b3f2bdbdbf8fcb050ee7976b397ca666d61aee21121a4efdd7c150c9d2f87f75812e7b8aa96a5f8ac5219e7a946af2 py3-msal-1.31.1.tar.gz -" diff --git a/ilot/py3-msgraph-core/APKBUILD b/ilot/py3-msgraph-core/APKBUILD deleted file mode 100644 index e8d9cb5..0000000 --- a/ilot/py3-msgraph-core/APKBUILD +++ /dev/null @@ -1,43 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-msgraph-core -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=msgraph-core -pkgver=1.1.8 -pkgrel=0 -pkgdesc="The Microsoft Graph Python SDK" -url="https://pypi.python.org/project/msgraph-core" -arch="noarch" -license="MIT" -depends=" - py3-azure-identity - py3-microsoft-kiota-authentication-azure - py3-microsoft-kiota-http - " -checkdepends="py3-pytest" -makedepends="py3-setuptools py3-gpep517 py3-wheel py3-flit" -source="$pkgname-$pkgver.tar.gz::https://github.com/microsoftgraph/msgraph-sdk-python-core/archive/refs/tags/v$pkgver.tar.gz" -options="!check" # TODO -builddir="$srcdir/msgraph-sdk-python-core-$pkgver" -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -0cae6f76cb1373d1ef76448e47b9951e5076a144140c19edc14186f7bfd92930e50c9f6c459170e3362ef267903cdf261d1897566983a7302beab205f9d61389 py3-msgraph-core-1.1.8.tar.gz -" diff --git a/ilot/py3-msgraph-sdk/APKBUILD b/ilot/py3-msgraph-sdk/APKBUILD deleted file mode 100644 index f23f733..0000000 --- a/ilot/py3-msgraph-sdk/APKBUILD +++ /dev/null @@ -1,44 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-msgraph-sdk -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=msgraph-sdk -pkgver=1.16.0 -pkgrel=0 -pkgdesc="The Microsoft Graph Python SDK" -url="https://pypi.python.org/project/msgraph-sdk" -arch="noarch" -license="MIT" -depends=" - py3-microsoft-kiota-serialization-text - py3-microsoft-kiota-serialization-form - py3-microsoft-kiota-serialization-multipart - py3-msgraph-core - " -checkdepends="py3-pytest" -makedepends="py3-setuptools py3-gpep517 py3-wheel py3-flit" -source="$pkgname-$pkgver.tar.gz::https://github.com/microsoftgraph/msgraph-sdk-python/archive/refs/tags/v$pkgver.tar.gz" -options="!check" # TODO -builddir="$srcdir/$_pkgreal-python-$pkgver" -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -af930e5e470f6ac78724650885f70cf447482a53f90043d326b3e00dc7572fd0d476658ebb1677118010e38b54f1e4e609dcfb5fcef5664f05b25062786d11af py3-msgraph-sdk-1.16.0.tar.gz -" diff --git a/ilot/py3-opentelemetry-sdk/APKBUILD b/ilot/py3-opentelemetry-sdk/APKBUILD deleted file mode 100644 index 08bc2ad..0000000 --- a/ilot/py3-opentelemetry-sdk/APKBUILD +++ /dev/null @@ -1,75 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) 
-pkgname=py3-opentelemetry-sdk -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=opentelemetry-sdk -pkgver=1.29.0 -pkgrel=0 -pkgdesc="OpenTelemetry Python SDK" -url="https://github.com/open-telemetry/opentelemetry-python/tree/main" -arch="noarch" -license="Apache-2.0" -depends="py3-opentelemetry-semantic-conventions py3-typing-extensions" -checkdepends="py3-pytest" -makedepends="py3-setuptools py3-gpep517 py3-wheel py3-hatchling" -source="$pkgname-$pkgver.tar.gz::https://github.com/open-telemetry/opentelemetry-python/archive/refs/tags/v$pkgver.tar.gz" -builddir="$srcdir/opentelemetry-python-$pkgver" -options="!check" # TODO -# need to figure out -pyc -subpackages=" - $pkgname-pyc - py3-opentelemetry-api - py3-opentelemetry-semantic-conventions - py3-opentelemetry-proto - " - -build() { - for i in api semantic-conventions sdk proto; do - cd "$builddir"/opentelemetry-$i - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 - done -} - -check() { - for i in api semantic-conventions sdk proto; do - python3 -m venv --clear --without-pip --system-site-packages "$builddir"/.testenv - "$builddir"/.testenv/bin/python3 -m installer .dist/*.whl - "$builddir"/.testenv/bin/python3 -m pytest -v - done -} - -package() { - cd "$builddir"/opentelemetry-sdk - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -api() { - depends="py3-deprecated" - pkgdesc="OpenTelemetry Python API" - cd "$builddir"/opentelemetry-api - python3 -m installer -d "$subpkgdir" \ - .dist/*.whl -} - -conventions() { - pkgdesc="OpenTelemetry Semantic Conventions" - depends="py3-opentelemetry-api py3-deprecated" - cd "$builddir"/opentelemetry-semantic-conventions - python3 -m installer -d "$subpkgdir" \ - .dist/*.whl -} - -proto() { - pkgdesc="OpenTelemetry Python Proto" - depends="py3-protobuf" - cd "$builddir"/opentelemetry-proto - python3 -m installer -d "$subpkgdir" \ - .dist/*.whl -} - -sha512sums=" -92c90e6a684d8cfab3bba4d72612ccf53ae54cdd9784e3434b25adc3730fe114f21fd7aa21da80edf6e0e7c80b39c64ee31fb16f68b04809289bbf5d49d4ca2e py3-opentelemetry-sdk-1.29.0.tar.gz -" diff --git a/ilot/py3-scim2-filter-parser/APKBUILD b/ilot/py3-scim2-filter-parser/APKBUILD new file mode 100644 index 0000000..15d12e5 --- /dev/null +++ b/ilot/py3-scim2-filter-parser/APKBUILD @@ -0,0 +1,38 @@ +# Contributor: Antoine Martin (ayakael) +# Maintainer: Antoine Martin (ayakael) +pkgname=py3-scim2-filter-parser +#_pkgreal is used by apkbuild-pypi to find modules at PyPI +_pkgreal=scim2-filter-parser +pkgver=0.5.0 +pkgrel=1 +pkgdesc="A customizable parser/transpiler for SCIM2.0 filters" +url="https://pypi.python.org/project/scim2-filter-parser" +arch="noarch" +license="MIT" +depends="py3-django py3-sly" +checkdepends="py3-pytest" +makedepends="py3-setuptools py3-gpep517 py3-wheel poetry" +source="$pkgname-$pkgver.tar.gz::https://github.com/15five/scim2-filter-parser/archive/refs/tags/$pkgver.tar.gz" +builddir="$srcdir/$_pkgreal-$pkgver" +subpackages="$pkgname-pyc" + +build() { + gpep517 build-wheel \ + --wheel-dir .dist \ + --output-fd 3 3>&1 >&2 +} + +check() { + python3 -m venv --clear --without-pip --system-site-packages .testenv + .testenv/bin/python3 -m installer .dist/*.whl + .testenv/bin/python3 -m pytest -v +} + +package() { + python3 -m installer -d "$pkgdir" \ + .dist/*.whl +} + +sha512sums=" +5347852af6b82a764a32bc491a7e0f05f06b4f4d93dfa375668b5ca1a15ee58f488702536e350100fe5c96a5c94c492ea8cbd0e1952c5920d5a10e1453357f8c py3-scim2-filter-parser-0.5.0.tar.gz +" diff --git a/ilot/py3-std-uritemplate/APKBUILD 
b/ilot/py3-std-uritemplate/APKBUILD deleted file mode 100644 index caca02f..0000000 --- a/ilot/py3-std-uritemplate/APKBUILD +++ /dev/null @@ -1,41 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-std-uritemplate -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=std-uritemplate -pkgver=2.0.1 -pkgrel=0 -pkgdesc="A complete and maintained cross-language implementation of the Uri Template specification RFC 6570 Level 4" -url="https://pypi.python.org/project/std-uritemplate" -arch="noarch" -license="Apache-2.0" -depends="python3" -checkdepends="py3-pytest" -makedepends="py3-setuptools py3-gpep517 py3-wheel poetry" -source="$pkgname-$pkgver.tar.gz::https://github.com/std-uritemplate/std-uritemplate/archive/refs/tags/$pkgver.tar.gz" -options="!check" # TODO -builddir="$srcdir"/$_pkgreal-$pkgver/python -subpackages="$pkgname-pyc" - -prepare() { - default_prepare - ln -s ../Readme.md Readme.md -} -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - poetry run python test.py -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -e073a1204d65bb639cc93480b0f68e1edfe5ac3cff607b72c8da8916b7660eea2b2b246b5db02979cd5c856087958c84dc3bc5e9d76a9540f2ac2a7da8cd18df py3-std-uritemplate-2.0.1.tar.gz -" diff --git a/ilot/py3-tenant-schemas-celery/APKBUILD b/ilot/py3-tenant-schemas-celery/APKBUILD new file mode 100644 index 0000000..c5f9029 --- /dev/null +++ b/ilot/py3-tenant-schemas-celery/APKBUILD @@ -0,0 +1,41 @@ +# Contributor: Antoine Martin (ayakael) +# Maintainer: Antoine Martin (ayakael) +pkgname=py3-tenant-schemas-celery +#_pkgreal is used by apkbuild-pypi to find modules at PyPI +_pkgreal=tenant-schemas-celery +pkgver=2.2.0 +pkgrel=1 +pkgdesc="Celery integration for django-tenant-schemas and django-tenants" +url="https://pypi.python.org/project/tenant-schemas-celery" +arch="noarch" +license="MIT" +depends="py3-django-tenants py3-celery" +checkdepends="python3-dev py3-pytest" +makedepends="py3-setuptools py3-gpep517 py3-wheel" +source=" + $pkgname-$pkgver.tar.gz::https://codeload.github.com/maciej-gol/tenant-schemas-celery/tar.gz/refs/tags/$pkgver + " +options="!check" # Test suite wants docker +builddir="$srcdir/$_pkgreal-$pkgver" +subpackages="$pkgname-pyc" + +build() { + gpep517 build-wheel \ + --wheel-dir .dist \ + --output-fd 3 3>&1 >&2 +} + +check() { + python3 -m venv --clear --without-pip --system-site-packages .testenv + .testenv/bin/python3 -m installer .dist/*.whl + DJANGO_SETTINGS_MODULE=tests.settings .testenv/bin/python3 -m pytest -v +} + +package() { + python3 -m installer -d "$pkgdir" \ + .dist/*.whl +} + +sha512sums=" +dad71011306936dc84d966797b113008780750e9e973513092bec892be0d1468e0a0e7e8e2fcca9765309a27767e1c72bdaad7c8aca16353ae1eef783c239148 py3-tenant-schemas-celery-2.2.0.tar.gz +" diff --git a/ilot/uptime-kuma/APKBUILD b/ilot/uptime-kuma/APKBUILD index 6bc88c8..f0acb67 100644 --- a/ilot/uptime-kuma/APKBUILD +++ b/ilot/uptime-kuma/APKBUILD @@ -1,8 +1,8 @@ # Contributor: Antoine Martin (ayakael) # Maintainer: Antoine Martin (ayakael) pkgname=uptime-kuma -pkgver=1.23.16 -pkgrel=0 +pkgver=1.23.13 +pkgrel=1 pkgdesc='A fancy self-hosted monitoring tool' arch="all" url="https://github.com/louislam/uptime-kuma" @@ -43,7 +43,7 @@ package() { mv "$pkgdir"/usr/share/webapps/uptime-kuma/LICENSE "$pkgdir"/usr/share/licenses/uptime-kuma/. 
} sha512sums=" -a132d1cd796fbd868782627edfd45d2a6bd3d2fadece23e0bbf000e6a30482659062a43c4590c98e390cac9b8c1926efd8ff01c5b358b7ccea4438259b86f24e uptime-kuma-1.23.16.tar.gz +9045cdc69d46ce34011f7866844a8d1866eee21850be6eede3226e77b9c0d3ecc0190481671f04f25da40345b29cc2d13de07bcc27e7baeff7901b4bd9c8b93f uptime-kuma-1.23.13.tar.gz 0ceddb98a6f318029b8bd8b5a49b55c883e77a5f8fffe2b9b271c9abf0ac52dc7a6ea4dbb4a881124a7857f1e43040f18755c1c2a034479e6a94d2b65a73d847 uptime-kuma.openrc 1dbae536b23e3624e139155abbff383bba3209ff2219983da2616b4376b1a5041df812d1e5164716fc6e967a8446d94baae3b96ee575d400813cc6fdc2cc274e uptime-kuma.conf " diff --git a/ilot/uvicorn/2540_add-websocketssansioprotocol.patch b/ilot/uvicorn/2540_add-websocketssansioprotocol.patch deleted file mode 100644 index 0cb8db4..0000000 --- a/ilot/uvicorn/2540_add-websocketssansioprotocol.patch +++ /dev/null @@ -1,618 +0,0 @@ -diff --git a/docs/deployment.md b/docs/deployment.md -index d69fcf8..99dfbf3 100644 ---- a/docs/deployment.md -+++ b/docs/deployment.md -@@ -60,7 +60,7 @@ Options: - --loop [auto|asyncio|uvloop] Event loop implementation. [default: auto] - --http [auto|h11|httptools] HTTP protocol implementation. [default: - auto] -- --ws [auto|none|websockets|wsproto] -+ --ws [auto|none|websockets|websockets-sansio|wsproto] - WebSocket protocol implementation. - [default: auto] - --ws-max-size INTEGER WebSocket max size message in bytes -diff --git a/docs/index.md b/docs/index.md -index bb6fc32..50e2ab9 100644 ---- a/docs/index.md -+++ b/docs/index.md -@@ -130,7 +130,7 @@ Options: - --loop [auto|asyncio|uvloop] Event loop implementation. [default: auto] - --http [auto|h11|httptools] HTTP protocol implementation. [default: - auto] -- --ws [auto|none|websockets|wsproto] -+ --ws [auto|none|websockets|websockets-sansio|wsproto] - WebSocket protocol implementation. 
- [default: auto] - --ws-max-size INTEGER WebSocket max size message in bytes -diff --git a/pyproject.toml b/pyproject.toml -index 0a89966..8771bfb 100644 ---- a/pyproject.toml -+++ b/pyproject.toml -@@ -92,6 +92,10 @@ filterwarnings = [ - "ignore:Uvicorn's native WSGI implementation is deprecated.*:DeprecationWarning", - "ignore: 'cgi' is deprecated and slated for removal in Python 3.13:DeprecationWarning", - "ignore: remove second argument of ws_handler:DeprecationWarning:websockets", -+ "ignore: websockets.legacy is deprecated.*:DeprecationWarning", -+ "ignore: websockets.server.WebSocketServerProtocol is deprecated.*:DeprecationWarning", -+ "ignore: websockets.client.connect is deprecated.*:DeprecationWarning", -+ "ignore: websockets.exceptions.InvalidStatusCode is deprecated", - ] - - [tool.coverage.run] -diff --git a/tests/conftest.py b/tests/conftest.py -index 1b0c0e8..7061a14 100644 ---- a/tests/conftest.py -+++ b/tests/conftest.py -@@ -233,9 +233,9 @@ def unused_tcp_port() -> int: - marks=pytest.mark.skipif(not importlib.util.find_spec("wsproto"), reason="wsproto not installed."), - id="wsproto", - ), -+ pytest.param("uvicorn.protocols.websockets.websockets_impl:WebSocketProtocol", id="websockets"), - pytest.param( -- "uvicorn.protocols.websockets.websockets_impl:WebSocketProtocol", -- id="websockets", -+ "uvicorn.protocols.websockets.websockets_sansio_impl:WebSocketsSansIOProtocol", id="websockets-sansio" - ), - ] - ) -diff --git a/tests/middleware/test_logging.py b/tests/middleware/test_logging.py -index f27633a..63d7daf 100644 ---- a/tests/middleware/test_logging.py -+++ b/tests/middleware/test_logging.py -@@ -49,7 +49,9 @@ async def app(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable - await send({"type": "http.response.body", "body": b"", "more_body": False}) - - --async def test_trace_logging(caplog: pytest.LogCaptureFixture, logging_config, unused_tcp_port: int): -+async def test_trace_logging( -+ caplog: pytest.LogCaptureFixture, logging_config: dict[str, typing.Any], unused_tcp_port: int -+): - config = Config( - app=app, - log_level="trace", -@@ -91,8 +93,8 @@ async def test_trace_logging_on_http_protocol(http_protocol_cls, caplog, logging - - async def test_trace_logging_on_ws_protocol( - ws_protocol_cls: WSProtocol, -- caplog, -- logging_config, -+ caplog: pytest.LogCaptureFixture, -+ logging_config: dict[str, typing.Any], - unused_tcp_port: int, - ): - async def websocket_app(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable): -@@ -104,7 +106,7 @@ async def test_trace_logging_on_ws_protocol( - elif message["type"] == "websocket.disconnect": - break - -- async def open_connection(url): -+ async def open_connection(url: str): - async with websockets.client.connect(url) as websocket: - return websocket.open - -diff --git a/tests/middleware/test_proxy_headers.py b/tests/middleware/test_proxy_headers.py -index 0ade974..d300c45 100644 ---- a/tests/middleware/test_proxy_headers.py -+++ b/tests/middleware/test_proxy_headers.py -@@ -465,6 +465,7 @@ async def test_proxy_headers_websocket_x_forwarded_proto( - host, port = scope["client"] - await send({"type": "websocket.accept"}) - await send({"type": "websocket.send", "text": f"{scheme}://{host}:{port}"}) -+ await send({"type": "websocket.close"}) - - app_with_middleware = ProxyHeadersMiddleware(websocket_app, trusted_hosts="*") - config = Config( -diff --git a/tests/protocols/test_websocket.py b/tests/protocols/test_websocket.py -index 15ccfdd..e728544 100644 ---- 
a/tests/protocols/test_websocket.py -+++ b/tests/protocols/test_websocket.py -@@ -7,6 +7,8 @@ from copy import deepcopy - import httpx - import pytest - import websockets -+import websockets.asyncio -+import websockets.asyncio.client - import websockets.client - import websockets.exceptions - from typing_extensions import TypedDict -@@ -601,12 +603,9 @@ async def test_connection_lost_before_handshake_complete( - await send_accept_task.wait() - disconnect_message = await receive() # type: ignore - -- response: httpx.Response | None = None -- - async def websocket_session(uri: str): -- nonlocal response - async with httpx.AsyncClient() as client: -- response = await client.get( -+ await client.get( - f"http://127.0.0.1:{unused_tcp_port}", - headers={ - "upgrade": "websocket", -@@ -623,9 +622,6 @@ async def test_connection_lost_before_handshake_complete( - send_accept_task.set() - await asyncio.sleep(0.1) - -- assert response is not None -- assert response.status_code == 500, response.text -- assert response.text == "Internal Server Error" - assert disconnect_message == {"type": "websocket.disconnect", "code": 1006} - await task - -@@ -920,6 +916,9 @@ async def test_server_reject_connection_with_body_nolength( - async def test_server_reject_connection_with_invalid_msg( - ws_protocol_cls: WSProtocol, http_protocol_cls: HTTPProtocol, unused_tcp_port: int - ): -+ if ws_protocol_cls.__name__ == "WebSocketsSansIOProtocol": -+ pytest.skip("WebSocketsSansIOProtocol sends both start and body messages in one message.") -+ - async def app(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable): - assert scope["type"] == "websocket" - assert "extensions" in scope and "websocket.http.response" in scope["extensions"] -@@ -951,6 +950,9 @@ async def test_server_reject_connection_with_invalid_msg( - async def test_server_reject_connection_with_missing_body( - ws_protocol_cls: WSProtocol, http_protocol_cls: HTTPProtocol, unused_tcp_port: int - ): -+ if ws_protocol_cls.__name__ == "WebSocketsSansIOProtocol": -+ pytest.skip("WebSocketsSansIOProtocol sends both start and body messages in one message.") -+ - async def app(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable): - assert scope["type"] == "websocket" - assert "extensions" in scope and "websocket.http.response" in scope["extensions"] -@@ -986,6 +988,8 @@ async def test_server_multiple_websocket_http_response_start_events( - The server should raise an exception if it sends multiple - websocket.http.response.start events. 
- """ -+ if ws_protocol_cls.__name__ == "WebSocketsSansIOProtocol": -+ pytest.skip("WebSocketsSansIOProtocol sends both start and body messages in one message.") - exception_message: str | None = None - - async def app(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable): -diff --git a/uvicorn/config.py b/uvicorn/config.py -index 664d191..cbfeea6 100644 ---- a/uvicorn/config.py -+++ b/uvicorn/config.py -@@ -25,7 +25,7 @@ from uvicorn.middleware.proxy_headers import ProxyHeadersMiddleware - from uvicorn.middleware.wsgi import WSGIMiddleware - - HTTPProtocolType = Literal["auto", "h11", "httptools"] --WSProtocolType = Literal["auto", "none", "websockets", "wsproto"] -+WSProtocolType = Literal["auto", "none", "websockets", "websockets-sansio", "wsproto"] - LifespanType = Literal["auto", "on", "off"] - LoopSetupType = Literal["none", "auto", "asyncio", "uvloop"] - InterfaceType = Literal["auto", "asgi3", "asgi2", "wsgi"] -@@ -47,6 +47,7 @@ WS_PROTOCOLS: dict[WSProtocolType, str | None] = { - "auto": "uvicorn.protocols.websockets.auto:AutoWebSocketsProtocol", - "none": None, - "websockets": "uvicorn.protocols.websockets.websockets_impl:WebSocketProtocol", -+ "websockets-sansio": "uvicorn.protocols.websockets.websockets_sansio_impl:WebSocketsSansIOProtocol", - "wsproto": "uvicorn.protocols.websockets.wsproto_impl:WSProtocol", - } - LIFESPAN: dict[LifespanType, str] = { -diff --git a/uvicorn/protocols/websockets/websockets_sansio_impl.py b/uvicorn/protocols/websockets/websockets_sansio_impl.py -new file mode 100644 -index 0000000..994af07 ---- /dev/null -+++ b/uvicorn/protocols/websockets/websockets_sansio_impl.py -@@ -0,0 +1,405 @@ -+from __future__ import annotations -+ -+import asyncio -+import logging -+from asyncio.transports import BaseTransport, Transport -+from http import HTTPStatus -+from typing import Any, Literal, cast -+from urllib.parse import unquote -+ -+from websockets import InvalidState -+from websockets.extensions.permessage_deflate import ServerPerMessageDeflateFactory -+from websockets.frames import Frame, Opcode -+from websockets.http11 import Request -+from websockets.server import ServerProtocol -+ -+from uvicorn._types import ( -+ ASGIReceiveEvent, -+ ASGISendEvent, -+ WebSocketAcceptEvent, -+ WebSocketCloseEvent, -+ WebSocketDisconnectEvent, -+ WebSocketReceiveEvent, -+ WebSocketResponseBodyEvent, -+ WebSocketResponseStartEvent, -+ WebSocketScope, -+ WebSocketSendEvent, -+) -+from uvicorn.config import Config -+from uvicorn.logging import TRACE_LOG_LEVEL -+from uvicorn.protocols.utils import ( -+ ClientDisconnected, -+ get_local_addr, -+ get_path_with_query_string, -+ get_remote_addr, -+ is_ssl, -+) -+from uvicorn.server import ServerState -+ -+ -+class WebSocketsSansIOProtocol(asyncio.Protocol): -+ def __init__( -+ self, -+ config: Config, -+ server_state: ServerState, -+ app_state: dict[str, Any], -+ _loop: asyncio.AbstractEventLoop | None = None, -+ ) -> None: -+ if not config.loaded: -+ config.load() # pragma: no cover -+ -+ self.config = config -+ self.app = config.loaded_app -+ self.loop = _loop or asyncio.get_event_loop() -+ self.logger = logging.getLogger("uvicorn.error") -+ self.root_path = config.root_path -+ self.app_state = app_state -+ -+ # Shared server state -+ self.connections = server_state.connections -+ self.tasks = server_state.tasks -+ self.default_headers = server_state.default_headers -+ -+ # Connection state -+ self.transport: asyncio.Transport = None # type: ignore[assignment] -+ self.server: tuple[str, int] | None = None -+ 
self.client: tuple[str, int] | None = None -+ self.scheme: Literal["wss", "ws"] = None # type: ignore[assignment] -+ -+ # WebSocket state -+ self.queue: asyncio.Queue[ASGIReceiveEvent] = asyncio.Queue() -+ self.handshake_initiated = False -+ self.handshake_complete = False -+ self.close_sent = False -+ self.initial_response: tuple[int, list[tuple[str, str]], bytes] | None = None -+ -+ extensions = [] -+ if self.config.ws_per_message_deflate: -+ extensions = [ServerPerMessageDeflateFactory()] -+ self.conn = ServerProtocol( -+ extensions=extensions, -+ max_size=self.config.ws_max_size, -+ logger=logging.getLogger("uvicorn.error"), -+ ) -+ -+ self.read_paused = False -+ self.writable = asyncio.Event() -+ self.writable.set() -+ -+ # Buffers -+ self.bytes = b"" -+ -+ def connection_made(self, transport: BaseTransport) -> None: -+ """Called when a connection is made.""" -+ transport = cast(Transport, transport) -+ self.connections.add(self) -+ self.transport = transport -+ self.server = get_local_addr(transport) -+ self.client = get_remote_addr(transport) -+ self.scheme = "wss" if is_ssl(transport) else "ws" -+ -+ if self.logger.level <= TRACE_LOG_LEVEL: -+ prefix = "%s:%d - " % self.client if self.client else "" -+ self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection made", prefix) -+ -+ def connection_lost(self, exc: Exception | None) -> None: -+ code = 1005 if self.handshake_complete else 1006 -+ self.queue.put_nowait({"type": "websocket.disconnect", "code": code}) -+ self.connections.remove(self) -+ -+ if self.logger.level <= TRACE_LOG_LEVEL: -+ prefix = "%s:%d - " % self.client if self.client else "" -+ self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection lost", prefix) -+ -+ self.handshake_complete = True -+ if exc is None: -+ self.transport.close() -+ -+ def eof_received(self) -> None: -+ pass -+ -+ def shutdown(self) -> None: -+ if self.handshake_complete: -+ self.queue.put_nowait({"type": "websocket.disconnect", "code": 1012}) -+ self.conn.send_close(1012) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ else: -+ self.send_500_response() -+ self.transport.close() -+ -+ def data_received(self, data: bytes) -> None: -+ self.conn.receive_data(data) -+ parser_exc = self.conn.parser_exc -+ if parser_exc is not None: -+ self.handle_parser_exception() -+ return -+ self.handle_events() -+ -+ def handle_events(self) -> None: -+ for event in self.conn.events_received(): -+ if isinstance(event, Request): -+ self.handle_connect(event) -+ if isinstance(event, Frame): -+ if event.opcode == Opcode.CONT: -+ self.handle_cont(event) -+ elif event.opcode == Opcode.TEXT: -+ self.handle_text(event) -+ elif event.opcode == Opcode.BINARY: -+ self.handle_bytes(event) -+ elif event.opcode == Opcode.PING: -+ self.handle_ping(event) -+ elif event.opcode == Opcode.CLOSE: -+ self.handle_close(event) -+ -+ # Event handlers -+ -+ def handle_connect(self, event: Request) -> None: -+ self.request = event -+ self.response = self.conn.accept(event) -+ self.handshake_initiated = True -+ if self.response.status_code != 101: -+ self.handshake_complete = True -+ self.close_sent = True -+ self.conn.send_response(self.response) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ self.transport.close() -+ return -+ -+ headers = [ -+ (key.encode("ascii"), value.encode("ascii", errors="surrogateescape")) -+ for key, value in event.headers.raw_items() -+ ] -+ raw_path, _, query_string = event.path.partition("?") -+ self.scope: WebSocketScope = { -+ "type": 
"websocket", -+ "asgi": {"version": self.config.asgi_version, "spec_version": "2.3"}, -+ "http_version": "1.1", -+ "scheme": self.scheme, -+ "server": self.server, -+ "client": self.client, -+ "root_path": self.root_path, -+ "path": unquote(raw_path), -+ "raw_path": raw_path.encode("ascii"), -+ "query_string": query_string.encode("ascii"), -+ "headers": headers, -+ "subprotocols": event.headers.get_all("Sec-WebSocket-Protocol"), -+ "state": self.app_state.copy(), -+ "extensions": {"websocket.http.response": {}}, -+ } -+ self.queue.put_nowait({"type": "websocket.connect"}) -+ task = self.loop.create_task(self.run_asgi()) -+ task.add_done_callback(self.on_task_complete) -+ self.tasks.add(task) -+ -+ def handle_cont(self, event: Frame) -> None: -+ self.bytes += event.data -+ if event.fin: -+ self.send_receive_event_to_app() -+ -+ def handle_text(self, event: Frame) -> None: -+ self.bytes = event.data -+ self.curr_msg_data_type: Literal["text", "bytes"] = "text" -+ if event.fin: -+ self.send_receive_event_to_app() -+ -+ def handle_bytes(self, event: Frame) -> None: -+ self.bytes = event.data -+ self.curr_msg_data_type = "bytes" -+ if event.fin: -+ self.send_receive_event_to_app() -+ -+ def send_receive_event_to_app(self) -> None: -+ data_type = self.curr_msg_data_type -+ msg: WebSocketReceiveEvent -+ if data_type == "text": -+ msg = {"type": "websocket.receive", data_type: self.bytes.decode()} -+ else: -+ msg = {"type": "websocket.receive", data_type: self.bytes} -+ self.queue.put_nowait(msg) -+ if not self.read_paused: -+ self.read_paused = True -+ self.transport.pause_reading() -+ -+ def handle_ping(self, event: Frame) -> None: -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ -+ def handle_close(self, event: Frame) -> None: -+ if not self.close_sent and not self.transport.is_closing(): -+ disconnect_event: WebSocketDisconnectEvent = { -+ "type": "websocket.disconnect", -+ "code": self.conn.close_rcvd.code, # type: ignore[union-attr] -+ "reason": self.conn.close_rcvd.reason, # type: ignore[union-attr] -+ } -+ self.queue.put_nowait(disconnect_event) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ self.transport.close() -+ -+ def handle_parser_exception(self) -> None: -+ disconnect_event: WebSocketDisconnectEvent = { -+ "type": "websocket.disconnect", -+ "code": self.conn.close_sent.code, # type: ignore[union-attr] -+ "reason": self.conn.close_sent.reason, # type: ignore[union-attr] -+ } -+ self.queue.put_nowait(disconnect_event) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ self.close_sent = True -+ self.transport.close() -+ -+ def on_task_complete(self, task: asyncio.Task[None]) -> None: -+ self.tasks.discard(task) -+ -+ async def run_asgi(self) -> None: -+ try: -+ result = await self.app(self.scope, self.receive, self.send) -+ except ClientDisconnected: -+ self.transport.close() -+ except BaseException: -+ self.logger.exception("Exception in ASGI application\n") -+ self.send_500_response() -+ self.transport.close() -+ else: -+ if not self.handshake_complete: -+ msg = "ASGI callable returned without completing handshake." -+ self.logger.error(msg) -+ self.send_500_response() -+ self.transport.close() -+ elif result is not None: -+ msg = "ASGI callable should return None, but returned '%s'." 
-+ self.logger.error(msg, result) -+ self.transport.close() -+ -+ def send_500_response(self) -> None: -+ if self.initial_response or self.handshake_complete: -+ return -+ response = self.conn.reject(500, "Internal Server Error") -+ self.conn.send_response(response) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ -+ async def send(self, message: ASGISendEvent) -> None: -+ await self.writable.wait() -+ -+ message_type = message["type"] -+ -+ if not self.handshake_complete and self.initial_response is None: -+ if message_type == "websocket.accept": -+ message = cast(WebSocketAcceptEvent, message) -+ self.logger.info( -+ '%s - "WebSocket %s" [accepted]', -+ self.scope["client"], -+ get_path_with_query_string(self.scope), -+ ) -+ headers = [ -+ (name.decode("latin-1").lower(), value.decode("latin-1").lower()) -+ for name, value in (self.default_headers + list(message.get("headers", []))) -+ ] -+ accepted_subprotocol = message.get("subprotocol") -+ if accepted_subprotocol: -+ headers.append(("Sec-WebSocket-Protocol", accepted_subprotocol)) -+ self.response.headers.update(headers) -+ -+ if not self.transport.is_closing(): -+ self.handshake_complete = True -+ self.conn.send_response(self.response) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ -+ elif message_type == "websocket.close": -+ message = cast(WebSocketCloseEvent, message) -+ self.queue.put_nowait({"type": "websocket.disconnect", "code": 1006}) -+ self.logger.info( -+ '%s - "WebSocket %s" 403', -+ self.scope["client"], -+ get_path_with_query_string(self.scope), -+ ) -+ response = self.conn.reject(HTTPStatus.FORBIDDEN, "") -+ self.conn.send_response(response) -+ output = self.conn.data_to_send() -+ self.close_sent = True -+ self.handshake_complete = True -+ self.transport.write(b"".join(output)) -+ self.transport.close() -+ elif message_type == "websocket.http.response.start" and self.initial_response is None: -+ message = cast(WebSocketResponseStartEvent, message) -+ if not (100 <= message["status"] < 600): -+ raise RuntimeError("Invalid HTTP status code '%d' in response." % message["status"]) -+ self.logger.info( -+ '%s - "WebSocket %s" %d', -+ self.scope["client"], -+ get_path_with_query_string(self.scope), -+ message["status"], -+ ) -+ headers = [ -+ (name.decode("latin-1"), value.decode("latin-1")) -+ for name, value in list(message.get("headers", [])) -+ ] -+ self.initial_response = (message["status"], headers, b"") -+ else: -+ msg = ( -+ "Expected ASGI message 'websocket.accept', 'websocket.close' " -+ "or 'websocket.http.response.start' " -+ "but got '%s'." 
-+ ) -+ raise RuntimeError(msg % message_type) -+ -+ elif not self.close_sent and self.initial_response is None: -+ try: -+ if message_type == "websocket.send": -+ message = cast(WebSocketSendEvent, message) -+ bytes_data = message.get("bytes") -+ text_data = message.get("text") -+ if text_data: -+ self.conn.send_text(text_data.encode()) -+ elif bytes_data: -+ self.conn.send_binary(bytes_data) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ -+ elif message_type == "websocket.close" and not self.transport.is_closing(): -+ message = cast(WebSocketCloseEvent, message) -+ code = message.get("code", 1000) -+ reason = message.get("reason", "") or "" -+ self.queue.put_nowait({"type": "websocket.disconnect", "code": code}) -+ self.conn.send_close(code, reason) -+ output = self.conn.data_to_send() -+ self.transport.write(b"".join(output)) -+ self.close_sent = True -+ self.transport.close() -+ else: -+ msg = "Expected ASGI message 'websocket.send' or 'websocket.close'," " but got '%s'." -+ raise RuntimeError(msg % message_type) -+ except InvalidState: -+ raise ClientDisconnected() -+ elif self.initial_response is not None: -+ if message_type == "websocket.http.response.body": -+ message = cast(WebSocketResponseBodyEvent, message) -+ body = self.initial_response[2] + message["body"] -+ self.initial_response = self.initial_response[:2] + (body,) -+ if not message.get("more_body", False): -+ response = self.conn.reject(self.initial_response[0], body.decode()) -+ response.headers.update(self.initial_response[1]) -+ self.queue.put_nowait({"type": "websocket.disconnect", "code": 1006}) -+ self.conn.send_response(response) -+ output = self.conn.data_to_send() -+ self.close_sent = True -+ self.transport.write(b"".join(output)) -+ self.transport.close() -+ else: -+ msg = "Expected ASGI message 'websocket.http.response.body' " "but got '%s'." -+ raise RuntimeError(msg % message_type) -+ -+ else: -+ msg = "Unexpected ASGI message '%s', after sending 'websocket.close'." -+ raise RuntimeError(msg % message_type) -+ -+ async def receive(self) -> ASGIReceiveEvent: -+ message = await self.queue.get() -+ if self.read_paused and self.queue.empty(): -+ self.read_paused = False -+ self.transport.resume_reading() -+ return message -diff --git a/uvicorn/server.py b/uvicorn/server.py -index cca2e85..50c5ed2 100644 ---- a/uvicorn/server.py -+++ b/uvicorn/server.py -@@ -23,9 +23,10 @@ if TYPE_CHECKING: - from uvicorn.protocols.http.h11_impl import H11Protocol - from uvicorn.protocols.http.httptools_impl import HttpToolsProtocol - from uvicorn.protocols.websockets.websockets_impl import WebSocketProtocol -+ from uvicorn.protocols.websockets.websockets_sansio_impl import WebSocketsSansIOProtocol - from uvicorn.protocols.websockets.wsproto_impl import WSProtocol - -- Protocols = Union[H11Protocol, HttpToolsProtocol, WSProtocol, WebSocketProtocol] -+ Protocols = Union[H11Protocol, HttpToolsProtocol, WSProtocol, WebSocketProtocol, WebSocketsSansIOProtocol] - - HANDLED_SIGNALS = ( - signal.SIGINT, # Unix signal 2. Sent by Ctrl+C. 
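For reference between the two vendored patches deleted here: 2540_add-websocketssansioprotocol.patch registered a new "websockets-sansio" choice for uvicorn's --ws option, mapped to WebSocketsSansIOProtocol in WS_PROTOCOLS, while leaving the documented default at "auto". A minimal usage sketch, assuming a build of uvicorn carrying this patch and a hypothetical app:app ASGI entrypoint (both illustrative, not part of this repo):

    # Opt in to the sans-io websockets protocol implementation that the
    # patch wires in; "app:app" is a placeholder ASGI application.
    uvicorn app:app --ws websockets-sansio
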
diff --git a/ilot/uvicorn/2541_bump-wesockets-on-requirements.patch b/ilot/uvicorn/2541_bump-wesockets-on-requirements.patch deleted file mode 100644 index c1179f3..0000000 --- a/ilot/uvicorn/2541_bump-wesockets-on-requirements.patch +++ /dev/null @@ -1,567 +0,0 @@ -diff --git a/requirements.txt b/requirements.txt -index e26e6b3..b16569f 100644 ---- a/requirements.txt -+++ b/requirements.txt -@@ -7,7 +7,7 @@ h11 @ git+https://github.com/python-hyper/h11.git@master - # Explicit optionals - a2wsgi==1.10.7 - wsproto==1.2.0 --websockets==13.1 -+websockets==14.1 - - # Packaging - build==1.2.2.post1 -diff --git a/tests/middleware/test_logging.py b/tests/middleware/test_logging.py -index 63d7daf..5aef174 100644 ---- a/tests/middleware/test_logging.py -+++ b/tests/middleware/test_logging.py -@@ -8,8 +8,7 @@ import typing - - import httpx - import pytest --import websockets --import websockets.client -+from websockets.asyncio.client import connect - - from tests.utils import run_server - from uvicorn import Config -@@ -107,8 +106,8 @@ async def test_trace_logging_on_ws_protocol( - break - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.open -+ async with connect(url): -+ return True - - config = Config( - app=websocket_app, -diff --git a/tests/middleware/test_proxy_headers.py b/tests/middleware/test_proxy_headers.py -index d300c45..4b5f195 100644 ---- a/tests/middleware/test_proxy_headers.py -+++ b/tests/middleware/test_proxy_headers.py -@@ -5,7 +5,7 @@ from typing import TYPE_CHECKING - import httpx - import httpx._transports.asgi - import pytest --import websockets.client -+from websockets.asyncio.client import connect - - from tests.response import Response - from tests.utils import run_server -@@ -479,7 +479,7 @@ async def test_proxy_headers_websocket_x_forwarded_proto( - async with run_server(config): - url = f"ws://127.0.0.1:{unused_tcp_port}" - headers = {X_FORWARDED_FOR: "1.2.3.4", X_FORWARDED_PROTO: forwarded_proto} -- async with websockets.client.connect(url, extra_headers=headers) as websocket: -+ async with connect(url, additional_headers=headers) as websocket: - data = await websocket.recv() - assert data == expected - -diff --git a/tests/protocols/test_websocket.py b/tests/protocols/test_websocket.py -index e728544..b9035ec 100644 ---- a/tests/protocols/test_websocket.py -+++ b/tests/protocols/test_websocket.py -@@ -12,6 +12,8 @@ import websockets.asyncio.client - import websockets.client - import websockets.exceptions - from typing_extensions import TypedDict -+from websockets.asyncio.client import ClientConnection, connect -+from websockets.exceptions import ConnectionClosed, ConnectionClosedError, InvalidHandshake, InvalidStatus - from websockets.extensions.permessage_deflate import ClientPerMessageDeflateFactory - from websockets.typing import Subprotocol - -@@ -130,8 +132,8 @@ async def test_accept_connection(ws_protocol_cls: WSProtocol, http_protocol_cls: - await self.send({"type": "websocket.accept"}) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.open -+ async with connect(url): -+ return True - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -146,7 +148,7 @@ async def test_shutdown(ws_protocol_cls: WSProtocol, http_protocol_cls: HTTPProt - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", 
port=unused_tcp_port) - async with run_server(config) as server: -- async with websockets.client.connect(f"ws://127.0.0.1:{unused_tcp_port}"): -+ async with connect(f"ws://127.0.0.1:{unused_tcp_port}"): - # Attempt shutdown while connection is still open - await server.shutdown() - -@@ -160,8 +162,8 @@ async def test_supports_permessage_deflate_extension( - - async def open_connection(url: str): - extension_factories = [ClientPerMessageDeflateFactory()] -- async with websockets.client.connect(url, extensions=extension_factories) as websocket: -- return [extension.name for extension in websocket.extensions] -+ async with connect(url, extensions=extension_factories) as websocket: -+ return [extension.name for extension in websocket.protocol.extensions] - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -180,8 +182,8 @@ async def test_can_disable_permessage_deflate_extension( - # enable per-message deflate on the client, so that we can check the server - # won't support it when it's disabled. - extension_factories = [ClientPerMessageDeflateFactory()] -- async with websockets.client.connect(url, extensions=extension_factories) as websocket: -- return [extension.name for extension in websocket.extensions] -+ async with connect(url, extensions=extension_factories) as websocket: -+ return [extension.name for extension in websocket.protocol.extensions] - - config = Config( - app=App, -@@ -203,8 +205,8 @@ async def test_close_connection(ws_protocol_cls: WSProtocol, http_protocol_cls: - - async def open_connection(url: str): - try: -- await websockets.client.connect(url) -- except websockets.exceptions.InvalidHandshake: -+ await connect(url) -+ except InvalidHandshake: - return False - return True # pragma: no cover - -@@ -224,8 +226,8 @@ async def test_headers(ws_protocol_cls: WSProtocol, http_protocol_cls: HTTPProto - await self.send({"type": "websocket.accept"}) - - async def open_connection(url: str): -- async with websockets.client.connect(url, extra_headers=[("username", "abraão")]) as websocket: -- return websocket.open -+ async with connect(url, additional_headers=[("username", "abraão")]): -+ return True - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -239,8 +241,9 @@ async def test_extra_headers(ws_protocol_cls: WSProtocol, http_protocol_cls: HTT - await self.send({"type": "websocket.accept", "headers": [(b"extra", b"header")]}) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.response_headers -+ async with connect(url) as websocket: -+ assert websocket.response -+ return websocket.response.headers - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -258,8 +261,8 @@ async def test_path_and_raw_path(ws_protocol_cls: WSProtocol, http_protocol_cls: - await self.send({"type": "websocket.accept"}) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.open -+ async with connect(url): -+ return True - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -276,7 +279,7 @@ async def test_send_text_data_to_client( - await self.send({"type": "websocket.send", "text": "123"}) - - async def 
get_data(url: str): -- async with websockets.client.connect(url) as websocket: -+ async with connect(url) as websocket: - return await websocket.recv() - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) -@@ -294,7 +297,7 @@ async def test_send_binary_data_to_client( - await self.send({"type": "websocket.send", "bytes": b"123"}) - - async def get_data(url: str): -- async with websockets.client.connect(url) as websocket: -+ async with connect(url) as websocket: - return await websocket.recv() - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) -@@ -313,7 +316,7 @@ async def test_send_and_close_connection( - await self.send({"type": "websocket.close"}) - - async def get_data(url: str): -- async with websockets.client.connect(url) as websocket: -+ async with connect(url) as websocket: - data = await websocket.recv() - is_open = True - try: -@@ -342,7 +345,7 @@ async def test_send_text_data_to_server( - await self.send({"type": "websocket.send", "text": _text}) - - async def send_text(url: str): -- async with websockets.client.connect(url) as websocket: -+ async with connect(url) as websocket: - await websocket.send("abc") - return await websocket.recv() - -@@ -365,7 +368,7 @@ async def test_send_binary_data_to_server( - await self.send({"type": "websocket.send", "bytes": _bytes}) - - async def send_text(url: str): -- async with websockets.client.connect(url) as websocket: -+ async with connect(url) as websocket: - await websocket.send(b"abc") - return await websocket.recv() - -@@ -387,7 +390,7 @@ async def test_send_after_protocol_close( - await self.send({"type": "websocket.send", "text": "123"}) - - async def get_data(url: str): -- async with websockets.client.connect(url) as websocket: -+ async with connect(url) as websocket: - data = await websocket.recv() - is_open = True - try: -@@ -407,14 +410,14 @@ async def test_missing_handshake(ws_protocol_cls: WSProtocol, http_protocol_cls: - async def app(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable): - pass - -- async def connect(url: str): -- await websockets.client.connect(url) -+ async def open_connection(url: str): -+ await connect(url) - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -- with pytest.raises(websockets.exceptions.InvalidStatusCode) as exc_info: -- await connect(f"ws://127.0.0.1:{unused_tcp_port}") -- assert exc_info.value.status_code == 500 -+ with pytest.raises(InvalidStatus) as exc_info: -+ await open_connection(f"ws://127.0.0.1:{unused_tcp_port}") -+ assert exc_info.value.response.status_code == 500 - - - async def test_send_before_handshake( -@@ -423,14 +426,14 @@ async def test_send_before_handshake( - async def app(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable): - await send({"type": "websocket.send", "text": "123"}) - -- async def connect(url: str): -- await websockets.client.connect(url) -+ async def open_connection(url: str): -+ await connect(url) - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -- with pytest.raises(websockets.exceptions.InvalidStatusCode) as exc_info: -- await connect(f"ws://127.0.0.1:{unused_tcp_port}") -- assert exc_info.value.status_code == 500 -+ with pytest.raises(InvalidStatus) as exc_info: -+ await 
open_connection(f"ws://127.0.0.1:{unused_tcp_port}") -+ assert exc_info.value.response.status_code == 500 - - - async def test_duplicate_handshake(ws_protocol_cls: WSProtocol, http_protocol_cls: HTTPProtocol, unused_tcp_port: int): -@@ -440,10 +443,10 @@ async def test_duplicate_handshake(ws_protocol_cls: WSProtocol, http_protocol_cl - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -- async with websockets.client.connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: -- with pytest.raises(websockets.exceptions.ConnectionClosed): -+ async with connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: -+ with pytest.raises(ConnectionClosed): - _ = await websocket.recv() -- assert websocket.close_code == 1006 -+ assert websocket.protocol.close_code == 1006 - - - async def test_asgi_return_value(ws_protocol_cls: WSProtocol, http_protocol_cls: HTTPProtocol, unused_tcp_port: int): -@@ -458,10 +461,10 @@ async def test_asgi_return_value(ws_protocol_cls: WSProtocol, http_protocol_cls: - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -- async with websockets.client.connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: -- with pytest.raises(websockets.exceptions.ConnectionClosed): -+ async with connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: -+ with pytest.raises(ConnectionClosed): - _ = await websocket.recv() -- assert websocket.close_code == 1006 -+ assert websocket.protocol.close_code == 1006 - - - @pytest.mark.parametrize("code", [None, 1000, 1001]) -@@ -493,13 +496,13 @@ async def test_app_close( - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -- async with websockets.client.connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: -+ async with connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: - await websocket.ping() - await websocket.send("abc") -- with pytest.raises(websockets.exceptions.ConnectionClosed): -+ with pytest.raises(ConnectionClosed): - await websocket.recv() -- assert websocket.close_code == (code or 1000) -- assert websocket.close_reason == (reason or "") -+ assert websocket.protocol.close_code == (code or 1000) -+ assert websocket.protocol.close_reason == (reason or "") - - - async def test_client_close(ws_protocol_cls: WSProtocol, http_protocol_cls: HTTPProtocol, unused_tcp_port: int): -@@ -518,7 +521,7 @@ async def test_client_close(ws_protocol_cls: WSProtocol, http_protocol_cls: HTTP - break - - async def websocket_session(url: str): -- async with websockets.client.connect(url) as websocket: -+ async with connect(url) as websocket: - await websocket.ping() - await websocket.send("abc") - await websocket.close(code=1001, reason="custom reason") -@@ -555,7 +558,7 @@ async def test_client_connection_lost( - port=unused_tcp_port, - ) - async with run_server(config): -- async with websockets.client.connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: -+ async with connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: - websocket.transport.close() - await asyncio.sleep(0.1) - got_disconnect_event_before_shutdown = got_disconnect_event -@@ -583,7 +586,7 @@ async def test_client_connection_lost_on_send( - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): - url = 
f"ws://127.0.0.1:{unused_tcp_port}" -- async with websockets.client.connect(url): -+ async with connect(url): - await asyncio.sleep(0.1) - disconnect.set() - -@@ -642,11 +645,11 @@ async def test_send_close_on_server_shutdown( - disconnect_message = message - break - -- websocket: websockets.client.WebSocketClientProtocol | None = None -+ websocket: ClientConnection | None = None - - async def websocket_session(uri: str): - nonlocal websocket -- async with websockets.client.connect(uri) as ws_connection: -+ async with connect(uri) as ws_connection: - websocket = ws_connection - await server_shutdown_event.wait() - -@@ -676,9 +679,7 @@ async def test_subprotocols( - await self.send({"type": "websocket.accept", "subprotocol": subprotocol}) - - async def get_subprotocol(url: str): -- async with websockets.client.connect( -- url, subprotocols=[Subprotocol("proto1"), Subprotocol("proto2")] -- ) as websocket: -+ async with connect(url, subprotocols=[Subprotocol("proto1"), Subprotocol("proto2")]) as websocket: - return websocket.subprotocol - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) -@@ -688,7 +689,7 @@ async def test_subprotocols( - - - MAX_WS_BYTES = 1024 * 1024 * 16 --MAX_WS_BYTES_PLUS1 = MAX_WS_BYTES + 1 -+MAX_WS_BYTES_PLUS1 = MAX_WS_BYTES + 10 - - - @pytest.mark.parametrize( -@@ -731,15 +732,15 @@ async def test_send_binary_data_to_server_bigger_than_default_on_websockets( - port=unused_tcp_port, - ) - async with run_server(config): -- async with websockets.client.connect(f"ws://127.0.0.1:{unused_tcp_port}", max_size=client_size_sent) as ws: -+ async with connect(f"ws://127.0.0.1:{unused_tcp_port}", max_size=client_size_sent) as ws: - await ws.send(b"\x01" * client_size_sent) - if expected_result == 0: - data = await ws.recv() - assert data == b"\x01" * client_size_sent - else: -- with pytest.raises(websockets.exceptions.ConnectionClosedError): -+ with pytest.raises(ConnectionClosedError): - await ws.recv() -- assert ws.close_code == expected_result -+ assert ws.protocol.close_code == expected_result - - - async def test_server_reject_connection( -@@ -764,10 +765,10 @@ async def test_server_reject_connection( - disconnected_message = await receive() - - async def websocket_session(url: str): -- with pytest.raises(websockets.exceptions.InvalidStatusCode) as exc_info: -- async with websockets.client.connect(url): -+ with pytest.raises(InvalidStatus) as exc_info: -+ async with connect(url): - pass # pragma: no cover -- assert exc_info.value.status_code == 403 -+ assert exc_info.value.response.status_code == 403 - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -937,10 +938,10 @@ async def test_server_reject_connection_with_invalid_msg( - await send(message) - - async def websocket_session(url: str): -- with pytest.raises(websockets.exceptions.InvalidStatusCode) as exc_info: -- async with websockets.client.connect(url): -+ with pytest.raises(InvalidStatus) as exc_info: -+ async with connect(url): - pass # pragma: no cover -- assert exc_info.value.status_code == 404 -+ assert exc_info.value.response.status_code == 404 - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -971,10 +972,10 @@ async def test_server_reject_connection_with_missing_body( - # no further message - - async def websocket_session(url: str): -- with 
pytest.raises(websockets.exceptions.InvalidStatusCode) as exc_info: -- async with websockets.client.connect(url): -+ with pytest.raises(InvalidStatus) as exc_info: -+ async with connect(url): - pass # pragma: no cover -- assert exc_info.value.status_code == 404 -+ assert exc_info.value.response.status_code == 404 - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -1014,17 +1015,17 @@ async def test_server_multiple_websocket_http_response_start_events( - exception_message = str(exc) - - async def websocket_session(url: str): -- with pytest.raises(websockets.exceptions.InvalidStatusCode) as exc_info: -- async with websockets.client.connect(url): -+ with pytest.raises(InvalidStatus) as exc_info: -+ async with connect(url): - pass # pragma: no cover -- assert exc_info.value.status_code == 404 -+ assert exc_info.value.response.status_code == 404 - - config = Config(app=app, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): - await websocket_session(f"ws://127.0.0.1:{unused_tcp_port}") - - assert exception_message == ( -- "Expected ASGI message 'websocket.http.response.body' but got " "'websocket.http.response.start'." -+ "Expected ASGI message 'websocket.http.response.body' but got 'websocket.http.response.start'." - ) - - -@@ -1053,7 +1054,7 @@ async def test_server_can_read_messages_in_buffer_after_close( - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -- async with websockets.client.connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: -+ async with connect(f"ws://127.0.0.1:{unused_tcp_port}") as websocket: - await websocket.send(b"abc") - await websocket.send(b"abc") - await websocket.send(b"abc") -@@ -1070,8 +1071,9 @@ async def test_default_server_headers( - await self.send({"type": "websocket.accept"}) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.response_headers -+ async with connect(url) as websocket: -+ assert websocket.response -+ return websocket.response.headers - - config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -1085,8 +1087,9 @@ async def test_no_server_headers(ws_protocol_cls: WSProtocol, http_protocol_cls: - await self.send({"type": "websocket.accept"}) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.response_headers -+ async with connect(url) as websocket: -+ assert websocket.response -+ return websocket.response.headers - - config = Config( - app=App, -@@ -1108,8 +1111,9 @@ async def test_no_date_header_on_wsproto(http_protocol_cls: HTTPProtocol, unused - await self.send({"type": "websocket.accept"}) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.response_headers -+ async with connect(url) as websocket: -+ assert websocket.response -+ return websocket.response.headers - - config = Config( - app=App, -@@ -1140,8 +1144,9 @@ async def test_multiple_server_header( - ) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.response_headers -+ async with connect(url) as websocket: -+ assert websocket.response -+ return websocket.response.headers - - 
config = Config(app=App, ws=ws_protocol_cls, http=http_protocol_cls, lifespan="off", port=unused_tcp_port) - async with run_server(config): -@@ -1176,8 +1181,8 @@ async def test_lifespan_state(ws_protocol_cls: WSProtocol, http_protocol_cls: HT - await self.send({"type": "websocket.accept"}) - - async def open_connection(url: str): -- async with websockets.client.connect(url) as websocket: -- return websocket.open -+ async with connect(url): -+ return True - - async def app_wrapper(scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable): - if scope["type"] == "lifespan": -diff --git a/uvicorn/protocols/websockets/websockets_impl.py b/uvicorn/protocols/websockets/websockets_impl.py -index cd6c54f..685d6b6 100644 ---- a/uvicorn/protocols/websockets/websockets_impl.py -+++ b/uvicorn/protocols/websockets/websockets_impl.py -@@ -13,8 +13,7 @@ from websockets.datastructures import Headers - from websockets.exceptions import ConnectionClosed - from websockets.extensions.base import ServerExtensionFactory - from websockets.extensions.permessage_deflate import ServerPerMessageDeflateFactory --from websockets.legacy.server import HTTPResponse --from websockets.server import WebSocketServerProtocol -+from websockets.legacy.server import HTTPResponse, WebSocketServerProtocol - from websockets.typing import Subprotocol - - from uvicorn._types import ( -diff --git a/uvicorn/protocols/websockets/wsproto_impl.py b/uvicorn/protocols/websockets/wsproto_impl.py -index 828afe5..5d84bff 100644 ---- a/uvicorn/protocols/websockets/wsproto_impl.py -+++ b/uvicorn/protocols/websockets/wsproto_impl.py -@@ -149,12 +149,13 @@ class WSProtocol(asyncio.Protocol): - self.writable.set() # pragma: full coverage - - def shutdown(self) -> None: -- if self.handshake_complete: -- self.queue.put_nowait({"type": "websocket.disconnect", "code": 1012}) -- output = self.conn.send(wsproto.events.CloseConnection(code=1012)) -- self.transport.write(output) -- else: -- self.send_500_response() -+ if not self.response_started: -+ if self.handshake_complete: -+ self.queue.put_nowait({"type": "websocket.disconnect", "code": 1012}) -+ output = self.conn.send(wsproto.events.CloseConnection(code=1012)) -+ self.transport.write(output) -+ else: -+ self.send_500_response() - self.transport.close() - - def on_task_complete(self, task: asyncio.Task[None]) -> None: -@@ -221,13 +222,15 @@ class WSProtocol(asyncio.Protocol): - def send_500_response(self) -> None: - if self.response_started or self.handshake_complete: - return # we cannot send responses anymore -+ reject_data = b"Internal Server Error" - headers: list[tuple[bytes, bytes]] = [ - (b"content-type", b"text/plain; charset=utf-8"), -+ (b"content-length", str(len(reject_data)).encode()), - (b"connection", b"close"), - (b"content-length", b"21"), - ] - output = self.conn.send(wsproto.events.RejectConnection(status_code=500, headers=headers, has_body=True)) -- output += self.conn.send(wsproto.events.RejectData(data=b"Internal Server Error")) -+ output += self.conn.send(wsproto.events.RejectData(data=reject_data)) - self.transport.write(output) - - async def run_asgi(self) -> None: diff --git a/ilot/uvicorn/APKBUILD b/ilot/uvicorn/APKBUILD deleted file mode 100644 index 1f14918..0000000 --- a/ilot/uvicorn/APKBUILD +++ /dev/null @@ -1,59 +0,0 @@ -maintainer="Michał Polański " -pkgname=uvicorn -pkgver=0.34.0 -pkgrel=0 -pkgdesc="Lightning-fast ASGI server" -url="https://www.uvicorn.org/" -license="BSD-3-Clause" -# disable due to lack of support for websockets 14 -# 
https://gitlab.alpinelinux.org/alpine/aports/-/issues/16646 -arch="noarch" -depends="py3-click py3-h11" -makedepends="py3-gpep517 py3-hatchling" -checkdepends=" - py3-a2wsgi - py3-dotenv - py3-httptools - py3-httpx - py3-pytest - py3-pytest-mock - py3-trustme - py3-typing-extensions - py3-watchfiles - py3-websockets - py3-wsproto - py3-yaml - " -subpackages="$pkgname-pyc" -source="https://github.com/encode/uvicorn/archive/$pkgver/uvicorn-$pkgver.tar.gz - test_multiprocess.patch - 2540_add-websocketssansioprotocol.patch - 2541_bump-wesockets-on-requirements.patch - fix-test-wsgi.patch - " - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest \ - -k "not test_close_connection_with_multiple_requests" # a known issue -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/uvicorn-$pkgver-py3-none-any.whl -} - -sha512sums=" -260782e385a2934049da8c474750958826afe1bfe23b38fe2f6420f355af7a537563f8fe6ac3830814c7469203703d10f4f9f3d6e53e79113bfd2fd34f7a7c72 uvicorn-0.34.0.tar.gz -cfad91dd84f8974362f52d754d7a29f09d07927a46acaa0eb490b6115a5729d84d6df94fead10ccd4cce7f5ea376f1348b0f59daede661dd8373a3851c313c46 test_multiprocess.patch -858e9a7baaf1c12e076aecd81aaaf622b35a59dcaabea4ee1bfc4cda704c9fe271b1cc616a5910d845393717e4989cecb3b04be249cb5d0df1001ec5224c293f 2540_add-websocketssansioprotocol.patch -f8a8c190981b9070232ea985880685bc801947cc7f673d59abf73d3e68bc2e13515ad200232a1de2af0808bc85da48a341f57d47caf87bcc190bfdc3c45718e0 2541_bump-wesockets-on-requirements.patch -379963f9ccbda013e4a0bc3441eee70a581c91f60206aedc15df6a8737950824b7cb8d867774fc415763449bb3e0bba66601e8551101bfc1741098acd035f0cc fix-test-wsgi.patch -" diff --git a/ilot/uvicorn/fix-test-wsgi.patch b/ilot/uvicorn/fix-test-wsgi.patch deleted file mode 100644 index ed49e52..0000000 --- a/ilot/uvicorn/fix-test-wsgi.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/tests/middleware/test_wsgi.py.orig b/tests/middleware/test_wsgi.py -index 6003f27..2750487 100644 ---- a/tests/middleware/test_wsgi.py.orig -+++ b/tests/middleware/test_wsgi.py -@@ -73,7 +73,7 @@ async def test_wsgi_post(wsgi_middleware: Callable) -> None: - async with httpx.AsyncClient(transport=transport, base_url="http://testserver") as client: - response = await client.post("/", json={"example": 123}) - assert response.status_code == 200 -- assert response.text == '{"example":123}' -+ assert response.text == '{"example": 123}' - - - @pytest.mark.anyio diff --git a/ilot/uvicorn/test_multiprocess.patch b/ilot/uvicorn/test_multiprocess.patch deleted file mode 100644 index 231526e..0000000 --- a/ilot/uvicorn/test_multiprocess.patch +++ /dev/null @@ -1,14 +0,0 @@ -Wait a bit longer, otherwise the workers might -not have time to finish restarting. 
- ---- a/tests/supervisors/test_multiprocess.py -+++ b/tests/supervisors/test_multiprocess.py -@@ -132,7 +132,7 @@ def test_multiprocess_sighup() -> None: - time.sleep(1) - pids = [p.pid for p in supervisor.processes] - supervisor.signal_queue.append(signal.SIGHUP) -- time.sleep(1) -+ time.sleep(3) - assert pids != [p.pid for p in supervisor.processes] - supervisor.signal_queue.append(signal.SIGINT) - supervisor.join_all() diff --git a/ilot/wikijs/APKBUILD b/ilot/wikijs/APKBUILD index 5b75746..43d8189 100644 --- a/ilot/wikijs/APKBUILD +++ b/ilot/wikijs/APKBUILD @@ -1,8 +1,8 @@ # Maintainer: Antoine Martin (ayakael) # Contributor: Antoine Martin (ayakael) pkgname=wikijs -pkgver=2.5.305 -pkgrel=0 +pkgver=2.5.303 +pkgrel=1 pkgdesc="Wiki.js | A modern, lightweight and powerful wiki app built on Node.js" license="AGPL-3.0" arch="!armv7 x86_64" @@ -49,14 +49,11 @@ package() { install -Dm644 "$builddir"/package.json -t "$pkgdir"/usr/lib/bundles/wikijs cp -aR "$builddir"/assets "$builddir"/server "$builddir"/node_modules "$pkgdir"/usr/lib/bundles/wikijs - # remove prebuilts - rm -Rf "$pkgdir"/usr/lib/bundles/wikijs/node_modules/*/prebuilds - mkdir -p "$pkgdir"/var/lib/wikijs chown 5494:5494 "$pkgdir"/var/lib/wikijs } sha512sums=" -e715e2d93fd176dc93676b3dd97d8dd745589552a7d67971fce0c1097f607fa44a3147534709a82b3ad13dda95d7c5833bc30ec37538c6cdef54ac309e6b44d1 wikijs-2.5.305.tar.gz +a463d79ad0d8ff15dbe568b839094d697c6de0b2e991b77a4944e2a82f9789de6840e504a4673e4e0900d61596e880ca276008de86dac4f05f5823dc0427d2fc wikijs-2.5.303.tar.gz 355131ee5617348b82681cb8543c784eea59689990a268ecd3b77d44fe9abcca9c86fb8b047f0a8faeba079c650faa7790c5dd65418d313cd7561f38bb590c03 wikijs.initd 07b536c20e370d2a926038165f0e953283259c213a80a8648419565f5359ab05f528ac310e81606914013da212270df6feddb22e514cbcb2464c8274c956e4af config.sample.yml.patch "
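For reference, nearly every Python APKBUILD touched in this commit follows the same PEP 517 build-and-test shape. Shown standalone as a sketch, with the paths exactly as used in the build()/check() functions above (run from an unpacked source tree):

    # Build a wheel with gpep517; the wheel lands in .dist/.
    gpep517 build-wheel --wheel-dir .dist --output-fd 3 3>&1 >&2

    # Install it into a throwaway venv that reuses system site-packages
    # (so depends/checkdepends satisfy the imports), then run the tests.
    python3 -m venv --clear --without-pip --system-site-packages .testenv
    .testenv/bin/python3 -m installer .dist/*.whl
    .testenv/bin/python3 -m pytest -v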