diff --git a/.forgejo/bin/deploy.sh b/.forgejo/bin/deploy.sh deleted file mode 100755 index e8e811c..0000000 --- a/.forgejo/bin/deploy.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh - -# shellcheck disable=SC3040 -set -eu -o pipefail - -readonly REPOS="backports ilot" -readonly BASEBRANCH=$GITHUB_BASE_REF -readonly TARGET_REPO=$CI_ALPINE_REPO - -apkgs=$(find package -type f -name "*.apk") - -for apk in $apkgs; do - branch=$(echo $apk | awk -F '/' '{print $2}') - arch=$(echo $apk | awk -F '/' '{print $3}') - name=$(echo $apk | awk -F '/' '{print $4}') - - if [ "$(curl -s $GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/pulls/$GITHUB_EVENT_NUMBER | jq .draft)" == "true" ]; then - # if draft, send to -testing branch - branch="$branch-testing" - else - # if not draft, assume that this was sent to $branch-testing and nuke it - curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN -X DELETE $TARGET_REPO/$BASEBRANCH/$branch-testing/$arch/$name - fi - - echo "Sending $name of arch $arch to $TARGET_REPO/$BASEBRANCH/$branch" - return=$(curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN --upload-file $apk $TARGET_REPO/$BASEBRANCH/$branch 2>&1) - echo $return - if [ "$return" == "package file already exists" ]; then - echo "Package already exists, refreshing..." - curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN -X DELETE $TARGET_REPO/$BASEBRANCH/$branch/$arch/$name - curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN --upload-file $apk $TARGET_REPO/$BASEBRANCH/$branch - fi -done - diff --git a/.forgejo/patches/build.patch b/.forgejo/patches/build.patch deleted file mode 100644 index 6de7c04..0000000 --- a/.forgejo/patches/build.patch +++ /dev/null @@ -1,66 +0,0 @@ -diff --git a/usr/local/bin/build.sh.orig b/usr/local/bin/build.sh -old mode 100644 -new mode 100755 -index c3b8f7a..f609018 ---- a/usr/local/bin/build.sh.orig -+++ b/usr/local/bin/build.sh -@@ -7,13 +7,15 @@ - set -eu -o pipefail - - readonly APORTSDIR=$CI_PROJECT_DIR --readonly REPOS="main community testing non-free" -+readonly REPOS="ilot backports" -+readonly ALPINE_REPOS="main community testing" - readonly ARCH=$(apk --print-arch) - # gitlab variables - readonly BASEBRANCH=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME - - : "${REPODEST:=$HOME/packages}" --: "${MIRROR:=https://dl-cdn.alpinelinux.org/alpine}" -+: "${MIRROR:=https://forge.ilot.io/api/packages/ilot/alpine}" -+: "${ALPINE_MIRROR:=http://dl-cdn.alpinelinux.org/alpine}" - : "${MAX_ARTIFACT_SIZE:=300000000}" #300M - : "${CI_DEBUG_BUILD:=}" - -@@ -68,8 +70,8 @@ report() { - - get_release() { - case $BASEBRANCH in -- *-stable) echo v"${BASEBRANCH%-*}";; -- master) echo edge;; -+ v*) echo "$BASEBRANCH";; -+ edge) echo edge;; - *) die "Branch \"$BASEBRANCH\" not supported!" 
- esac - } -@@ -101,11 +103,11 @@ set_repositories_for() { - release=$(get_release) - for repo in $REPOS; do - [ "$repo" = "non-free" ] && continue -- [ "$release" != "edge" ] && [ "$repo" == "testing" ] && continue -+ [ "$release" == "edge" ] && [ "$repo" == "backports" ] && continue - repos="$repos $MIRROR/$release/$repo $REPODEST/$repo" - [ "$repo" = "$target_repo" ] && break - done -- doas sh -c "printf '%s\n' $repos > /etc/apk/repositories" -+ doas sh -c "printf '%s\n' $repos >> /etc/apk/repositories" - doas apk update - } - -@@ -118,7 +120,15 @@ apply_offset_limit() { - } - - setup_system() { -- doas sh -c "echo $MIRROR/$(get_release)/main > /etc/apk/repositories" -+ local repos='' repo='' -+ local release -+ -+ release=$(get_release) -+ for repo in $ALPINE_REPOS; do -+ [ "$release" != "edge" ] && [ "$repo" == "testing" ] && continue -+ repos="$repos $ALPINE_MIRROR/$release/$repo" -+ done -+ doas sh -c "printf '%s\n' $repos > /etc/apk/repositories" - doas apk -U upgrade -a || apk fix || die "Failed to up/downgrade system" - abuild-keygen -ain - doas sed -i -E 's/export JOBS=[0-9]+$/export JOBS=$(nproc)/' /etc/abuild.conf diff --git a/.forgejo/workflows/build-aarch64.yaml b/.forgejo/workflows/build-aarch64.yaml deleted file mode 100644 index b1fe4fe..0000000 --- a/.forgejo/workflows/build-aarch64.yaml +++ /dev/null @@ -1,57 +0,0 @@ -on: - pull_request: - types: [ assigned, opened, synchronize, reopened ] - -concurrency: - group: ${{ github.head_ref || github.ref_name }} - cancel-in-progress: true - -jobs: - build-aarch64: - runs-on: aarch64 - container: - image: alpinelinux/alpine-gitlab-ci:latest - env: - CI_PROJECT_DIR: ${{ github.workspace }} - CI_DEBUG_BUILD: ${{ runner.debug }} - CI_MERGE_REQUEST_PROJECT_URL: ${{ github.server_url }}/${{ github.repository }} - CI_MERGE_REQUEST_TARGET_BRANCH_NAME: ${{ github.base_ref }} - steps: - - name: Environment setup - run: | - doas apk add nodejs git patch curl - cd /etc/apk/keys - doas curl -JO https://forge.ilot.io/api/packages/ilot/alpine/key - - name: Repo pull - uses: actions/checkout@v4 - with: - fetch-depth: 500 - - name: Package build - run: | - doas patch -d / -p1 -i ${{ github.workspace }}/.forgejo/patches/build.patch - build.sh - - name: Package upload - uses: forgejo/upload-artifact@v3 - with: - name: package - path: packages - - deploy-aarch64: - needs: [build-aarch64] - runs-on: aarch64 - container: - image: alpine:latest - env: - CI_ALPINE_REPO: 'https://forge.ilot.io/api/packages/ilot/alpine' - FORGE_REPO_TOKEN: ${{ secrets.FORGE_REPO_TOKEN }} - FORGE_REPO_USER: ${{ vars.FORGE_REPO_USER }} - GITHUB_EVENT_NUMBER: ${{ github.event.number }} - steps: - - name: Setting up environment - run: apk add nodejs curl findutils git gawk jq - - name: Repo pull - uses: actions/checkout@v4 - - name: Package download - uses: forgejo/download-artifact@v3 - - name: Package deployment - run: ${{ github.workspace }}/.forgejo/bin/deploy.sh diff --git a/.forgejo/workflows/build-x86_64.yaml b/.forgejo/workflows/build-x86_64.yaml deleted file mode 100644 index 2725613..0000000 --- a/.forgejo/workflows/build-x86_64.yaml +++ /dev/null @@ -1,57 +0,0 @@ -on: - pull_request: - types: [ assigned, opened, synchronize, reopened ] - -concurrency: - group: ${{ github.head_ref || github.ref_name }} - cancel-in-progress: true - -jobs: - build-x86_64: - runs-on: x86_64 - container: - image: alpinelinux/alpine-gitlab-ci:latest - env: - CI_PROJECT_DIR: ${{ github.workspace }} - CI_DEBUG_BUILD: ${{ runner.debug }} - CI_MERGE_REQUEST_PROJECT_URL: ${{ 
github.server_url }}/${{ github.repository }} - CI_MERGE_REQUEST_TARGET_BRANCH_NAME: ${{ github.base_ref }} - steps: - - name: Environment setup - run: | - doas apk add nodejs git patch curl - cd /etc/apk/keys - doas curl -JO https://forge.ilot.io/api/packages/ilot/alpine/key - - name: Repo pull - uses: actions/checkout@v4 - with: - fetch-depth: 500 - - name: Package build - run: | - doas patch -d / -p1 -i ${{ github.workspace }}/.forgejo/patches/build.patch - build.sh - - name: Package upload - uses: forgejo/upload-artifact@v3 - with: - name: package - path: packages - - deploy-x86_64: - needs: [build-x86_64] - runs-on: x86_64 - container: - image: alpine:latest - env: - CI_ALPINE_REPO: 'https://forge.ilot.io/api/packages/ilot/alpine' - FORGE_REPO_TOKEN: ${{ secrets.FORGE_REPO_TOKEN }} - FORGE_REPO_USER: ${{ vars.FORGE_REPO_USER }} - GITHUB_EVENT_NUMBER: ${{ github.event.number }} - steps: - - name: Setting up environment - run: apk add nodejs curl findutils git gawk jq - - name: Repo pull - uses: actions/checkout@v4 - - name: Package download - uses: forgejo/download-artifact@v3 - - name: Package deployment - run: ${{ github.workspace }}/.forgejo/bin/deploy.sh diff --git a/.forgejo/workflows/lint.yaml b/.forgejo/workflows/lint.yaml deleted file mode 100644 index 3614deb..0000000 --- a/.forgejo/workflows/lint.yaml +++ /dev/null @@ -1,21 +0,0 @@ -on: - pull_request: - types: [ assigned, opened, synchronize, reopened ] - -jobs: - lint: - run-name: lint - runs-on: x86_64 - container: - image: alpinelinux/apkbuild-lint-tools:latest - env: - CI_PROJECT_DIR: ${{ github.workspace }} - CI_DEBUG_BUILD: ${{ runner.debug }} - CI_MERGE_REQUEST_PROJECT_URL: ${{ github.server_url }}/${{ github.repository }} - CI_MERGE_REQUEST_TARGET_BRANCH_NAME: ${{ github.base_ref }} - steps: - - run: doas apk add nodejs git - - uses: actions/checkout@v4 - with: - fetch-depth: 500 - - run: lint diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000..dd8afae --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,109 @@ +stages: + - verify + - build + - deploy + +variables: + GIT_STRATEGY: clone + GIT_DEPTH: "500" + +lint: + stage: verify + interruptible: true + script: + - | + sudo apk add shellcheck atools sudo abuild + export PATH="$PATH:$CI_PROJECT_DIR/.gitlab/bin" + lint + allow_failure: true + only: + - merge_requests + tags: + - apk-$CI_MERGE_REQUEST_TARGET_BRANCH_NAME-x86_64 + +.build: + stage: build + interruptible: true + script: + - | + sudo apk add alpine-sdk lua-aports sudo + sudo addgroup $USER abuild + export PATH="$PATH:$CI_PROJECT_DIR/.gitlab/bin" + sudo -Eu $USER build.sh + artifacts: + paths: + - packages/ + - keys/ + - logs/ + expire_in: 7 days + when: always + only: + - merge_requests + +.cross: + stage: build + interruptible: true + script: + - | + sudo apk add alpine-sdk lua-aports sudo gzip xz qemu-$CI_QEMU_TARGET_ARCH + sudo addgroup $USER abuild + export PATH="$PATH:$CI_PROJECT_DIR/.gitlab/bin" + build-rootfs.sh alpine${CI_MERGE_REQUEST_TARGET_BRANCH_NAME/v} $CI_ALPINE_TARGET_ARCH --rootfsdir $HOME/sysroot-$CI_ALPINE_TARGET_ARCH + cp /etc/apk/repositories $HOME/sysroot-$CI_ALPINE_TARGET_ARCH/etc/apk/. 
+      sudo -Eu $USER CHOST=$CI_ALPINE_TARGET_ARCH build.sh
+  artifacts:
+    paths:
+      - packages/
+      - keys/
+      - logs/
+    expire_in: 7 days
+    when: always
+  only:
+    - merge_requests
+
+build-x86_64:
+  extends: .build
+  when: always
+  tags:
+    - apk-$CI_MERGE_REQUEST_TARGET_BRANCH_NAME-x86_64
+
+build-aarch64:
+  extends: .build
+  when: always
+  tags:
+    - apk-$CI_MERGE_REQUEST_TARGET_BRANCH_NAME-aarch64
+
+build-ppc64le:
+  extends: .build
+  when: manual
+  tags:
+    - apk-$CI_MERGE_REQUEST_TARGET_BRANCH_NAME-ppc64le
+
+build-s390x:
+  extends: .build
+  when: manual
+  tags:
+    - apk-$CI_MERGE_REQUEST_TARGET_BRANCH_NAME-s390x
+
+build-armv7:
+  extends: .cross
+  when: manual
+  tags:
+    - apk-$CI_MERGE_REQUEST_TARGET_BRANCH_NAME-x86_64
+  variables:
+    CI_ALPINE_TARGET_ARCH: armv7
+    CI_QEMU_TARGET_ARCH: arm
+
+push:
+  interruptible: true
+  stage: deploy
+  script:
+    - |
+      sudo apk add abuild git-lfs findutils
+      export PATH="$PATH:$CI_PROJECT_DIR/.gitlab/bin"
+      push.sh
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+      when: manual
+  tags:
+    - repo
diff --git a/.gitlab/bin/APKBUILD_SHIM b/.gitlab/bin/APKBUILD_SHIM
new file mode 100755
index 0000000..76577ff
--- /dev/null
+++ b/.gitlab/bin/APKBUILD_SHIM
@@ -0,0 +1,111 @@
+#!/bin/sh
+
+set -e
+
+arch=
+builddir=
+checkdepends=
+depends=
+depends_dev=
+depends_doc=
+depends_libs=
+depends_openrc=
+depends_static=
+install=
+install_if=
+langdir=
+ldpath=
+license=
+makedepends=
+makedepends_build=
+makedepends_host=
+md5sums=
+options=
+patch_args=
+pkgbasedir=
+pkgdesc=
+pkgdir=
+pkgname=
+pkgrel=
+pkgver=
+pkggroups=
+pkgusers=
+provides=
+provider_priority=
+replaces=
+sha256sums=
+sha512sums=
+sonameprefix=
+source=
+srcdir=
+startdir=
+subpackages=
+subpkgdir=
+subpkgname=
+triggers=
+url=
+
+# abuild.conf
+
+CFLAGS=
+CXXFLAGS=
+CPPFLAGS=
+LDFLAGS=
+JOBS=
+MAKEFLAGS=
+CMAKE_CROSSOPTS=
+
+. ./APKBUILD
+
+: "$arch"
+: "$builddir"
+: "$checkdepends"
+: "$depends"
+: "$depends_dev"
+: "$depends_doc"
+: "$depends_libs"
+: "$depends_openrc"
+: "$depends_static"
+: "$install"
+: "$install_if"
+: "$langdir"
+: "$ldpath"
+: "$license"
+: "$makedepends"
+: "$makedepends_build"
+: "$makedepends_host"
+: "$md5sums"
+: "$options"
+: "$patch_args"
+: "$pkgbasedir"
+: "$pkgdesc"
+: "$pkgdir"
+: "$pkgname"
+: "$pkgrel"
+: "$pkgver"
+: "$pkggroups"
+: "$pkgusers"
+: "$provides"
+: "$provider_priority"
+: "$replaces"
+: "$sha256sums"
+: "$sha512sums"
+: "$sonameprefix"
+: "$source"
+: "$srcdir"
+: "$startdir"
+: "$subpackages"
+: "$subpkgdir"
+: "$subpkgname"
+: "$triggers"
+: "$url"
+
+# abuild.conf
+
+: "$CFLAGS"
+: "$CXXFLAGS"
+: "$CPPFLAGS"
+: "$LDFLAGS"
+: "$JOBS"
+: "$MAKEFLAGS"
+: "$CMAKE_CROSSOPTS"
diff --git a/.gitlab/bin/apkbuild-shellcheck b/.gitlab/bin/apkbuild-shellcheck
new file mode 100755
index 0000000..3126684
--- /dev/null
+++ b/.gitlab/bin/apkbuild-shellcheck
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+shellcheck -s ash \
+	-e SC3043 \
+	-e SC3057 \
+	-e SC3060 \
+	-e SC2016 \
+	-e SC2086 \
+	-e SC2169 \
+	-e SC2155 \
+	-e SC2100 \
+	-e SC2209 \
+	-e SC2030 \
+	-e SC2031 \
+	-e SC1090 \
+	-xa $CI_PROJECT_DIR/.gitlab/bin/APKBUILD_SHIM
diff --git a/.gitlab/bin/build-rootfs.sh b/.gitlab/bin/build-rootfs.sh
new file mode 100755
index 0000000..44c4372
--- /dev/null
+++ b/.gitlab/bin/build-rootfs.sh
@@ -0,0 +1,556 @@
+#!/usr/bin/env bash
+# Available here: https://lab.ilot.io/dotnet/arcade/-/blob/7f6d9796cc7f594772f798358dbdd8c69b6a97af/eng/common/cross/build-rootfs.sh
+# Only modification: qemu-$arch-static becomes qemu-$arch
+
+set -e
+
+usage()
+{
+    echo "Usage: $0 [BuildArch] [CodeName] [lldbx.y] [llvmx[.y]] [--skipunmount] --rootfsdir <directory>]"
+    echo "BuildArch can be: arm(default), arm64, armel, armv6, ppc64le, riscv64, s390x, x64, x86"
+    echo "CodeName - optional, Code name for Linux, can be: xenial(default), zesty, bionic, alpine"
+    echo "           for alpine can be specified with version: alpineX.YY or alpineedge"
+    echo "           for FreeBSD can be: freebsd12, freebsd13"
+    echo "           for illumos can be: illumos"
+    echo "           for Haiku can be: haiku."
+    echo "lldbx.y - optional, LLDB version, can be: lldb3.9(default), lldb4.0, lldb5.0, lldb6.0 no-lldb. Ignored for alpine and FreeBSD"
+    echo "llvmx[.y] - optional, LLVM version for LLVM related packages."
+    echo "--skipunmount - optional, will skip the unmount of rootfs folder."
+    echo "--use-mirror - optional, use mirror URL to fetch resources, when available."
+    echo "--jobs N - optional, restrict to N jobs."
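For orientation, this is roughly how the `.cross` template in `.gitlab-ci.yml` above drives this vendored script. The branch and arch values below are hypothetical and only illustrate the `${CI_MERGE_REQUEST_TARGET_BRANCH_NAME/v}` expansion; the command itself is the one from the `.cross` job.

```sh
# Illustrative values only; in CI these come from the merge request and job variables.
CI_MERGE_REQUEST_TARGET_BRANCH_NAME=v3.22   # an MR targeting the v3.22 branch
CI_ALPINE_TARGET_ARCH=armv7                 # set in the build-armv7 job variables

# ${VAR/v} drops the first "v", so "v3.22" -> "3.22" and "edge" -> "edge",
# yielding "alpine3.22" or "alpineedge" as the CodeName argument:
build-rootfs.sh "alpine${CI_MERGE_REQUEST_TARGET_BRANCH_NAME/v}" "$CI_ALPINE_TARGET_ARCH" \
    --rootfsdir "$HOME/sysroot-$CI_ALPINE_TARGET_ARCH"
```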
+ exit 1 +} + +__CodeName=xenial +__CrossDir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +__BuildArch=arm +__AlpineArch=armv7 +__FreeBSDArch=arm +__FreeBSDMachineArch=armv7 +__IllumosArch=arm7 +__QEMUArch=arm +__UbuntuArch=armhf +__UbuntuRepo="http://ports.ubuntu.com/" +__LLDB_Package="liblldb-3.9-dev" +__SkipUnmount=0 + +# base development support +__UbuntuPackages="build-essential" + +__AlpinePackages="alpine-base" +__AlpinePackages+=" build-base" + +# symlinks fixer +__UbuntuPackages+=" symlinks" + +# runtime dependencies +__UbuntuPackages+=" libicu-dev" +__UbuntuPackages+=" liblttng-ust-dev" +__UbuntuPackages+=" libunwind8-dev" +__UbuntuPackages+=" libnuma-dev" + +# runtime libraries' dependencies +__UbuntuPackages+=" libcurl4-openssl-dev" +__UbuntuPackages+=" libkrb5-dev" +__UbuntuPackages+=" libssl-dev" +__UbuntuPackages+=" zlib1g-dev" + +__FreeBSDBase="12.3-RELEASE" +__FreeBSDPkg="1.17.0" +__FreeBSDABI="12" +__FreeBSDPackages="libunwind" +__FreeBSDPackages+=" icu" +__FreeBSDPackages+=" libinotify" +__FreeBSDPackages+=" openssl" +__FreeBSDPackages+=" krb5" +__FreeBSDPackages+=" terminfo-db" + +__IllumosPackages="icu" +__IllumosPackages+=" mit-krb5" +__IllumosPackages+=" openssl" +__IllumosPackages+=" zlib" + +__HaikuPackages="gmp" +__HaikuPackages+=" gmp_devel" +__HaikuPackages+=" krb5" +__HaikuPackages+=" krb5_devel" +__HaikuPackages+=" libiconv" +__HaikuPackages+=" libiconv_devel" +__HaikuPackages+=" llvm12_libunwind" +__HaikuPackages+=" llvm12_libunwind_devel" +__HaikuPackages+=" mpfr" +__HaikuPackages+=" mpfr_devel" + +# ML.NET dependencies +__UbuntuPackages+=" libomp5" +__UbuntuPackages+=" libomp-dev" + +__Keyring= +__UseMirror=0 + +__UnprocessedBuildArgs= +while :; do + if [[ "$#" -le 0 ]]; then + break + fi + + lowerI="$(echo "$1" | tr "[:upper:]" "[:lower:]")" + case $lowerI in + -\?|-h|--help) + usage + exit 1 + ;; + arm) + __BuildArch=arm + __UbuntuArch=armhf + __AlpineArch=armv7 + __QEMUArch=arm + ;; + arm64) + __BuildArch=arm64 + __UbuntuArch=arm64 + __AlpineArch=aarch64 + __QEMUArch=aarch64 + __FreeBSDArch=arm64 + __FreeBSDMachineArch=aarch64 + ;; + armel) + __BuildArch=armel + __UbuntuArch=armel + __UbuntuRepo="http://ftp.debian.org/debian/" + __CodeName=jessie + ;; + armv6) + __BuildArch=armv6 + __UbuntuArch=armhf + __QEMUArch=arm + __UbuntuRepo="http://raspbian.raspberrypi.org/raspbian/" + __CodeName=buster + __LLDB_Package="liblldb-6.0-dev" + + if [[ -e "/usr/share/keyrings/raspbian-archive-keyring.gpg" ]]; then + __Keyring="--keyring /usr/share/keyrings/raspbian-archive-keyring.gpg" + fi + ;; + riscv64) + __BuildArch=riscv64 + __AlpineArch=riscv64 + __QEMUArch=riscv64 + __UbuntuArch=riscv64 + __UbuntuRepo="http://deb.debian.org/debian-ports" + __UbuntuPackages="${__UbuntuPackages// libunwind8-dev/}" + unset __LLDB_Package + + if [[ -e "/usr/share/keyrings/debian-ports-archive-keyring.gpg" ]]; then + __Keyring="--keyring /usr/share/keyrings/debian-ports-archive-keyring.gpg --include=debian-ports-archive-keyring" + fi + ;; + ppc64le) + __BuildArch=ppc64le + __AlpineArch=ppc64le + __QEMUArch=ppc64le + __UbuntuArch=ppc64el + __UbuntuRepo="http://ports.ubuntu.com/ubuntu-ports/" + __UbuntuPackages="${__UbuntuPackages// libunwind8-dev/}" + __UbuntuPackages="${__UbuntuPackages// libomp-dev/}" + __UbuntuPackages="${__UbuntuPackages// libomp5/}" + unset __LLDB_Package + ;; + s390x) + __BuildArch=s390x + __AlpineArch=s390x + __QEMUArch=s390x + __UbuntuArch=s390x + __UbuntuRepo="http://ports.ubuntu.com/ubuntu-ports/" + __UbuntuPackages="${__UbuntuPackages// 
libunwind8-dev/}" + __UbuntuPackages="${__UbuntuPackages// libomp-dev/}" + __UbuntuPackages="${__UbuntuPackages// libomp5/}" + unset __LLDB_Package + ;; + x64) + __BuildArch=x64 + __AlpineArch=x86_64 + __QEMUArch=x86_64 + __UbuntuArch=amd64 + __FreeBSDArch=amd64 + __FreeBSDMachineArch=amd64 + __illumosArch=x86_64 + __UbuntuRepo= + ;; + x86) + __BuildArch=x86 + __AlpineArch=i386 + __QEMUArch=i386 + __UbuntuArch=i386 + __AlpineArch=x86 + __UbuntuRepo="http://archive.ubuntu.com/ubuntu/" + ;; + lldb*) + version="${lowerI/lldb/}" + parts=(${version//./ }) + + # for versions > 6.0, lldb has dropped the minor version + if [[ "${parts[0]}" -gt 6 ]]; then + version="${parts[0]}" + fi + + __LLDB_Package="liblldb-${version}-dev" + ;; + no-lldb) + unset __LLDB_Package + ;; + llvm*) + version="${lowerI/llvm/}" + parts=(${version//./ }) + __LLVM_MajorVersion="${parts[0]}" + __LLVM_MinorVersion="${parts[1]}" + + # for versions > 6.0, llvm has dropped the minor version + if [[ -z "$__LLVM_MinorVersion" && "$__LLVM_MajorVersion" -le 6 ]]; then + __LLVM_MinorVersion=0; + fi + ;; + xenial) # Ubuntu 16.04 + if [[ "$__CodeName" != "jessie" ]]; then + __CodeName=xenial + fi + ;; + zesty) # Ubuntu 17.04 + if [[ "$__CodeName" != "jessie" ]]; then + __CodeName=zesty + fi + ;; + bionic) # Ubuntu 18.04 + if [[ "$__CodeName" != "jessie" ]]; then + __CodeName=bionic + fi + ;; + focal) # Ubuntu 20.04 + if [[ "$__CodeName" != "jessie" ]]; then + __CodeName=focal + fi + ;; + jammy) # Ubuntu 22.04 + if [[ "$__CodeName" != "jessie" ]]; then + __CodeName=jammy + fi + ;; + jessie) # Debian 8 + __CodeName=jessie + + if [[ -z "$__UbuntuRepo" ]]; then + __UbuntuRepo="http://ftp.debian.org/debian/" + fi + ;; + stretch) # Debian 9 + __CodeName=stretch + __LLDB_Package="liblldb-6.0-dev" + + if [[ -z "$__UbuntuRepo" ]]; then + __UbuntuRepo="http://ftp.debian.org/debian/" + fi + ;; + buster) # Debian 10 + __CodeName=buster + __LLDB_Package="liblldb-6.0-dev" + + if [[ -z "$__UbuntuRepo" ]]; then + __UbuntuRepo="http://ftp.debian.org/debian/" + fi + ;; + bullseye) # Debian 11 + __CodeName=bullseye + + if [[ -z "$__UbuntuRepo" ]]; then + __UbuntuRepo="http://ftp.debian.org/debian/" + fi + ;; + sid) # Debian sid + __CodeName=sid + + if [[ -z "$__UbuntuRepo" ]]; then + __UbuntuRepo="http://ftp.debian.org/debian/" + fi + ;; + tizen) + __CodeName= + __UbuntuRepo= + __Tizen=tizen + ;; + alpine*) + __CodeName=alpine + __UbuntuRepo= + version="${lowerI/alpine/}" + + if [[ "$version" == "edge" ]]; then + __AlpineVersion=edge + else + parts=(${version//./ }) + __AlpineMajorVersion="${parts[0]}" + __AlpineMinoVersion="${parts[1]}" + __AlpineVersion="$__AlpineMajorVersion.$__AlpineMinoVersion" + fi + ;; + freebsd12) + __CodeName=freebsd + __SkipUnmount=1 + ;; + freebsd13) + __CodeName=freebsd + __FreeBSDBase="13.0-RELEASE" + __FreeBSDABI="13" + __SkipUnmount=1 + ;; + illumos) + __CodeName=illumos + __SkipUnmount=1 + ;; + haiku) + __CodeName=haiku + __BuildArch=x64 + __SkipUnmount=1 + ;; + --skipunmount) + __SkipUnmount=1 + ;; + --rootfsdir|-rootfsdir) + shift + __RootfsDir="$1" + ;; + --use-mirror) + __UseMirror=1 + ;; + --use-jobs) + shift + MAXJOBS=$1 + ;; + *) + __UnprocessedBuildArgs="$__UnprocessedBuildArgs $1" + ;; + esac + + shift +done + +if [[ "$__BuildArch" == "armel" ]]; then + __LLDB_Package="lldb-3.5-dev" +fi + +__UbuntuPackages+=" ${__LLDB_Package:-}" + +if [[ -n "$__LLVM_MajorVersion" ]]; then + __UbuntuPackages+=" libclang-common-${__LLVM_MajorVersion}${__LLVM_MinorVersion:+.$__LLVM_MinorVersion}-dev" +fi + +if [[ -z 
"$__RootfsDir" && -n "$ROOTFS_DIR" ]]; then + __RootfsDir="$ROOTFS_DIR" +fi + +if [[ -z "$__RootfsDir" ]]; then + __RootfsDir="$__CrossDir/../../../.tools/rootfs/$__BuildArch" +fi + +if [[ -d "$__RootfsDir" ]]; then + if [[ "$__SkipUnmount" == "0" ]]; then + umount "$__RootfsDir"/* || true + fi + rm -rf "$__RootfsDir" +fi + +mkdir -p "$__RootfsDir" +__RootfsDir="$( cd "$__RootfsDir" && pwd )" + +if [[ "$__CodeName" == "alpine" ]]; then + __ApkToolsVersion=2.12.11 + __ApkToolsDir="$(mktemp -d)" + + wget "https://gitlab.alpinelinux.org/api/v4/projects/5/packages/generic//v$__ApkToolsVersion/x86_64/apk.static" -P "$__ApkToolsDir" + chmod +x "$__ApkToolsDir/apk.static" + + mkdir -p "$__RootfsDir"/usr/bin + cp -v "/usr/bin/qemu-$__QEMUArch" "$__RootfsDir/usr/bin" + + if [[ "$__AlpineVersion" == "edge" ]]; then + version=edge + else + version="v$__AlpineVersion" + fi + + # initialize DB + "$__ApkToolsDir/apk.static" \ + -X "http://dl-cdn.alpinelinux.org/alpine/$version/main" \ + -X "http://dl-cdn.alpinelinux.org/alpine/$version/community" \ + -U --allow-untrusted --root "$__RootfsDir" --arch "$__AlpineArch" --initdb add + + if [[ "$__AlpineLlvmLibsLookup" == 1 ]]; then + __AlpinePackages+=" $("$__ApkToolsDir/apk.static" \ + -X "http://dl-cdn.alpinelinux.org/alpine/$version/main" \ + -X "http://dl-cdn.alpinelinux.org/alpine/$version/community" \ + -U --allow-untrusted --root "$__RootfsDir" --arch "$__AlpineArch" \ + search 'llvm*-libs' | sort | tail -1 | sed 's/-[^-]*//2g')" + fi + + # install all packages in one go + "$__ApkToolsDir/apk.static" \ + -X "http://dl-cdn.alpinelinux.org/alpine/$version/main" \ + -X "http://dl-cdn.alpinelinux.org/alpine/$version/community" \ + -U --allow-untrusted --no-scripts --root "$__RootfsDir" --arch "$__AlpineArch" \ + add $__AlpinePackages + + rm -r "$__ApkToolsDir" +elif [[ "$__CodeName" == "freebsd" ]]; then + mkdir -p "$__RootfsDir"/usr/local/etc + JOBS=${MAXJOBS:="$(getconf _NPROCESSORS_ONLN)"} + wget -O - "https://download.freebsd.org/ftp/releases/${__FreeBSDArch}/${__FreeBSDMachineArch}/${__FreeBSDBase}/base.txz" | tar -C "$__RootfsDir" -Jxf - ./lib ./usr/lib ./usr/libdata ./usr/include ./usr/share/keys ./etc ./bin/freebsd-version + echo "ABI = \"FreeBSD:${__FreeBSDABI}:${__FreeBSDMachineArch}\"; FINGERPRINTS = \"${__RootfsDir}/usr/share/keys\"; REPOS_DIR = [\"${__RootfsDir}/etc/pkg\"]; REPO_AUTOUPDATE = NO; RUN_SCRIPTS = NO;" > "${__RootfsDir}"/usr/local/etc/pkg.conf + echo "FreeBSD: { url: \"pkg+http://pkg.FreeBSD.org/\${ABI}/quarterly\", mirror_type: \"srv\", signature_type: \"fingerprints\", fingerprints: \"${__RootfsDir}/usr/share/keys/pkg\", enabled: yes }" > "${__RootfsDir}"/etc/pkg/FreeBSD.conf + mkdir -p "$__RootfsDir"/tmp + # get and build package manager + wget -O - "https://github.com/freebsd/pkg/archive/${__FreeBSDPkg}.tar.gz" | tar -C "$__RootfsDir"/tmp -zxf - + cd "$__RootfsDir/tmp/pkg-${__FreeBSDPkg}" + # needed for install to succeed + mkdir -p "$__RootfsDir"/host/etc + ./autogen.sh && ./configure --prefix="$__RootfsDir"/host && make -j "$JOBS" && make install + rm -rf "$__RootfsDir/tmp/pkg-${__FreeBSDPkg}" + # install packages we need. 
+ INSTALL_AS_USER=$(whoami) "$__RootfsDir"/host/sbin/pkg -r "$__RootfsDir" -C "$__RootfsDir"/usr/local/etc/pkg.conf update + INSTALL_AS_USER=$(whoami) "$__RootfsDir"/host/sbin/pkg -r "$__RootfsDir" -C "$__RootfsDir"/usr/local/etc/pkg.conf install --yes $__FreeBSDPackages +elif [[ "$__CodeName" == "illumos" ]]; then + mkdir "$__RootfsDir/tmp" + pushd "$__RootfsDir/tmp" + JOBS=${MAXJOBS:="$(getconf _NPROCESSORS_ONLN)"} + echo "Downloading sysroot." + wget -O - https://github.com/illumos/sysroot/releases/download/20181213-de6af22ae73b-v1/illumos-sysroot-i386-20181213-de6af22ae73b-v1.tar.gz | tar -C "$__RootfsDir" -xzf - + echo "Building binutils. Please wait.." + wget -O - https://ftp.gnu.org/gnu/binutils/binutils-2.33.1.tar.bz2 | tar -xjf - + mkdir build-binutils && cd build-binutils + ../binutils-2.33.1/configure --prefix="$__RootfsDir" --target="${__illumosArch}-sun-solaris2.10" --program-prefix="${__illumosArch}-illumos-" --with-sysroot="$__RootfsDir" + make -j "$JOBS" && make install && cd .. + echo "Building gcc. Please wait.." + wget -O - https://ftp.gnu.org/gnu/gcc/gcc-8.4.0/gcc-8.4.0.tar.xz | tar -xJf - + CFLAGS="-fPIC" + CXXFLAGS="-fPIC" + CXXFLAGS_FOR_TARGET="-fPIC" + CFLAGS_FOR_TARGET="-fPIC" + export CFLAGS CXXFLAGS CXXFLAGS_FOR_TARGET CFLAGS_FOR_TARGET + mkdir build-gcc && cd build-gcc + ../gcc-8.4.0/configure --prefix="$__RootfsDir" --target="${__illumosArch}-sun-solaris2.10" --program-prefix="${__illumosArch}-illumos-" --with-sysroot="$__RootfsDir" --with-gnu-as \ + --with-gnu-ld --disable-nls --disable-libgomp --disable-libquadmath --disable-libssp --disable-libvtv --disable-libcilkrts --disable-libada --disable-libsanitizer \ + --disable-libquadmath-support --disable-shared --enable-tls + make -j "$JOBS" && make install && cd .. + BaseUrl=https://pkgsrc.smartos.org + if [[ "$__UseMirror" == 1 ]]; then + BaseUrl=https://pkgsrc.smartos.skylime.net + fi + BaseUrl="$BaseUrl/packages/SmartOS/trunk/${__illumosArch}/All" + echo "Downloading manifest" + wget "$BaseUrl" + echo "Downloading dependencies." + read -ra array <<<"$__IllumosPackages" + for package in "${array[@]}"; do + echo "Installing '$package'" + # find last occurrence of package in listing and extract its name + package="$(sed -En '/.*href="('"$package"'-[0-9].*).tgz".*/h;$!d;g;s//\1/p' All)" + echo "Resolved name '$package'" + wget "$BaseUrl"/"$package".tgz + ar -x "$package".tgz + tar --skip-old-files -xzf "$package".tmp.tg* -C "$__RootfsDir" 2>/dev/null + done + echo "Cleaning up temporary files." 
+ popd + rm -rf "$__RootfsDir"/{tmp,+*} + mkdir -p "$__RootfsDir"/usr/include/net + mkdir -p "$__RootfsDir"/usr/include/netpacket + wget -P "$__RootfsDir"/usr/include/net https://raw.githubusercontent.com/illumos/illumos-gate/master/usr/src/uts/common/io/bpf/net/bpf.h + wget -P "$__RootfsDir"/usr/include/net https://raw.githubusercontent.com/illumos/illumos-gate/master/usr/src/uts/common/io/bpf/net/dlt.h + wget -P "$__RootfsDir"/usr/include/netpacket https://raw.githubusercontent.com/illumos/illumos-gate/master/usr/src/uts/common/inet/sockmods/netpacket/packet.h + wget -P "$__RootfsDir"/usr/include/sys https://raw.githubusercontent.com/illumos/illumos-gate/master/usr/src/uts/common/sys/sdt.h +elif [[ "$__CodeName" == "haiku" ]]; then + JOBS=${MAXJOBS:="$(getconf _NPROCESSORS_ONLN)"} + + echo "Building Haiku sysroot for x86_64" + mkdir -p "$__RootfsDir/tmp" + cd "$__RootfsDir/tmp" + git clone -b hrev56235 https://review.haiku-os.org/haiku + git clone -b btrev43195 https://review.haiku-os.org/buildtools + cd "$__RootfsDir/tmp/buildtools" && git checkout 7487388f5110021d400b9f3b88e1a7f310dc066d + + # Fetch some unmerged patches + cd "$__RootfsDir/tmp/haiku" + ## Add development build profile (slimmer than nightly) + git fetch origin refs/changes/64/4164/1 && git -c commit.gpgsign=false cherry-pick FETCH_HEAD + + # Build jam + cd "$__RootfsDir/tmp/buildtools/jam" + make + + # Configure cross tools + echo "Building cross-compiler" + mkdir -p "$__RootfsDir/generated" + cd "$__RootfsDir/generated" + "$__RootfsDir/tmp/haiku/configure" -j"$JOBS" --sysroot "$__RootfsDir" --cross-tools-source "$__RootfsDir/tmp/buildtools" --build-cross-tools x86_64 + + # Build Haiku packages + echo "Building Haiku" + echo 'HAIKU_BUILD_PROFILE = "development-raw" ;' > UserProfileConfig + "$__RootfsDir/tmp/buildtools/jam/jam0" -j"$JOBS" -q 'package' 'Haiku' + + BaseUrl="https://depot.haiku-os.org/__api/v2/pkg/get-pkg" + + # Download additional packages + echo "Downloading additional required packages" + read -ra array <<<"$__HaikuPackages" + for package in "${array[@]}"; do + echo "Downloading $package..." 
+ # API documented here: https://github.com/haiku/haikudepotserver/blob/master/haikudepotserver-api2/src/main/resources/api2/pkg.yaml#L60 + # The schema here: https://github.com/haiku/haikudepotserver/blob/master/haikudepotserver-api2/src/main/resources/api2/pkg.yaml#L598 + hpkgDownloadUrl="$(wget -qO- --post-data='{"name":"'"$package"'","repositorySourceCode":"haikuports_x86_64","versionType":"LATEST","naturalLanguageCode":"en"}' \ + --header='Content-Type:application/json' "$BaseUrl" | jq -r '.result.versions[].hpkgDownloadURL')" + wget -P "$__RootfsDir/generated/download" "$hpkgDownloadUrl" + done + + # Setup the sysroot + echo "Setting up sysroot and extracting needed packages" + mkdir -p "$__RootfsDir/boot/system" + for file in "$__RootfsDir/generated/objects/haiku/x86_64/packaging/packages/"*.hpkg; do + "$__RootfsDir/generated/objects/linux/x86_64/release/tools/package/package" extract -C "$__RootfsDir/boot/system" "$file" + done + for file in "$__RootfsDir/generated/download/"*.hpkg; do + "$__RootfsDir/generated/objects/linux/x86_64/release/tools/package/package" extract -C "$__RootfsDir/boot/system" "$file" + done + + # Cleaning up temporary files + echo "Cleaning up temporary files" + rm -rf "$__RootfsDir/tmp" + for name in "$__RootfsDir/generated/"*; do + if [[ "$name" =~ "cross-tools-" ]]; then + : # Keep the cross-compiler + else + rm -rf "$name" + fi + done +elif [[ -n "$__CodeName" ]]; then + qemu-debootstrap $__Keyring --arch "$__UbuntuArch" "$__CodeName" "$__RootfsDir" "$__UbuntuRepo" + cp "$__CrossDir/$__BuildArch/sources.list.$__CodeName" "$__RootfsDir/etc/apt/sources.list" + chroot "$__RootfsDir" apt-get update + chroot "$__RootfsDir" apt-get -f -y install + chroot "$__RootfsDir" apt-get -y install $__UbuntuPackages + chroot "$__RootfsDir" symlinks -cr /usr + chroot "$__RootfsDir" apt-get clean + + if [[ "$__SkipUnmount" == "0" ]]; then + umount "$__RootfsDir"/* || true + fi + + if [[ "$__BuildArch" == "armel" && "$__CodeName" == "jessie" ]]; then + pushd "$__RootfsDir" + patch -p1 < "$__CrossDir/$__BuildArch/armel.jessie.patch" + popd + fi +elif [[ "$__Tizen" == "tizen" ]]; then + ROOTFS_DIR="$__RootfsDir" "$__CrossDir/tizen-build-rootfs.sh" "$__BuildArch" +else + echo "Unsupported target platform." + usage; + exit 1 +fi diff --git a/.gitlab/bin/build.sh b/.gitlab/bin/build.sh new file mode 100755 index 0000000..286f965 --- /dev/null +++ b/.gitlab/bin/build.sh @@ -0,0 +1,283 @@ +#!/bin/sh +# shellcheck disable=SC3043 + +. 
$CI_PROJECT_DIR/.gitlab/bin/functions.sh + +# shellcheck disable=SC3040 +set -eu -o pipefail + +readonly APORTSDIR=$CI_PROJECT_DIR +readonly REPOS="cross backports user testing community" +readonly ALPINE_REPOS="main community" +readonly ARCH=$(apk --print-arch) +# gitlab variables +readonly BASEBRANCH=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME + +: "${REPODEST:=$HOME/packages}" +: "${MIRROR:=https://lab.ilot.io/ayakael/repo-apk/-/raw}" +: "${ALPINE_MIRROR:=http://dl-cdn.alpinelinux.org/alpine}" +: "${MAX_ARTIFACT_SIZE:=300000000}" #300M +: "${CI_DEBUG_BUILD:=}" + +: "${CI_ALPINE_BUILD_OFFSET:=0}" +: "${CI_ALPINE_BUILD_LIMIT:=9999}" +: "${CI_ALPINE_TARGET_ARCH:=$(uname -m)}" + +msg() { + local color=${2:-green} + case "$color" in + red) color="31";; + green) color="32";; + yellow) color="33";; + blue) color="34";; + *) color="32";; + esac + printf "\033[1;%sm>>>\033[1;0m %s\n" "$color" "$1" | xargs >&2 +} + +verbose() { + echo "> " "$@" + # shellcheck disable=SC2068 + $@ +} + +debugging() { + [ -n "$CI_DEBUG_BUILD" ] +} + +debug() { + if debugging; then + verbose "$@" + fi +} + +die() { + msg "$1" red + exit 1 +} + +capture_stderr() { + "$@" 2>&1 +} + +report() { + report=$1 + + reportsdir=$APORTSDIR/logs/ + mkdir -p "$reportsdir" + + tee -a "$reportsdir/$report.log" +} + +get_release() { + case $BASEBRANCH in + v*) echo "${BASEBRANCH%-*}";; + edge) echo edge;; + *) die "Branch \"$BASEBRANCH\" not supported!" + esac +} + +build_aport() { + local repo="$1" aport="$2" + cd "$APORTSDIR/$repo/$aport" + export CHOST=$CI_ALPINE_TARGET_ARCH + if abuild -r 2>&1 | report "build-$aport"; then + checkapk | report "checkapk-$aport" || true + aport_ok="$aport_ok $repo/$aport" + else + aport_ng="$aport_ng $repo/$aport" + fi +} + +check_aport() { + local repo="$1" aport="$2" + cd "$APORTSDIR/$repo/$aport" + export CHOST=$CI_ALPINE_TARGET_ARCH + # TODO: this enables crossbuild only on user, this should be cleaner + if [ "$repo" != "user" ] && [ "$repo" != "backports" ] && [ "$CI_ALPINE_TARGET_ARCH" != "$ARCH" ]; then + aport_na="$aport_na $repo/$aport" + return 1 + fi + if ! abuild check_arch 2>/dev/null; then + aport_na="$aport_na $repo/$aport" + return 1 + fi +} + +set_repositories_for() { + local target_repo="$1" repos='' repo='' + local release + + release=$(get_release) + for repo in $REPOS; do + repos="$repos $MIRROR/$release/$repo $REPODEST/$repo" + [ "$repo" = "$target_repo" ] && break + done + sudo sh -c "printf '%s\n' $repos >> /etc/apk/repositories" + sudo apk update || true + if [ "$CI_ALPINE_TARGET_ARCH" != "$ARCH" ]; then + sudo sh -c "printf '%s\n' $repos >> $HOME/sysroot-$CI_ALPINE_TARGET_ARCH/etc/apk/repositories" + sudo cp -R /etc/apk/keys/* $HOME/sysroot-$CI_ALPINE_TARGET_ARCH/etc/apk/keys/. 
+ sudo apk --root=$HOME/sysroot-$CI_ALPINE_TARGET_ARCH update || true + fi +} + +apply_offset_limit() { + start=$1 + limit=$2 + end=$((start+limit)) + + sed -n "$((start+1)),${end}p" +} + +setup_system() { + local repos='' repo='' + local release + + release=$(get_release) + for repo in $ALPINE_REPOS; do + [ "$release" != "edge" ] && [ "$repo" == "testing" ] && continue + repos="$repos $ALPINE_MIRROR/$release/$repo" + done + repos="$repos $MIRROR/$release/cross" + sudo sh -c "printf '%s\n' $repos > /etc/apk/repositories" + sudo apk -U upgrade -a || sudo apk fix || die "Failed to up/downgrade system" + if [ "$CI_ALPINE_TARGET_ARCH" != "$ARCH" ]; then + sudo apk add gcc-$CI_ALPINE_TARGET_ARCH + fi + gitlab_key_to_rsa $ABUILD_KEY rsa-private $HOME/.abuild/$ABUILD_KEY_NAME.rsa + gitlab_key_to_rsa $ABUILD_KEY_PUB rsa-public $HOME/.abuild/$ABUILD_KEY_NAME.rsa.pub + chmod 700 $HOME/.abuild/$ABUILD_KEY_NAME.rsa + echo "PACKAGER_PRIVKEY=$HOME/.abuild/$ABUILD_KEY_NAME.rsa" >> $HOME/.abuild/abuild.conf + sudo cp $HOME/.abuild/$ABUILD_KEY_NAME.rsa.pub /etc/apk/keys/$ABUILD_KEY_NAME.rsa.pub + + # patch abuild for crosscompiling + sudo patch -p1 -d / -i $CI_PROJECT_DIR/.gitlab/patches/abuild-cross.patch + + sudo sed -i -E 's/export JOBS=[0-9]+$/export JOBS=$(nproc)/' /etc/abuild.conf + ( . /etc/abuild.conf && echo "Building with $JOBS jobs" ) + mkdir -p "$REPODEST" + git config --global init.defaultBranch master +} + +sysinfo() { + printf ">>> Host system information (arch: %s, release: %s) <<<\n" "$ARCH" "$(get_release)" + printf "- Number of Cores: %s\n" "$(nproc)" + printf "- Memory: %s Gb\n" "$(awk '/^MemTotal/ {print ($2/1024/1024)}' /proc/meminfo)" + printf "- Free space: %s\n" "$(df -hP / | awk '/\/$/ {print $4}')" +} + +copy_artifacts() { + cd "$APORTSDIR" + + packages_size="$(du -sk "$REPODEST" | awk '{print $1 * 1024}')" + if [ -z "$packages_size" ]; then + return + fi + + echo "Artifact size: $packages_size bytes" + + mkdir -p keys/ packages/ + + if [ "$packages_size" -lt $MAX_ARTIFACT_SIZE ]; then + msg "Copying packages for artifact upload" + cp -ar "$REPODEST"/* packages/ 2>/dev/null + cp ~/.abuild/*.rsa.pub keys/ + else + msg "Artifact size $packages_size larger than max ($MAX_ARTIFACT_SIZE), skipping uploading them" yellow + fi +} + +section_start setup "Setting up the system" collapse + +if debugging; then + set -x +fi + +aport_ok= +aport_na= +aport_ng= +failed= + +sysinfo || true +setup_system || die "Failed to setup system" + +# git no longer allows to execute in repositories owned by different users +sudo chown -R $USER: . 
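To make the repository layering above concrete, here is a sketch of what the two printf calls leave in /etc/apk/repositories for a hypothetical merge request against v3.19 whose changed aport lives in user/. The branch is an assumption; the URLs, ordering and REPODEST pairing follow the defaults defined earlier in build.sh ($HOME is expanded at write time).

```
# written by setup_system (">" truncates the file):
http://dl-cdn.alpinelinux.org/alpine/v3.19/main
http://dl-cdn.alpinelinux.org/alpine/v3.19/community
https://lab.ilot.io/ayakael/repo-apk/-/raw/v3.19/cross

# appended by set_repositories_for "user" (">>"), walking REPOS in order
# (cross backports user testing community) and stopping at the target repo;
# each mirror entry is paired with the local REPODEST output directory:
https://lab.ilot.io/ayakael/repo-apk/-/raw/v3.19/cross
$HOME/packages/cross
https://lab.ilot.io/ayakael/repo-apk/-/raw/v3.19/backports
$HOME/packages/backports
https://lab.ilot.io/ayakael/repo-apk/-/raw/v3.19/user
$HOME/packages/user
```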
+
+fetch_flags="-qn"
+debugging && fetch_flags="-v"
+
+git fetch $fetch_flags "$CI_MERGE_REQUEST_PROJECT_URL" \
+	"+refs/heads/$BASEBRANCH:refs/heads/$BASEBRANCH"
+
+if debugging; then
+	merge_base=$(git merge-base "$BASEBRANCH" HEAD) || echo "Could not determine merge-base"
+	echo "Merge base: $merge_base"
+	git --version
+	git config -l
+	[ -n "$merge_base" ] && git tag -f merge-base "$merge_base"
+	git --no-pager log -200 --oneline --graph --decorate --all
+fi
+
+section_end setup
+
+build_start=$CI_ALPINE_BUILD_OFFSET
+build_limit=$CI_ALPINE_BUILD_LIMIT
+
+for repo in $(changed_repos); do
+	mkdir -p "$APORTSDIR"/logs "$APORTSDIR"/packages "$APORTSDIR"/keys
+	set_repositories_for "$repo"
+	built_aports=0
+	changed_aports_in_repo=$(changed_aports "$repo")
+	changed_aports_in_repo_count=$(echo "$changed_aports_in_repo" | wc -l)
+	changed_aports_to_build=$(echo "$changed_aports_in_repo" | apply_offset_limit "$build_start" "$build_limit")
+
+	msg "Changed aports in $repo:"
+	# shellcheck disable=SC2086 # Splitting is expected here
+	printf " - %s\n" $changed_aports_to_build
+	for pkgname in $changed_aports_to_build; do
+		section_start "build_$pkgname" "Building package $pkgname"
+		built_aports=$((built_aports+1))
+		if check_aport "$repo" "$pkgname"; then
+			build_aport "$repo" "$pkgname"
+		fi
+		section_end "build_$pkgname"
+	done
+
+	build_start=$((build_start-(changed_aports_in_repo_count-built_aports)))
+	build_limit=$((build_limit-built_aports))
+
+	if [ $build_limit -le 0 ]; then
+		msg "Limit reached, breaking"
+		break
+	fi
+done
+
+section_start artifacts "Handling artifacts" collapse
+copy_artifacts || true
+section_end artifacts
+
+section_start summary "Build summary"
+
+echo "### Build summary ###"
+
+for ok in $aport_ok; do
+	msg "$ok: built successfully"
+done
+
+for na in $aport_na; do
+	msg "$na: disabled for $CI_ALPINE_TARGET_ARCH" yellow
+done
+
+for ng in $aport_ng; do
+	msg "$ng: build failed" red
+	failed=true
+done
+section_end summary
+
+if [ "$failed" = true ]; then
+	exit 1
+elif [ -z "$aport_ok" ]; then
+	msg "No packages found to be built." yellow
+fi
diff --git a/.gitlab/bin/changed-aports b/.gitlab/bin/changed-aports
new file mode 100755
index 0000000..4541230
--- /dev/null
+++ b/.gitlab/bin/changed-aports
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+if [ $# -lt 1 ]; then
+	echo "Usage: $0 <basebranch>"
+	exit 1
+fi
+
+if ! git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
+	echo "Fatal: not inside a git repository"
+	exit 2
+fi
+
+basebranch=$1
+
+if ! git rev-parse --verify --quiet $basebranch >/dev/null; then
+	# The base branch does not exist, probably due to a shallow clone
+	git fetch -v $CI_MERGE_REQUEST_PROJECT_URL.git +refs/heads/$basebranch:refs/heads/$basebranch
+fi
+
+git --no-pager diff --diff-filter=ACMR --name-only $basebranch...HEAD -- "*/APKBUILD" | xargs -r -n1 dirname
diff --git a/.gitlab/bin/functions.sh b/.gitlab/bin/functions.sh
new file mode 100755
index 0000000..44de1fe
--- /dev/null
+++ b/.gitlab/bin/functions.sh
@@ -0,0 +1,74 @@
+# shellcheck disable=SC3043
+
+:
+
+# shellcheck disable=SC3040
+set -eu -o pipefail
+
+changed_repos() {
+	: "${APORTSDIR?APORTSDIR missing}"
+	: "${BASEBRANCH?BASEBRANCH missing}"
+
+	cd "$APORTSDIR"
+	for repo in $REPOS; do
+		git diff --diff-filter=ACMR --exit-code "$BASEBRANCH"...HEAD -- "$repo" >/dev/null \
+			|| echo "$repo"
+	done
+}
+
+changed_aports() {
+	: "${APORTSDIR?APORTSDIR missing}"
+	: "${BASEBRANCH?BASEBRANCH missing}"
+
+	cd "$APORTSDIR"
+	local repo="$1"
+	local aports
+
+	aports=$(git diff --name-only --diff-filter=ACMR --relative="$repo" \
+		"$BASEBRANCH"...HEAD -- "*/APKBUILD" | xargs -rn1 dirname)
+
+	# shellcheck disable=2086
+	ap builddirs -d "$APORTSDIR/$repo" $aports 2>/dev/null | xargs -rn1 basename
+}
+
+section_start() {
+	name=${1?arg 1 name missing}
+	header=${2?arg 2 header missing}
+	collapsed=${3:-}
+	timestamp=$(date +%s)
+
+	options=""
+	case $collapsed in
+	yes|on|collapse|collapsed|true) options="[collapsed=true]";;
+	esac
+
+	printf "\e[0Ksection_start:%d:%s%s\r\e[0K%s\n" "$timestamp" "$name" "$options" "$header"
+}
+
+section_end() {
+	name=$1
+	timestamp=$(date +%s)
+
+	printf "\e[0Ksection_end:%d:%s\r\e[0K" "$timestamp" "$name"
+}
+
+gitlab_key_to_rsa() {
+	KEY=$1
+	TYPE=$2
+	TGT=$3
+	TGT_DIR=${TGT%/*}
+	if [ "$TGT" == "$TGT_DIR" ]; then
+		TGT_DIR="./"
+	fi
+	if [ ! -d "$TGT_DIR" ]; then
+		mkdir -p "$TGT_DIR"
+	fi
+	case $TYPE in
+		rsa-public) local type="PUBLIC";;
+		rsa-private) local type="RSA PRIVATE";;
+	esac
+	echo "-----BEGIN $type KEY-----" > "$TGT"
+	echo $1 | sed 's/.\{64\}/&\
+/g' >> "$TGT"
+	echo "-----END $type KEY-----" >> "$TGT"
+}
diff --git a/.gitlab/bin/lint b/.gitlab/bin/lint
new file mode 100755
index 0000000..c1edcfb
--- /dev/null
+++ b/.gitlab/bin/lint
@@ -0,0 +1,96 @@
+#!/bin/sh
+
+BLUE="\e[34m"
+MAGENTA="\e[35m"
+RESET="\e[0m"
+
+readonly BASEBRANCH=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME
+
+verbose() {
+	echo "> " "$@"
+	# shellcheck disable=SC2068
+	$@
+}
+
+debugging() {
+	[ -n "$CI_DEBUG_BUILD" ]
+}
+
+debug() {
+	if debugging; then
+		verbose "$@"
+	fi
+}
+
+# git no longer allows to execute in repositories owned by different users
+sudo chown -R gitlab-runner: .
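A quick sketch of how the gitlab_key_to_rsa helper from functions.sh above is meant to be used (this mirrors the call made in build.sh's setup_system; the variable names are the project's CI variables and the key material shown is a truncated placeholder, not a real key):

```sh
# Hypothetical values: ABUILD_KEY holds the key body on a single line, without PEM armor.
ABUILD_KEY_NAME=ayakael-1234abcd
ABUILD_KEY='MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcw...'

. .gitlab/bin/functions.sh
gitlab_key_to_rsa "$ABUILD_KEY" rsa-private "$HOME/.abuild/$ABUILD_KEY_NAME.rsa"

# The function wraps the body at 64 characters and adds the armor, so the
# result is an ordinary PEM file:
#   -----BEGIN RSA PRIVATE KEY-----
#   MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcw...
#   -----END RSA PRIVATE KEY-----
```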
+ +fetch_flags="-qn" +debugging && fetch_flags="-v" + +git fetch $fetch_flags "$CI_MERGE_REQUEST_PROJECT_URL" \ + "+refs/heads/$BASEBRANCH:refs/heads/$BASEBRANCH" + +if debugging; then + merge_base=$(git merge-base "$BASEBRANCH" HEAD) + echo "$merge_base" + git --version + git config -l + git tag merge-base "$merge_base" || { echo "Could not determine merge-base"; exit 50; } + git log --oneline --graph --decorate --all +fi + +has_problems=0 + +for PKG in $(changed-aports "$BASEBRANCH"); do + printf "$BLUE==>$RESET Linting $PKG\n" + + ( + cd "$PKG" + + repo=$(basename $(dirname $PKG)); + + if [ "$repo" == "backports" ]; then + echo "Skipping $PKG as backports (we don't care)" + continue + fi + + printf "\n\n" + printf "$BLUE" + printf '======================================================\n' + printf " parse APKBUILD:\n" + printf '======================================================' + printf "$RESET\n\n" + ( . ./APKBUILD ) || has_problems=1 + + printf "\n\n" + printf "$BLUE" + printf '======================================================\n' + printf " abuild sanitycheck:\n" + printf '======================================================' + printf "$RESET\n\n" + abuild sanitycheck || has_problems=1 + + printf "\n\n" + printf "$BLUE" + printf '======================================================\n' + printf " apkbuild-shellcheck:\n" + printf '======================================================' + printf "$RESET\n" + apkbuild-shellcheck || has_problems=1 + + printf "\n\n" + printf "$BLUE" + printf '======================================================\n' + printf " apkbuild-lint:\n" + printf '======================================================' + printf "$RESET\n\n" + apkbuild-lint APKBUILD || has_problems=1 + + return $has_problems + ) || has_problems=1 + + echo +done + +exit $has_problems diff --git a/.gitlab/bin/push.sh b/.gitlab/bin/push.sh new file mode 100755 index 0000000..e93101a --- /dev/null +++ b/.gitlab/bin/push.sh @@ -0,0 +1,56 @@ +#!/bin/sh + +# shellcheck disable=SC3043 + +. $CI_PROJECT_DIR/.gitlab/bin/functions.sh + +# shellcheck disable=SC3040 +set -eu -o pipefail + +readonly APORTSDIR=$CI_PROJECT_DIR +readonly REPOS="backports user" +readonly BASEBRANCH=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME + +export GIT_SSH_COMMAND="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" + +gitlab_key_to_rsa $ABUILD_KEY rsa-private $HOME/.abuild/$ABUILD_KEY_NAME.rsa +gitlab_key_to_rsa $ABUILD_KEY_PUB rsa-public $HOME/.abuild/$ABUILD_KEY_NAME.rsa.pub +gitlab_key_to_rsa $SSH_KEY rsa-private $HOME/.ssh/id_rsa +chmod 700 "$HOME"/.ssh/id_rsa +chmod 700 "$HOME"/.abuild/$ABUILD_KEY_NAME.rsa + +echo "PACKAGER_PRIVKEY=$HOME/.abuild/$ABUILD_KEY_NAME.rsa" > $HOME/.abuild/abuild.conf +echo "REPODEST=$HOME/repo-apk" >> $HOME/.abuild/abuild.conf +sudo cp $HOME/.abuild/$ABUILD_KEY_NAME.rsa.pub /etc/apk/keys/. 
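The deployment loop further down in push.sh rebuilds apk indexes instead of uploading them: for every changed repo it deletes the stale APKINDEX.tar.gz files, then runs `abuild index` once per architecture directory from inside a throwaway DUMMY aport (abuild needs an APKBUILD to run, and with REPODEST pointed at the git-lfs checkout the index lands next to the .apk files). A rough sketch of what a single iteration amounts to, with a hypothetical repo and arch:

```sh
# Hypothetical iteration for repo "user" and arch "x86_64";
# REPODEST=$HOME/repo-apk is set in ~/.abuild/abuild.conf earlier in push.sh.
rm "$HOME"/repo-apk/user/*/APKINDEX.tar.gz || true

mkdir -p user/DUMMY
echo "pkgname=DUMMY" > user/DUMMY/APKBUILD
cd user/DUMMY

# abuild index regenerates (and signs with PACKAGER_PRIVKEY) the
# APKINDEX.tar.gz for $REPODEST/user/$CHOST from the .apk files present there.
CHOST=x86_64 abuild index

cd "$CI_PROJECT_DIR"
rm -R user/DUMMY
```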
+ +if [ -d $HOME/repo-apk ]; then + git -C $HOME/repo-apk fetch + git -C $HOME/repo-apk checkout $BASEBRANCH + git -C $HOME/repo-apk pull --rebase +else + git clone git@lab.ilot.io:ayakael/repo-apk -b $BASEBRANCH $HOME/repo-apk +fi + +for i in $(find packages -type f -name "*.apk"); do + install -vDm644 $i ${i/packages/$HOME\/repo-apk} +done + +fetch_flags="-qn" +git fetch $fetch_flags "$CI_MERGE_REQUEST_PROJECT_URL" \ + "+refs/heads/$BASEBRANCH:refs/heads/$BASEBRANCH" + +for repo in $(changed_repos); do + rm $HOME/repo-apk/$repo/*/APKINDEX.tar.gz | true + mkdir -p $repo/DUMMY + echo "pkgname=DUMMY" > $repo/DUMMY/APKBUILD + cd $repo/DUMMY + for i in $(find $HOME/repo-apk/$repo -maxdepth 1 -mindepth 1 -printf '%P '); do + CHOST=$i abuild index + done + cd "$CI_PROJECT_DIR" + rm -R $repo/DUMMY +done + +git -C $HOME/repo-apk add . +git -C $HOME/repo-apk commit -m "Update from $CI_MERGE_REQUEST_IID - $CI_MERGE_REQUEST_TITLE" +git -C $HOME/repo-apk push diff --git a/.gitlab/patches/abuild-cross.patch b/.gitlab/patches/abuild-cross.patch new file mode 100644 index 0000000..50afd23 --- /dev/null +++ b/.gitlab/patches/abuild-cross.patch @@ -0,0 +1,17 @@ +diff --git a/usr/bin/abuild.orig b/usr/bin/abuild +index 71e0681..d4ae3dd 100755 +--- a/usr/bin/abuild.orig ++++ b/usr/bin/abuild +@@ -2231,7 +2231,11 @@ calcdeps() { + list_has $i $builddeps && continue + subpackages_has ${i%%[<>=]*} || builddeps="$builddeps $i" + done +- hostdeps="$EXTRADEPENDS_TARGET" ++ for i in $EXTRADEPENDS_HOST $EXTRADEPENDS_TARGET $depends $makedepends; do ++ [ "$pkgname" = "${i%%[<>=]*}" ] && continue ++ list_has $i $hostdeps && continue ++ subpackages_has ${i%%[<>=]*} || hostdeps="$hostdeps $i" ++ done + fi + } + diff --git a/README.md b/README.md index 852d3b4..ae24788 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,19 @@ -# iports -Upstream: https://codeberg.org/ilot/iports +# user-aports +Upstream: https://lab.ilot.io/ayakael/user-aports ## Description This repository contains aports that are not yet merged in the official Alpine Linux repository or don’t adhere to Alpine polices. Packages are automatically -built using CI. Once built, they are deployed to a git-lfs repository, making -them available to apk. +built using GitLab CI on my own GitLab instance. Once built, they are deployed +to a git-lfs repository, making them available to apk. Branches are matched to Alpine releases. + ## Repositories -You can browse all the repositories at https://codeberg.org/ilot/iports +You can browse all the repositories at https://lab.ilot.io/ayakael/repo-apk. Affixed to each repository description is the appropriate link for use in `/etc/apk/repositories`. @@ -23,13 +24,13 @@ https://lab.ilot.io/ayakael/repo-apk/-/raw/edge/backports Aports from the official Alpine repositories backported from edge. -#### Ilot +#### User ``` https://lab.ilot.io/ayakael/repo-apk/-/raw/edge/user ``` Aports that have yet to be (or may never be) upstreamed to the official -aports and that are used by ilot coop. +aports. ## How to use @@ -51,13 +52,12 @@ they will work for you. 
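As a concrete illustration of the setup described above: enabling the `user` repository on an edge system would look roughly like the following. The repository URL comes from this README; the signing-key file name is a placeholder, since the exact key published for this repo is not shown in this diff.

```sh
# Add the repository (the link next to each repository description):
echo "https://lab.ilot.io/ayakael/repo-apk/-/raw/edge/user" >> /etc/apk/repositories

# Trust the key the packages were signed with (placeholder name); the CI
# installs the matching .rsa.pub into /etc/apk/keys on the build hosts.
cp ayakael-1234abcd.rsa.pub /etc/apk/keys/
apk update
```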
## Contribution & bug reports If you wish to contribute to this aports collection, or wish to report a bug, -you can do so on Codeberg here: -https://codeberg.org/ilot/iports/issues +you can do so on Alpine's GitLab instance here: +https://gitlab.alpinelinux.org/ayakael/user-aports -For packages that are in backports, bug reports and merge requests +For packages that are in testing/community, bug reports and merge requests should be done on Alpine's aports repo instance: https://gitlab.alpinelinux.org/alpine/aports ## License This readme, abuilds and support scripts are licensed under MIT License. - diff --git a/ilot/authentik/APKBUILD b/ilot/authentik/APKBUILD index 72e65ad..d10a575 100644 --- a/ilot/authentik/APKBUILD +++ b/ilot/authentik/APKBUILD @@ -2,7 +2,7 @@ # Maintainer: Antoine Martin (ayakael) pkgname=authentik pkgver=2024.4.3 -pkgrel=2 +pkgrel=1 pkgdesc="An open-source Identity Provider focused on flexibility and versatility" url="https://github.com/goauthentik/authentik" # s390x: missing py3-celery py3-flower and py3-kombu diff --git a/ilot/freescout/APKBUILD b/ilot/freescout/APKBUILD index d083ae2..1fd520c 100644 --- a/ilot/freescout/APKBUILD +++ b/ilot/freescout/APKBUILD @@ -2,7 +2,7 @@ # Contributor: Antoine Martin (ayakael) pkgname=freescout pkgver=1.8.139 -pkgrel=1 +pkgrel=0 pkgdesc="Free self-hosted help desk & shared mailbox" arch="noarch" url="freescout.net" diff --git a/ilot/listmonk/APKBUILD b/ilot/listmonk/APKBUILD index 0ad6acd..00951f0 100644 --- a/ilot/listmonk/APKBUILD +++ b/ilot/listmonk/APKBUILD @@ -2,7 +2,7 @@ # Maintainer: Antoine Martin (ayakael) pkgname=listmonk pkgver=3.0.0 -pkgrel=1 +pkgrel=0 pkgdesc='Self-hosted newsletter and mailing list manager with a modern dashboard' arch="all" url=https://listmonk.app diff --git a/ilot/loomio/APKBUILD b/ilot/loomio/APKBUILD index 74452af..38eb631 100644 --- a/ilot/loomio/APKBUILD +++ b/ilot/loomio/APKBUILD @@ -4,7 +4,7 @@ pkgname=loomio pkgver=2.21.4 _gittag=v$pkgver -pkgrel=1 +pkgrel=0 pkgdesc="A collaborative decision making tool" url="https://github.com/loomio/loomio" arch="x86_64" diff --git a/ilot/peertube/APKBUILD b/ilot/peertube/APKBUILD index 365fa90..f50d1c1 100644 --- a/ilot/peertube/APKBUILD +++ b/ilot/peertube/APKBUILD @@ -2,7 +2,7 @@ # Contributor: Antoine Martin (ayakael) pkgname=peertube pkgver=6.0.2 -pkgrel=1 +pkgrel=0 pkgdesc="ActivityPub-federated video streaming platform using P2P directly in your web browser" arch="x86_64" url="https://joinpeertube.org/" diff --git a/ilot/php82-pecl-inotify/APKBUILD b/ilot/php82-pecl-inotify/APKBUILD index d2bb518..44903a1 100644 --- a/ilot/php82-pecl-inotify/APKBUILD +++ b/ilot/php82-pecl-inotify/APKBUILD @@ -3,7 +3,7 @@ pkgname=php82-pecl-inotify _extname=inotify pkgver=3.0.0 -pkgrel=1 +pkgrel=0 pkgdesc="Inotify bindings for PHP 8.3" url="https://pecl.php.net/package/inotify" arch="all" diff --git a/ilot/php83-pecl-inotify/APKBUILD b/ilot/php83-pecl-inotify/APKBUILD index 48f2bbf..771466f 100644 --- a/ilot/php83-pecl-inotify/APKBUILD +++ b/ilot/php83-pecl-inotify/APKBUILD @@ -3,7 +3,7 @@ pkgname=php83-pecl-inotify _extname=inotify pkgver=3.0.0 -pkgrel=1 +pkgrel=0 pkgdesc="Inotify bindings for PHP 8.3" url="https://pecl.php.net/package/inotify" arch="all" diff --git a/ilot/py3-django-rest-framework/APKBUILD b/ilot/py3-django-rest-framework/APKBUILD deleted file mode 100644 index 82a1497..0000000 --- a/ilot/py3-django-rest-framework/APKBUILD +++ /dev/null @@ -1,59 +0,0 @@ -# Contributor: Leonardo Arena -# Contributor: Justin Berthault -# Maintainer: Antoine 
Martin (ayakael) -pkgname=py3-django-rest-framework -_pkgname=django-rest-framework -pkgver=3.14.0 -pkgrel=1 -pkgdesc="Web APIs for Django" -url="https://github.com/encode/django-rest-framework" -arch="noarch" -license="Custom" -depends=" - py3-django - py3-tz -" -makedepends=" - py3-setuptools - py3-gpep517 - py3-wheel -" -checkdepends=" - py3-pytest-django - py3-pytest-cov - py3-core-api - py3-jinja2 - py3-uritemplate - py3-django-guardian - py3-psycopg2 - py3-markdown - py3-yaml - py3-inflection -" -subpackages="$pkgname-pyc" -source="$pkgname-$pkgver.tar.gz::https://github.com/encode/$_pkgname/archive/$pkgver.tar.gz" -options="!check" # Failing tests -builddir="$srcdir"/$_pkgname-$pkgver - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer "$builddir"/.dist/*.whl - # test_urlpatterns: AssertionError: assert [] is not [] - # test_markdown: rather hard to decipher assertion error - .testenv/bin/python3 -m pytest -v -k 'not test_urlpatterns and not test_markdown' -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -c1012c656b427e0318b2056e2f984ddc75a5b4e85f375c76fba165ad06e285848eee1bc6dc76c097daec57d780efb2551110199d62ce636a03951aec13ab4013 py3-django-rest-framework-3.14.0.tar.gz -" diff --git a/ilot/py3-django-tenants/997_update-from-pgclone-schema.patch b/ilot/py3-django-tenants/997_update-from-pgclone-schema.patch deleted file mode 100644 index b2999d2..0000000 --- a/ilot/py3-django-tenants/997_update-from-pgclone-schema.patch +++ /dev/null @@ -1,3823 +0,0 @@ -From 07e14a3442d080bd4e873dc74e441296b8291ae2 Mon Sep 17 00:00:00 2001 -From: Marc 'risson' Schmitt -Date: Thu, 16 Nov 2023 13:26:16 +0100 -Subject: [PATCH 1/3] clone: update from pg-clone-schema - -Signed-off-by: Marc 'risson' Schmitt ---- - django_tenants/clone.py | 3407 ++++++++++++++++++++++++++++++++++----- - 1 file changed, 2977 insertions(+), 430 deletions(-) - -diff --git a/django_tenants/clone.py b/django_tenants/clone.py -index 426e81b8..3afce109 100644 ---- a/django_tenants/clone.py -+++ b/django_tenants/clone.py -@@ -6,24 +6,592 @@ - from django_tenants.utils import schema_exists - - CLONE_SCHEMA_FUNCTION = r""" ---- https://github.com/denishpatel/pg-clone-schema/ rev 0d3b522 -+-- https://github.com/denishpatel/pg-clone-schema/ rev 073922e - -- https://github.com/tomturner/django-tenants/issues/322 - ---- Function: clone_schema(text, text, boolean, boolean) -+do $$ -+<> -+DECLARE -+ cnt int; -+BEGIN -+ DROP TYPE IF EXISTS public.cloneparms CASCADE; -+ CREATE TYPE public.cloneparms AS ENUM ('DATA', 'NODATA','DDLONLY','NOOWNER','NOACL','VERBOSE','DEBUG','FILECOPY'); -+ -- END IF; -+end first_block $$; -+ -+ -+-- select * from public.get_insert_stmt_ddl('clone1','sample','address'); -+CREATE OR REPLACE FUNCTION public.get_insert_stmt_ddl( -+ source_schema text, -+ target_schema text, -+ atable text, -+ bTextCast boolean default False -+) -+RETURNS text -+LANGUAGE plpgsql VOLATILE -+AS -+$$ -+ DECLARE -+ -- the ddl we're building -+ v_insert_ddl text := ''; -+ v_cols text := ''; -+ v_cols_sel text := ''; -+ v_cnt int := 0; -+ v_colrec record; -+ v_schema text; -+ BEGIN -+ FOR v_colrec IN -+ SELECT c.column_name, c.data_type, c.udt_name, c.udt_schema, c.character_maximum_length, c.is_nullable, c.column_default, c.numeric_precision, c.numeric_scale, c.is_identity, c.identity_generation, c.is_generated -+ FROM 
information_schema.columns c WHERE (table_schema, table_name) = (source_schema, atable) ORDER BY ordinal_position -+ LOOP -+ IF v_colrec.udt_schema = 'public' THEN -+ v_schema = 'public'; -+ ELSE -+ v_schema = target_schema; -+ END IF; -+ -+ v_cnt = v_cnt + 1; -+ IF v_colrec.is_identity = 'YES' OR v_colrec.is_generated = 'ALWAYS' THEN -+ -- skip -+ continue; -+ END IF; -+ -+ IF v_colrec.data_type = 'USER-DEFINED' THEN -+ IF v_cols = '' THEN -+ v_cols = v_colrec.column_name; -+ IF bTextCast THEN -+ -- v_cols_sel = v_colrec.column_name || '::text::' || v_schema || '.' || v_colrec.udt_name; -+ IF v_schema = 'public' THEN -+ v_cols_sel = v_colrec.column_name || '::' || v_schema || '.' || v_colrec.udt_name; -+ ELSE -+ v_cols_sel = v_colrec.column_name || '::text::' || v_colrec.udt_name; -+ END IF; -+ ELSE -+ v_cols_sel = v_colrec.column_name || '::' || v_schema || '.' || v_colrec.udt_name; -+ END IF; -+ ELSE -+ v_cols = v_cols || ', ' || v_colrec.column_name; -+ IF bTextCast THEN -+ -- v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::text::' || v_schema || '.' || v_colrec.udt_name; -+ IF v_schema = 'public' THEN -+ v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::' || v_schema || '.' || v_colrec.udt_name; -+ ELSE -+ v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::text::' || v_colrec.udt_name; -+ END IF; -+ ELSE -+ v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name || '::' || v_schema || '.' || v_colrec.udt_name; -+ END IF; -+ END IF; -+ ELSE -+ IF v_cols = '' THEN -+ v_cols = v_colrec.column_name; -+ v_cols_sel = v_colrec.column_name; -+ ELSE -+ v_cols = v_cols || ', ' || v_colrec.column_name; -+ v_cols_sel = v_cols_sel || ', ' || v_colrec.column_name; -+ END IF; -+ END IF; -+ END LOOP; -+ -+ -- put it all together and return the insert statement -+ -- INSERT INTO clone1.address2 (id2, id3, addr) SELECT id2::text::clone1.udt_myint, id3::text::clone1.udt_myint, addr FROM sample.address; -+ v_insert_ddl = 'INSERT INTO ' || target_schema || '.' || atable || ' (' || v_cols || ') ' || 'SELECT ' || v_cols_sel || ' FROM ' || source_schema || '.' || atable || ';'; -+ RETURN v_insert_ddl; -+ END; -+$$; -+ -+ -+CREATE OR REPLACE FUNCTION public.get_table_ddl_complex( -+ src_schema text, -+ dst_schema text, -+ in_table text, -+ sq_server_version_num integer -+) -+RETURNS text -+LANGUAGE plpgsql VOLATILE -+AS -+$$ -+ DECLARE -+ v_table_ddl text; -+ v_buffer1 text; -+ v_buffer2 text; -+ -+ BEGIN -+ IF sq_server_version_num < 110000 THEN -+ SELECT 'CREATE TABLE ' -+ || quote_ident(dst_schema) -+ || '.' -+ || pc.relname -+ || E'(\n' -+ || string_agg( -+ pa.attname -+ || ' ' -+ || pg_catalog.format_type(pa.atttypid, pa.atttypmod) -+ || coalesce( -+ ' DEFAULT ' -+ || ( -+ SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid) -+ FROM pg_catalog.pg_attrdef d -+ WHERE d.adrelid = pa.attrelid -+ AND d.adnum = pa.attnum -+ AND pa.atthasdef -+ ), -+ '' -+ ) -+ || ' ' -+ || CASE pa.attnotnull -+ WHEN TRUE THEN 'NOT NULL' -+ ELSE 'NULL' -+ END, -+ E',\n' -+ ) -+ || coalesce( -+ ( -+ SELECT -+ E',\n' -+ || string_agg( -+ 'CONSTRAINT ' -+ || pc1.conname -+ || ' ' -+ || pg_get_constraintdef(pc1.oid), -+ E',\n' -+ ORDER BY pc1.conindid -+ ) -+ FROM pg_constraint pc1 -+ --Issue#103: do not return FKEYS for partitions since we assume it is implied by the one done on the parent table, otherwise error for trying to define it again. 
-+ WHERE pc1.conrelid = pa.attrelid -+ ), -+ '' -+ ) -+ INTO v_buffer1 -+ FROM pg_catalog.pg_attribute pa -+ JOIN pg_catalog.pg_class pc ON pc.oid = pa.attrelid -+ AND pc.relname = quote_ident(in_table) -+ JOIN pg_catalog.pg_namespace pn ON pn.oid = pc.relnamespace -+ AND pn.nspname = quote_ident(src_schema) -+ WHERE pa.attnum > 0 -+ AND NOT pa.attisdropped -+ GROUP BY pn.nspname, pc.relname, pa.attrelid; -+ -+ ELSE -+ SELECT 'CREATE TABLE ' -+ || quote_ident(dst_schema) -+ || '.' -+ || pc.relname -+ || E'(\n' -+ || string_agg( -+ pa.attname -+ || ' ' -+ || pg_catalog.format_type(pa.atttypid, pa.atttypmod) -+ || coalesce( -+ ' DEFAULT ' -+ || ( -+ SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid) -+ FROM pg_catalog.pg_attrdef d -+ WHERE d.adrelid = pa.attrelid -+ AND d.adnum = pa.attnum -+ AND pa.atthasdef -+ ), -+ '' -+ ) -+ || ' ' -+ || CASE pa.attnotnull -+ WHEN TRUE THEN 'NOT NULL' -+ ELSE 'NULL' -+ END, -+ E',\n' -+ ) -+ || coalesce( -+ ( -+ SELECT -+ E',\n' -+ || string_agg( -+ 'CONSTRAINT ' -+ || pc1.conname -+ || ' ' -+ || pg_get_constraintdef(pc1.oid), -+ E',\n' -+ ORDER BY pc1.conindid -+ ) -+ FROM pg_constraint pc1 -+ --Issue#103: do not return FKEYS for partitions since we assume it is implied by the one done on the parent table, otherwise error for trying to define it again. -+ WHERE pc1.conrelid = pa.attrelid AND pc1.conparentid = 0 -+ ), -+ '' -+ ) -+ INTO v_buffer1 -+ FROM pg_catalog.pg_attribute pa -+ JOIN pg_catalog.pg_class pc ON pc.oid = pa.attrelid -+ AND pc.relname = quote_ident(in_table) -+ JOIN pg_catalog.pg_namespace pn ON pn.oid = pc.relnamespace -+ AND pn.nspname = quote_ident(src_schema) -+ WHERE pa.attnum > 0 -+ AND NOT pa.attisdropped -+ GROUP BY pn.nspname, pc.relname, pa.attrelid; -+ END IF; -+ -+ -- append partition keyword to it -+ SELECT pg_catalog.pg_get_partkeydef(c.oid::pg_catalog.oid) into v_buffer2 -+ FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace -+ WHERE c.relname = quote_ident(in_table) COLLATE pg_catalog.default AND n.nspname = quote_ident(src_schema) COLLATE pg_catalog.default; -+ -+ v_table_ddl := v_buffer1 || ') PARTITION BY ' || v_buffer2 || ';'; -+ -+ RETURN v_table_ddl; -+ END; -+$$; -+ -+ -+-- SELECT * FROM public.get_table_ddl('sample', 'address', True); -+CREATE OR REPLACE FUNCTION public.get_table_ddl( -+ in_schema varchar, -+ in_table varchar, -+ bfkeys boolean -+) -+RETURNS text -+LANGUAGE plpgsql VOLATILE -+AS -+$$ -+ DECLARE -+ -- the ddl we're building -+ v_table_ddl text; -+ -+ -- data about the target table -+ v_table_oid int; -+ -+ -- records for looping -+ v_colrec record; -+ v_constraintrec record; -+ v_indexrec record; -+ v_primary boolean := False; -+ v_constraint_name text; -+ v_src_path_old text := ''; -+ v_src_path_new text := ''; -+ v_dummy text; -+ v_partbound text; -+ v_pgversion int; -+ v_parent text := ''; -+ v_relopts text := ''; -+ v_tablespace text; -+ v_partition_key text := ''; -+ v_temp text; -+ bPartitioned bool := False; -+ bInheritance bool := False; -+ bRelispartition bool; -+ constraintarr text[] := '{{}}'; -+ constraintelement text; -+ bSkip boolean; -+ -+ BEGIN -+ SELECT c.oid, ( -+ SELECT setting -+ FROM pg_settings -+ WHERE name = 'server_version_num') INTO v_table_oid, v_pgversion -+ FROM pg_catalog.pg_class c -+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace -+ WHERE c.relkind IN ('r', 'p') -+ AND c.relname = in_table -+ AND n.nspname = in_schema; -+ IF (v_table_oid IS NULL) THEN -+ RAISE EXCEPTION 'table does not exist'; -+ END IF; -+ -+ -- 
get user-defined tablespaces if applicable -+ SELECT TABLESPACE INTO v_temp -+ FROM pg_tables -+ WHERE schemaname = in_schema -+ AND tablename = in_table -+ AND TABLESPACE IS NOT NULL; -+ -- Issue#99 Fix: simple coding error! -+ -- IF v_tablespace IS NULL THEN -+ IF v_temp IS NULL THEN -+ v_tablespace := 'TABLESPACE pg_default'; -+ ELSE -+ v_tablespace := 'TABLESPACE ' || v_temp; -+ END IF; -+ -- also see if there are any SET commands for this table, ie, autovacuum_enabled=off, fillfactor=70 -+ WITH relopts AS ( -+ SELECT unnest(c.reloptions) relopts -+ FROM pg_class c, pg_namespace n -+ WHERE n.nspname = in_schema -+ AND n.oid = c.relnamespace -+ AND c.relname = in_table -+ ) -+ SELECT string_agg(r.relopts, ', ') AS relopts INTO v_temp -+ FROM relopts r; -+ IF v_temp IS NULL THEN -+ v_relopts := ''; -+ ELSE -+ v_relopts := ' WITH (' || v_temp || ')'; -+ END IF; -+ -+ -- Issue#61 FIX: set search_path = public before we do anything to force explicit schema qualification but dont forget to set it back before exiting... -+ SELECT setting INTO v_src_path_old FROM pg_settings WHERE name = 'search_path'; -+ -+ SELECT REPLACE(REPLACE(setting, '"$user"', '$user'), '$user', '"$user"') INTO v_src_path_old -+ FROM pg_settings -+ WHERE name = 'search_path'; -+ -- RAISE INFO 'DEBUG tableddl: saving old search_path: ***%***', v_src_path_old; -+ EXECUTE 'SET search_path = "public"'; -+ SELECT setting INTO v_src_path_new FROM pg_settings WHERE name = 'search_path'; -+ -+ -- grab the oid of the table; https://www.postgresql.org/docs/8.3/catalog-pg-class.html -+ SELECT c.oid INTO v_table_oid -+ FROM pg_catalog.pg_class c -+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace -+ WHERE 1 = 1 -+ AND c.relkind = 'r' -+ AND c.relname = in_table -+ AND n.nspname = in_schema; -+ -+ IF (v_table_oid IS NULL) THEN -+ -- Dont give up yet. It might be a partitioned table -+ SELECT c.oid INTO v_table_oid -+ FROM pg_catalog.pg_class c -+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace -+ WHERE 1 = 1 -+ AND c.relkind = 'p' -+ AND c.relname = in_table -+ AND n.nspname = in_schema; -+ -+ IF (v_table_oid IS NULL) THEN -+ RAISE EXCEPTION 'table does not exist'; -+ END IF; -+ bPartitioned := True; -+ END IF; -+ IF v_pgversion < 100000 THEN -+ SELECT c2.relname parent INTO v_parent -+ FROM pg_class c1, pg_namespace n, pg_inherits i, pg_class c2 -+ WHERE n.nspname = in_schema -+ AND n.oid = c1.relnamespace -+ AND c1.relname = in_table -+ AND c1.oid = i.inhrelid -+ AND i.inhparent = c2.oid -+ AND c1.relkind = 'r'; -+ -+ IF (v_parent IS NOT NULL) THEN -+ bPartitioned := True; -+ bInheritance := True; -+ END IF; -+ ELSE -+ SELECT c2.relname parent, c1.relispartition, pg_get_expr(c1.relpartbound, c1.oid, TRUE) INTO v_parent, bRelispartition, v_partbound -+ FROM pg_class c1, pg_namespace n, pg_inherits i, pg_class c2 -+ WHERE n.nspname = in_schema -+ AND n.oid = c1.relnamespace -+ AND c1.relname = in_table -+ AND c1.oid = i.inhrelid -+ AND i.inhparent = c2.oid -+ AND c1.relkind = 'r'; -+ -+ IF (v_parent IS NOT NULL) THEN -+ bPartitioned := True; -+ IF bRelispartition THEN -+ bInheritance := False; -+ ELSE -+ bInheritance := True; -+ END IF; -+ END IF; -+ END IF; -+ -- RAISE NOTICE 'version=% schema=% parent=% relopts=% tablespace=% partitioned=% inherited=% relispartition=%',v_pgversion, in_schema, v_parent, v_relopts, v_tablespace, bPartitioned, bInheritance, bRelispartition; -+ -+ -- start the create definition -+ v_table_ddl := 'CREATE TABLE ' || in_schema || '.' 
|| in_table || ' (' || E'\n'; -+ -+ -- define all of the columns in the table; https://stackoverflow.com/a/8153081/3068233 -+ FOR v_colrec IN -+ SELECT c.column_name, c.data_type, c.udt_name, c.udt_schema, c.character_maximum_length, c.is_nullable, c.column_default, c.numeric_precision, c.numeric_scale, c.is_identity, c.identity_generation -+ FROM information_schema.columns c -+ WHERE (table_schema, table_name) = (in_schema, in_table) -+ ORDER BY ordinal_position -+ LOOP -+ v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column -+ || v_colrec.column_name || ' ' -+ -- FIX #82, FIX #100 as well by adding 'citext' to the list -+ -- FIX #105 by overriding the previous fixes (#82, #100), which presumed "public" was always the schema for extensions. It could be a custom schema. -+ -- so assume udt_schema for all USER-DEFINED datatypes -+ -- || CASE WHEN v_colrec.udt_name in ('geometry', 'box2d', 'box2df', 'box3d', 'geography', 'geometry_dump', 'gidx', 'spheroid', 'valid_detail','citext') -+ -- THEN v_colrec.udt_name -+ || CASE WHEN v_colrec.data_type = 'USER-DEFINED' -+ -- THEN in_schema || '.' || v_colrec.udt_name ELSE v_colrec.data_type END -+ THEN v_colrec.udt_schema || '.' || v_colrec.udt_name ELSE v_colrec.data_type END -+ || CASE WHEN v_colrec.is_identity = 'YES' -+ THEN -+ CASE WHEN v_colrec.identity_generation = 'ALWAYS' -+ THEN ' GENERATED ALWAYS AS IDENTITY' ELSE ' GENERATED BY DEFAULT AS IDENTITY' END ELSE '' END -+ || CASE WHEN v_colrec.character_maximum_length IS NOT NULL -+ THEN ('(' || v_colrec.character_maximum_length || ')') -+ WHEN v_colrec.numeric_precision > 0 AND v_colrec.numeric_scale > 0 -+ THEN '(' || v_colrec.numeric_precision || ',' || v_colrec.numeric_scale || ')' -+ ELSE '' END || ' ' -+ || CASE WHEN v_colrec.is_nullable = 'NO' -+ THEN 'NOT NULL' ELSE 'NULL' END -+ || CASE WHEN v_colrec.column_default IS NOT null -+ THEN (' DEFAULT ' || v_colrec.column_default) ELSE '' END -+ || ',' || E'\n'; -+ END LOOP; -+ -- define all the constraints in the; https://www.postgresql.org/docs/9.1/catalog-pg-constraint.html && https://dba.stackexchange.com/a/214877/75296 -+ -- Issue#103: do not get foreign keys for partitions since they are defined on the parent and this will cause an "already exists" error otherwise -+ -- Also conparentid is not in V10, so bypass since we do not have FKEYS in partitioned tables in V10 -+ IF v_pgversion < 110000 THEN -+ FOR v_constraintrec IN -+ SELECT -+ con.conname as constraint_name, -+ con.contype as constraint_type, -+ CASE -+ WHEN con.contype = 'p' THEN 1 -- primary key constraint -+ WHEN con.contype = 'u' THEN 2 -- unique constraint -+ WHEN con.contype = 'f' THEN 3 -- foreign key constraint -+ WHEN con.contype = 'c' THEN 4 -+ ELSE 5 -+ END as type_rank, -+ pg_get_constraintdef(con.oid) as constraint_definition -+ FROM pg_catalog.pg_constraint con -+ JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid -+ JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace -+ WHERE nsp.nspname = in_schema -+ AND rel.relname = in_table -+ ORDER BY type_rank -+ LOOP -+ -- Issue#85 fix -+ -- constraintarr := constraintarr || v_constraintrec.constraint_name; -+ constraintarr := constraintarr || v_constraintrec.constraint_name::text; -+ IF v_constraintrec.type_rank = 1 THEN -+ v_primary := True; -+ v_constraint_name := v_constraintrec.constraint_name; -+ END IF; -+ IF NOT bfkeys AND v_constraintrec.constraint_type = 'f' THEN -+ continue; -+ END IF; -+ v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to 
indent the column -+ || 'CONSTRAINT' || ' ' -+ || v_constraintrec.constraint_name || ' ' -+ || v_constraintrec.constraint_definition -+ || ',' || E'\n'; -+ END LOOP; -+ ELSE -+ FOR v_constraintrec IN -+ SELECT -+ con.conname as constraint_name, -+ con.contype as constraint_type, -+ CASE -+ WHEN con.contype = 'p' THEN 1 -- primary key constraint -+ WHEN con.contype = 'u' THEN 2 -- unique constraint -+ WHEN con.contype = 'f' THEN 3 -- foreign key constraint -+ WHEN con.contype = 'c' THEN 4 -+ ELSE 5 -+ END as type_rank, -+ pg_get_constraintdef(con.oid) as constraint_definition -+ FROM pg_catalog.pg_constraint con -+ JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid -+ JOIN pg_catalog.pg_namespace nsp ON nsp.oid = connamespace -+ WHERE nsp.nspname = in_schema -+ AND rel.relname = in_table -+ -- Issue#103: do not get partitioned tables -+ AND con.conparentid = 0 -+ ORDER BY type_rank -+ LOOP -+ -- Issue#85 fix -+ -- constraintarr := constraintarr || v_constraintrec.constraint_name; -+ constraintarr := constraintarr || v_constraintrec.constraint_name::text; -+ IF v_constraintrec.type_rank = 1 THEN -+ v_primary := True; -+ v_constraint_name := v_constraintrec.constraint_name; -+ END IF; -+ IF NOT bfkeys AND v_constraintrec.constraint_type = 'f' THEN -+ continue; -+ END IF; -+ v_table_ddl := v_table_ddl || ' ' -- note: two char spacer to start, to indent the column -+ || 'CONSTRAINT' || ' ' -+ || v_constraintrec.constraint_name || ' ' -+ || v_constraintrec.constraint_definition -+ || ',' || E'\n'; -+ END LOOP; -+ END IF; -+ -+ -- drop the last comma before ending the create statement -+ v_table_ddl = substr(v_table_ddl, 0, length(v_table_ddl) - 1) || E'\n'; -+ -- end the create table def but add inherits clause if valid -+ IF bPartitioned and bInheritance THEN -+ v_table_ddl := v_table_ddl || ') INHERITS (' || in_schema || '.' || v_parent || ') ' || v_relopts || ' ' || v_tablespace || ';' || E'\n'; -+ ELSIF v_pgversion >= 100000 AND bPartitioned and NOT bInheritance THEN -+ -- See if this is a partitioned table (pg_class.relkind = 'p') and add the partitioned key -+ SELECT pg_get_partkeydef (c1.oid) AS partition_key INTO v_partition_key -+ FROM pg_class c1 -+ JOIN pg_namespace n ON (n.oid = c1.relnamespace) -+ LEFT JOIN pg_partitioned_table p ON (c1.oid = p.partrelid) -+ WHERE n.nspname = in_schema -+ AND n.oid = c1.relnamespace -+ AND c1.relname = in_table -+ AND c1.relkind = 'p'; -+ END IF; -+ IF v_partition_key IS NOT NULL AND v_partition_key <> '' THEN -+ -- add partition clause -+ -- NOTE: cannot specify default tablespace for partitioned relations -+ v_table_ddl := v_table_ddl || ') PARTITION BY ' || v_partition_key || ';' || E'\n'; -+ ELSIF bPartitioned AND not bInheritance THEN -+ IF v_relopts <> '' THEN -+ v_table_ddl := 'CREATE TABLE ' || in_schema || '.' || in_table || ' PARTITION OF ' || in_schema || '.' || v_parent || ' ' || v_partbound || v_relopts || ' ' || v_tablespace || '; ' || E'\n'; -+ ELSE -+ v_table_ddl := 'CREATE TABLE ' || in_schema || '.' || in_table || ' PARTITION OF ' || in_schema || '.' 
|| v_parent || ' ' || v_partbound || ' ' || v_tablespace || '; ' || E'\n'; -+ END IF; -+ ELSIF bPartitioned and bInheritance THEN -+ -- we already did this above -+ v_table_ddl := v_table_ddl; -+ ELSIF v_relopts <> '' THEN -+ v_table_ddl := v_table_ddl || ') ' || v_relopts || ' ' || v_tablespace || ';' || E'\n'; -+ ELSE -+ v_table_ddl := v_table_ddl || ') ' || v_tablespace || ';' || E'\n'; -+ END IF; -+ -- suffix create statement with all of the indexes on the table -+ FOR v_indexrec IN -+ SELECT indexdef, indexname -+ FROM pg_indexes -+ WHERE (schemaname, tablename) = (in_schema, in_table) -+ LOOP -+ -- Issue#83 fix: loop through constraints and skip ones already defined -+ bSkip = False; -+ FOREACH constraintelement IN ARRAY constraintarr -+ LOOP -+ IF constraintelement = v_indexrec.indexname THEN -+ bSkip = True; -+ EXIT; -+ END IF; -+ END LOOP; -+ if bSkip THEN CONTINUE; END IF; -+ v_table_ddl := v_table_ddl -+ || v_indexrec.indexdef -+ || ';' || E'\n'; -+ END LOOP; -+ -+ -- reset search_path back to what it was -+ IF v_src_path_old = '' THEN -+ SELECT set_config('search_path', '', false) into v_dummy; -+ ELSE -+ EXECUTE 'SET search_path = ' || v_src_path_old; -+ END IF; -+ -- RAISE NOTICE 'DEBUG tableddl: reset search_path back to ***%***', v_src_path_old; -+ -+ -- return the ddl -+ RETURN v_table_ddl; -+ END; -+$$; - ---- DROP FUNCTION clone_schema(text, text, boolean, boolean); - -+-- Function: clone_schema(text, text, boolean, boolean, boolean) -+-- DROP FUNCTION clone_schema(text, text, boolean, boolean, boolean); -+-- DROP FUNCTION IF EXISTS public.clone_schema(text, text, boolean, boolean); -+ -+DROP FUNCTION IF EXISTS public.clone_schema(text, text, cloneparms[]); - CREATE OR REPLACE FUNCTION public.clone_schema( - source_schema text, - dest_schema text, -- include_recs boolean, -- ddl_only boolean) -+ VARIADIC arr public.cloneparms[] DEFAULT '{{}}':: public.cloneparms[]) - RETURNS void AS - $BODY$ - - -- This function will clone all sequences, tables, data, views & functions from any existing schema to a new one - -- SAMPLE CALL: ---- SELECT clone_schema('public', 'new_schema', True, False); -+-- SELECT clone_schema('sample', 'sample_clone2'); - - DECLARE - src_oid oid; -@@ -32,20 +600,37 @@ - object text; - buffer text; - buffer2 text; -+ buffer3 text; - srctbl text; -+ aname text; - default_ text; - column_ text; - qry text; - ix_old_name text; - ix_new_name text; -+ relpersist text; -+ udt_name text; -+ udt_schema text; -+ bRelispart bool; -+ bChild bool; -+ relknd text; -+ data_type text; -+ ocomment text; -+ adef text; - dest_qry text; - v_def text; -+ part_range text; - src_path_old text; -+ src_path_new text; - aclstr text; -+ -- issue#80 initialize arrays properly -+ tblarray text[] := '{{}}'; -+ tblarray2 text[] := '{{}}'; -+ tblarray3 text[] := '{{}}'; -+ tblelement text; - grantor text; - grantee text; - privs text; -- records_count bigint; - seqval bigint; - sq_last_value bigint; - sq_max_value bigint; -@@ -53,16 +638,28 @@ - sq_increment_by bigint; - sq_min_value bigint; - sq_cache_value bigint; -- sq_is_called boolean; -+ sq_is_called boolean := True; - sq_is_cycled boolean; -+ is_prokind boolean; -+ abool boolean; - sq_data_type text; - sq_cycled char(10); -+ sq_owned text; -+ sq_version text; -+ sq_server_version text; -+ sq_server_version_num integer; -+ bWindows boolean; - arec RECORD; - cnt integer; -+ cnt1 integer; - cnt2 integer; -- seq_cnt integer; -+ cnt3 integer; -+ cnt4 integer; - pos integer; -+ tblscopied integer := 0; -+ l_child integer; - action 
text := 'N/A'; -+ tblname text; - v_ret text; - v_diag1 text; - v_diag2 text; -@@ -70,48 +667,209 @@ - v_diag4 text; - v_diag5 text; - v_diag6 text; -+ v_dummy text; -+ spath text; -+ spath_tmp text; -+ -- issue#86 fix -+ isGenerated text; -+ -+ -- issue#91 fix -+ tblowner text; -+ func_owner text; -+ func_name text; -+ func_args text; -+ func_argno integer; -+ view_owner text; -+ -+ -- issue#92 -+ calleruser text; -+ -+ -- issue#94 -+ bData boolean := False; -+ bDDLOnly boolean := False; -+ bVerbose boolean := False; -+ bDebug boolean := False; -+ bNoACL boolean := False; -+ bNoOwner boolean := False; -+ arglen integer; -+ vargs text; -+ avarg public.cloneparms; -+ -+ -- issue#98 -+ mvarray text[] := '{{}}'; -+ mvscopied integer := 0; -+ -+ -- issue#99 tablespaces -+ tblspace text; -+ -+ -- issue#101 -+ bFileCopy boolean := False; -+ -+ t timestamptz := clock_timestamp(); -+ r timestamptz; -+ s timestamptz; -+ lastsql text := ''; -+ v_version text := '1.19 September 07, 2023'; - - BEGIN -+ -- Make sure NOTICE are shown -+ SET client_min_messages = 'notice'; -+ RAISE NOTICE 'clone_schema version %', v_version; -+ -+ IF 'DEBUG' = ANY ($3) THEN bDebug = True; END IF; -+ IF 'VERBOSE' = ANY ($3) THEN bVerbose = True; END IF; -+ -+ -- IF bVerbose THEN RAISE NOTICE 'START: %',clock_timestamp() - t; END IF; -+ -+ arglen := array_length($3, 1); -+ IF arglen IS NULL THEN -+ -- nothing to do, so defaults are assumed -+ NULL; -+ ELSE -+ -- loop thru args -+ -- IF 'NO_TRIGGERS' = ANY ($3) -+ -- select array_to_string($3, ',', '***') INTO vargs; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: arguments=%', $3; END IF; -+ FOREACH avarg IN ARRAY $3 LOOP -+ IF bDebug THEN RAISE NOTICE 'DEBUG: arg=%', avarg; END IF; -+ IF avarg = 'DATA' THEN -+ bData = True; -+ ELSEIF avarg = 'NODATA' THEN -+ -- already set to that by default -+ bData = False; -+ ELSEIF avarg = 'DDLONLY' THEN -+ bDDLOnly = True; -+ ELSEIF avarg = 'NOACL' THEN -+ bNoACL = True; -+ ELSEIF avarg = 'NOOWNER' THEN -+ bNoOwner = True; -+ -- issue#101 fix -+ ELSEIF avarg = 'FILECOPY' THEN -+ bFileCopy = True; -+ END IF; -+ END LOOP; -+ IF bData and bDDLOnly THEN -+ RAISE WARNING 'You can only specify DDLONLY or DATA, but not both.'; -+ RETURN; -+ END IF; -+ END IF; -+ -+ -- Get server version info to handle certain things differently based on the version. -+ SELECT setting INTO sq_server_version -+ FROM pg_settings -+ WHERE name = 'server_version'; -+ SELECT version() INTO sq_version; -+ -+ IF POSITION('compiled by Visual C++' IN sq_version) > 0 THEN -+ bWindows = True; -+ RAISE NOTICE 'Windows: %', sq_version; -+ ELSE -+ bWindows = False; -+ RAISE NOTICE 'Linux: %', sq_version; -+ END IF; -+ SELECT setting INTO sq_server_version_num -+ FROM pg_settings -+ WHERE name = 'server_version_num'; -+ -+ IF sq_server_version_num < 100000 THEN -+ IF sq_server_version_num > 90600 THEN -+ RAISE WARNING 'Server Version:% Number:% PG Versions older than v10 are not supported. Will try however for PG 9.6...', sq_server_version, sq_server_version_num; -+ ELSE -+ RAISE WARNING 'Server Version:% Number:% PG Versions older than v10 are not supported. 
You need to be at minimum version 9.6 to at least try', sq_server_version, sq_server_version_num; -+ RETURN; -+ END IF; -+ END IF; - - -- Check that source_schema exists - SELECT oid INTO src_oid -- FROM pg_namespace -- WHERE nspname = quote_ident(source_schema); -+ FROM pg_namespace -+ WHERE nspname = quote_ident(source_schema); -+ - IF NOT FOUND - THEN -- RAISE NOTICE 'source schema % does not exist!', source_schema; -+ RAISE NOTICE ' source schema % does not exist!', source_schema; - RETURN ; - END IF; - -+ -- Check for case-sensitive target schemas and reject them for now. -+ SELECT lower(dest_schema) = dest_schema INTO abool; -+ IF not abool THEN -+ RAISE NOTICE 'Case-sensitive target schemas are not supported at this time.'; -+ RETURN; -+ END IF; -+ - -- Check that dest_schema does not yet exist - PERFORM nspname -- FROM pg_namespace -- WHERE nspname = quote_ident(dest_schema); -+ FROM pg_namespace -+ WHERE nspname = quote_ident(dest_schema); -+ - IF FOUND - THEN -- RAISE NOTICE 'dest schema % already exists!', dest_schema; -+ RAISE NOTICE ' dest schema % already exists!', dest_schema; - RETURN ; - END IF; -- IF ddl_only and include_recs THEN -+ IF bDDLOnly and bData THEN - RAISE WARNING 'You cannot specify to clone data and generate ddl at the same time.'; - RETURN ; - END IF; - -+ -- Issue#92 -+ SELECT current_user into calleruser; -+ - -- Set the search_path to source schema. Before exiting set it back to what it was before. -- SELECT setting INTO src_path_old FROM pg_settings WHERE name='search_path'; -+ -- In order to avoid issues with the special schema name "$user" that may be -+ -- returned unquoted by some applications, we ensure it remains double quoted. -+ -- MJV FIX: #47 -+ SELECT setting INTO v_dummy FROM pg_settings WHERE name='search_path'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path=%', v_dummy; END IF; -+ -+ SELECT REPLACE(REPLACE(setting, '"$user"', '$user'), '$user', '"$user"') INTO src_path_old -+ FROM pg_settings WHERE name = 'search_path'; -+ -+ IF bDebug THEN RAISE NOTICE 'DEBUG: src_path_old=%', src_path_old; END IF; -+ - EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; -- -- RAISE NOTICE 'Using source search_path=%', buffer; -+ SELECT setting INTO src_path_new FROM pg_settings WHERE name='search_path'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: new search_path=%', src_path_new; END IF; - - -- Validate required types exist. If not, create them. 
-- select a.objtypecnt, b.permtypecnt INTO cnt, cnt2 FROM -- (SELECT count(*) as objtypecnt FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace -- WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) -- AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid) -- AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema' AND pg_catalog.pg_type_is_visible(t.oid) AND pg_catalog.format_type(t.oid, NULL) = 'obj_type') a, -- (SELECT count(*) as permtypecnt FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace -- WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) -- AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid) -- AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema' AND pg_catalog.pg_type_is_visible(t.oid) AND pg_catalog.format_type(t.oid, NULL) = 'perm_type') b; -+ SELECT a.objtypecnt, b.permtypecnt INTO cnt, cnt2 -+ FROM ( -+ SELECT count(*) AS objtypecnt -+ FROM pg_catalog.pg_type t -+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace -+ WHERE (t.typrelid = 0 -+ OR ( -+ SELECT c.relkind = 'c' -+ FROM pg_catalog.pg_class c -+ WHERE c.oid = t.typrelid)) -+ AND NOT EXISTS ( -+ SELECT 1 -+ FROM pg_catalog.pg_type el -+ WHERE el.oid = t.typelem -+ AND el.typarray = t.oid) -+ AND n.nspname <> 'pg_catalog' -+ AND n.nspname <> 'information_schema' -+ AND pg_catalog.pg_type_is_visible(t.oid) -+ AND pg_catalog.format_type(t.oid, NULL) = 'obj_type') a, ( -+ SELECT count(*) AS permtypecnt -+ FROM pg_catalog.pg_type t -+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace -+ WHERE (t.typrelid = 0 -+ OR ( -+ SELECT c.relkind = 'c' -+ FROM pg_catalog.pg_class c -+ WHERE c.oid = t.typrelid)) -+ AND NOT EXISTS ( -+ SELECT 1 -+ FROM pg_catalog.pg_type el -+ WHERE el.oid = t.typelem -+ AND el.typarray = t.oid) -+ AND n.nspname <> 'pg_catalog' -+ AND n.nspname <> 'information_schema' -+ AND pg_catalog.pg_type_is_visible(t.oid) -+ AND pg_catalog.format_type(t.oid, NULL) = 'perm_type') b; -+ - IF cnt = 0 THEN - CREATE TYPE obj_type AS ENUM ('TABLE','VIEW','COLUMN','SEQUENCE','FUNCTION','SCHEMA','DATABASE'); - END IF; -@@ -119,53 +877,148 @@ - CREATE TYPE perm_type AS ENUM ('SELECT','INSERT','UPDATE','DELETE','TRUNCATE','REFERENCES','TRIGGER','USAGE','CREATE','EXECUTE','CONNECT','TEMPORARY'); - END IF; - -- IF ddl_only THEN -- RAISE NOTICE 'Only generating DDL, not actually creating anything...'; -+ -- Issue#95 -+ SELECT pg_catalog.pg_get_userbyid(nspowner) INTO buffer FROM pg_namespace WHERE nspname = quote_ident(source_schema); -+ -+ IF bDDLOnly THEN -+ RAISE NOTICE ' Only generating DDL, not actually creating anything...'; -+ -- issue#95 -+ IF bNoOwner THEN -+ RAISE INFO 'CREATE SCHEMA %;', quote_ident(dest_schema); -+ ELSE -+ RAISE INFO 'CREATE SCHEMA % AUTHORIZATION %;', quote_ident(dest_schema), buffer; -+ END IF; -+ RAISE NOTICE 'SET search_path=%;', quote_ident(dest_schema); -+ ELSE -+ -- issue#95 -+ IF bNoOwner THEN -+ EXECUTE 'CREATE SCHEMA ' || quote_ident(dest_schema) ; -+ ELSE -+ EXECUTE 'CREATE SCHEMA ' || quote_ident(dest_schema) || ' AUTHORIZATION ' || buffer; -+ END IF; - END IF; - -- IF ddl_only THEN -- RAISE NOTICE '%', 'CREATE SCHEMA ' || quote_ident(dest_schema); -+ -- Do system table validations for subsequent system table queries -+ -- Issue#65 Fix -+ SELECT count(*) into cnt -+ 
FROM pg_attribute -+ WHERE attrelid = 'pg_proc'::regclass AND attname = 'prokind'; -+ -+ IF cnt = 0 THEN -+ is_prokind = False; - ELSE -- EXECUTE 'CREATE SCHEMA ' || quote_ident(dest_schema) ; -+ is_prokind = True; - END IF; - - -- MV: Create Collations - action := 'Collations'; - cnt := 0; -- FOR arec IN -- SELECT n.nspname as schemaname, a.rolname as ownername , c.collname, c.collprovider, c.collcollate as locale, -- 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || CASE WHEN c.collprovider = 'i' THEN 'icu' WHEN c.collprovider = 'c' THEN 'libc' ELSE '' END || ', locale = ''' || c.collcollate || ''');' as COLL_DDL -- FROM pg_collation c JOIN pg_namespace n ON (c.collnamespace = n.oid) JOIN pg_roles a ON (c.collowner = a.oid) WHERE n.nspname = quote_ident(source_schema) order by c.collname -- LOOP -- BEGIN -- cnt := cnt + 1; -- IF ddl_only THEN -- RAISE INFO '%', arec.coll_ddl; -- ELSE -- EXECUTE arec.coll_ddl; -- END IF; -- END; -- END LOOP; -+ -- Issue#96 Handle differently based on PG Versions (PG15 rely on colliculocale, not collcolocate) -+ -- perhaps use this logic instead: COALESCE(c.collcollate, c.colliculocale) AS lc_collate, COALESCE(c.collctype, c.colliculocale) AS lc_type -+ IF sq_server_version_num > 150000 THEN -+ FOR arec IN -+ SELECT n.nspname AS schemaname, a.rolname AS ownername, c.collname, c.collprovider, c.collcollate AS locale, -+ 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || -+ CASE WHEN c.collprovider = 'i' THEN 'icu' WHEN c.collprovider = 'c' THEN 'libc' ELSE '' END || -+ ', locale = ''' || c.colliculocale || ''');' AS COLL_DDL -+ FROM pg_collation c -+ JOIN pg_namespace n ON (c.collnamespace = n.oid) -+ JOIN pg_roles a ON (c.collowner = a.oid) -+ WHERE n.nspname = quote_ident(source_schema) -+ ORDER BY c.collname -+ LOOP -+ BEGIN -+ cnt := cnt + 1; -+ IF bDDLOnly THEN -+ RAISE INFO '%', arec.coll_ddl; -+ ELSE -+ EXECUTE arec.coll_ddl; -+ END IF; -+ END; -+ END LOOP; -+ ELSIF sq_server_version_num > 100000 THEN -+ FOR arec IN -+ SELECT n.nspname AS schemaname, a.rolname AS ownername, c.collname, c.collprovider, c.collcollate AS locale, -+ 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || -+ CASE WHEN c.collprovider = 'i' THEN 'icu' WHEN c.collprovider = 'c' THEN 'libc' ELSE '' END || -+ ', locale = ''' || c.collcollate || ''');' AS COLL_DDL -+ FROM pg_collation c -+ JOIN pg_namespace n ON (c.collnamespace = n.oid) -+ JOIN pg_roles a ON (c.collowner = a.oid) -+ WHERE n.nspname = quote_ident(source_schema) -+ ORDER BY c.collname -+ LOOP -+ BEGIN -+ cnt := cnt + 1; -+ IF bDDLOnly THEN -+ RAISE INFO '%', arec.coll_ddl; -+ ELSE -+ EXECUTE arec.coll_ddl; -+ END IF; -+ END; -+ END LOOP; -+ ELSE -+ -- handle 9.6 that is missing some columns in pg_collation -+ FOR arec IN -+ SELECT n.nspname AS schemaname, a.rolname AS ownername, c.collname, c.collcollate AS locale, -+ 'CREATE COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" (provider = ' || -+ ', locale = ''' || c.collcollate || ''');' AS COLL_DDL -+ FROM pg_collation c -+ JOIN pg_namespace n ON (c.collnamespace = n.oid) -+ JOIN pg_roles a ON (c.collowner = a.oid) -+ WHERE n.nspname = quote_ident(source_schema) -+ ORDER BY c.collname -+ LOOP -+ BEGIN -+ cnt := cnt + 1; -+ IF bDDLOnly THEN -+ RAISE INFO '%', arec.coll_ddl; -+ ELSE -+ EXECUTE arec.coll_ddl; -+ END IF; -+ END; -+ END LOOP; -+ END IF; - RAISE NOTICE ' COLLATIONS cloned: %', LPAD(cnt::text, 5, ' '); - - -- MV: 
Create Domains - action := 'Domains'; - cnt := 0; - FOR arec IN -- SELECT n.nspname as "Schema", t.typname as "Name", pg_catalog.format_type(t.typbasetype, t.typtypmod) as "Type", -- (SELECT c.collname FROM pg_catalog.pg_collation c, pg_catalog.pg_type bt WHERE c.oid = t.typcollation AND -- bt.oid = t.typbasetype AND t.typcollation <> bt.typcollation) as "Collation", -- CASE WHEN t.typnotnull THEN 'not null' END as "Nullable", t.typdefault as "Default", -- pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM pg_catalog.pg_constraint r WHERE t.oid = r.contypid), ' ') as "Check", -- 'CREATE DOMAIN ' || quote_ident(dest_schema) || '.' || t.typname || ' AS ' || pg_catalog.format_type(t.typbasetype, t.typtypmod) || -- CASE WHEN t.typnotnull IS NOT NULL THEN ' NOT NULL ' ELSE ' ' END || CASE WHEN t.typdefault IS NOT NULL THEN 'DEFAULT ' || t.typdefault || ' ' ELSE ' ' END || -- pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM pg_catalog.pg_constraint r WHERE t.oid = r.contypid), ' ') || ';' AS DOM_DDL -- FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace -- WHERE t.typtype = 'd' AND n.nspname = quote_ident(source_schema) AND pg_catalog.pg_type_is_visible(t.oid) ORDER BY 1, 2 -+ SELECT n.nspname AS "Schema", t.typname AS "Name", pg_catalog.format_type(t.typbasetype, t.typtypmod) AS "Type", ( -+ SELECT c.collname -+ FROM pg_catalog.pg_collation c, pg_catalog.pg_type bt -+ WHERE c.oid = t.typcollation -+ AND bt.oid = t.typbasetype -+ AND t.typcollation <> bt.typcollation) AS "Collation", CASE WHEN t.typnotnull THEN -+ 'not null' -+ END AS "Nullable", t.typdefault AS "Default", pg_catalog.array_to_string(ARRAY ( -+ SELECT pg_catalog.pg_get_constraintdef(r.oid, TRUE) -+ FROM pg_catalog.pg_constraint r -+ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on t.typename -+ WHERE t.oid = r.contypid), ' ') AS "Check", 'CREATE DOMAIN ' || quote_ident(dest_schema) || '.' || quote_ident(t.typname) || ' AS ' || pg_catalog.format_type(t.typbasetype, t.typtypmod) || -+ CASE WHEN t.typnotnull IS NOT NULL THEN -+ ' NOT NULL ' -+ ELSE -+ ' ' -+ END || CASE WHEN t.typdefault IS NOT NULL THEN -+ 'DEFAULT ' || t.typdefault || ' ' -+ ELSE -+ ' ' -+ END || pg_catalog.array_to_string(ARRAY ( -+ SELECT pg_catalog.pg_get_constraintdef(r.oid, TRUE) -+ FROM pg_catalog.pg_constraint r -+ WHERE t.oid = r.contypid), ' ') || ';' AS DOM_DDL -+ FROM pg_catalog.pg_type t -+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace -+ WHERE t.typtype = 'd' -+ AND n.nspname = quote_ident(source_schema) -+ AND pg_catalog.pg_type_is_visible(t.oid) -+ ORDER BY 1, 2 - LOOP - BEGIN - cnt := cnt + 1; -- IF ddl_only THEN -+ IF bDDLOnly THEN - RAISE INFO '%', arec.dom_ddl; - ELSE - EXECUTE arec.dom_ddl; -@@ -177,36 +1030,70 @@ - -- MV: Create types - action := 'Types'; - cnt := 0; -+ lastsql = ''; - FOR arec IN -- SELECT c.relkind, n.nspname AS schemaname, t.typname AS typname, t.typcategory, CASE WHEN t.typcategory='C' THEN -- 'CREATE TYPE ' || quote_ident(dest_schema) || '.' || t.typname || ' AS (' || array_to_string(array_agg(a.attname || ' ' || pg_catalog.format_type(a.atttypid, a.atttypmod) ORDER BY c.relname, a.attnum),', ') || ');' -- WHEN t.typcategory='E' THEN -- 'CREATE TYPE ' || quote_ident(dest_schema) || '.' 
|| t.typname || ' AS ENUM (' || REPLACE(quote_literal(array_to_string(array_agg(e.enumlabel ORDER BY e.enumsortorder),',')), ',', ''',''') || ');' -- ELSE '' END AS type_ddl FROM pg_type t JOIN pg_namespace n ON (n.oid = t.typnamespace) -- LEFT JOIN pg_enum e ON (t.oid = e.enumtypid) -- LEFT JOIN pg_class c ON (c.reltype = t.oid) LEFT JOIN pg_attribute a ON (a.attrelid = c.oid) -- WHERE n.nspname = quote_ident(source_schema) and (c.relkind IS NULL or c.relkind = 'c') and t.typcategory in ('C', 'E') group by 1,2,3,4 order by n.nspname, t.typcategory, t.typname -+ -- Fixed Issue#108:enclose double-quote roles with special characters for setting "OWNER TO" -+ -- SELECT c.relkind, n.nspname AS schemaname, t.typname AS typname, t.typcategory, pg_catalog.pg_get_userbyid(t.typowner) AS owner, CASE WHEN t.typcategory = 'C' THEN -+ SELECT c.relkind, n.nspname AS schemaname, t.typname AS typname, t.typcategory, '"' || pg_catalog.pg_get_userbyid(t.typowner) || '"' AS owner, CASE WHEN t.typcategory = 'C' THEN -+ 'CREATE TYPE ' || quote_ident(dest_schema) || '.' || t.typname || ' AS (' || array_to_string(array_agg(a.attname || ' ' || pg_catalog.format_type(a.atttypid, a.atttypmod) -+ ORDER BY c.relname, a.attnum), ', ') || ');' -+ WHEN t.typcategory = 'E' THEN -+ 'CREATE TYPE ' || quote_ident(dest_schema) || '.' || t.typname || ' AS ENUM (' || REPLACE(quote_literal(array_to_string(array_agg(e.enumlabel ORDER BY e.enumsortorder), ',')), ',', ''',''') || ');' -+ ELSE -+ '' -+ END AS type_ddl -+ FROM pg_type t -+ JOIN pg_namespace n ON (n.oid = t.typnamespace) -+ LEFT JOIN pg_enum e ON (t.oid = e.enumtypid) -+ LEFT JOIN pg_class c ON (c.reltype = t.oid) -+ LEFT JOIN pg_attribute a ON (a.attrelid = c.oid) -+ WHERE n.nspname = quote_ident(source_schema) -+ AND (c.relkind IS NULL -+ OR c.relkind = 'c') -+ AND t.typcategory IN ('C', 'E') -+ GROUP BY 1, 2, 3, 4, 5 -+ ORDER BY n.nspname, t.typcategory, t.typname -+ - LOOP - BEGIN - cnt := cnt + 1; - -- Keep composite and enum types in separate branches for fine tuning later if needed. - IF arec.typcategory = 'E' THEN -- -- RAISE NOTICE '%', arec.type_ddl; -- IF ddl_only THEN -- RAISE INFO '%', arec.type_ddl; -- ELSE -- EXECUTE arec.type_ddl; -- END IF; -+ IF bDDLOnly THEN -+ RAISE INFO '%', arec.type_ddl; -+ -+ --issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ RAISE INFO 'ALTER TYPE % OWNER TO %;', quote_ident(dest_schema) || '.' || arec.typname, arec.owner; -+ END IF; -+ ELSE -+ EXECUTE arec.type_ddl; - -- ELSEIF arec.typcategory = 'C' THEN -- -- RAISE NOTICE '%', arec.type_ddl; -- IF ddl_only THEN -+ --issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ EXECUTE 'ALTER TYPE ' || quote_ident(dest_schema) || '.' || arec.typname || ' OWNER TO ' || arec.owner; -+ END IF; -+ END IF; -+ ELSIF arec.typcategory = 'C' THEN -+ IF bDDLOnly THEN - RAISE INFO '%', arec.type_ddl; -+ --issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ RAISE INFO 'ALTER TYPE % OWNER TO %;', quote_ident(dest_schema) || '.' || arec.typname, arec.owner; -+ END IF; - ELSE - EXECUTE arec.type_ddl; -+ --issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ EXECUTE 'ALTER TYPE ' || quote_ident(dest_schema) || '.' 
|| arec.typname || ' OWNER TO ' || arec.owner; -+ END IF; - END IF; - ELSE -- RAISE NOTICE 'Unhandled type:%-%', arec.typcategory, arec.typname; -+ RAISE NOTICE ' Unhandled type:%-%', arec.typcategory, arec.typname; - END IF; - END; - END LOOP; -@@ -214,82 +1101,361 @@ - - -- Create sequences - action := 'Sequences'; -- seq_cnt := 0; -- -- TODO: Find a way to make this sequence's owner is the correct table. -- FOR object IN -- SELECT sequence_name::text -- FROM information_schema.sequences -- WHERE sequence_schema = quote_ident(source_schema) -+ -+ cnt := 0; -+ -- fix#63 get from pg_sequences not information_schema -+ -- fix#63 take 2: get it from information_schema.sequences since we need to treat IDENTITY columns differently. -+ -- fix#95 get owner as well by joining to pg_sequences -+ -- fix#106 we can get owner info with pg_class, pg_user/pg_group, and information_schema.sequences, so we can avoid the hit to pg_sequences which is not available in 9.6 -+ FOR object, buffer IN -+ -- Fixed Issue#108: -+ -- SELECT s1.sequence_name::text, s2.sequenceowner FROM information_schema.sequences s1 JOIN pg_sequences s2 ON (s1.sequence_schema = s2.schemaname AND s1.sequence_name = s2.sequencename) AND s1.sequence_schema = quote_ident(source_schema) -+ -- SELECT s.sequence_name::text, '"' || u.usename || '"' as owner FROM information_schema.sequences s JOIN pg_class c ON (s.sequence_name = c.relname AND s.sequence_schema = c.relnamespace::regnamespace::text) JOIN pg_user u ON (c.relowner = u.usesysid) -+ -- WHERE c.relkind = 'S' AND s.sequence_schema = quote_ident(source_schema) -+ -- UNION SELECT s.sequence_name::text, g.groname as owner FROM information_schema.sequences s JOIN pg_class c ON (s.sequence_name = c.relname AND s.sequence_schema = c.relnamespace::regnamespace::text) JOIN pg_group g ON (c.relowner = g.grosysid) -+ -- WHERE c.relkind = 'S' AND s.sequence_schema = quote_ident(source_schema) -+ SELECT sequencename::text, sequenceowner FROM pg_catalog.pg_sequences WHERE schemaname = quote_ident(source_schema) - LOOP -- seq_cnt := seq_cnt + 1; -- IF ddl_only THEN -+ cnt := cnt + 1; -+ IF bDDLOnly THEN -+ -- issue#95 - RAISE INFO '%', 'CREATE SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ';'; -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ RAISE INFO '%', 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ' OWNER TO ' || buffer || ';'; -+ END IF; - ELSE - EXECUTE 'CREATE SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object); -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ EXECUTE 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ' OWNER TO ' || buffer; -+ END IF; - END IF; - srctbl := quote_ident(source_schema) || '.' || quote_ident(object); - -- EXECUTE 'SELECT last_value, is_called -- FROM ' || quote_ident(source_schema) || '.' || quote_ident(object) || ';' -- INTO sq_last_value, sq_is_called; -- -- EXECUTE 'SELECT max_value, start_value, increment_by, min_value, cache_size, cycle, data_type -- FROM pg_catalog.pg_sequences WHERE schemaname='|| quote_literal(source_schema) || ' AND sequencename=' || quote_literal(object) || ';' -- INTO sq_max_value, sq_start_value, sq_increment_by, sq_min_value, sq_cache_value, sq_is_cycled, sq_data_type ; -+ IF sq_server_version_num < 100000 THEN -+ EXECUTE 'SELECT last_value, is_called FROM ' || quote_ident(source_schema) || '.' 
|| quote_ident(object) || ';' INTO sq_last_value, sq_is_called; -+ EXECUTE 'SELECT maximum_value, start_value, increment, minimum_value, 1 cache_size, cycle_option, data_type -+ FROM information_schema.sequences WHERE sequence_schema='|| quote_literal(source_schema) || ' AND sequence_name=' || quote_literal(object) || ';' -+ INTO sq_max_value, sq_start_value, sq_increment_by, sq_min_value, sq_cache_value, sq_is_cycled, sq_data_type; -+ IF sq_is_cycled -+ THEN -+ sq_cycled := 'CYCLE'; -+ ELSE -+ sq_cycled := 'NO CYCLE'; -+ END IF; - -- IF sq_is_cycled -- THEN -- sq_cycled := 'CYCLE'; -+ qry := 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) -+ || ' INCREMENT BY ' || sq_increment_by -+ || ' MINVALUE ' || sq_min_value -+ || ' MAXVALUE ' || sq_max_value -+ -- will update current sequence value after this -+ || ' START WITH ' || sq_start_value -+ || ' RESTART ' || sq_min_value -+ || ' CACHE ' || sq_cache_value -+ || ' ' || sq_cycled || ' ;' ; - ELSE -- sq_cycled := 'NO CYCLE'; -- END IF; -+ EXECUTE 'SELECT max_value, start_value, increment_by, min_value, cache_size, cycle, data_type, COALESCE(last_value, 1) -+ FROM pg_catalog.pg_sequences WHERE schemaname='|| quote_literal(source_schema) || ' AND sequencename=' || quote_literal(object) || ';' -+ INTO sq_max_value, sq_start_value, sq_increment_by, sq_min_value, sq_cache_value, sq_is_cycled, sq_data_type, sq_last_value; -+ IF sq_is_cycled -+ THEN -+ sq_cycled := 'CYCLE'; -+ ELSE -+ sq_cycled := 'NO CYCLE'; -+ END IF; - -- qry := 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) -- || ' AS ' || sq_data_type -- || ' INCREMENT BY ' || sq_increment_by -- || ' MINVALUE ' || sq_min_value -- || ' MAXVALUE ' || sq_max_value -- || ' START WITH ' || sq_start_value -- || ' RESTART ' || sq_min_value -- || ' CACHE ' || sq_cache_value -- || ' ' || sq_cycled || ' ;' ; -+ qry := 'ALTER SEQUENCE ' || quote_ident(dest_schema) || '.' || quote_ident(object) -+ || ' AS ' || sq_data_type -+ || ' INCREMENT BY ' || sq_increment_by -+ || ' MINVALUE ' || sq_min_value -+ || ' MAXVALUE ' || sq_max_value -+ -- will update current sequence value after this -+ || ' START WITH ' || sq_start_value -+ || ' RESTART ' || sq_min_value -+ || ' CACHE ' || sq_cache_value -+ || ' ' || sq_cycled || ' ;' ; -+ END IF; - -- IF ddl_only THEN -+ IF bDDLOnly THEN - RAISE INFO '%', qry; - ELSE - EXECUTE qry; - END IF; - - buffer := quote_ident(dest_schema) || '.' 
|| quote_ident(object); -- IF include_recs THEN -+ IF bData THEN - EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; - ELSE -- if ddl_only THEN -- RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; -+ if bDDLOnly THEN -+ -- fix#63 -+ -- RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; -+ RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; - ELSE -- EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; -+ -- fix#63 -+ -- EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_start_value || ', ' || sq_is_called || ');' ; -+ EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; - END IF; - - END IF; - END LOOP; -- RAISE NOTICE ' SEQUENCES cloned: %', LPAD(seq_cnt::text, 5, ' '); -+ RAISE NOTICE ' SEQUENCES cloned: %', LPAD(cnt::text, 5, ' '); -+ - ---- Create tables -+ -- Create tables including partitioned ones (parent/children) and unlogged ones. Order by is critical since child partition range logic is dependent on it. - action := 'Tables'; -- cnt := 0; -- FOR object IN -- SELECT TABLE_NAME::text -- FROM information_schema.tables -- WHERE table_schema = quote_ident(source_schema) -- AND table_type = 'BASE TABLE' -+ SELECT setting INTO v_dummy FROM pg_settings WHERE name='search_path'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path=%', v_dummy; END IF; - -+ cnt := 0; -+ -- Issue#61 FIX: use set_config for empty string -+ -- SET search_path = ''; -+ SELECT set_config('search_path', '', false) into v_dummy; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: setting search_path to empty string:%', v_dummy; END IF; -+ -- Fix#86 add isgenerated to column list -+ -- Fix#91 add tblowner for setting the table ownership to that of the source -+ -- Fix#99 added join to pg_tablespace -+ -+ -- Handle PG versions greater than last major/minor version of PG 9.6.24 -+ IF sq_server_version_num > 90624 THEN -+ FOR tblname, relpersist, bRelispart, relknd, data_type, udt_name, udt_schema, ocomment, l_child, isGenerated, tblowner, tblspace IN -+ -- 2021-03-08 MJV #39 fix: change sql to get indicator of user-defined columns to issue warnings -+ -- select c.relname, c.relpersistence, c.relispartition, c.relkind -+ -- FROM pg_class c, pg_namespace n where n.oid = c.relnamespace and n.nspname = quote_ident(source_schema) and c.relkind in ('r','p') and -+ -- order by c.relkind desc, c.relname -+ --Fix#65 add another left join to distinguish child tables by inheritance -+ -- Fix#86 add is_generated to column select -+ -- Fix#91 add tblowner to the select -+ -- Fix#105 need a different kinda distinct to avoid retrieving a table twice in the case of a table with multiple USER-DEFINED datatypes using DISTINCT ON instead of just DISTINCT -+ --SELECT DISTINCT c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, -+ -- COALESCE(co.is_generated, ''), pg_catalog.pg_get_userbyid(c.relowner) as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace -+ -- fixed #108 by enclosing owner in double quotes to avoid errors for bad characters like #.@... 
-+ -- SELECT DISTINCT ON (c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type) c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, -+ SELECT DISTINCT ON (c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type) c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, -+ COALESCE(co.is_generated, ''), '"' || pg_catalog.pg_get_userbyid(c.relowner) || '"' as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace -+ FROM pg_class c -+ JOIN pg_namespace n ON (n.oid = c.relnamespace -+ AND n.nspname = quote_ident(source_schema) -+ AND c.relkind IN ('r', 'p')) -+ LEFT JOIN information_schema.columns co ON (co.table_schema = n.nspname -+ AND co.table_name = c.relname -+ AND (co.data_type = 'USER-DEFINED' OR co.is_generated = 'ALWAYS')) -+ LEFT JOIN pg_inherits i ON (c.oid = i.inhrelid) -+ -- issue#99 added join -+ LEFT JOIN pg_tablespace ts ON (c.reltablespace = ts.oid) -+ ORDER BY c.relkind DESC, c.relname - LOOP - cnt := cnt + 1; -- buffer := quote_ident(dest_schema) || '.' || quote_ident(object); -- IF ddl_only THEN -- RAISE INFO '%', 'CREATE TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(object) || ' INCLUDING ALL)'; -+ lastsql = ''; -+ IF l_child IS NULL THEN -+ bChild := False; - ELSE -- EXECUTE 'CREATE TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(object) || ' INCLUDING ALL)'; -+ bChild := True; -+ END IF; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: TABLE START --> table=% bRelispart=% relkind=% bChild=%',tblname, bRelispart, relknd, bChild; END IF; -+ -+ IF data_type = 'USER-DEFINED' THEN -+ -- RAISE NOTICE ' Table (%) has column(s) with user-defined types so using get_table_ddl() instead of CREATE TABLE LIKE construct.',tblname; -+ cnt :=cnt; -+ END IF; -+ buffer := quote_ident(dest_schema) || '.' || quote_ident(tblname); -+ buffer2 := ''; -+ IF relpersist = 'u' THEN -+ buffer2 := 'UNLOGGED '; -+ END IF; -+ IF relknd = 'r' THEN -+ IF bDDLOnly THEN -+ IF data_type = 'USER-DEFINED' THEN -+ -- FIXED #65, #67 -+ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); -+ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); -+ -+ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); -+ RAISE INFO '%', buffer3; -+ -- issue#91 fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; -+ END IF; -+ ELSE -+ IF NOT bChild THEN -+ RAISE INFO '%', 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL);'; -+ -- issue#91 fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; -+ END IF; -+ -+ -- issue#99 -+ IF tblspace <> 'pg_default' THEN -+ -- replace with user-defined tablespace -+ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; -+ RAISE INFO 'ALTER TABLE IF EXISTS % SET TABLESPACE %;', quote_ident(dest_schema) || '.' 
|| tblname, tblspace; -+ END IF; -+ ELSE -+ -- FIXED #65, #67 -+ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); -+ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); -+ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); -+ RAISE INFO '%', buffer3; -+ -- issue#91 fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; -+ END IF; -+ END IF; -+ END IF; -+ ELSE -+ IF data_type = 'USER-DEFINED' THEN -+ -- FIXED #65, #67 -+ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); -+ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); -+ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); -+ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef01:%', buffer3; END IF; -+ -- #82: Table def should be fully qualified with target schema, -+ -- so just make search path = public to handle extension types that should reside in public schema -+ v_dummy = 'public'; -+ SELECT set_config('search_path', v_dummy, false) into v_dummy; -+ EXECUTE buffer3; -+ -- issue#91 fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' OWNER TO ' || tblowner; -+ lastsql = buffer3; -+ EXECUTE buffer3; -+ END IF; -+ ELSE -+ IF (NOT bChild OR bRelispart) THEN -+ buffer3 := 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL)'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef02:%', buffer3; END IF; -+ EXECUTE buffer3; -+ -- issue#91 fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; -+ lastsql = buffer3; -+ EXECUTE buffer3; -+ END IF; -+ -+ -- issue#99 -+ IF tblspace <> 'pg_default' THEN -+ -- replace with user-defined tablespace -+ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; -+ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' SET TABLESPACE ' || tblspace; -+ EXECUTE buffer3; -+ END IF; -+ -+ ELSE -+ -- FIXED #65, #67 -+ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); -+ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); -+ -+ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); -+ -- set client_min_messages higher to avoid messages like this: -+ -- NOTICE: merging column "city_id" with inherited definition -+ set client_min_messages = 'WARNING'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef03:%', buffer3; END IF; -+ EXECUTE buffer3; -+ -- issue#91 fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' 
|| tblname || ' OWNER TO ' || tblowner; -+ lastsql = buffer3; -+ EXECUTE buffer3; -+ END IF; -+ -+ -- reset it back, only get these for inheritance-based tables -+ set client_min_messages = 'notice'; -+ END IF; -+ END IF; -+ -- Add table comment. -+ IF ocomment IS NOT NULL THEN -+ EXECUTE 'COMMENT ON TABLE ' || buffer || ' IS ' || quote_literal(ocomment); -+ END IF; -+ END IF; -+ ELSIF relknd = 'p' THEN -+ -- define parent table and assume child tables have already been created based on top level sort order. -+ -- Issue #103 Put the complex query into its own function, get_table_ddl_complex() -+ SELECT * INTO qry FROM public.get_table_ddl_complex(source_schema, dest_schema, tblname, sq_server_version_num); -+ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 - %', buffer; END IF; -+ -+ -- consider replacing complicated query above with this simple call to get_table_ddl()... -+ -- SELECT * INTO qry FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); -+ -- qry := REPLACE(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); -+ -+ IF bDDLOnly THEN -+ RAISE INFO '%', qry; -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || quote_ident(tblname), tblowner; -+ END IF; -+ ELSE -+ -- Issue#103: we need to always set search_path priority to target schema when we execute DDL -+ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 context: old search path=% new search path=% current search path=%', src_path_old, src_path_new, v_dummy; END IF; -+ SELECT setting INTO spath_tmp FROM pg_settings WHERE name = 'search_path'; -+ IF spath_tmp <> dest_schema THEN -+ -- change it to target schema and don't forget to change it back after we execute the DDL -+ spath = 'SET search_path = "' || dest_schema || '"'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: changing search_path --> %', spath; END IF; -+ EXECUTE spath; -+ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed to %', v_dummy; END IF; -+ END IF; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04:%', qry; END IF; -+ EXECUTE qry; -+ -+ -- Issue#103 -+ -- Set search path back to what it was -+ spath = 'SET search_path = "' || spath_tmp || '"'; -+ EXECUTE spath; -+ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed back to %', v_dummy; END IF; -+ -+ -- issue#91 fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; -+ lastsql = buffer3; -+ EXECUTE buffer3; -+ END IF; -+ -+ END IF; -+ -- loop for child tables and alter them to attach to parent for specific partition method. -+ -- Issue#103 fix: only loop for the table we are currently processing, tblname! -+ FOR aname, part_range, object IN -+ SELECT quote_ident(dest_schema) || '.' || c1.relname as tablename, pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) as partrange, quote_ident(dest_schema) || '.' || c2.relname as object -+ FROM pg_catalog.pg_class c1, pg_namespace n, pg_catalog.pg_inherits i, pg_class c2 -+ WHERE n.nspname = quote_ident(source_schema) AND c1.relnamespace = n.oid AND c1.relkind = 'r' -+ -- Issue#103: added this condition to only work on current partitioned table. 
The problem was regression testing previously only worked on one partition table clone case -+ AND c2.relname = tblname AND -+ c1.relispartition AND c1.oid=i.inhrelid AND i.inhparent = c2.oid AND c2.relnamespace = n.oid ORDER BY pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) = 'DEFAULT', -+ c1.oid::pg_catalog.regclass::pg_catalog.text -+ LOOP -+ qry := 'ALTER TABLE ONLY ' || object || ' ATTACH PARTITION ' || aname || ' ' || part_range || ';'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: %',qry; END IF; -+ -- issue#91, not sure if we need to do this for child tables -+ -- issue#95 we dont set ownership here -+ IF bDDLOnly THEN -+ RAISE INFO '%', qry; -+ IF NOT bNoOwner THEN -+ NULL; -+ END IF; -+ ELSE -+ EXECUTE qry; -+ IF NOT bNoOwner THEN -+ NULL; -+ END IF; -+ END IF; -+ END LOOP; - END IF; - - -- INCLUDING ALL creates new index names, we restore them to the old name. -@@ -300,171 +1466,975 @@ - WHERE old.schemaname = source_schema - AND new.schemaname = dest_schema - AND old.tablename = new.tablename -- AND old.tablename = object -+ AND old.tablename = tblname - AND old.indexname <> new.indexname - AND regexp_replace(old.indexdef, E'.*USING','') = regexp_replace(new.indexdef, E'.*USING','') -- ORDER BY old.indexname, new.indexname -+ ORDER BY old.indexdef, new.indexdef - LOOP -- IF ddl_only THEN -+ IF bDDLOnly THEN - RAISE INFO '%', 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; - ELSE -- EXECUTE 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; -+ -- The SELECT query above may return duplicate names when a column is -+ -- indexed twice the same manner with 2 different names. Therefore, to -+ -- avoid a 'relation "xxx" already exists' we test if the index name -+ -- is in use or free. Skipping existing index will fallback on unused -+ -- ones and every duplicate will be mapped to distinct old names. -+ IF NOT EXISTS ( -+ SELECT TRUE -+ FROM pg_indexes -+ WHERE schemaname = dest_schema -+ AND tablename = tblname -+ AND indexname = quote_ident(ix_old_name)) -+ AND EXISTS ( -+ SELECT TRUE -+ FROM pg_indexes -+ WHERE schemaname = dest_schema -+ AND tablename = tblname -+ AND indexname = quote_ident(ix_new_name)) -+ THEN -+ EXECUTE 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; -+ END IF; - END IF; - END LOOP; - -- records_count := 0; -- IF include_recs -- THEN -+ lastsql = ''; -+ IF bData THEN - -- Insert records from source table -- RAISE NOTICE 'Populating cloned table, %', buffer; -- EXECUTE 'INSERT INTO ' || buffer || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(object) || ';'; -- -- -- restart the counter for PK's internal identity sequence -- EXECUTE 'SELECT count(*) FROM ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ';' INTO records_count; -- FOR column_ IN -- SELECT column_name::text -- FROM information_schema.columns -- WHERE -- table_schema = dest_schema AND -- table_name = object AND -- is_identity = 'YES' -- LOOP -- EXECUTE 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || quote_ident(object) || ' ALTER COLUMN ' || quote_ident(column_) || ' RESTART WITH ' || records_count + 1 || ';'; -- END LOOP; -+ -+ -- 2021-03-03 MJV FIX -+ buffer := quote_ident(dest_schema) || '.' 
|| quote_ident(tblname); -+ -+ -- 2020/06/18 - Issue #31 fix: add "OVERRIDING SYSTEM VALUE" for IDENTITY columns marked as GENERATED ALWAYS. -+ select count(*) into cnt2 from pg_class c, pg_attribute a, pg_namespace n -+ where a.attrelid = c.oid and c.relname = quote_ident(tblname) and n.oid = c.relnamespace and n.nspname = quote_ident(source_schema) and a.attidentity = 'a'; -+ buffer3 := ''; -+ IF cnt2 > 0 THEN -+ buffer3 := ' OVERRIDING SYSTEM VALUE'; -+ END IF; -+ -- BUG for inserting rows from tables with user-defined columns -+ -- INSERT INTO sample_clone.address OVERRIDING SYSTEM VALUE SELECT * FROM sample.address; -+ -- ERROR: column "id2" is of type sample_clone.udt_myint but expression is of type udt_myint -+ -+ -- Issue#86 fix: -+ -- IF data_type = 'USER-DEFINED' THEN -+ IF bDebug THEN RAISE NOTICE 'DEBUG: includerecs branch table=% data_type=% isgenerated=% buffer3=%', tblname, data_type, isGenerated, buffer3; END IF; -+ IF data_type = 'USER-DEFINED' OR isGenerated = 'ALWAYS' THEN -+ -+ -- RAISE WARNING 'Bypassing copying rows for table (%) with user-defined data types. You must copy them manually.', tblname; -+ -- won't work --> INSERT INTO clone1.address (id2, id3, addr) SELECT cast(id2 as clone1.udt_myint), cast(id3 as clone1.udt_myint), addr FROM sample.address; -+ -- Issue#101 --> INSERT INTO clone1.address2 (id2, id3, addr) SELECT id2::text::clone1.udt_myint, id3::text::clone1.udt_myint, addr FROM sample.address; -+ -+ -- Issue#79 implementation follows -+ -- COPY sample.statuses(id, s) TO '/tmp/statuses.txt' WITH DELIMITER AS ','; -+ -- COPY sample_clone1.statuses FROM '/tmp/statuses.txt' (DELIMITER ',', NULL ''); -+ -- Issue#101 fix: use text cast to get around the problem. -+ IF bFileCopy THEN -+ IF bWindows THEN -+ buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''C:\WINDOWS\TEMP\cloneschema.tmp'' WITH DELIMITER AS '','';'; -+ tblarray2 := tblarray2 || buffer2; -+ -- Issue #81 reformat COPY command for upload -+ -- buffer2:= 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL '''');'; -+ buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);'; -+ tblarray2 := tblarray2 || buffer2; -+ ELSE -+ buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''/tmp/cloneschema.tmp'' WITH DELIMITER AS '','';'; -+ tblarray2 := tblarray2 || buffer2; -+ -- Issue #81 reformat COPY command for upload -+ -- buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL '''');'; -+ -- works--> COPY sample.timestamptbl2 FROM '/tmp/cloneschema.tmp' WITH (DELIMITER ',', NULL '\N', FORMAT CSV) ; -+ buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' 
|| quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);'; -+ tblarray2 := tblarray2 || buffer2; -+ END IF; -+ ELSE -+ -- Issue#101: assume direct copy with text cast, add to separate array -+ SELECT * INTO buffer3 FROM public.get_insert_stmt_ddl(quote_ident(source_schema), quote_ident(dest_schema), quote_ident(tblname), True); -+ tblarray3 := tblarray3 || buffer3; -+ END IF; -+ ELSE -+ -- bypass child tables since we populate them when we populate the parents -+ IF bDebug THEN RAISE NOTICE 'DEBUG: tblname=% bRelispart=% relknd=% l_child=% bChild=%', tblname, bRelispart, relknd, l_child, bChild; END IF; -+ IF NOT bRelispart AND NOT bChild THEN -+ -- Issue#75: Must defer population of tables until child tables have been added to parents -+ -- Issue#101 Offer alternative of copy to/from file. Although originally intended for tables with UDTs, it is now expanded to handle all cases for performance improvement perhaps for large tables. -+ -- Issue#106 buffer3 shouldn't be in the mix -+ -- revisited: buffer3 should be in play for PG versions that handle IDENTITIES -+ buffer2 := 'INSERT INTO ' || buffer || buffer3 || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ';'; -+ -- buffer2 := 'INSERT INTO ' || buffer || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ';'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: buffer2=%',buffer2; END IF; -+ IF bFileCopy THEN -+ tblarray2:= tblarray2 || buffer2; -+ ELSE -+ tblarray := tblarray || buffer2; -+ END IF; -+ END IF; -+ END IF; - END IF; - -- SET search_path = ''; -+ -- Issue#61 FIX: use set_config for empty string -+ -- SET search_path = ''; -+ SELECT set_config('search_path', '', false) into v_dummy; -+ - FOR column_, default_ IN - SELECT column_name::text, -- REPLACE(column_default::text, source_schema, dest_schema) -- FROM information_schema.COLUMNS -- WHERE table_schema = source_schema -- AND TABLE_NAME = object -- AND column_default LIKE 'nextval(%' || quote_ident(source_schema) || '%::regclass)' -+ REPLACE(column_default::text, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') -+ FROM information_schema.COLUMNS -+ WHERE table_schema = source_schema -+ AND TABLE_NAME = tblname -+ AND column_default LIKE 'nextval(%' || quote_ident(source_schema) || '%::regclass)' - LOOP -- IF ddl_only THEN -+ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on column name -+ buffer2 = 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || quote_ident(column_) || ' SET DEFAULT ' || default_ || ';'; -+ IF bDDLOnly THEN - -- May need to come back and revisit this since previous sql will not return anything since no schema as created! -- RAISE INFO '%', 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || column_ || ' SET DEFAULT ' || default_ || ';'; -+ RAISE INFO '%', buffer2; - ELSE -- EXECUTE 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || column_ || ' SET DEFAULT ' || default_; -+ EXECUTE buffer2; - END IF; - END LOOP; -- EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; - -+ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; - END LOOP; -- RAISE NOTICE ' TABLES cloned: %', LPAD(cnt::text, 5, ' '); -- -- -- add FK constraint -- action := 'FK Constraints'; -- cnt := 0; -- SET search_path = ''; -- FOR qry IN -- SELECT 'ALTER TABLE ' || quote_ident(dest_schema) || '.' 
|| quote_ident(rn.relname) -- || ' ADD CONSTRAINT ' || quote_ident(ct.conname) || ' ' || REPLACE(pg_get_constraintdef(ct.oid), 'REFERENCES ' ||quote_ident(source_schema), 'REFERENCES ' || quote_ident(dest_schema)) || ';' -- FROM pg_constraint ct -- JOIN pg_class rn ON rn.oid = ct.conrelid -- WHERE connamespace = src_oid -- AND rn.relkind = 'r' -- AND ct.contype = 'f' -+ ELSE -+ -- Handle 9.6 versions 90600 -+ FOR tblname, relpersist, relknd, data_type, udt_name, udt_schema, ocomment, l_child, isGenerated, tblowner, tblspace IN -+ -- 2021-03-08 MJV #39 fix: change sql to get indicator of user-defined columns to issue warnings -+ -- select c.relname, c.relpersistence, c.relispartition, c.relkind -+ -- FROM pg_class c, pg_namespace n where n.oid = c.relnamespace and n.nspname = quote_ident(source_schema) and c.relkind in ('r','p') and -+ -- order by c.relkind desc, c.relname -+ --Fix#65 add another left join to distinguish child tables by inheritance -+ -- Fix#86 add is_generated to column select -+ -- Fix#91 add tblowner to the select -+ -- Fix#105 need a different kinda distinct to avoid retrieving a table twice in the case of a table with multiple USER-DEFINED datatypes using DISTINCT ON instead of just DISTINCT -+ -- Fixed Issue#108: double quote roles to avoid problems with special characters in OWNER TO statements -+ --SELECT DISTINCT c.relname, c.relpersistence, c.relispartition, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, -+ -- COALESCE(co.is_generated, ''), pg_catalog.pg_get_userbyid(c.relowner) as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace -+ -- SELECT DISTINCT ON (c.relname, c.relpersistence, c.relkind, co.data_type) c.relname, c.relpersistence, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, -+ -- COALESCE(co.is_generated, ''), pg_catalog.pg_get_userbyid(c.relowner) as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace -+ SELECT DISTINCT ON (c.relname, c.relpersistence, c.relkind, co.data_type) c.relname, c.relpersistence, c.relkind, co.data_type, co.udt_name, co.udt_schema, obj_description(c.oid), i.inhrelid, -+ COALESCE(co.is_generated, ''), '"' || pg_catalog.pg_get_userbyid(c.relowner) || '"' as "Owner", CASE WHEN reltablespace = 0 THEN 'pg_default' ELSE ts.spcname END as tablespace -+ FROM pg_class c -+ JOIN pg_namespace n ON (n.oid = c.relnamespace -+ AND n.nspname = quote_ident(source_schema) -+ AND c.relkind IN ('r', 'p')) -+ LEFT JOIN information_schema.columns co ON (co.table_schema = n.nspname -+ AND co.table_name = c.relname -+ AND (co.data_type = 'USER-DEFINED' OR co.is_generated = 'ALWAYS')) -+ LEFT JOIN pg_inherits i ON (c.oid = i.inhrelid) -+ -- issue#99 added join -+ LEFT JOIN pg_tablespace ts ON (c.reltablespace = ts.oid) -+ ORDER BY c.relkind DESC, c.relname - LOOP - cnt := cnt + 1; -- IF ddl_only THEN -- RAISE INFO '%', qry; -+ IF l_child IS NULL THEN -+ bChild := False; - ELSE -- EXECUTE qry; -+ bChild := True; - END IF; -- END LOOP; -- EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; -- RAISE NOTICE ' FKEYS cloned: %', LPAD(cnt::text, 5, ' '); -- ---- Create views -- action := 'Views'; -- cnt := 0; -- FOR object IN -- SELECT table_name::text, -- view_definition -- FROM information_schema.views -- WHERE table_schema = quote_ident(source_schema) -- -- LOOP -- cnt := cnt + 1; -- buffer := quote_ident(dest_schema) || '.' 
|| quote_ident(object); -- SELECT view_definition INTO v_def -- FROM information_schema.views -- WHERE table_schema = quote_ident(source_schema) -- AND table_name = quote_ident(object); -+ IF bDebug THEN RAISE NOTICE 'DEBUG: TABLE START --> table=% bRelispart=NA relkind=% bChild=%',tblname, relknd, bChild; END IF; - -- IF ddl_only THEN -- RAISE INFO '%', 'CREATE OR REPLACE VIEW ' || buffer || ' AS ' || v_def || ';' ; -- ELSE -- EXECUTE 'CREATE OR REPLACE VIEW ' || buffer || ' AS ' || v_def || ';' ; -+ IF data_type = 'USER-DEFINED' THEN -+ -- RAISE NOTICE ' Table (%) has column(s) with user-defined types so using get_table_ddl() instead of CREATE TABLE LIKE construct.',tblname; -+ cnt :=cnt; - END IF; -- END LOOP; -- RAISE NOTICE ' VIEWS cloned: %', LPAD(cnt::text, 5, ' '); -- -- -- Create Materialized views -- action := 'Mat. Views'; -- cnt := 0; -- FOR object IN -- SELECT matviewname::text, -- definition -- FROM pg_catalog.pg_matviews -- WHERE schemaname = quote_ident(source_schema) -- -- LOOP -- cnt := cnt + 1; -- buffer := dest_schema || '.' || quote_ident(object); -- SELECT replace(definition,';','') INTO v_def -- FROM pg_catalog.pg_matviews -- WHERE schemaname = quote_ident(source_schema) -- AND matviewname = quote_ident(object); -- -- IF include_recs THEN -- EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || v_def || ';' ; -- ELSE -- IF ddl_only THEN -- RAISE INFO '%', 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || v_def || ' WITH NO DATA;' ; -- ELSE -- EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || v_def || ' WITH NO DATA;' ; -- END IF; -+ buffer := quote_ident(dest_schema) || '.' || quote_ident(tblname); -+ buffer2 := ''; -+ IF relpersist = 'u' THEN -+ buffer2 := 'UNLOGGED '; -+ END IF; -+ IF relknd = 'r' THEN -+ IF bDDLOnly THEN -+ IF data_type = 'USER-DEFINED' THEN -+ -- FIXED #65, #67 -+ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); -+ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); -+ -+ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); -+ RAISE INFO '%', buffer3; -+ -- issue#91 fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; -+ END IF; -+ ELSE -+ IF NOT bChild THEN -+ RAISE INFO '%', 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL);'; -+ -- issue#91 fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; -+ END IF; - -- END IF; -+ -- issue#99 -+ IF tblspace <> 'pg_default' THEN -+ -- replace with user-defined tablespace -+ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; -+ RAISE INFO 'ALTER TABLE IF EXISTS % SET TABLESPACE %;', quote_ident(dest_schema) || '.' 
|| tblname, tblspace; -+ END IF; -+ ELSE -+ -- FIXED #65, #67 -+ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); -+ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); -+ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); -+ RAISE INFO '%', buffer3; -+ -- issue#91 fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || tblname, tblowner; -+ END IF; -+ END IF; -+ END IF; -+ ELSE -+ IF data_type = 'USER-DEFINED' THEN -+ -- FIXED #65, #67 -+ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); -+ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); -+ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); -+ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef01:%', buffer3; END IF; -+ -- #82: Table def should be fully qualified with target schema, -+ -- so just make search path = public to handle extension types that should reside in public schema -+ v_dummy = 'public'; -+ SELECT set_config('search_path', v_dummy, false) into v_dummy; -+ EXECUTE buffer3; -+ -- issue#91 fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' OWNER TO ' || tblowner; -+ lastsql = buffer3; -+ EXECUTE buffer3; -+ END IF; -+ ELSE -+ IF (NOT bChild) THEN -+ buffer3 := 'CREATE ' || buffer2 || 'TABLE ' || buffer || ' (LIKE ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' INCLUDING ALL)'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef02:%', buffer3; END IF; -+ EXECUTE buffer3; -+ -- issue#91 fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; -+ lastsql = buffer3; -+ EXECUTE buffer3; -+ END IF; -+ -+ -- issue#99 -+ IF tblspace <> 'pg_default' THEN -+ -- replace with user-defined tablespace -+ -- ALTER TABLE myschema.mytable SET TABLESPACE usrtblspc; -+ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || tblname || ' SET TABLESPACE ' || tblspace; -+ EXECUTE buffer3; -+ END IF; -+ -+ ELSE -+ -- FIXED #65, #67 -+ -- SELECT * INTO buffer3 FROM public.pg_get_tabledef(quote_ident(source_schema), tblname); -+ SELECT * INTO buffer3 FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); -+ -+ buffer3 := REPLACE(buffer3, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); -+ -- set client_min_messages higher to avoid messages like this: -+ -- NOTICE: merging column "city_id" with inherited definition -+ set client_min_messages = 'WARNING'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef03:%', buffer3; END IF; -+ EXECUTE buffer3; -+ -- issue#91 fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' 
|| tblname || ' OWNER TO ' || tblowner; -+ lastsql = buffer3; -+ EXECUTE buffer3; -+ END IF; -+ -+ -- reset it back, only get these for inheritance-based tables -+ set client_min_messages = 'notice'; -+ END IF; -+ END IF; -+ -- Add table comment. -+ IF ocomment IS NOT NULL THEN -+ EXECUTE 'COMMENT ON TABLE ' || buffer || ' IS ' || quote_literal(ocomment); -+ END IF; -+ END IF; -+ ELSIF relknd = 'p' THEN -+ -- define parent table and assume child tables have already been created based on top level sort order. -+ -- Issue #103 Put the complex query into its own function, get_table_ddl_complex() -+ SELECT * INTO qry FROM public.get_table_ddl_complex(source_schema, dest_schema, tblname, sq_server_version_num); -+ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 - %', buffer; END IF; -+ -+ -- consider replacing complicated query above with this simple call to get_table_ddl()... -+ -- SELECT * INTO qry FROM public.get_table_ddl(quote_ident(source_schema), tblname, False); -+ -- qry := REPLACE(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); -+ -+ IF bDDLOnly THEN -+ RAISE INFO '%', qry; -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ RAISE INFO 'ALTER TABLE IF EXISTS % OWNER TO %;', quote_ident(dest_schema) || '.' || quote_ident(tblname), tblowner; -+ END IF; -+ ELSE -+ -- Issue#103: we need to always set search_path priority to target schema when we execute DDL -+ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04 context: old search path=% new search path=% current search path=%', src_path_old, src_path_new, v_dummy; END IF; -+ SELECT setting INTO spath_tmp FROM pg_settings WHERE name = 'search_path'; -+ IF spath_tmp <> dest_schema THEN -+ -- change it to target schema and don't forget to change it back after we execute the DDL -+ spath = 'SET search_path = "' || dest_schema || '"'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: changing search_path --> %', spath; END IF; -+ EXECUTE spath; -+ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed to %', v_dummy; END IF; -+ END IF; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: tabledef04:%', qry; END IF; -+ EXECUTE qry; -+ -+ -- Issue#103 -+ -- Set search path back to what it was -+ spath = 'SET search_path = "' || spath_tmp || '"'; -+ EXECUTE spath; -+ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path changed back to %', v_dummy; END IF; -+ -+ -- issue#91 fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ buffer3 = 'ALTER TABLE IF EXISTS ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' OWNER TO ' || tblowner; -+ EXECUTE buffer3; -+ END IF; -+ -+ END IF; -+ -- loop for child tables and alter them to attach to parent for specific partition method. -+ -- Issue#103 fix: only loop for the table we are currently processing, tblname! -+ FOR aname, part_range, object IN -+ SELECT quote_ident(dest_schema) || '.' || c1.relname as tablename, pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) as partrange, quote_ident(dest_schema) || '.' || c2.relname as object -+ FROM pg_catalog.pg_class c1, pg_namespace n, pg_catalog.pg_inherits i, pg_class c2 -+ WHERE n.nspname = quote_ident(source_schema) AND c1.relnamespace = n.oid AND c1.relkind = 'r' -+ -- Issue#103: added this condition to only work on current partitioned table. 
The problem was regression testing previously only worked on one partition table clone case -+ AND c2.relname = tblname AND -+ c1.relispartition AND c1.oid=i.inhrelid AND i.inhparent = c2.oid AND c2.relnamespace = n.oid ORDER BY pg_catalog.pg_get_expr(c1.relpartbound, c1.oid) = 'DEFAULT', -+ c1.oid::pg_catalog.regclass::pg_catalog.text -+ LOOP -+ qry := 'ALTER TABLE ONLY ' || object || ' ATTACH PARTITION ' || aname || ' ' || part_range || ';'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: %',qry; END IF; -+ -- issue#91, not sure if we need to do this for child tables -+ -- issue#95 we dont set ownership here -+ IF bDDLOnly THEN -+ RAISE INFO '%', qry; -+ IF NOT bNoOwner THEN -+ NULL; -+ END IF; -+ ELSE -+ EXECUTE qry; -+ IF NOT bNoOwner THEN -+ NULL; -+ END IF; -+ END IF; -+ END LOOP; -+ END IF; -+ -+ -- INCLUDING ALL creates new index names, we restore them to the old name. -+ -- There should be no conflicts since they live in different schemas -+ FOR ix_old_name, ix_new_name IN -+ SELECT old.indexname, new.indexname -+ FROM pg_indexes old, pg_indexes new -+ WHERE old.schemaname = source_schema -+ AND new.schemaname = dest_schema -+ AND old.tablename = new.tablename -+ AND old.tablename = tblname -+ AND old.indexname <> new.indexname -+ AND regexp_replace(old.indexdef, E'.*USING','') = regexp_replace(new.indexdef, E'.*USING','') -+ ORDER BY old.indexdef, new.indexdef -+ LOOP -+ lastsql = ''; -+ IF bDDLOnly THEN -+ RAISE INFO '%', 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; -+ ELSE -+ -- The SELECT query above may return duplicate names when a column is -+ -- indexed twice the same manner with 2 different names. Therefore, to -+ -- avoid a 'relation "xxx" already exists' we test if the index name -+ -- is in use or free. Skipping existing index will fallback on unused -+ -- ones and every duplicate will be mapped to distinct old names. -+ IF NOT EXISTS ( -+ SELECT TRUE -+ FROM pg_indexes -+ WHERE schemaname = dest_schema -+ AND tablename = tblname -+ AND indexname = quote_ident(ix_old_name)) -+ AND EXISTS ( -+ SELECT TRUE -+ FROM pg_indexes -+ WHERE schemaname = dest_schema -+ AND tablename = tblname -+ AND indexname = quote_ident(ix_new_name)) -+ THEN -+ EXECUTE 'ALTER INDEX ' || quote_ident(dest_schema) || '.' || quote_ident(ix_new_name) || ' RENAME TO ' || quote_ident(ix_old_name) || ';'; -+ END IF; -+ END IF; -+ END LOOP; - -+ IF bData THEN -+ -- Insert records from source table -+ -+ -- 2021-03-03 MJV FIX -+ buffer := quote_ident(dest_schema) || '.' || quote_ident(tblname); -+ -+ -- Issue#86 fix: -+ -- IF data_type = 'USER-DEFINED' THEN -+ IF bDebug THEN RAISE NOTICE 'DEBUG: includerecs branch table=% data_type=% isgenerated=%', tblname, data_type, isGenerated; END IF; -+ IF data_type = 'USER-DEFINED' OR isGenerated = 'ALWAYS' THEN -+ -+ -- RAISE WARNING 'Bypassing copying rows for table (%) with user-defined data types. 
You must copy them manually.', tblname; -+ -- won't work --> INSERT INTO clone1.address (id2, id3, addr) SELECT cast(id2 as clone1.udt_myint), cast(id3 as clone1.udt_myint), addr FROM sample.address; -+ -- Issue#101 --> INSERT INTO clone1.address2 (id2, id3, addr) SELECT id2::text::clone1.udt_myint, id3::text::clone1.udt_myint, addr FROM sample.address; -+ -+ -- Issue#79 implementation follows -+ -- COPY sample.statuses(id, s) TO '/tmp/statuses.txt' WITH DELIMITER AS ','; -+ -- COPY sample_clone1.statuses FROM '/tmp/statuses.txt' (DELIMITER ',', NULL ''); -+ -- Issue#101 fix: use text cast to get around the problem. -+ IF bFileCopy THEN -+ IF bWindows THEN -+ buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''C:\WINDOWS\TEMP\cloneschema.tmp'' WITH DELIMITER AS '','';'; -+ tblarray2 := tblarray2 || buffer2; -+ -- Issue #81 reformat COPY command for upload -+ -- buffer2:= 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL '''');'; -+ buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''C:\WINDOWS\TEMP\cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);'; -+ tblarray2 := tblarray2 || buffer2; -+ ELSE -+ buffer2 := 'COPY ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ' TO ''/tmp/cloneschema.tmp'' WITH DELIMITER AS '','';'; -+ tblarray2 := tblarray2 || buffer2; -+ -- Issue #81 reformat COPY command for upload -+ -- buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL '''');'; -+ -- works--> COPY sample.timestamptbl2 FROM '/tmp/cloneschema.tmp' WITH (DELIMITER ',', NULL '\N', FORMAT CSV) ; -+ buffer2 := 'COPY ' || quote_ident(dest_schema) || '.' || quote_ident(tblname) || ' FROM ''/tmp/cloneschema.tmp'' (DELIMITER '','', NULL ''\N'', FORMAT CSV);'; -+ tblarray2 := tblarray2 || buffer2; -+ END IF; -+ ELSE -+ -- Issue#101: assume direct copy with text cast, add to separate array -+ SELECT * INTO buffer3 FROM public.get_insert_stmt_ddl(quote_ident(source_schema), quote_ident(dest_schema), quote_ident(tblname), True); -+ tblarray3 := tblarray3 || buffer3; -+ END IF; -+ ELSE -+ -- bypass child tables since we populate them when we populate the parents -+ IF bDebug THEN RAISE NOTICE 'DEBUG: tblname=% bRelispart=NA relknd=% l_child=% bChild=%', tblname, relknd, l_child, bChild; END IF; -+ -+ IF NOT bChild THEN -+ -- Issue#75: Must defer population of tables until child tables have been added to parents -+ -- Issue#101 Offer alternative of copy to/from file. Although originally intended for tables with UDTs, it is now expanded to handle all cases for performance improvement perhaps for large tables. -+ -- buffer2 := 'INSERT INTO ' || buffer || buffer3 || ' SELECT * FROM ' || quote_ident(source_schema) || '.' || quote_ident(tblname) || ';'; -+ buffer2 := 'INSERT INTO ' || buffer || ' SELECT * FROM ' || quote_ident(source_schema) || '.' 
|| quote_ident(tblname) || ';'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: buffer2=%',buffer2; END IF; -+ IF bFileCopy THEN -+ tblarray2:= tblarray2 || buffer2; -+ ELSE -+ tblarray := tblarray || buffer2; -+ END IF; -+ END IF; -+ END IF; -+ END IF; -+ -+ -- Issue#61 FIX: use set_config for empty string -+ -- SET search_path = ''; -+ SELECT set_config('search_path', '', false) into v_dummy; -+ -+ FOR column_, default_ IN -+ SELECT column_name::text, -+ REPLACE(column_default::text, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') -+ FROM information_schema.COLUMNS -+ WHERE table_schema = source_schema -+ AND TABLE_NAME = tblname -+ AND column_default LIKE 'nextval(%' || quote_ident(source_schema) || '%::regclass)' -+ LOOP -+ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on column name -+ buffer2 = 'ALTER TABLE ' || buffer || ' ALTER COLUMN ' || quote_ident(column_) || ' SET DEFAULT ' || default_ || ';'; -+ IF bDDLOnly THEN -+ -- May need to come back and revisit this since previous sql will not return anything since no schema as created! -+ RAISE INFO '%', buffer2; -+ ELSE -+ EXECUTE buffer2; -+ END IF; - END LOOP; -- RAISE NOTICE ' MAT VIEWS cloned: %', LPAD(cnt::text, 5, ' '); - ---- Create functions -- action := 'Functions'; -+ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; -+ END LOOP; -+ END IF; -+ -- end of 90600 branch -+ -+ RAISE NOTICE ' TABLES cloned: %', LPAD(cnt::text, 5, ' '); -+ -+ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: search_path=%', v_dummy; END IF; -+ -+ -- Assigning sequences to table columns. -+ action := 'Sequences assigning'; - cnt := 0; -- FOR func_oid IN -- SELECT oid -- FROM pg_proc -- WHERE pronamespace = src_oid -+ FOR object IN -+ SELECT sequence_name::text -+ FROM information_schema.sequences -+ WHERE sequence_schema = quote_ident(source_schema) - LOOP - cnt := cnt + 1; -- SELECT pg_get_functiondef(func_oid) INTO qry; -- SELECT replace(qry, source_schema, dest_schema) INTO dest_qry; -- IF ddl_only THEN -- RAISE INFO '%', dest_qry; -+ srctbl := quote_ident(source_schema) || '.' || quote_ident(object); -+ -+ -- Get owning column, inspired from Sadique Ali post at: -+ -- https://sadique.io/blog/2019/05/07/viewing-sequence-ownership-information-in-postgres/ -+ -- Fixed via pull request#109 -+ SELECT ' OWNED BY ' -+ || quote_ident(dest_schema) -+ || '.' -+ || quote_ident(dc.relname) -+ || '.' -+ || quote_ident(a.attname) -+ INTO sq_owned -+ FROM pg_class AS c -+ JOIN pg_namespace n ON c.relnamespace = n.oid -+ JOIN pg_depend AS d ON c.relfilenode = d.objid -+ JOIN pg_class AS dc ON ( -+ d.refobjid = dc.relfilenode -+ AND dc.relnamespace = n.oid -+ ) -+ JOIN pg_attribute AS a ON ( -+ a.attnum = d.refobjsubid -+ AND a.attrelid = d.refobjid -+ ) -+ WHERE n.nspname = quote_ident(source_schema) -+ AND c.relkind = 'S' -+ AND c.relname = object; -+ -+ IF sq_owned IS NOT NULL THEN -+ qry := 'ALTER SEQUENCE ' -+ || quote_ident(dest_schema) -+ || '.' 
-+ || quote_ident(object) -+ || sq_owned -+ || ';'; -+ -+ IF bDDLOnly THEN -+ RAISE NOTICE 'DEBUG: %',qry; -+ RAISE INFO '%', qry; -+ ELSE -+ EXECUTE qry; -+ END IF; -+ -+ END IF; -+ -+ END LOOP; -+ RAISE NOTICE ' SEQUENCES set: %', LPAD(cnt::text, 5, ' '); -+ -+ -- Update IDENTITY sequences to the last value, bypass 9.6 versions -+ IF sq_server_version_num > 90624 THEN -+ action := 'Identity updating'; -+ cnt := 0; -+ FOR object, sq_last_value IN -+ SELECT sequencename::text, COALESCE(last_value, -999) from pg_sequences where schemaname = quote_ident(source_schema) -+ AND NOT EXISTS -+ (select 1 from information_schema.sequences where sequence_schema = quote_ident(source_schema) and sequence_name = sequencename) -+ LOOP -+ IF sq_last_value = -999 THEN -+ continue; -+ END IF; -+ cnt := cnt + 1; -+ buffer := quote_ident(dest_schema) || '.' || quote_ident(object); -+ IF bData THEN -+ EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; -+ ELSE -+ if bDDLOnly THEN -+ -- fix#63 -+ RAISE INFO '%', 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; -+ ELSE -+ -- fix#63 -+ EXECUTE 'SELECT setval( ''' || buffer || ''', ' || sq_last_value || ', ' || sq_is_called || ');' ; -+ END IF; -+ END IF; -+ END LOOP; -+ -- Fixed Issue#107: set lpad from 2 to 5 -+ RAISE NOTICE ' IDENTITIES set: %', LPAD(cnt::text, 5, ' '); -+ ELSE -+ -- Fixed Issue#107: set lpad from 2 to 5 -+ RAISE NOTICE ' IDENTITIES set: %', LPAD('-1'::text, 5, ' '); -+ END IF; -+ -+ -- Issue#78 forces us to defer FKeys until the end since we previously did row copies before FKeys -+ -- add FK constraint -+ -- action := 'FK Constraints'; -+ -+ -- Issue#62: Add comments on indexes, and then removed them from here and reworked later below. -+ -+ -- Issue 90: moved functions to here, before views or MVs that might use them -+ -- Create functions -+ action := 'Functions'; -+ cnt := 0; -+ -- MJV FIX per issue# 34 -+ -- SET search_path = ''; -+ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; -+ -+ -- Fixed Issue#65 -+ -- Fixed Issue#97 -+ -- FOR func_oid IN SELECT oid FROM pg_proc WHERE pronamespace = src_oid AND prokind != 'a' -+ IF is_prokind THEN -+ FOR func_oid, func_owner, func_name, func_args, func_argno, buffer3 IN -+ SELECT p.oid, pg_catalog.pg_get_userbyid(p.proowner), p.proname, oidvectortypes(p.proargtypes), p.pronargs, -+ CASE WHEN prokind = 'p' THEN 'PROCEDURE' WHEN prokind = 'f' THEN 'FUNCTION' ELSE '' END -+ FROM pg_proc p WHERE p.pronamespace = src_oid AND p.prokind != 'a' -+ LOOP -+ cnt := cnt + 1; -+ SELECT pg_get_functiondef(func_oid) -+ INTO qry; -+ -+ SELECT replace(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO dest_qry; -+ IF bDDLOnly THEN -+ RAISE INFO '%;', dest_qry; -+ -- Issue#91 Fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ IF func_argno = 0 THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ RAISE INFO 'ALTER % %() OWNER TO %', buffer3, quote_ident(dest_schema) || '.' || quote_ident(func_name), '"' || func_owner || '";'; -+ ELSE -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ RAISE INFO 'ALTER % % OWNER TO %', buffer3, quote_ident(dest_schema) || '.' 
|| quote_ident(func_name) || '(' || func_args || ')', '"' || func_owner || '";'; -+ END IF; -+ END IF; -+ ELSE -+ IF bDebug THEN RAISE NOTICE 'DEBUG: %', dest_qry; END IF; -+ EXECUTE dest_qry; -+ -+ -- Issue#91 Fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ IF func_argno = 0 THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ dest_qry = 'ALTER ' || buffer3 || ' ' || quote_ident(dest_schema) || '.' || quote_ident(func_name) || '() OWNER TO ' || '"' || func_owner || '";'; -+ ELSE -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ dest_qry = 'ALTER ' || buffer3 || ' ' || quote_ident(dest_schema) || '.' || quote_ident(func_name) || '(' || func_args || ') OWNER TO ' || '"' || func_owner || '";'; -+ END IF; -+ END IF; -+ EXECUTE dest_qry; -+ END IF; -+ END LOOP; -+ ELSE -+ FOR func_oid IN SELECT oid -+ FROM pg_proc -+ WHERE pronamespace = src_oid AND not proisagg -+ LOOP -+ cnt := cnt + 1; -+ SELECT pg_get_functiondef(func_oid) INTO qry; -+ SELECT replace(qry, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO dest_qry; -+ IF bDDLOnly THEN -+ RAISE INFO '%;', dest_qry; -+ ELSE -+ EXECUTE dest_qry; -+ END IF; -+ END LOOP; -+ END IF; -+ -+ -- Create aggregate functions. -+ -- Fixed Issue#65 -+ -- FOR func_oid IN SELECT oid FROM pg_proc WHERE pronamespace = src_oid AND prokind = 'a' -+ IF is_prokind THEN -+ FOR func_oid IN -+ SELECT oid -+ FROM pg_proc -+ WHERE pronamespace = src_oid AND prokind = 'a' -+ LOOP -+ cnt := cnt + 1; -+ SELECT -+ 'CREATE AGGREGATE ' -+ || dest_schema -+ || '.' -+ || p.proname -+ || '(' -+ -- || format_type(a.aggtranstype, NULL) -+ -- Issue#65 Fixes for specific datatype mappings -+ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8' -+ WHEN format_type(a.aggtranstype, NULL) = 'anyarray' THEN 'anyelement' -+ ELSE format_type(a.aggtranstype, NULL) END -+ || ') (sfunc = ' -+ || regexp_replace(a.aggtransfn::text, '(^|\W)' || quote_ident(source_schema) || '\.', '\1' || quote_ident(dest_schema) || '.') -+ || ', stype = ' -+ -- || format_type(a.aggtranstype, NULL) -+ -- Issue#65 Fixes for specific datatype mappings -+ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8[]' ELSE format_type(a.aggtranstype, NULL) END -+ || CASE -+ WHEN op.oprname IS NULL THEN '' -+ ELSE ', sortop = ' || op.oprname -+ END -+ || CASE -+ WHEN a.agginitval IS NULL THEN '' -+ ELSE ', initcond = ''' || a.agginitval || '''' -+ END -+ || ')' -+ INTO dest_qry -+ FROM pg_proc p -+ JOIN pg_aggregate a ON a.aggfnoid = p.oid -+ LEFT JOIN pg_operator op ON op.oid = a.aggsortop -+ WHERE p.oid = func_oid; -+ -+ IF bDDLOnly THEN -+ RAISE INFO '%;', dest_qry; -+ ELSE -+ EXECUTE dest_qry; -+ END IF; -+ -+ END LOOP; -+ RAISE NOTICE ' FUNCTIONS cloned: %', LPAD(cnt::text, 5, ' '); -+ - ELSE -- EXECUTE dest_qry; -+ FOR func_oid IN SELECT oid FROM pg_proc WHERE pronamespace = src_oid AND proisagg -+ LOOP -+ cnt := cnt + 1; -+ SELECT -+ 'CREATE AGGREGATE ' -+ || dest_schema -+ || '.' 
-+ || p.proname -+ || '(' -+ -- || format_type(a.aggtranstype, NULL) -+ -- Issue#65 Fixes for specific datatype mappings -+ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8' -+ WHEN format_type(a.aggtranstype, NULL) = 'anyarray' THEN 'anyelement' -+ ELSE format_type(a.aggtranstype, NULL) END -+ || ') (sfunc = ' -+ || regexp_replace(a.aggtransfn::text, '(^|\W)' || quote_ident(source_schema) || '\.', '\1' || quote_ident(dest_schema) || '.') -+ || ', stype = ' -+ -- || format_type(a.aggtranstype, NULL) -+ -- Issue#65 Fixes for specific datatype mappings -+ || CASE WHEN format_type(a.aggtranstype, NULL) = 'double precision[]' THEN 'float8[]' ELSE format_type(a.aggtranstype, NULL) END -+ || CASE -+ WHEN op.oprname IS NULL THEN '' -+ ELSE ', sortop = ' || op.oprname -+ END -+ || CASE -+ WHEN a.agginitval IS NULL THEN '' -+ ELSE ', initcond = ''' || a.agginitval || '''' -+ END -+ || ')' -+ INTO dest_qry -+ FROM pg_proc p -+ JOIN pg_aggregate a ON a.aggfnoid = p.oid -+ LEFT JOIN pg_operator op ON op.oid = a.aggsortop -+ WHERE p.oid = func_oid; -+ -+ IF bDDLOnly THEN -+ RAISE INFO '%;', dest_qry; -+ ELSE -+ EXECUTE dest_qry; -+ END IF; -+ -+ END LOOP; -+ RAISE NOTICE ' FUNCTIONS cloned: %', LPAD(cnt::text, 5, ' '); - END IF; - -+ -- Create views -+ action := 'Views'; -+ -+ -- Issue#61 FIX: use set_config for empty string -+ -- MJV FIX #43: also had to reset search_path from source schema to empty. -+ -- SET search_path = ''; -+ SELECT set_config('search_path', '', false) -+ INTO v_dummy; -+ -+ cnt := 0; -+ --FOR object IN -+ -- SELECT table_name::text, view_definition -+ -- FROM information_schema.views -+ -- WHERE table_schema = quote_ident(source_schema) -+ -+ -- Issue#73 replace loop query to handle dependencies -+ -- Issue#91 get view_owner -+ FOR srctbl, aname, view_owner, object IN -+ WITH RECURSIVE views AS ( -+ SELECT n.nspname as schemaname, v.relname as tablename, v.oid::regclass AS viewname, -+ v.relkind = 'm' AS is_materialized, pg_catalog.pg_get_userbyid(v.relowner) as owner, -+ 1 AS level -+ FROM pg_depend AS d -+ JOIN pg_rewrite AS r -+ ON r.oid = d.objid -+ JOIN pg_class AS v -+ ON v.oid = r.ev_class -+ JOIN pg_namespace n -+ ON n.oid = v.relnamespace -+ -- WHERE v.relkind IN ('v', 'm') -+ WHERE v.relkind IN ('v') -+ AND d.classid = 'pg_rewrite'::regclass -+ AND d.refclassid = 'pg_class'::regclass -+ AND d.deptype = 'n' -+ UNION -+ -- add the views that depend on these -+ SELECT n.nspname as schemaname, v.relname as tablename, v.oid::regclass AS viewname, -+ v.relkind = 'm', pg_catalog.pg_get_userbyid(v.relowner) as owner, -+ views.level + 1 -+ FROM views -+ JOIN pg_depend AS d -+ ON d.refobjid = views.viewname -+ JOIN pg_rewrite AS r -+ ON r.oid = d.objid -+ JOIN pg_class AS v -+ ON v.oid = r.ev_class -+ JOIN pg_namespace n -+ ON n.oid = v.relnamespace -+ -- WHERE v.relkind IN ('v', 'm') -+ WHERE v.relkind IN ('v') -+ AND d.classid = 'pg_rewrite'::regclass -+ AND d.refclassid = 'pg_class'::regclass -+ AND d.deptype = 'n' -+ AND v.oid <> views.viewname -+ ) -+ SELECT tablename, viewname, owner, format('CREATE OR REPLACE%s VIEW %s AS%s', -+ CASE WHEN is_materialized -+ THEN ' MATERIALIZED' -+ ELSE '' -+ END, -+ viewname, -+ pg_get_viewdef(viewname)) -+ FROM views -+ WHERE schemaname = quote_ident(source_schema) -+ GROUP BY schemaname, tablename, viewname, owner, is_materialized -+ ORDER BY max(level), schemaname, tablename -+ LOOP -+ cnt := cnt + 1; -+ -- Issue#73 replace logic based on new loop sql -+ buffer := quote_ident(dest_schema) || '.' 
|| quote_ident(aname); -+ -- MJV FIX: #43 -+ -- SELECT view_definition INTO v_def -+ -- SELECT REPLACE(view_definition, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO v_def -+ -- FROM information_schema.views -+ -- WHERE table_schema = quote_ident(source_schema) -+ -- AND table_name = quote_ident(object); -+ SELECT REPLACE(object, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO v_def; -+ -- NOTE: definition already includes the closing statement semicolon -+ SELECT REPLACE(aname, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO buffer3; -+ IF bDDLOnly THEN -+ RAISE INFO '%', v_def; -+ -- Issue#91 Fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ -- RAISE INFO 'ALTER TABLE % OWNER TO %', buffer3, view_owner || ';'; -+ RAISE INFO 'ALTER TABLE % OWNER TO %', buffer3, '"' ||view_owner || '";'; -+ END IF; -+ ELSE -+ -- EXECUTE 'CREATE OR REPLACE VIEW ' || buffer || ' AS ' || v_def; -+ EXECUTE v_def; -+ -- Issue#73: commented out comment logic for views since we do it elsewhere now. -+ -- Issue#91 Fix -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ v_def = 'ALTER TABLE ' || buffer3 || ' OWNER TO ' || '"' || view_owner || '";'; -+ EXECUTE v_def; -+ END IF; -+ END IF; - END LOOP; -- RAISE NOTICE ' FUNCTIONS cloned: %', LPAD(cnt::text, 5, ' '); -+ RAISE NOTICE ' VIEWS cloned: %', LPAD(cnt::text, 5, ' '); -+ -+ -- Create Materialized views -+ action := 'Mat. Views'; -+ cnt := 0; -+ -- Issue#91 get view_owner -+ FOR object, view_owner, v_def IN -+ SELECT matviewname::text, '"' || matviewowner::text || '"', replace(definition,';','') FROM pg_catalog.pg_matviews WHERE schemaname = quote_ident(source_schema) -+ LOOP -+ cnt := cnt + 1; -+ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on target schema and object -+ buffer := quote_ident(dest_schema) || '.' || quote_ident(object); -+ -+ -- MJV FIX: #72 remove source schema in MV def -+ SELECT REPLACE(v_def, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') INTO buffer2; -+ -+ IF bData THEN -+ -- issue#98 defer creation until after regular tables are populated. Also defer the ownership as well. 
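A minimal sketch of the deferral pattern described in the comment above, assuming hypothetical objects clone1.sales and clone1.mv_sales and a hypothetical array variable stmts (the patch itself collects into mvarray the same way):

DO $$
DECLARE
    stmts  text[] := '{}';   -- DDL deferred until the base tables hold data
    mv_ddl text;
BEGIN
    -- collect the statements instead of executing them immediately
    stmts := stmts || 'CREATE MATERIALIZED VIEW clone1.mv_sales AS SELECT * FROM clone1.sales WITH DATA;';
    stmts := stmts || 'ALTER MATERIALIZED VIEW clone1.mv_sales OWNER TO "app_owner";';

    -- ... base tables in clone1 are populated at this point ...

    -- replay the deferred DDL once the data is in place
    FOREACH mv_ddl IN ARRAY stmts LOOP
        EXECUTE mv_ddl;
    END LOOP;
END $$;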
-+ -- EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH DATA;' ; -+ buffer3 = 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH DATA;'; -+ mvarray := mvarray || buffer3; -+ -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- buffer3 = 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ; -+ -- EXECUTE buffer3; -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ buffer3 = 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ; -+ mvarray := mvarray || buffer3; -+ END IF; -+ ELSE -+ IF bDDLOnly THEN -+ RAISE INFO '%', 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH NO DATA;' ; -+ -- Issue#91 -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ RAISE INFO '%', 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ; -+ END IF; -+ ELSE -+ EXECUTE 'CREATE MATERIALIZED VIEW ' || buffer || ' AS ' || buffer2 || ' WITH NO DATA;' ; -+ -- Issue#91 -+ -- issue#95 -+ IF NOT bNoOwner THEN -+ -- Fixed Issue#108: double-quote roles in case they have special characters -+ buffer3 = 'ALTER MATERIALIZED VIEW ' || buffer || ' OWNER TO ' || view_owner || ';' ; -+ EXECUTE buffer3; -+ END IF; -+ END IF; -+ END IF; -+ SELECT coalesce(obj_description(oid), '') into adef from pg_class where relkind = 'm' and relname = object; -+ IF adef <> '' THEN -+ IF bDDLOnly THEN -+ RAISE INFO '%', 'COMMENT ON MATERIALIZED VIEW ' || quote_ident(dest_schema) || '.' || object || ' IS ''' || adef || ''';'; -+ ELSE -+ -- Issue#$98: also defer if copy rows is on since we defer MVIEWS in that case -+ IF bData THEN -+ buffer3 = 'COMMENT ON MATERIALIZED VIEW ' || quote_ident(dest_schema) || '.' || object || ' IS ''' || adef || ''';'; -+ mvarray = mvarray || buffer3; -+ ELSE -+ EXECUTE 'COMMENT ON MATERIALIZED VIEW ' || quote_ident(dest_schema) || '.' || object || ' IS ''' || adef || ''';'; -+ END IF; -+ -+ END IF; -+ END IF; -+ -+ FOR aname, adef IN -+ SELECT indexname, replace(indexdef, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.') as newdef FROM pg_indexes where schemaname = quote_ident(source_schema) and tablename = object order by indexname -+ LOOP -+ IF bDDLOnly THEN -+ RAISE INFO '%', adef || ';'; -+ ELSE -+ EXECUTE adef || ';'; -+ END IF; -+ END LOOP; -+ -+ END LOOP; -+ RAISE NOTICE ' MAT VIEWS cloned: %', LPAD(cnt::text, 5, ' '); -+ -+ -- Issue 90 Move create functions to before views - - -- MV: Create Triggers -+ -+ -- MJV FIX: #38 -+ -- EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; -+ -+ -- Issue#61 FIX: use set_config for empty string -+ -- SET search_path = ''; -+ SELECT set_config('search_path', '', false) into v_dummy; -+ - action := 'Triggers'; - cnt := 0; - FOR arec IN -- SELECT trigger_schema, trigger_name, event_object_table, action_order, action_condition, action_statement, action_orientation, action_timing, array_to_string(array_agg(event_manipulation::text), ' OR '), -- 'CREATE TRIGGER ' || trigger_name || ' ' || action_timing || ' ' || array_to_string(array_agg(event_manipulation::text), ' OR ') || ' ON ' || quote_ident(dest_schema) || '.' 
|| event_object_table || -- ' FOR EACH ' || action_orientation || ' ' || action_statement || ';' as TRIG_DDL -- FROM information_schema.triggers where trigger_schema = quote_ident(source_schema) GROUP BY 1,2,3,4,5,6,7,8 -+ -- 2021-03-09 MJV FIX: #40 fixed sql to get the def using pg_get_triggerdef() sql -+ SELECT n.nspname, c.relname, t.tgname, p.proname, REPLACE(pg_get_triggerdef(t.oid), quote_ident(source_schema), quote_ident(dest_schema)) || ';' AS trig_ddl -+ FROM pg_trigger t, pg_class c, pg_namespace n, pg_proc p -+ WHERE n.nspname = quote_ident(source_schema) -+ AND n.oid = c.relnamespace -+ AND c.relkind in ('r','p') -+ AND n.oid = p.pronamespace -+ AND c.oid = t.tgrelid -+ AND p.oid = t.tgfoid -+ ORDER BY c.relname, t.tgname - LOOP - BEGIN - cnt := cnt + 1; -- IF ddl_only THEN -+ IF bDDLOnly THEN - RAISE INFO '%', arec.trig_ddl; - ELSE - EXECUTE arec.trig_ddl; -@@ -474,55 +2444,383 @@ - END LOOP; - RAISE NOTICE ' TRIGGERS cloned: %', LPAD(cnt::text, 5, ' '); - -- -- --------------------- -- -- MV: Permissions: Defaults -- -- --------------------- -- action := 'PRIVS: Defaults'; -+ -+ -- MV: Create Rules -+ -- Fixes Issue#59 Implement Rules -+ action := 'Rules'; - cnt := 0; - FOR arec IN -- SELECT pg_catalog.pg_get_userbyid(d.defaclrole) AS "owner", n.nspname AS schema, -- CASE d.defaclobjtype WHEN 'r' THEN 'table' WHEN 'S' THEN 'sequence' WHEN 'f' THEN 'function' WHEN 'T' THEN 'type' WHEN 'n' THEN 'schema' END AS atype, -- d.defaclacl as defaclacl, pg_catalog.array_to_string(d.defaclacl, ',') as defaclstr -- FROM pg_catalog.pg_default_acl d LEFT JOIN pg_catalog.pg_namespace n ON (n.oid = d.defaclnamespace) WHERE n.nspname IS NOT NULL and n.nspname = quote_ident(source_schema) ORDER BY 3, 2, 1 -+ SELECT regexp_replace(definition, E'[\\n\\r]+', ' ', 'g' ) as definition -+ FROM pg_rules -+ WHERE schemaname = quote_ident(source_schema) - LOOP -- BEGIN -- -- RAISE NOTICE 'owner=% type=% defaclacl=% defaclstr=%', arec.owner, arec.atype, arec.defaclacl, arec.defaclstr; -+ cnt := cnt + 1; -+ buffer := REPLACE(arec.definition, quote_ident(source_schema) || '.', quote_ident(dest_schema) || '.'); -+ IF bDDLOnly THEN -+ RAISE INFO '%', buffer; -+ ELSE -+ EXECUTE buffer; -+ END IF; -+ END LOOP; -+ RAISE NOTICE ' RULES cloned: %', LPAD(cnt::text, 5, ' '); -+ -+ -+ -- MV: Create Policies -+ -- Fixes Issue#66 Implement Security policies for RLS -+ action := 'Policies'; -+ cnt := 0; -+ -- #106 Handle 9.6 which doesn't have "permissive" -+ IF sq_server_version_num > 90624 THEN -+ FOR arec IN -+ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on policy, tablename -+ SELECT schemaname as schemaname, tablename as tablename, 'CREATE POLICY ' || policyname || ' ON ' || quote_ident(dest_schema) || '.' 
|| quote_ident(tablename) || ' AS ' || permissive || ' FOR ' || cmd || ' TO ' -+ || array_to_string(roles, ',', '*') || ' USING (' || regexp_replace(qual, E'[\\n\\r]+', ' ', 'g' ) || ')' -+ || CASE WHEN with_check IS NOT NULL THEN ' WITH CHECK (' ELSE '' END || coalesce(with_check, '') || CASE WHEN with_check IS NOT NULL THEN ');' ELSE ';' END as definition -+ FROM pg_policies -+ WHERE schemaname = quote_ident(source_schema) -+ ORDER BY policyname -+ LOOP -+ cnt := cnt + 1; -+ IF bDDLOnly THEN -+ RAISE INFO '%', arec.definition; -+ ELSE -+ EXECUTE arec.definition; -+ END IF; -+ -+ -- Issue#76: Enable row security if indicated -+ SELECT c.relrowsecurity INTO abool FROM pg_class c, pg_namespace n where n.nspname = quote_ident(arec.schemaname) AND n.oid = c.relnamespace AND c.relname = quote_ident(arec.tablename) and c.relkind = 'r'; -+ IF abool THEN -+ buffer = 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || arec.tablename || ' ENABLE ROW LEVEL SECURITY;'; -+ IF bDDLOnly THEN -+ RAISE INFO '%', buffer; -+ ELSE -+ EXECUTE buffer; -+ END IF; -+ END IF; -+ END LOOP; -+ ELSE -+ -- handle 9.6 versions -+ FOR arec IN -+ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on policy, tablename -+ SELECT schemaname as schemaname, tablename as tablename, 'CREATE POLICY ' || policyname || ' ON ' || quote_ident(dest_schema) || '.' || quote_ident(tablename) || ' FOR ' || cmd || ' TO ' -+ || array_to_string(roles, ',', '*') || ' USING (' || regexp_replace(qual, E'[\\n\\r]+', ' ', 'g' ) || ')' -+ || CASE WHEN with_check IS NOT NULL THEN ' WITH CHECK (' ELSE '' END || coalesce(with_check, '') || CASE WHEN with_check IS NOT NULL THEN ');' ELSE ';' END as definition -+ FROM pg_policies -+ WHERE schemaname = quote_ident(source_schema) -+ ORDER BY policyname -+ LOOP -+ cnt := cnt + 1; -+ IF bDDLOnly THEN -+ RAISE INFO '%', arec.definition; -+ ELSE -+ EXECUTE arec.definition; -+ END IF; -+ -+ -- Issue#76: Enable row security if indicated -+ SELECT c.relrowsecurity INTO abool FROM pg_class c, pg_namespace n where n.nspname = quote_ident(arec.schemaname) AND n.oid = c.relnamespace AND c.relname = quote_ident(arec.tablename) and c.relkind = 'r'; -+ IF abool THEN -+ buffer = 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || arec.tablename || ' ENABLE ROW LEVEL SECURITY;'; -+ IF bDDLOnly THEN -+ RAISE INFO '%', buffer; -+ ELSE -+ EXECUTE buffer; -+ END IF; -+ END IF; -+ END LOOP; -+ END IF; -+ RAISE NOTICE ' POLICIES cloned: %', LPAD(cnt::text, 5, ' '); -+ -+ -+ -- MJV Fixed #62 for comments (PASS 1) -+ action := 'Comments1'; -+ cnt := 0; -+ FOR qry IN -+ -- Issue#74 Fix: Change schema from source to target. Also, do not include comments on foreign tables since we do not clone foreign tables at this time. -+ SELECT 'COMMENT ON ' || CASE WHEN c.relkind in ('r','p') AND a.attname IS NULL THEN 'TABLE ' WHEN c.relkind in ('r','p') AND -+ a.attname IS NOT NULL THEN 'COLUMN ' WHEN c.relkind = 'f' THEN 'FOREIGN TABLE ' WHEN c.relkind = 'm' THEN 'MATERIALIZED VIEW ' WHEN c.relkind = 'v' THEN 'VIEW ' -+ WHEN c.relkind = 'i' THEN 'INDEX ' WHEN c.relkind = 'S' THEN 'SEQUENCE ' ELSE 'XX' END || quote_ident(dest_schema) || '.' || CASE WHEN c.relkind in ('r','p') AND -+ -- Issue#78: handle case-sensitive names with quote_ident() -+ a.attname IS NOT NULL THEN quote_ident(c.relname) || '.' 
|| a.attname ELSE quote_ident(c.relname) END || -+ -- Issue#74 Fix -+ -- ' IS ''' || d.description || ''';' as ddl -+ ' IS ' || quote_literal(d.description) || ';' as ddl -+ FROM pg_class c -+ JOIN pg_namespace n ON (n.oid = c.relnamespace) -+ LEFT JOIN pg_description d ON (c.oid = d.objoid) -+ LEFT JOIN pg_attribute a ON (c.oid = a.attrelid -+ AND a.attnum > 0 and a.attnum = d.objsubid) -+ WHERE c.relkind <> 'f' AND d.description IS NOT NULL AND n.nspname = quote_ident(source_schema) -+ ORDER BY ddl -+ LOOP -+ cnt := cnt + 1; -+ -+ -- BAD : "COMMENT ON SEQUENCE sample_clone2.CaseSensitive_ID_seq IS 'just a comment on CaseSensitive sequence';" -+ -- GOOD: "COMMENT ON SEQUENCE "CaseSensitive_ID_seq" IS 'just a comment on CaseSensitive sequence';" -+ -+ -- Issue#98 For MVs we create comments when we create the MVs -+ IF substring(qry,1,28) = 'COMMENT ON MATERIALIZED VIEW' THEN -+ IF bDebug THEN RAISE NOTICE 'DEBUG: deferring comments on MVs'; END IF; -+ cnt = cnt - 1; -+ continue; -+ END IF; -+ -+ IF bDDLOnly THEN -+ RAISE INFO '%', qry; -+ ELSE -+ EXECUTE qry; -+ END IF; -+ END LOOP; -+ RAISE NOTICE ' COMMENTS(1) cloned: %', LPAD(cnt::text, 5, ' '); -+ -+ -- MJV Fixed #62 for comments (PASS 2) -+ action := 'Comments2'; -+ cnt2 := 0; -+ IF is_prokind THEN -+ FOR qry IN -+ -- Issue#74 Fix: Change schema from source to target. -+ SELECT 'COMMENT ON SCHEMA ' || quote_ident(dest_schema) || -+ -- Issue#74 Fix -+ -- ' IS ''' || d.description || ''';' as ddl -+ ' IS ' || quote_literal(d.description) || ';' as ddl -+ from pg_namespace n, pg_description d where d.objoid = n.oid and n.nspname = quote_ident(source_schema) -+ UNION -+ -- Issue#74 Fix: need to replace source schema inline -+ -- SELECT 'COMMENT ON TYPE ' || pg_catalog.format_type(t.oid, NULL) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl -+ SELECT 'COMMENT ON TYPE ' || REPLACE(pg_catalog.format_type(t.oid, NULL), quote_ident(source_schema), quote_ident(dest_schema)) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl -+ FROM pg_catalog.pg_type t -+ JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace -+ WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) -+ AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid) -+ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default -+ AND pg_catalog.obj_description(t.oid, 'pg_type') IS NOT NULL and t.typtype = 'c' -+ UNION -+ -- Issue#78: handle case-sensitive names with quote_ident() -+ SELECT 'COMMENT ON COLLATION ' || quote_ident(dest_schema) || '.' || quote_ident(c.collname) || ' IS ''' || pg_catalog.obj_description(c.oid, 'pg_collation') || ''';' as ddl -+ FROM pg_catalog.pg_collation c, pg_catalog.pg_namespace n -+ WHERE n.oid = c.collnamespace AND c.collencoding IN (-1, pg_catalog.pg_char_to_encoding(pg_catalog.getdatabaseencoding())) -+ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default AND pg_catalog.obj_description(c.oid, 'pg_collation') IS NOT NULL -+ UNION -+ SELECT 'COMMENT ON ' || CASE WHEN p.prokind = 'f' THEN 'FUNCTION ' WHEN p.prokind = 'p' THEN 'PROCEDURE ' WHEN p.prokind = 'a' THEN 'AGGREGATE ' END || -+ quote_ident(dest_schema) || '.' 
|| p.proname || ' (' || oidvectortypes(p.proargtypes) || ')' -+ -- Issue#74 Fix -+ -- ' IS ''' || d.description || ''';' as ddl -+ ' IS ' || quote_literal(d.description) || ';' as ddl -+ FROM pg_catalog.pg_namespace n -+ JOIN pg_catalog.pg_proc p ON p.pronamespace = n.oid -+ JOIN pg_description d ON (d.objoid = p.oid) -+ WHERE n.nspname = quote_ident(source_schema) -+ UNION -+ SELECT 'COMMENT ON POLICY ' || p1.policyname || ' ON ' || quote_ident(dest_schema) || '.' || p1.tablename || -+ -- Issue#74 Fix -+ -- ' IS ''' || d.description || ''';' as ddl -+ ' IS ' || quote_literal(d.description) || ';' as ddl -+ FROM pg_policies p1, pg_policy p2, pg_class c, pg_namespace n, pg_description d -+ WHERE p1.schemaname = n.nspname AND p1.tablename = c.relname AND n.oid = c.relnamespace -+ AND c.relkind in ('r','p') AND p1.policyname = p2.polname AND d.objoid = p2.oid AND p1.schemaname = quote_ident(source_schema) -+ UNION -+ SELECT 'COMMENT ON DOMAIN ' || quote_ident(dest_schema) || '.' || t.typname || -+ -- Issue#74 Fix -+ -- ' IS ''' || d.description || ''';' as ddl -+ ' IS ' || quote_literal(d.description) || ';' as ddl -+ FROM pg_catalog.pg_type t -+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace -+ JOIN pg_catalog.pg_description d ON d.classoid = t.tableoid AND d.objoid = t.oid AND d.objsubid = 0 -+ WHERE t.typtype = 'd' AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default -+ ORDER BY 1 -+ LOOP -+ cnt2 := cnt2 + 1; -+ IF bDDLOnly THEN -+ RAISE INFO '%', qry; -+ ELSE -+ EXECUTE qry; -+ END IF; -+ END LOOP; -+ ELSE -- must be v 10 or less -+ FOR qry IN -+ -- Issue#74 Fix: Change schema from source to target. -+ SELECT 'COMMENT ON SCHEMA ' || quote_ident(dest_schema) || -+ -- Issue#74 Fix -+ -- ' IS ''' || d.description || ''';' as ddl -+ ' IS ' || quote_literal(d.description) || ';' as ddl -+ from pg_namespace n, pg_description d where d.objoid = n.oid and n.nspname = quote_ident(source_schema) -+ UNION -+ -- Issue#74 Fix: need to replace source schema inline -+ -- SELECT 'COMMENT ON TYPE ' || pg_catalog.format_type(t.oid, NULL) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl -+ SELECT 'COMMENT ON TYPE ' || REPLACE(pg_catalog.format_type(t.oid, NULL), quote_ident(source_schema), quote_ident(dest_schema)) || ' IS ''' || pg_catalog.obj_description(t.oid, 'pg_type') || ''';' as ddl -+ FROM pg_catalog.pg_type t -+ JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace -+ WHERE (t.typrelid = 0 OR (SELECT c.relkind = 'c' -+ FROM pg_catalog.pg_class c -+ WHERE c.oid = t.typrelid)) -+ AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type el -+ WHERE el.oid = t.typelem AND el.typarray = t.oid) -+ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default -+ AND pg_catalog.obj_description(t.oid, 'pg_type') IS NOT NULL and t.typtype = 'c' -+ UNION -+ -- FIX Issue#87 by adding double quotes around collation name -+ SELECT 'COMMENT ON COLLATION ' || quote_ident(dest_schema) || '."' || c.collname || '" IS ''' || pg_catalog.obj_description(c.oid, 'pg_collation') || ''';' as ddl -+ FROM pg_catalog.pg_collation c, pg_catalog.pg_namespace n -+ WHERE n.oid = c.collnamespace AND c.collencoding IN (-1, pg_catalog.pg_char_to_encoding(pg_catalog.getdatabaseencoding())) -+ AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default AND pg_catalog.obj_description(c.oid, 'pg_collation') IS NOT NULL -+ UNION -+ SELECT 'COMMENT ON ' || CASE WHEN proisagg THEN 'AGGREGATE ' ELSE 'FUNCTION ' END || -+ quote_ident(dest_schema) || '.' 
|| p.proname || ' (' || oidvectortypes(p.proargtypes) || ')' -+ -- Issue#74 Fix -+ -- ' IS ''' || d.description || ''';' as ddl -+ ' IS ' || quote_literal(d.description) || ';' as ddl -+ FROM pg_catalog.pg_namespace n -+ JOIN pg_catalog.pg_proc p ON p.pronamespace = n.oid -+ JOIN pg_description d ON (d.objoid = p.oid) -+ WHERE n.nspname = quote_ident(source_schema) -+ UNION -+ SELECT 'COMMENT ON POLICY ' || p1.policyname || ' ON ' || quote_ident(dest_schema) || '.' || p1.tablename || -+ -- Issue#74 Fix -+ -- ' IS ''' || d.description || ''';' as ddl -+ ' IS ' || quote_literal(d.description) || ';' as ddl -+ FROM pg_policies p1, pg_policy p2, pg_class c, pg_namespace n, pg_description d -+ WHERE p1.schemaname = n.nspname AND p1.tablename = c.relname AND n.oid = c.relnamespace -+ AND c.relkind in ('r','p') AND p1.policyname = p2.polname AND d.objoid = p2.oid AND p1.schemaname = quote_ident(source_schema) -+ UNION -+ SELECT 'COMMENT ON DOMAIN ' || quote_ident(dest_schema) || '.' || t.typname || -+ -- Issue#74 Fix -+ -- ' IS ''' || d.description || ''';' as ddl -+ ' IS ' || quote_literal(d.description) || ';' as ddl -+ FROM pg_catalog.pg_type t -+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace -+ JOIN pg_catalog.pg_description d ON d.classoid = t.tableoid AND d.objoid = t.oid AND d.objsubid = 0 -+ WHERE t.typtype = 'd' AND n.nspname = quote_ident(source_schema) COLLATE pg_catalog.default -+ ORDER BY 1 -+ LOOP -+ cnt2 := cnt2 + 1; -+ IF bDDLOnly THEN -+ RAISE INFO '%', qry; -+ ELSE -+ EXECUTE qry; -+ END IF; -+ END LOOP; -+ END IF; -+ RAISE NOTICE ' COMMENTS(2) cloned: %', LPAD(cnt2::text, 5, ' '); - -- FOREACH aclstr IN ARRAY arec.defaclacl -- LOOP -- cnt := cnt + 1; -- -- RAISE NOTICE 'aclstr=%', aclstr; -- -- break up into grantor, grantee, and privs, mydb_update=rwU/mydb_owner -- SELECT split_part(aclstr, '=',1) INTO grantee; -- SELECT split_part(aclstr, '=',2) INTO grantor; -- SELECT split_part(grantor, '/',1) INTO privs; -- SELECT split_part(grantor, '/',2) INTO grantor; -- -- RAISE NOTICE 'grantor=% grantee=% privs=%', grantor, grantee, privs; -- -- IF arec.atype = 'function' THEN -- -- Just having execute is enough to grant all apparently. -- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON FUNCTIONS TO "' || grantee || '";'; -- IF ddl_only THEN -- RAISE INFO '%', buffer; -- ELSE -- EXECUTE buffer; -- END IF; - -- ELSIF arec.atype = 'sequence' THEN -- IF POSITION('r' IN privs) > 0 AND POSITION('w' IN privs) > 0 AND POSITION('U' IN privs) > 0 THEN -- -- arU is enough for all privs -- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON SEQUENCES TO "' || grantee || '";'; -- IF ddl_only THEN -+ -- Issue#95 bypass if No ACL specified. 
-+ IF NOT bNoACL THEN -+ -- --------------------- -+ -- MV: Permissions: Defaults -+ -- --------------------- -+ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; -+ action := 'PRIVS: Defaults'; -+ cnt := 0; -+ FOR arec IN -+ SELECT pg_catalog.pg_get_userbyid(d.defaclrole) AS "owner", n.nspname AS schema, -+ CASE d.defaclobjtype WHEN 'r' THEN 'table' WHEN 'S' THEN 'sequence' WHEN 'f' THEN 'function' WHEN 'T' THEN 'type' WHEN 'n' THEN 'schema' END AS atype, -+ d.defaclacl as defaclacl, pg_catalog.array_to_string(d.defaclacl, ',') as defaclstr -+ FROM pg_catalog.pg_default_acl d LEFT JOIN pg_catalog.pg_namespace n ON (n.oid = d.defaclnamespace) -+ WHERE n.nspname IS NOT NULL AND n.nspname = quote_ident(source_schema) -+ ORDER BY 3, 2, 1 -+ LOOP -+ BEGIN -+ -- RAISE NOTICE ' owner=% type=% defaclacl=% defaclstr=%', arec.owner, arec.atype, arec.defaclacl, arec.defaclstr; -+ -+ FOREACH aclstr IN ARRAY arec.defaclacl -+ LOOP -+ cnt := cnt + 1; -+ -- RAISE NOTICE ' aclstr=%', aclstr; -+ -- break up into grantor, grantee, and privs, mydb_update=rwU/mydb_owner -+ SELECT split_part(aclstr, '=',1) INTO grantee; -+ SELECT split_part(aclstr, '=',2) INTO grantor; -+ SELECT split_part(grantor, '/',1) INTO privs; -+ SELECT split_part(grantor, '/',2) INTO grantor; -+ -- RAISE NOTICE ' grantor=% grantee=% privs=%', grantor, grantee, privs; -+ -+ IF arec.atype = 'function' THEN -+ -- Just having execute is enough to grant all apparently. -+ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON FUNCTIONS TO "' || grantee || '";'; -+ -+ -- Issue#92 Fix -+ -- set role = cm_stage_ro_grp; -+ -- ALTER DEFAULT PRIVILEGES FOR ROLE cm_stage_ro_grp IN SCHEMA cm_stage GRANT REFERENCES, TRIGGER ON TABLES TO cm_stage_ro_grp; -+ IF grantor = grantee THEN -+ -- append set role to statement -+ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; -+ END IF; -+ -+ IF bDDLOnly THEN - RAISE INFO '%', buffer; - ELSE - EXECUTE buffer; - END IF; -+ -- Issue#92 Fix: -+ EXECUTE 'SET ROLE = ' || calleruser; -+ -+ ELSIF arec.atype = 'sequence' THEN -+ IF POSITION('r' IN privs) > 0 AND POSITION('w' IN privs) > 0 AND POSITION('U' IN privs) > 0 THEN -+ -- arU is enough for all privs -+ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON SEQUENCES TO "' || grantee || '";'; -+ -+ -- Issue#92 Fix -+ IF grantor = grantee THEN -+ -- append set role to statement -+ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; -+ END IF; - -- ELSE -- -- have to specify each priv individually -+ IF bDDLOnly THEN -+ RAISE INFO '%', buffer; -+ ELSE -+ EXECUTE buffer; -+ END IF; -+ -- Issue#92 Fix: -+ EXECUTE 'SET ROLE = ' || calleruser; -+ -+ ELSE -+ -- have to specify each priv individually -+ buffer2 := ''; -+ IF POSITION('r' IN privs) > 0 THEN -+ buffer2 := 'SELECT'; -+ END IF; -+ IF POSITION('w' IN privs) > 0 THEN -+ IF buffer2 = '' THEN -+ buffer2 := 'UPDATE'; -+ ELSE -+ buffer2 := buffer2 || ', UPDATE'; -+ END IF; -+ END IF; -+ IF POSITION('U' IN privs) > 0 THEN -+ IF buffer2 = '' THEN -+ buffer2 := 'USAGE'; -+ ELSE -+ buffer2 := buffer2 || ', USAGE'; -+ END IF; -+ END IF; -+ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON SEQUENCES TO "' || grantee || '";'; -+ -+ -- Issue#92 Fix -+ IF grantor = grantee THEN -+ -- append set role to statement -+ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; -+ END IF; -+ -+ IF 
bDDLOnly THEN -+ RAISE INFO '%', buffer; -+ ELSE -+ EXECUTE buffer; -+ END IF; -+ select current_user into buffer; -+ -- Issue#92 Fix: -+ EXECUTE 'SET ROLE = ' || calleruser; -+ END IF; -+ -+ ELSIF arec.atype = 'table' THEN -+ -- do each priv individually, jeeeesh! - buffer2 := ''; -+ IF POSITION('a' IN privs) > 0 THEN -+ buffer2 := 'INSERT'; -+ END IF; - IF POSITION('r' IN privs) > 0 THEN -- buffer2 := 'SELECT'; -+ IF buffer2 = '' THEN -+ buffer2 := 'SELECT'; -+ ELSE -+ buffer2 := buffer2 || ', SELECT'; -+ END IF; - END IF; - IF POSITION('w' IN privs) > 0 THEN - IF buffer2 = '' THEN -@@ -531,181 +2829,431 @@ - buffer2 := buffer2 || ', UPDATE'; - END IF; - END IF; -- IF POSITION('U' IN privs) > 0 THEN -- IF buffer2 = '' THEN -- buffer2 := 'USAGE'; -+ IF POSITION('d' IN privs) > 0 THEN -+ IF buffer2 = '' THEN -+ buffer2 := 'DELETE'; - ELSE -- buffer2 := buffer2 || ', USAGE'; -+ buffer2 := buffer2 || ', DELETE'; - END IF; - END IF; -- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON SEQUENCES TO "' || grantee || '";'; -- IF ddl_only THEN -- RAISE INFO '%', buffer; -- ELSE -- EXECUTE buffer; -- END IF; -- -- END IF; -- ELSIF arec.atype = 'table' THEN -- -- do each priv individually, jeeeesh! -- buffer2 := ''; -- IF POSITION('a' IN privs) > 0 THEN -- buffer2 := 'INSERT'; -- END IF; -- IF POSITION('r' IN privs) > 0 THEN -- IF buffer2 = '' THEN -- buffer2 := 'SELECT'; -- ELSE -- buffer2 := buffer2 || ', SELECT'; -+ IF POSITION('t' IN privs) > 0 THEN -+ IF buffer2 = '' THEN -+ buffer2 := 'TRIGGER'; -+ ELSE -+ buffer2 := buffer2 || ', TRIGGER'; -+ END IF; - END IF; -- END IF; -- IF POSITION('w' IN privs) > 0 THEN -- IF buffer2 = '' THEN -- buffer2 := 'UPDATE'; -- ELSE -- buffer2 := buffer2 || ', UPDATE'; -+ IF POSITION('T' IN privs) > 0 THEN -+ IF buffer2 = '' THEN -+ buffer2 := 'TRUNCATE'; -+ ELSE -+ buffer2 := buffer2 || ', TRUNCATE'; -+ END IF; - END IF; -- END IF; -- IF POSITION('d' IN privs) > 0 THEN -- IF buffer2 = '' THEN -- buffer2 := 'DELETE'; -- ELSE -- buffer2 := buffer2 || ', DELETE'; -+ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON TABLES TO "' || grantee || '";'; -+ -+ -- Issue#92 Fix -+ IF grantor = grantee THEN -+ -- append set role to statement -+ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; - END IF; -- END IF; -- IF POSITION('t' IN privs) > 0 THEN -- IF buffer2 = '' THEN -- buffer2 := 'TRIGGER'; -+ -+ IF bDDLOnly THEN -+ RAISE INFO '%', buffer; - ELSE -- buffer2 := buffer2 || ', TRIGGER'; -+ EXECUTE buffer; - END IF; -- END IF; -- IF POSITION('T' IN privs) > 0 THEN -- IF buffer2 = '' THEN -- buffer2 := 'TRUNCATE'; -+ select current_user into buffer; -+ -- Issue#92 Fix: -+ EXECUTE 'SET ROLE = ' || calleruser; -+ -+ ELSIF arec.atype = 'type' THEN -+ IF POSITION('r' IN privs) > 0 AND POSITION('w' IN privs) > 0 AND POSITION('U' IN privs) > 0 THEN -+ -- arU is enough for all privs -+ buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ALL ON TYPES TO "' || grantee || '";'; -+ -+ -- Issue#92 Fix -+ IF grantor = grantee THEN -+ -- append set role to statement -+ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; -+ END IF; -+ -+ IF bDDLOnly THEN -+ RAISE INFO '%', buffer; -+ ELSE -+ EXECUTE buffer; -+ END IF; -+ -- Issue#92 Fix: -+ EXECUTE 'SET ROLE = ' || calleruser; -+ -+ ELSIF POSITION('U' IN privs) THEN -+ buffer := 'ALTER DEFAULT 
PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT USAGE ON TYPES TO "' || grantee || '";'; -+ -+ -- Issue#92 Fix -+ IF grantor = grantee THEN -+ -- append set role to statement -+ buffer = 'SET ROLE = ' || grantor || '; ' || buffer; -+ END IF; -+ -+ IF bDDLOnly THEN -+ RAISE INFO '%', buffer; -+ ELSE -+ EXECUTE buffer; -+ END IF; -+ -- Issue#92 Fix: -+ EXECUTE 'SET ROLE = ' || calleruser; -+ - ELSE -- buffer2 := buffer2 || ', TRUNCATE'; -- END IF; -+ RAISE WARNING 'Unhandled TYPE Privs:: type=% privs=% owner=% defaclacl=% defaclstr=% grantor=% grantee=% ', arec.atype, privs, arec.owner, arec.defaclacl, arec.defaclstr, grantor, grantee; - END IF; -- buffer := 'ALTER DEFAULT PRIVILEGES FOR ROLE ' || grantor || ' IN SCHEMA ' || quote_ident(dest_schema) || ' GRANT ' || buffer2 || ' ON TABLES TO "' || grantee || '";'; -- IF ddl_only THEN -- RAISE INFO '%', buffer; -- ELSE -- EXECUTE buffer; -- END IF; -- - ELSE -- RAISE WARNING 'Doing nothing for type=% privs=%', arec.atype, privs; -+ RAISE WARNING 'Unhandled Privs:: type=% privs=% owner=% defaclacl=% defaclstr=% grantor=% grantee=% ', arec.atype, privs, arec.owner, arec.defaclacl, arec.defaclstr, grantor, grantee; - END IF; -- END LOOP; -- END; -- END LOOP; -+ END LOOP; -+ END; -+ END LOOP; - -- RAISE NOTICE ' DFLT PRIVS cloned: %', LPAD(cnt::text, 5, ' '); -+ RAISE NOTICE ' DFLT PRIVS cloned: %', LPAD(cnt::text, 5, ' '); -+ END IF; -- NO ACL BRANCH - -- -- MV: PRIVS: schema -- -- crunchy data extension, check_access -- -- SELECT role_path, base_role, as_role, objtype, schemaname, objname, array_to_string(array_agg(privname),',') as privs FROM all_access() -- -- WHERE base_role != CURRENT_USER and objtype = 'schema' and schemaname = 'public' group by 1,2,3,4,5,6; -+ -- Issue#95 bypass if No ACL specified -+ IF NOT bNoACL THEN -+ -- MV: PRIVS: schema -+ -- crunchy data extension, check_access -+ -- SELECT role_path, base_role, as_role, objtype, schemaname, objname, array_to_string(array_agg(privname),',') as privs FROM all_access() -+ -- WHERE base_role != CURRENT_USER and objtype = 'schema' and schemaname = 'public' group by 1,2,3,4,5,6; - -- action := 'PRIVS: Schema'; -- cnt := 0; -- FOR arec IN -- SELECT 'GRANT ' || p.perm::perm_type || ' ON SCHEMA ' || quote_ident(dest_schema) || ' TO "' || r.rolname || '";' as schema_ddl -- FROM pg_catalog.pg_namespace AS n CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES ('USAGE'), ('CREATE')) AS p(perm) -- WHERE n.nspname = quote_ident(source_schema) AND NOT r.rolsuper AND has_schema_privilege(r.oid, n.oid, p.perm) order by r.rolname, p.perm::perm_type -- LOOP -- BEGIN -- cnt := cnt + 1; -- IF ddl_only THEN -- RAISE INFO '%', arec.schema_ddl; -- ELSE -- EXECUTE arec.schema_ddl; -- END IF; -+ action := 'PRIVS: Schema'; -+ cnt := 0; -+ FOR arec IN -+ SELECT 'GRANT ' || p.perm::perm_type || ' ON SCHEMA ' || quote_ident(dest_schema) || ' TO "' || r.rolname || '";' as schema_ddl -+ FROM pg_catalog.pg_namespace AS n -+ CROSS JOIN pg_catalog.pg_roles AS r -+ CROSS JOIN (VALUES ('USAGE'), ('CREATE')) AS p(perm) -+ WHERE n.nspname = quote_ident(source_schema) AND NOT r.rolsuper AND has_schema_privilege(r.oid, n.oid, p.perm) -+ ORDER BY r.rolname, p.perm::perm_type -+ LOOP -+ BEGIN -+ cnt := cnt + 1; -+ IF bDDLOnly THEN -+ RAISE INFO '%', arec.schema_ddl; -+ ELSE -+ EXECUTE arec.schema_ddl; -+ END IF; - -- END; -- END LOOP; -- RAISE NOTICE 'SCHEMA PRIVS cloned: %', LPAD(cnt::text, 5, ' '); -+ END; -+ END LOOP; -+ RAISE NOTICE 'SCHEMA PRIVS cloned: %', LPAD(cnt::text, 5, 
' '); -+ END IF; -- NO ACL BRANCH - -- -- MV: PRIVS: sequences -- action := 'PRIVS: Sequences'; -- cnt := 0; -- FOR arec IN -- SELECT 'GRANT ' || p.perm::perm_type || ' ON ' || quote_ident(dest_schema) || '.' || t.relname::text || ' TO "' || r.rolname || '";' as seq_ddl -- FROM pg_catalog.pg_class AS t CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES ('SELECT'), ('USAGE'), ('UPDATE')) AS p(perm) -- WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind = 'S' AND NOT r.rolsuper AND has_sequence_privilege(r.oid, t.oid, p.perm) -- LOOP -- BEGIN -- cnt := cnt + 1; -- IF ddl_only OR seq_cnt = 0 THEN -- RAISE INFO '%', arec.seq_ddl; -- ELSE -- EXECUTE arec.seq_ddl; -- END IF; -+ -- Issue#95 bypass if No ACL specified -+ IF NOT bNoACL THEN -+ -- MV: PRIVS: sequences -+ action := 'PRIVS: Sequences'; -+ cnt := 0; -+ FOR arec IN -+ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on t.relname -+ SELECT 'GRANT ' || p.perm::perm_type || ' ON ' || quote_ident(dest_schema) || '.' || quote_ident(t.relname::text) || ' TO "' || r.rolname || '";' as seq_ddl -+ FROM pg_catalog.pg_class AS t -+ CROSS JOIN pg_catalog.pg_roles AS r -+ CROSS JOIN (VALUES ('SELECT'), ('USAGE'), ('UPDATE')) AS p(perm) -+ WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind = 'S' AND NOT r.rolsuper AND has_sequence_privilege(r.oid, t.oid, p.perm) -+ LOOP -+ BEGIN -+ cnt := cnt + 1; -+ -- IF bDebug THEN RAISE NOTICE 'DEBUG: ddl=%', arec.seq_ddl; END IF; -+ IF bDDLOnly THEN -+ RAISE INFO '%', arec.seq_ddl; -+ ELSE -+ EXECUTE arec.seq_ddl; -+ END IF; -+ END; -+ END LOOP; -+ RAISE NOTICE ' SEQ. PRIVS cloned: %', LPAD(cnt::text, 5, ' '); -+ END IF; -- NO ACL BRANCH - -- END; -- END LOOP; -- RAISE NOTICE ' SEQ. PRIVS cloned: %', LPAD(cnt::text, 5, ' '); -+ -- Issue#95 bypass if No ACL specified -+ IF NOT bNoACL THEN -+ -- MV: PRIVS: functions -+ action := 'PRIVS: Functions/Procedures'; -+ cnt := 0; - -- -- MV: PRIVS: functions -- action := 'PRIVS: Functions'; -- cnt := 0; -- FOR arec IN -- SELECT 'GRANT EXECUTE ON FUNCTION ' || quote_ident(dest_schema) || '.' || regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', '') || ' TO "' || r.rolname || '";' as func_ddl -- FROM pg_catalog.pg_proc f CROSS JOIN pg_catalog.pg_roles AS r WHERE f.pronamespace::regnamespace::name = quote_ident(source_schema) AND NOT r.rolsuper AND has_function_privilege(r.oid, f.oid, 'EXECUTE') -- order by regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', '') -- LOOP -- BEGIN -- cnt := cnt + 1; -- IF ddl_only THEN -- RAISE INFO '%', arec.func_ddl; -- ELSE -- EXECUTE arec.func_ddl; -+ -- Issue#61 FIX: use set_config for empty string -+ -- SET search_path = ''; -+ SELECT set_config('search_path', '', false) into v_dummy; -+ -+ -- RAISE NOTICE ' source_schema=% dest_schema=%',source_schema, dest_schema; -+ FOR arec IN -+ -- 2021-03-05 MJV FIX: issue#35: caused exception in some functions with parameters and gave privileges to other users that should not have gotten them. -+ -- SELECT 'GRANT EXECUTE ON FUNCTION ' || quote_ident(dest_schema) || '.' 
|| replace(regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', ''), source_schema, dest_schema) || ' TO "' || r.rolname || '";' as func_ddl -+ -- FROM pg_catalog.pg_proc f CROSS JOIN pg_catalog.pg_roles AS r WHERE f.pronamespace::regnamespace::name = quote_ident(source_schema) AND NOT r.rolsuper AND has_function_privilege(r.oid, f.oid, 'EXECUTE') -+ -- order by regexp_replace(f.oid::regprocedure::text, '^((("[^"]*")|([^"][^.]*))\.)?', '') -+ -+ -- 2021-03-05 MJV FIX: issue#37: defaults cause problems, use system function that returns args WITHOUT DEFAULTS -+ -- COALESCE(r.routine_type, 'FUNCTION'): for aggregate functions, information_schema.routines contains NULL as routine_type value. -+ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on rp.routine_name -+ SELECT 'GRANT ' || rp.privilege_type || ' ON ' || COALESCE(r.routine_type, 'FUNCTION') || ' ' || quote_ident(dest_schema) || '.' || quote_ident(rp.routine_name) || ' (' || pg_get_function_identity_arguments(p.oid) || ') TO ' || string_agg(distinct rp.grantee, ',') || ';' as func_dcl -+ FROM information_schema.routine_privileges rp, information_schema.routines r, pg_proc p, pg_namespace n -+ WHERE rp.routine_schema = quote_ident(source_schema) -+ AND rp.is_grantable = 'YES' -+ AND rp.routine_schema = r.routine_schema -+ AND rp.routine_name = r.routine_name -+ AND rp.routine_schema = n.nspname -+ AND n.oid = p.pronamespace -+ AND p.proname = r.routine_name -+ GROUP BY rp.privilege_type, r.routine_type, rp.routine_name, pg_get_function_identity_arguments(p.oid) -+ LOOP -+ BEGIN -+ cnt := cnt + 1; -+ IF bDDLOnly THEN -+ RAISE INFO '%', arec.func_dcl; -+ ELSE -+ EXECUTE arec.func_dcl; -+ END IF; -+ END; -+ END LOOP; -+ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; -+ RAISE NOTICE ' FUNC PRIVS cloned: %', LPAD(cnt::text, 5, ' '); -+ END IF; -- NO ACL BRANCH -+ -+ -- Issue#95 bypass if No ACL specified -+ IF NOT bNoACL THEN -+ -- MV: PRIVS: tables -+ action := 'PRIVS: Tables'; -+ -- regular, partitioned, and foreign tables plus view and materialized view permissions. Ignored for now: implement foreign table defs. -+ cnt := 0; -+ FOR arec IN -+ -- SELECT 'GRANT ' || p.perm::perm_type || CASE WHEN t.relkind in ('r', 'p', 'f') THEN ' ON TABLE ' WHEN t.relkind in ('v', 'm') THEN ' ON ' END || quote_ident(dest_schema) || '.' || t.relname::text || ' TO "' || r.rolname || '";' as tbl_ddl, -+ -- has_table_privilege(r.oid, t.oid, p.perm) AS granted, t.relkind -+ -- FROM pg_catalog.pg_class AS t CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES (TEXT 'SELECT'), ('INSERT'), ('UPDATE'), ('DELETE'), ('TRUNCATE'), ('REFERENCES'), ('TRIGGER')) AS p(perm) -+ -- WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind in ('r', 'p', 'f', 'v', 'm') AND NOT r.rolsuper AND has_table_privilege(r.oid, t.oid, p.perm) order by t.relname::text, t.relkind -+ -- 2021-03-05 MJV FIX: Fixed Issue#36 for tables -+ SELECT c.relkind, 'GRANT ' || tb.privilege_type || CASE WHEN c.relkind in ('r', 'p') THEN ' ON TABLE ' WHEN c.relkind in ('v', 'm') THEN ' ON ' END || -+ -- Issue#78 FIX: handle case-sensitive names with quote_ident() on t.relname -+ -- Issue#108 FIX: enclose double-quote grantees with special characters -+ -- quote_ident(dest_schema) || '.' || quote_ident(tb.table_name) || ' TO ' || string_agg(tb.grantee, ',') || ';' as tbl_dcl -+ quote_ident(dest_schema) || '.' 
|| quote_ident(tb.table_name) || ' TO ' || string_agg('"' || tb.grantee || '"', ',') || ';' as tbl_dcl -+ FROM information_schema.table_privileges tb, pg_class c, pg_namespace n -+ WHERE tb.table_schema = quote_ident(source_schema) AND tb.table_name = c.relname AND c.relkind in ('r', 'p', 'v', 'm') -+ AND c.relnamespace = n.oid AND n.nspname = quote_ident(source_schema) -+ GROUP BY c.relkind, tb.privilege_type, tb.table_schema, tb.table_name -+ LOOP -+ BEGIN -+ cnt := cnt + 1; -+ -- IF bDebug THEN RAISE NOTICE 'DEBUG: ddl=%', arec.tbl_dcl; END IF; -+ -- Issue#46. Fixed reference to invalid record name (tbl_ddl --> tbl_dcl). -+ IF arec.relkind = 'f' THEN -+ RAISE WARNING 'Foreign tables are not currently implemented, so skipping privs for them. ddl=%', arec.tbl_dcl; -+ ELSE -+ IF bDDLOnly THEN -+ RAISE INFO '%', arec.tbl_dcl; -+ ELSE -+ EXECUTE arec.tbl_dcl; -+ END IF; - END IF; -+ END; -+ END LOOP; -+ RAISE NOTICE ' TABLE PRIVS cloned: %', LPAD(cnt::text, 5, ' '); -+ END IF; -- NO ACL BRANCH -+ -+ -- LOOP for regular tables and populate them if specified -+ -- Issue#75 moved from big table loop above to here. -+ IF bData THEN -+ r = clock_timestamp(); -+ -- IF bVerbose THEN RAISE NOTICE 'START: copy rows %',clock_timestamp() - t; END IF; -+ IF bVerbose THEN RAISE NOTICE 'Copying rows...'; END IF; -+ -+ EXECUTE 'SET search_path = ' || quote_ident(dest_schema) ; -+ action := 'Copy Rows'; -+ FOREACH tblelement IN ARRAY tblarray -+ LOOP -+ s = clock_timestamp(); -+ IF bDebug THEN RAISE NOTICE 'DEBUG1: no UDTs %', tblelement; END IF; -+ EXECUTE tblelement; -+ GET DIAGNOSTICS cnt = ROW_COUNT; -+ buffer = substring(tblelement, 13); -+ SELECT POSITION(' OVERRIDING SYSTEM VALUE SELECT ' IN buffer) INTO cnt2; -+ IF cnt2 = 0 THEN -+ SELECT POSITION(' SELECT ' IN buffer) INTO cnt2; -+ buffer = substring(buffer,1, cnt2); -+ ELSE -+ buffer = substring(buffer,1, cnt2); -+ END IF; -+ SELECT RPAD(buffer, 35, ' ') INTO buffer; -+ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); -+ IF bVerbose THEN RAISE NOTICE 'Populated cloned table, % Rows Copied: % seconds: %', buffer, LPAD(cnt::text, 10, ' '), LPAD(cnt2::text, 5, ' '); END IF; -+ tblscopied := tblscopied + 1; -+ END LOOP; - -- END; -- END LOOP; -- RAISE NOTICE ' FUNC PRIVS cloned: %', LPAD(cnt::text, 5, ' '); -+ -- Issue#79 implementation -+ -- Do same for tables with user-defined elements using copy to file method -+ FOREACH tblelement IN ARRAY tblarray2 -+ LOOP -+ s = clock_timestamp(); -+ IF bDebug THEN RAISE NOTICE 'DEBUG2: UDTs %', tblelement; END IF; -+ EXECUTE tblelement; -+ GET DIAGNOSTICS cnt = ROW_COUNT; -+ -+ -- STATEMENT LOOKS LIKE THIS: -+ -- INSERT INTO sample11.warehouses SELECT * FROM sample.warehouses; -+ -- INSERT INTO sample11.person OVERRIDING SYSTEM VALUE SELECT * FROM sample.person; -+ -- COPY sample.address TO '/tmp/cloneschema.tmp' WITH DELIMITER AS ',';\ -+ buffer = TRIM(tblelement::text); -+ -- RAISE NOTICE 'element=%', buffer; -+ cnt1 = POSITION('INSERT INTO' IN buffer); -+ cnt2 = POSITION('COPY ' IN buffer); -+ IF cnt1 > 0 THEN -+ buffer = substring(buffer, 12); -+ ELSIF cnt2 > 0 THEN -+ buffer = substring(buffer, 5); -+ ELSE -+ RAISE EXCEPTION 'Programming Error for parsing tblarray2.'; -+ END IF; -+ -+ -- RAISE NOTICE 'buffer1=%', buffer; -+ cnt1 = POSITION(' OVERRIDING ' IN buffer); -+ cnt2 = POSITION('SELECT * FROM ' IN buffer); -+ cnt3 = POSITION(' FROM ' IN buffer); -+ cnt4 = POSITION(' TO ' IN buffer); -+ IF cnt1 > 0 THEN -+ buffer = substring(buffer, 1, cnt1-2); -+ ELSIF cnt2 > 0 THEN -+ 
buffer = substring(buffer, 1, cnt2-2); -+ ELSIF cnt3 > 0 THEN -+ buffer = substring(buffer, 1, cnt3-1); -+ ELSIF cnt4 > 0 THEN -+ -- skip the COPY TO statements -+ continue; -+ ELSE -+ RAISE EXCEPTION 'Programming Error for parsing tblarray2.'; -+ END IF; -+ -- RAISE NOTICE 'buffer2=%', buffer; -+ -+ SELECT RPAD(buffer, 35, ' ') INTO buffer; -+ -- RAISE NOTICE 'buffer3=%', buffer; -+ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); -+ IF bVerbose THEN RAISE NOTICE 'Populated cloned table, % Rows Copied: % seconds: %', buffer, LPAD(cnt::text, 10, ' '), LPAD(cnt2::text, 5, ' '); END IF; -+ tblscopied := tblscopied + 1; -+ END LOOP; - -- -- MV: PRIVS: tables -- action := 'PRIVS: Tables'; -- -- regular, partitioned, and foreign tables plus view and materialized view permissions. TODO: implement foreign table defs. -+ -- Issue#101 -+ -- Do same for tables with user-defined elements using direct method with text cast -+ FOREACH tblelement IN ARRAY tblarray3 -+ LOOP -+ s = clock_timestamp(); -+ IF bDebug THEN RAISE NOTICE 'DEBUG3: UDTs %', tblelement; END IF; -+ EXECUTE tblelement; -+ GET DIAGNOSTICS cnt = ROW_COUNT; -+ cnt2 = POSITION(' (' IN tblelement::text); -+ IF cnt2 > 0 THEN -+ buffer = substring(tblelement, 1, cnt2); -+ buffer = substring(buffer, 6); -+ SELECT RPAD(buffer, 35, ' ') INTO buffer; -+ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); -+ IF bVerbose THEN RAISE NOTICE 'Populated cloned table, % Rows Copied: % seconds: %', buffer, LPAD(cnt::text, 10, ' '), LPAD(cnt2::text, 5, ' '); END IF; -+ tblscopied := tblscopied + 1; -+ END IF; -+ END LOOP; -+ -+ -- Issue#98 MVs deferred until now -+ FOREACH tblelement IN ARRAY mvarray -+ LOOP -+ s = clock_timestamp(); -+ EXECUTE tblelement; -+ -- get diagnostics for MV creates or refreshes does not work, always returns 1 -+ GET DIAGNOSTICS cnt = ROW_COUNT; -+ buffer = substring(tblelement, 25); -+ cnt2 = POSITION(' AS ' IN buffer); -+ IF cnt2 > 0 THEN -+ buffer = substring(buffer, 1, cnt2); -+ SELECT RPAD(buffer, 36, ' ') INTO buffer; -+ cnt2 := cast(extract(epoch from (clock_timestamp() - s)) as numeric(18,3)); -+ IF bVerbose THEN RAISE NOTICE 'Populated Mat. View, % Rows Inserted: ? seconds: %', buffer, LPAD(cnt2::text, 5, ' '); END IF; -+ mvscopied := mvscopied + 1; -+ END IF; -+ END LOOP; -+ -+ cnt := cast(extract(epoch from (clock_timestamp() - r)) as numeric(18,3)); -+ IF bVerbose THEN RAISE NOTICE 'Copy rows duration: % seconds',cnt; END IF; -+ END IF; -+ RAISE NOTICE ' TABLES copied: %', LPAD(tblscopied::text, 5, ' '); -+ RAISE NOTICE ' MATVIEWS refreshed: %', LPAD(mvscopied::text, 5, ' '); -+ -+ -+ -- Issue#78 forces us to defer FKeys until the end since we previously did row copies before FKeys -+ -- add FK constraint -+ action := 'FK Constraints'; - cnt := 0; -- FOR arec IN -- SELECT 'GRANT ' || p.perm::perm_type || CASE WHEN t.relkind in ('r', 'p', 'f') THEN ' ON TABLE ' WHEN t.relkind in ('v', 'm') THEN ' ON ' END || quote_ident(dest_schema) || '.' 
|| t.relname::text || ' TO "' || r.rolname || '";' as tbl_ddl, -- has_table_privilege(r.oid, t.oid, p.perm) AS granted, t.relkind -- FROM pg_catalog.pg_class AS t CROSS JOIN pg_catalog.pg_roles AS r CROSS JOIN (VALUES (TEXT 'SELECT'), ('INSERT'), ('UPDATE'), ('DELETE'), ('TRUNCATE'), ('REFERENCES'), ('TRIGGER')) AS p(perm) -- WHERE t.relnamespace::regnamespace::name = quote_ident(source_schema) AND t.relkind in ('r', 'p', 'f', 'v', 'm') AND NOT r.rolsuper AND has_table_privilege(r.oid, t.oid, p.perm) order by t.relname::text, t.relkind -- LOOP -- BEGIN -- cnt := cnt + 1; -- -- RAISE NOTICE 'ddl=%', arec.tbl_ddl; -- IF arec.relkind = 'f' THEN -- RAISE WARNING 'Foreign tables are not currently implemented, so skipping privs for them. ddl=%', arec.tbl_ddl; -- ELSE -- IF ddl_only THEN -- RAISE INFO '%', arec.tbl_ddl; -- ELSE -- EXECUTE arec.tbl_ddl; -- END IF; - -- END IF; -- END; -+ -- Issue#61 FIX: use set_config for empty string -+ -- SET search_path = ''; -+ SELECT set_config('search_path', '', false) into v_dummy; -+ -+ FOR qry IN -+ SELECT 'ALTER TABLE ' || quote_ident(dest_schema) || '.' || quote_ident(rn.relname) -+ || ' ADD CONSTRAINT ' || quote_ident(ct.conname) || ' ' || REPLACE(pg_get_constraintdef(ct.oid), 'REFERENCES ' || quote_ident(source_schema) || '.', 'REFERENCES ' -+ || quote_ident(dest_schema) || '.') || ';' -+ FROM pg_constraint ct -+ JOIN pg_class rn ON rn.oid = ct.conrelid -+ -- Issue#103 needed to add this left join -+ LEFT JOIN pg_inherits i ON (rn.oid = i.inhrelid) -+ WHERE connamespace = src_oid -+ AND rn.relkind = 'r' -+ AND ct.contype = 'f' -+ -- Issue#103 fix: needed to also add this null check -+ AND i.inhrelid is null -+ LOOP -+ cnt := cnt + 1; -+ IF bDDLOnly THEN -+ RAISE INFO '%', qry; -+ ELSE -+ IF bDebug THEN RAISE NOTICE 'DEBUG: adding FKEY constraint: %', qry; END IF; -+ EXECUTE qry; -+ END IF; - END LOOP; -- RAISE NOTICE ' TABLE PRIVS cloned: %', LPAD(cnt::text, 5, ' '); -+ EXECUTE 'SET search_path = ' || quote_ident(source_schema) ; -+ RAISE NOTICE ' FKEYS cloned: %', LPAD(cnt::text, 5, ' '); - -- -- Set the search_path back to what it was before -- EXECUTE 'SET search_path = ' || src_path_old; -+ -+ IF src_path_old = '' OR src_path_old = '""' THEN -+ -- RAISE NOTICE 'Restoring old search_path to empty string'; -+ SELECT set_config('search_path', '', false) into v_dummy; -+ ELSE -+ -- RAISE NOTICE 'Restoring old search_path to:%', src_path_old; -+ EXECUTE 'SET search_path = ' || src_path_old; -+ END IF; -+ SELECT setting INTO v_dummy FROM pg_settings WHERE name = 'search_path'; -+ IF bDebug THEN RAISE NOTICE 'DEBUG: setting search_path back to what it was: %', v_dummy; END IF; -+ cnt := cast(extract(epoch from (clock_timestamp() - t)) as numeric(18,3)); -+ IF bVerbose THEN RAISE NOTICE 'clone_schema duration: % seconds',cnt; END IF; - - EXCEPTION - WHEN others THEN - BEGIN - GET STACKED DIAGNOSTICS v_diag1 = MESSAGE_TEXT, v_diag2 = PG_EXCEPTION_DETAIL, v_diag3 = PG_EXCEPTION_HINT, v_diag4 = RETURNED_SQLSTATE, v_diag5 = PG_CONTEXT, v_diag6 = PG_EXCEPTION_CONTEXT; -- -- v_ret := 'line=' || v_diag6 || '. '|| v_diag4 || '. ' || v_diag1 || ' .' || v_diag2 || ' .' || v_diag3; -- v_ret := 'line=' || v_diag6 || '. '|| v_diag4 || '. ' || v_diag1; -- RAISE EXCEPTION 'Action: % Diagnostics: %',action, v_ret; -- -- Set the search_path back to what it was before -- EXECUTE 'SET search_path = ' || src_path_old; -+ v_ret := 'line=' || v_diag6 || '. '|| v_diag4 || '. 
' || v_diag1; -+ -- Issue#101: added version to exception output -+ -- RAISE NOTICE 'v_diag1=% v_diag2=% v_diag3=% v_diag4=% v_diag5=% v_diag6=%', v_diag1, v_diag2, v_diag3, v_diag4, v_diag5, v_diag6; -+ buffer2 = ''; -+ IF action = 'Copy Rows' AND v_diag4 = '42704' THEN -+ -- Issue#105 Help user to fix the problem. -+ buffer2 = 'It appears you have a USER-DEFINED column type mismatch. Try running clone_schema with the FILECOPY option. '; -+ END IF; -+ IF lastsql <> '' THEN -+ buffer = v_ret || E'\n'|| buffer2 || E'\n'|| lastsql; -+ ELSE -+ buffer = v_ret || E'\n'|| buffer2; -+ END IF; -+ RAISE EXCEPTION 'Version: % Action: % Diagnostics: %',v_version, action, buffer; -+ -+ IF src_path_old = '' THEN -+ -- RAISE NOTICE 'setting old search_path to empty string'; -+ SELECT set_config('search_path', '', false); -+ ELSE -+ -- RAISE NOTICE 'setting old search_path to:%', src_path_old; -+ EXECUTE 'SET search_path = ' || src_path_old; -+ END IF; -+ - RETURN; - END; - -@@ -713,14 +3261,14 @@ - END; - - $BODY$ -- LANGUAGE plpgsql VOLATILE -- COST 100; --ALTER FUNCTION public.clone_schema(text, text, boolean, boolean) OWNER TO "{db_user}"; --""" -+ LANGUAGE plpgsql VOLATILE COST 100; - -+ALTER FUNCTION public.clone_schema(text, text, cloneparms[]) OWNER TO "{db_user}"; -+-- REVOKE ALL PRIVILEGES ON FUNCTION clone_schema(text, text, cloneparms[]) FROM public; -+""" # noqa - --class CloneSchema: - -+class CloneSchema: - def _create_clone_schema_function(self): - """ - Creates a postgres function `clone_schema` that copies a schema and its -@@ -752,9 +3300,8 @@ def clone_schema(self, base_schema_name, new_schema_name, set_connection=True): - if schema_exists(new_schema_name): - raise ValidationError("New schema name already exists") - -- sql = 'SELECT clone_schema(%(base_schema)s, %(new_schema)s, true, false)' -+ sql = "SELECT clone_schema(%(base_schema)s, %(new_schema)s, 'DATA')" - cursor.execute( -- sql, -- {'base_schema': base_schema_name, 'new_schema': new_schema_name} -+ sql, {"base_schema": base_schema_name, "new_schema": new_schema_name} - ) - cursor.close() - -From c49b4a1c254ebe713259515a4c8373a9b19dd000 Mon Sep 17 00:00:00 2001 -From: Marc 'risson' Schmitt -Date: Thu, 16 Nov 2023 13:32:06 +0100 -Subject: [PATCH 2/3] clone: allow setting up the clone mode (DATA, NODATA) - -Signed-off-by: Marc 'risson' Schmitt ---- - django_tenants/clone.py | 13 ++++++++++--- - django_tenants/models.py | 11 ++++++++++- - 2 files changed, 20 insertions(+), 4 deletions(-) - -diff --git a/django_tenants/clone.py b/django_tenants/clone.py -index 3afce109..6fa52c04 100644 ---- a/django_tenants/clone.py -+++ b/django_tenants/clone.py -@@ -3281,7 +3281,9 @@ def _create_clone_schema_function(self): - cursor.execute(CLONE_SCHEMA_FUNCTION.format(db_user=db_user)) - cursor.close() - -- def clone_schema(self, base_schema_name, new_schema_name, set_connection=True): -+ def clone_schema( -+ self, base_schema_name, new_schema_name, clone_mode="DATA", set_connection=True -+ ): - """ - Creates a new schema `new_schema_name` as a clone of an existing schema - `old_schema_name`. 
-@@ -3300,8 +3302,13 @@ def clone_schema(self, base_schema_name, new_schema_name, set_connection=True): - if schema_exists(new_schema_name): - raise ValidationError("New schema name already exists") - -- sql = "SELECT clone_schema(%(base_schema)s, %(new_schema)s, 'DATA')" -+ sql = "SELECT clone_schema(%(base_schema)s, %(new_schema)s, %(clone_mode)s)" - cursor.execute( -- sql, {"base_schema": base_schema_name, "new_schema": new_schema_name} -+ sql, -+ { -+ "base_schema": base_schema_name, -+ "new_schema": new_schema_name, -+ "clone_mode": clone_mode, -+ }, - ) - cursor.close() -diff --git a/django_tenants/models.py b/django_tenants/models.py -index 0d1812d8..655e1994 100644 ---- a/django_tenants/models.py -+++ b/django_tenants/models.py -@@ -29,6 +29,13 @@ class TenantMixin(models.Model): - to be automatically created upon save. - """ - -+ clone_mode = "DATA" -+ """ -+ One of "DATA", "NODATA". -+ When using TENANT_BASE_SCHEMA, controls whether only the database -+ structure will be copied, or if data will be copied along with it. -+ """ -+ - schema_name = models.CharField(max_length=63, unique=True, db_index=True, - validators=[_check_schema_name]) - -@@ -184,7 +191,9 @@ def create_schema(self, check_if_exists=False, sync_schema=True, - # copy tables and data from provided model schema - base_schema = get_tenant_base_schema() - clone_schema = CloneSchema() -- clone_schema.clone_schema(base_schema, self.schema_name) -+ clone_schema.clone_schema( -+ base_schema, self.schema_name, self.clone_mode -+ ) - - call_command('migrate_schemas', - tenant=True, - -From 218fbcd3bfa555b20c6fb904e5fcf307d69f18af Mon Sep 17 00:00:00 2001 -From: Marc 'risson' Schmitt -Date: Thu, 16 Nov 2023 13:32:54 +0100 -Subject: [PATCH 3/3] clone: always (re-)create the clone_schema function - -Signed-off-by: Marc 'risson' Schmitt ---- - django_tenants/clone.py | 10 +++------- - 1 file changed, 3 insertions(+), 7 deletions(-) - -diff --git a/django_tenants/clone.py b/django_tenants/clone.py -index 6fa52c04..63fb8e22 100644 ---- a/django_tenants/clone.py -+++ b/django_tenants/clone.py -@@ -1,7 +1,6 @@ - from django.conf import settings - from django.core.exceptions import ValidationError - from django.db import connection, transaction --from django.db.utils import ProgrammingError - - from django_tenants.utils import schema_exists - -@@ -3292,12 +3291,9 @@ def clone_schema( - connection.set_schema_to_public() - cursor = connection.cursor() - -- # check if the clone_schema function already exists in the db -- try: -- cursor.execute("SELECT 'clone_schema'::regproc") -- except ProgrammingError: -- self._create_clone_schema_function() -- transaction.commit() -+ # create or update the clone_schema function in the db -+ self._create_clone_schema_function() -+ transaction.commit() - - if schema_exists(new_schema_name): - raise ValidationError("New schema name already exists") diff --git a/ilot/py3-django-tenants/APKBUILD b/ilot/py3-django-tenants/APKBUILD deleted file mode 100644 index 0183781..0000000 --- a/ilot/py3-django-tenants/APKBUILD +++ /dev/null @@ -1,43 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-django-tenants -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=django-tenants -pkgver=3.6.1 -pkgrel=1 -pkgdesc="Tenant support for Django using PostgreSQL schemas." 
-url="https://pypi.python.org/project/django-tenants" -arch="noarch" -license="KIT" -depends="py3-django py3-psycopg py3-gunicorn py3-coverage" -checkdepends="python3-dev py3-pytest" -makedepends="py3-setuptools py3-gpep517 py3-wheel" -source=" - $pkgname-$pkgver.tar.gz::https://codeload.github.com/django-tenants/django-tenants/tar.gz/refs/tags/v$pkgver - 997_update-from-pgclone-schema.patch - " -builddir="$srcdir/$_pkgreal-$pkgver" -options="!check" # Requires setting up test database -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - DJANGO_SETTINGS_MODULE=tests.settings .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -b18afce81ccc89e49fcc4ebe85d90be602415ca898c1660a4e71e2bef6a3ed2e8c724e94b61d8c6f48f3fb19eb2a87d6a6f5bbf449b3e2f661f87e4b5638eafb py3-django-tenants-3.6.1.tar.gz -f2424bb188db2e3c7d13c15e5bdf0959c6f794e68dbc677c8b876d4faa321f78aded5565539f1bfd97583c6df0fcc19ec05abe203b08407e4446dd7194756825 997_update-from-pgclone-schema.patch -" diff --git a/ilot/py3-scim2-filter-parser/APKBUILD b/ilot/py3-scim2-filter-parser/APKBUILD deleted file mode 100644 index 15d12e5..0000000 --- a/ilot/py3-scim2-filter-parser/APKBUILD +++ /dev/null @@ -1,38 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-scim2-filter-parser -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=scim2-filter-parser -pkgver=0.5.0 -pkgrel=1 -pkgdesc="A customizable parser/transpiler for SCIM2.0 filters" -url="https://pypi.python.org/project/scim2-filter-parser" -arch="noarch" -license="MIT" -depends="py3-django py3-sly" -checkdepends="py3-pytest" -makedepends="py3-setuptools py3-gpep517 py3-wheel poetry" -source="$pkgname-$pkgver.tar.gz::https://github.com/15five/scim2-filter-parser/archive/refs/tags/$pkgver.tar.gz" -builddir="$srcdir/$_pkgreal-$pkgver" -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -5347852af6b82a764a32bc491a7e0f05f06b4f4d93dfa375668b5ca1a15ee58f488702536e350100fe5c96a5c94c492ea8cbd0e1952c5920d5a10e1453357f8c py3-scim2-filter-parser-0.5.0.tar.gz -" diff --git a/ilot/py3-tenant-schemas-celery/APKBUILD b/ilot/py3-tenant-schemas-celery/APKBUILD deleted file mode 100644 index c5f9029..0000000 --- a/ilot/py3-tenant-schemas-celery/APKBUILD +++ /dev/null @@ -1,41 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=py3-tenant-schemas-celery -#_pkgreal is used by apkbuild-pypi to find modules at PyPI -_pkgreal=tenant-schemas-celery -pkgver=2.2.0 -pkgrel=1 -pkgdesc="Celery integration for django-tenant-schemas and django-tenants" -url="https://pypi.python.org/project/tenant-schemas-celery" -arch="noarch" -license="MIT" -depends="py3-django-tenants py3-celery" -checkdepends="python3-dev py3-pytest" -makedepends="py3-setuptools py3-gpep517 py3-wheel" -source=" - $pkgname-$pkgver.tar.gz::https://codeload.github.com/maciej-gol/tenant-schemas-celery/tar.gz/refs/tags/$pkgver - " -options="!check" # Test suite wants 
docker -builddir="$srcdir/$_pkgreal-$pkgver" -subpackages="$pkgname-pyc" - -build() { - gpep517 build-wheel \ - --wheel-dir .dist \ - --output-fd 3 3>&1 >&2 -} - -check() { - python3 -m venv --clear --without-pip --system-site-packages .testenv - .testenv/bin/python3 -m installer .dist/*.whl - DJANGO_SETTINGS_MODULE=tests.settings .testenv/bin/python3 -m pytest -v -} - -package() { - python3 -m installer -d "$pkgdir" \ - .dist/*.whl -} - -sha512sums=" -dad71011306936dc84d966797b113008780750e9e973513092bec892be0d1468e0a0e7e8e2fcca9765309a27767e1c72bdaad7c8aca16353ae1eef783c239148 py3-tenant-schemas-celery-2.2.0.tar.gz -" diff --git a/ilot/uptime-kuma/APKBUILD b/ilot/uptime-kuma/APKBUILD deleted file mode 100644 index f0acb67..0000000 --- a/ilot/uptime-kuma/APKBUILD +++ /dev/null @@ -1,49 +0,0 @@ -# Contributor: Antoine Martin (ayakael) -# Maintainer: Antoine Martin (ayakael) -pkgname=uptime-kuma -pkgver=1.23.13 -pkgrel=1 -pkgdesc='A fancy self-hosted monitoring tool' -arch="all" -url="https://github.com/louislam/uptime-kuma" -license="MIT" -depends="nodejs" -makedepends="npm" -source=" - uptime-kuma-$pkgver.tar.gz::https://github.com/louislam/uptime-kuma/archive/refs/tags/$pkgver.tar.gz - uptime-kuma.openrc - uptime-kuma.conf - " -subpackages="$pkgname-doc $pkgname-openrc" -install="$pkgname.pre-install" - -build() { - npm ci - npm run build - rm -Rf "$builddir"/node_modules - npm ci --omit=dev -} - -package() { - install -dm 755 \ - "$pkgdir"/usr/share/webapps \ - "$pkgdir"/usr/share/doc \ - "$pkgdir"/usr/share/licenses/uptime-kuma \ - "$pkgdir"/etc/init.d \ - "$pkgdir"/etc/conf.d - - # install - cp -a "$builddir" "$pkgdir/usr/share/webapps/uptime-kuma" - - # openrc - install -Dm755 "$srcdir"/uptime-kuma.openrc "$pkgdir"/etc/init.d/uptime-kuma - install -Dm755 "$srcdir"/uptime-kuma.conf "$pkgdir"/etc/conf.d/uptime-kuma - - # docs and licenses - mv "$pkgdir"/usr/share/webapps/uptime-kuma/LICENSE "$pkgdir"/usr/share/licenses/uptime-kuma/. -} -sha512sums=" -9045cdc69d46ce34011f7866844a8d1866eee21850be6eede3226e77b9c0d3ecc0190481671f04f25da40345b29cc2d13de07bcc27e7baeff7901b4bd9c8b93f uptime-kuma-1.23.13.tar.gz -0ceddb98a6f318029b8bd8b5a49b55c883e77a5f8fffe2b9b271c9abf0ac52dc7a6ea4dbb4a881124a7857f1e43040f18755c1c2a034479e6a94d2b65a73d847 uptime-kuma.openrc -1dbae536b23e3624e139155abbff383bba3209ff2219983da2616b4376b1a5041df812d1e5164716fc6e967a8446d94baae3b96ee575d400813cc6fdc2cc274e uptime-kuma.conf -" diff --git a/ilot/uptime-kuma/uptime-kuma.conf b/ilot/uptime-kuma/uptime-kuma.conf deleted file mode 100644 index f816a99..0000000 --- a/ilot/uptime-kuma/uptime-kuma.conf +++ /dev/null @@ -1,47 +0,0 @@ -# uptime-kuma config -# for more info -# see https://github.com/louislam/uptime-kuma/wiki/Environment-Variables - -# Set the directory where the data should be stored (could be relative) -# DATA_DIR=/var/lib/uptime-kuma - -# Host to bind to, could be an ip. -# UPTIME_KUMA_HOST=:: - -# Port to listen to -# UPTIME_KUMA_PORT=3001 - -# Path to SSL key -# UPTIME_KUMA_SSL_KEY= - -# Path to SSL certificate -# UPTIME_KUMA_SSL_CERT= - -# SSL Key Passphrase -# UPTIME_KUMA_SSL_KEY_PASSPHRASE= - -# Cloudflare Tunnel Token -# UPTIME_KUMA_CLOUDFLARED_TOKEN= - -# By default, Uptime Kuma is not allowed in iframe if the domain name is not -# the same as the parent. It protects your Uptime Kuma to be a phishing -# website. 
If you don't need this protection, you can set it to true -# UPTIME_KUMA_DISABLE_FRAME_SAMEORIGIN=false - -# By default, Uptime Kuma is verifying that the websockets ORIGIN-Header -# matches your servers hostname. If you don't need this protection, you can -# set it to bypass. See GHSA-mj22-23ff-2hrr for further context. -# UPTIME_KUMA_WS_ORIGIN_CHECK=cors-like - -# Allow to specify any executables as Chromium -# UPTIME_KUMA_ALLOW_ALL_CHROME_EXEC=0 - -# Add your self-signed ca certs. -# NODE_EXTRA_CA_CERTS= - -# Ignore all TLS errors -# NODE_TLS_REJECT_UNAUTHORIZED=0 - -# Set it to --insecure-http-parser, if you encountered error Invalid header -# value char when your website using WAF -# NODE_OPTIONS= diff --git a/ilot/uptime-kuma/uptime-kuma.openrc b/ilot/uptime-kuma/uptime-kuma.openrc deleted file mode 100644 index ce7b00e..0000000 --- a/ilot/uptime-kuma/uptime-kuma.openrc +++ /dev/null @@ -1,48 +0,0 @@ -#!/sbin/openrc-run - -description="Uptime Kuma self-hosted monitoring tool" - -# Change $directory to path to uptime-kuma -directory=${directory:-/usr/share/webapps/uptime-kuma} -pidfile=${pidfile:-/run/$RC_SVCNAME.pid} -DATA_DIR=${DATA_DIR:-/var/lib/uptime-kuma} - -log_dir="/var/log/$RC_SVCNAME" -logfile=${logfile:-$log_dir/$RC_SVCNAME.log} -output_log="${output_log:-$logfile}" -error_log="${error_log:-$logfile}" - -command=${command:-/usr/bin/node} -command_args="$directory/server/server.js" -command_user=${command_user:-uptime-kuma:uptime-kuma} -command_background=true - -depend() { - need net -} - -start_pre() { - checkpath --owner=$command_user --directory $log_dir \ - $DATA_DIR \ - $DATA_DIR/upload - checkpath --owner=$command_user --file $logfile \ - $DATA_DIR/error.log - - [ ! -e $DATA_DIR/kuma.db ] && - cp $directory/db/kuma.db $DATA_DIR - - checkpath --owner=$command_user --mode 600 --file $DATA_DIR/kuma.db* - - cd $directory - - export DATA_DIR UPTIME_KUMA_HOST UPTIME_KUMA_PORT UPTIME_KUMA_SSL_KEY \ - UPTIME_KUMA_SSL_CERT UPTIME_KUMA_SSL_KEY_PASSPHRASE \ - UPTIME_KUMA_CLOUDFLARED_TOKEN UPTIME_KUMA_DISABLE_FRAME_SAMEORIGIN \ - UPTIME_KUMA_WS_ORIGIN_CHECK UPTIME_KUMA_ALLOW_ALL_CHROME_EXEC \ - NODE_EXTRA_CA_CERTS NODE_TLS_REJECT_UNAUTHORIZED NODE_OPTIONS -} - -start_post() { - # Wait for the server to be started - sleep 10 -} diff --git a/ilot/uptime-kuma/uptime-kuma.pre-install b/ilot/uptime-kuma/uptime-kuma.pre-install deleted file mode 100755 index 0217278..0000000 --- a/ilot/uptime-kuma/uptime-kuma.pre-install +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/sh - -DATADIR='/var/lib/uptime-kuma' - -if ! getent group uptime-kuma 1>/dev/null; then - echo '* Creating group uptime-kuma' 1>&2 - - addgroup -S uptime-kuma -fi - -if ! id uptime-kuma 2>/dev/null 1>&2; then - echo '* Creating user uptime-kuma' 1>&2 - - adduser -DHS -G uptime-kuma -h "$DATADIR" -s /bin/sh \ - -g "added by apk for uptime-kuma" uptime-kuma - passwd -u uptime-kuma 1>/dev/null # unlock -fi - -if ! 
id -Gn uptime-kuma | grep -Fq www-data; then - echo '* Adding user uptime-kuma to group www-data' 1>&2 - - addgroup uptime-kuma www-data -fi - -exit 0 diff --git a/ilot/wikijs/APKBUILD b/ilot/wikijs/APKBUILD deleted file mode 100644 index 43d8189..0000000 --- a/ilot/wikijs/APKBUILD +++ /dev/null @@ -1,59 +0,0 @@ -# Maintainer: Antoine Martin (ayakael) -# Contributor: Antoine Martin (ayakael) -pkgname=wikijs -pkgver=2.5.303 -pkgrel=1 -pkgdesc="Wiki.js | A modern, lightweight and powerful wiki app built on Node.js" -license="AGPL-3.0" -arch="!armv7 x86_64" -options="!check" # No test suite -depends=" - libcap-setcap - nodejs>=10.12.0 - postgresql - python3 - " -makedepends=" - yarn - npm - " -url="https://github.com/Requarks/wiki" -subpackages="$pkgname-openrc" -install="$pkgname.post-install $pkgname.pre-install" -builddir="$srcdir"/wiki-$pkgver -pkgusers="wikijs" -pkggroups="wikijs" -source=" - $pkgname-$pkgver.tar.gz::https://github.com/requarks/wiki/archive/refs/tags/v$pkgver.tar.gz - wikijs.initd - config.sample.yml.patch -" - -prepare() { - default_prepare - sed -i "s|\"version.*|\"version\": \"$pkgver\",|" "$builddir"/package.json - sed -i 's|"dev": true.*|"dev": "false",|' "$builddir"/package.json -} -build() { - yarn --frozen-lockfile --non-interactive - yarn build - rm -rf node_modules - yarn --production --frozen-lockfile --non-interactive -} - -package() { - install -Dm755 "$srcdir"/wikijs.initd "$pkgdir"/etc/init.d/wikijs - - install -Dm644 -o 5494 -g 5494 "$builddir"/config.sample.yml "$pkgdir"/etc/wikijs/config.yml - - install -Dm644 "$builddir"/package.json -t "$pkgdir"/usr/lib/bundles/wikijs - cp -aR "$builddir"/assets "$builddir"/server "$builddir"/node_modules "$pkgdir"/usr/lib/bundles/wikijs - - mkdir -p "$pkgdir"/var/lib/wikijs - chown 5494:5494 "$pkgdir"/var/lib/wikijs -} -sha512sums=" -a463d79ad0d8ff15dbe568b839094d697c6de0b2e991b77a4944e2a82f9789de6840e504a4673e4e0900d61596e880ca276008de86dac4f05f5823dc0427d2fc wikijs-2.5.303.tar.gz -355131ee5617348b82681cb8543c784eea59689990a268ecd3b77d44fe9abcca9c86fb8b047f0a8faeba079c650faa7790c5dd65418d313cd7561f38bb590c03 wikijs.initd -07b536c20e370d2a926038165f0e953283259c213a80a8648419565f5359ab05f528ac310e81606914013da212270df6feddb22e514cbcb2464c8274c956e4af config.sample.yml.patch -" diff --git a/ilot/wikijs/config.sample.yml.patch b/ilot/wikijs/config.sample.yml.patch deleted file mode 100644 index 6532c25..0000000 --- a/ilot/wikijs/config.sample.yml.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/config.sample.yml.orig b/config.sample.yml -index 47edd8d..458472a 100644 ---- a/config.sample.yml.orig -+++ b/config.sample.yml -@@ -136,7 +136,7 @@ ha: false - # Data Path - # --------------------------------------------------------------------- - # Writeable data path used for cache and temporary user uploads. 
--dataPath: ./data -+dataPath: /var/lib/wikijs/data - - # --------------------------------------------------------------------- - # Body Parser Limit diff --git a/ilot/wikijs/wikijs.initd b/ilot/wikijs/wikijs.initd deleted file mode 100644 index 680efbf..0000000 --- a/ilot/wikijs/wikijs.initd +++ /dev/null @@ -1,24 +0,0 @@ -#!/sbin/openrc-run -name="$RC_SVCNAME" -cfgfile="/etc/conf.d/$RC_SVCNAME" -pidfile="/var/run/$RC_SVCNAME.pid" -command="/usr/bin/node server" -command_args="" -command_user="wikijs" -command_group="wikijs" -supervisor="supervise-daemon" -start_stop_daemon_args="" -command_background="yes" -output_log="/var/log/$RC_SVCNAME/$RC_SVCNAME.log" -error_log="/var/log/$RC_SVCNAME/$RC_SVCNAME.err" -working_directory="/usr/lib/bundles/wikijs" - -start_pre() { - checkpath --directory --owner $command_user:$command_user --mode 0775 \ - /var/log/$RC_SVCNAME \ - /var/lib/$RC_SVCNAME - export NODE_ENV=production - export CONFIG_FILE=/etc/wikijs/config.yml - cd "$working_directory" -} - diff --git a/ilot/wikijs/wikijs.post-install b/ilot/wikijs/wikijs.post-install deleted file mode 100755 index 748e847..0000000 --- a/ilot/wikijs/wikijs.post-install +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh -set -eu - -group=wikijs -config_file='/etc/wikijs/config.yml' - -setcap 'cap_net_bind_service=+ep' /usr/bin/node - -cat >&2 <<-EOF -* -* 1. Adjust settings in /etc/wikijs/config.yml. -* -* 2. Create database for wikijs: -* -* psql -c "CREATE ROLE wikijs PASSWORD 'top-secret' INHERIT LOGIN;" -* psql -c "CREATE DATABASE wikijs OWNER wikijs ENCODING 'UTF-8';" -* -EOF diff --git a/ilot/wikijs/wikijs.pre-install b/ilot/wikijs/wikijs.pre-install deleted file mode 100644 index 579485d..0000000 --- a/ilot/wikijs/wikijs.pre-install +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/sh -# It's very important to set user/group correctly. - -wikijs_dir='/var/lib/wikijs' - -if ! getent group wikijs 1>/dev/null; then - echo '* Creating group wikijs' 1>&2 - - addgroup -S wikijs -g 5494 -fi - -if ! id wikijs 2>/dev/null 1>&2; then - echo '* Creating user wikijs' 1>&2 - - adduser -DHS -G wikijs -h "$wikijs_dir" -u 5494 -s /bin/sh \ - -g "added by apk for wikijs" wikijs - passwd -u wikijs 1>/dev/null # unlock -fi - -exit 0
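
As a usage reference for the CloneSchema changes carried in 997_update-from-pgclone-schema.patch above, the sketch below is a minimal, hypothetical example of driving the patched clone_schema() signature and the new TenantMixin.clone_mode attribute; the Client model name and the schema names are made up for illustration and do not appear anywhere in this tree.

# Minimal sketch, assuming django-tenants with the clone_mode patches applied.
# Names below (Client, "tenant_base", "tenant_acme") are hypothetical.
from django_tenants.clone import CloneSchema
from django_tenants.models import TenantMixin


class Client(TenantMixin):
    # With TENANT_BASE_SCHEMA configured, create_schema() clones the base
    # schema; "NODATA" copies only the structure, "DATA" (the default) also
    # copies the rows.
    clone_mode = "NODATA"


# Or call the helper directly; per the patches it (re-)creates the
# clone_schema() SQL function and then runs
# SELECT clone_schema(base_schema, new_schema, clone_mode).
CloneSchema().clone_schema("tenant_base", "tenant_acme", clone_mode="NODATA")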