diff --git a/.github/workflows/ci-lazypg.yml b/.github/workflows/ci-lazypg.yml new file mode 100644 index 000000000..22de627c4 --- /dev/null +++ b/.github/workflows/ci-lazypg.yml @@ -0,0 +1,288 @@ +# This workflow will build and test PL/Java against a version of PostgreSQL +# lazily obtained (either preinstalled in the GitHub Actions runner environment, +# or obtained from a package repository if the runner does not provide one). +# Arrange for the matrix to include a pg version, for cases where one must be +# installed. + +name: CI lazy getting PostgreSQL + +permissions: + contents: read + +on: + push: + branches: [ master, REL1_7_STABLE, REL1_6_STABLE ] + pull_request: + branches: [ master, REL1_7_STABLE, REL1_6_STABLE ] + +jobs: + build: + if: true + + runs-on: ${{ matrix.oscc.os }} + continue-on-error: true + strategy: + matrix: + oscc: + - os: ubuntu-latest + cc: gcc + - os: macos-13 + cc: clang + pg: 17 + - os: macos-14 + cc: clang + pg: 17 + - os: windows-latest + cc: msvc + - os: windows-latest + cc: mingw + java: [11, 17, 21, 23] + exclude: + - oscc: {os: windows-latest} + java: 17 + - oscc: {os: windows-latest} + java: 23 + + steps: + + - name: Check for JDK preinstalled + id: jdkcheck + shell: bash + env: + JAVAVER: ${{ matrix.java }} + run: | + if + candidate="JAVA_HOME_${JAVAVER}_${RUNNER_ARCH}" + echo -n "Environment contains $candidate? " + [[ -n ${!candidate+set} ]] + then + echo yes + echo >>"$GITHUB_ENV" "JAVA_HOME=${!candidate}" + echo >>"$GITHUB_OUTPUT" java_found=true + elif + candidate="JAVA_HOME_${JAVAVER}_$(tr A-Z a-z <<<${RUNNER_ARCH})" + echo -ne 'no\n'"Environment contains $candidate? 
" + [[ -n ${!candidate+set} ]] + then + echo yes + echo >>"$GITHUB_ENV" "JAVA_HOME=${!candidate}" + echo >>"$GITHUB_OUTPUT" java_found=true + else + echo -e 'no\n'"only: ${!JAVA_HOME_*}" + echo >>"$GITHUB_OUTPUT" java_found=false + fi + + - name: Fetch a JDK + if: ${{ 'false' == steps.jdkcheck.outputs.java_found }} + uses: actions/setup-java@7a6d8a8234af8eb26422e24e3006232cccaa061b + with: + distribution: temurin + java-version: ${{ matrix.java }} + + - name: Compute absolute paths for java and jshell + shell: bash + run: | + if [[ $RUNNER_OS == Windows ]] + then + echo >>"$GITHUB_ENV" "ABS_JAVA=$JAVA_HOME"'\bin\java' + echo >>"$GITHUB_ENV" "ABS_JSHELL=$JAVA_HOME"'\bin\jshell' + else + echo >>"$GITHUB_ENV" "ABS_JAVA=$JAVA_HOME/bin/java" + echo >>"$GITHUB_ENV" "ABS_JSHELL=$JAVA_HOME/bin/jshell" + fi + + - name: Set PGCONFIG in environment, getting PostgreSQL if needed (!mingw) + if: ${{ 'mingw' != matrix.oscc.cc }} + shell: bash + env: + PGVER: ${{ matrix.oscc.pg }} + run: | + if [[ $RUNNER_OS == Linux ]] + then + echo >>"$GITHUB_ENV" PGCONFIG=pg_config + elif [[ $RUNNER_OS == Windows ]] + then + echo >>"$GITHUB_ENV" PGCONFIG="$PGBIN"'\pg_config' + elif [[ $RUNNER_OS == macOS ]] + then + echo '::group::brew update' + brew update + echo '::endgroup::' + echo "::group::brew install postgresql@$PGVER" + # HOMEBREW_GITHUB_ACTIONS will suppress the formula's initdb + HOMEBREW_GITHUB_ACTIONS=1 HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK=1 \ + brew install postgresql@"$PGVER" + echo '::endgroup::' + pfx=$(brew --prefix postgresql@"$PGVER") + echo >>"$GITHUB_ENV" PGCONFIG="$pfx/bin/pg_config" + fi + + - name: Set PGCONFIG in environment, getting gcc and PostgreSQL (mingw) + if: ${{ 'mingw' == matrix.oscc.cc }} + shell: C:\shells\msys2bash.cmd {0} + run: | + pacman -S --noconfirm \ + mingw-w64-x86_64-gcc \ + mingw-w64-x86_64-postgresql + echo >>"$GITHUB_ENV" PGCONFIG='c:\msys64\mingw64\bin\pg_config' + + - name: Report Java, Maven, and PostgreSQL versions + shell: bash + run: | + 
"$ABS_JAVA" -version + mvn --version + "$PGCONFIG" + + - name: Obtain PG development files (Ubuntu, PGDG) + if: ${{ 'Linux' == runner.os }} + run: | + pgver=$("$PGCONFIG" --version) + pgver=${pgver##PostgreSQL } + pgver=${pgver%% *} + pgver=${pgver%.*} + echo '::group::Install PGDG key and repo' + curl -s -S https://www.postgresql.org/media/keys/ACCC4CF8.asc | + gpg --dearmor | + sudo dd of=/etc/apt/trusted.gpg.d/apt.postgresql.org.gpg + echo \ + deb \ + http://apt.postgresql.org/pub/repos/apt \ + "$(lsb_release -cs)-pgdg" \ + main | + sudo tee /etc/apt/sources.list.d/pgdg.list + echo '::endgroup::' + echo '::group::apt-get update' + sudo apt-get update + echo '::endgroup::' + echo "::group::apt-get install postgresql-server-dev-$pgver" + sudo apt-get install postgresql-server-dev-"$pgver" libkrb5-dev + echo '::endgroup::' + + - name: Confirm PostgreSQL development files are present + shell: python + run: | + from os import getenv + from os.path import join + from re import sub + from subprocess import check_output + + pgconfig = getenv('PGCONFIG') + + def ask_pg_config(what): + return check_output([pgconfig, '--'+what]).splitlines()[0] + + pgch = join(ask_pg_config('includedir-server'), b'pg_config.h') + + with open(pgch, 'r') as f: + line = [ln for ln in f if ln.startswith('#define PG_VERSION_STR ')][0] + + vers = sub(r'#define PG_VERSION_STR "(.*)"\n', r'\1', line) + + print('PostgreSQL development files are present:', vers, sep='\n') + + - name: Check out PL/Java + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + path: pljava + + - name: Set plethora of MSVC environment variables (Windows MSVC) + if: ${{ 'Windows' == runner.os && 'msvc' == matrix.oscc.cc }} + uses: ilammy/msvc-dev-cmd@0b201ec74fa43914dc39ae48a89fd1d8cb592756 + + - name: Build PL/Java (Windows MSVC) + if: ${{ 'Windows' == runner.os && 'msvc' == matrix.oscc.cc }} + working-directory: pljava + # shell: cmd because of the issue described for ilammy/msvc-dev-cmd + # with 
Actions bash prepending stuff to the just-carefully-created PATH + shell: cmd + run: | + mvn clean install --batch-mode ^ + -Dpgsql.pgconfig="%PGCONFIG%" ^ + -Psaxon-examples -Ppgjdbc ^ + -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn + + - name: Build PL/Java (Linux, macOS) + if: ${{ 'Windows' != runner.os }} + working-directory: pljava + run: | + mvn clean install --batch-mode \ + -Dpgsql.pgconfig="$PGCONFIG" \ + -Psaxon-examples -Ppgjdbc \ + -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn + + - name: Build PL/Java (Windows MinGW-w64) + if: ${{ 'Windows' == runner.os && 'mingw' == matrix.oscc.cc }} + working-directory: pljava + shell: C:\shells\msys2bash.cmd {0} + run: | + PATH='/c/msys64/mingw64/bin:'"$PATH" + "$M2"/mvn clean install --batch-mode \ + -Dpgsql.pgconfig="$PGCONFIG" \ + -Psaxon-examples -Ppgjdbc \ + -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn + + - name: Install and test PL/Java + working-directory: pljava + shell: bash + run: | + packageJar=$(find pljava-packaging -name pljava-pg*.jar -print) + + mavenRepo="$HOME/.m2/repository" + + saxonVer=$( + find "$mavenRepo/net/sf/saxon/Saxon-HE" \ + -name 'Saxon-HE-*.jar' -print | + sort | + tail -n 1 + ) + saxonVer=${saxonVer%/*} + saxonVer=${saxonVer##*/} + + jdbcJar=$( + find "$mavenRepo/org/postgresql/postgresql" \ + -name 'postgresql-*.jar' -print | + sort | + tail -n 1 + ) + + # + # The runner on a Unix-like OS is running as a non-privileged user, but + # has passwordless sudo available (needed to install the PL/Java files + # into the system directories where the supplied PostgreSQL lives). By + # contrast, on Windows the runner has admin privilege, and can install + # the files without any fuss (but later below, pg_ctl will have to be + # used when starting PostgreSQL; pg_ctl has a Windows-specific ability + # to drop admin privs so postgres will not refuse to start). 
+ # + # The Git for Windows bash environment includes a find command, and the + # things found have unixy paths returned. Make them Windowsy here, with + # a hardcoded assumption they start with /c which should become c: (as + # appears to be the case in the Windows runner currently). + # + echo '::group::Install files from the package jar' + if [[ $RUNNER_OS == Windows ]] + then + pathSep=';' + "$ABS_JAVA" -Dpgconfig="$PGCONFIG" -jar "$packageJar" + function toWindowsPath() { + local p + p="c:${1#/c}" + printf "%s" "${p//\//\\}" + } + jdbcJar="$(toWindowsPath "$jdbcJar")" + mavenRepo="$(toWindowsPath "$mavenRepo")" + else + pathSep=':' + sudo "$ABS_JAVA" -Dpgconfig="$PGCONFIG" -jar "$packageJar" + fi + echo '::endgroup::' + + "$ABS_JSHELL" \ + -execution local \ + "-J--class-path=$packageJar$pathSep$jdbcJar" \ + "--class-path=$packageJar" \ + "-J--add-modules=java.sql.rowset,jdk.httpserver" \ + "-J-Dpgconfig=$PGCONFIG" \ + "-J-DmavenRepo=$mavenRepo" \ + "-J-DsaxonVer=$saxonVer" \ + CI/integration diff --git a/.github/workflows/ci-runnerpg.yml b/.github/workflows/ci-runnerpg.yml deleted file mode 100644 index d77d5a53a..000000000 --- a/.github/workflows/ci-runnerpg.yml +++ /dev/null @@ -1,560 +0,0 @@ -# This workflow will build and test PL/Java against the version of PostgreSQL -# preinstalled in the GitHub Actions runner environment. Naturally, this one -# does not have a PostgreSQL version in the build matrix. The version that's -# preinstalled is the version you get. 
- -name: PL/Java CI with PostgreSQL version supplied by the runner - -on: - push: - branches: [ master, REL1_6_STABLE ] - pull_request: - branches: [ master, REL1_6_STABLE ] - -jobs: - build: - if: true - - runs-on: ${{ matrix.oscc.os }} - continue-on-error: true - strategy: - matrix: - oscc: - - os: ubuntu-latest - cc: gcc - - os: macos-latest - cc: clang -# - os: windows-latest -# cc: msvc -# - os: windows-latest -# cc: mingw - java: [9, 11, 12, 13, 14, 15] - - steps: - - - name: Check out PL/Java - uses: actions/checkout@v2 - with: - path: pljava - - - name: Set up JDK - uses: actions/setup-java@v1 - with: - java-version: ${{ matrix.java }} - - - name: Report Java, Maven, and PostgreSQL versions (Linux, macOS) - if: ${{ 'Windows' != runner.os }} - run: | - java -version - mvn --version - pg_config - - - name: Report Java, Maven, and PostgreSQL versions (Windows) - if: ${{ 'Windows' == runner.os }} - run: | - java -version - mvn --version - & "$Env:PGBIN\pg_config" - - - name: Obtain PG development files (Ubuntu, PGDG) - if: ${{ 'Linux' == runner.os }} - run: | - echo \ - deb \ - http://apt.postgresql.org/pub/repos/apt \ - "$(lsb_release -cs)-pgdg" \ - main | - sudo tee /etc/apt/sources.list.d/pgdg.list - sudo apt-get update - sudo apt-get install postgresql-server-dev-14 libkrb5-dev - - - name: Build PL/Java (Linux, macOS) - if: ${{ 'Windows' != runner.os }} - working-directory: pljava - run: | - mvn clean install --batch-mode \ - -Psaxon-examples -Ppgjdbc-ng \ - -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn - - - name: Build PL/Java (Windows MinGW-w64) - if: ${{ 'Windows' == runner.os && 'mingw' == matrix.oscc.cc }} - working-directory: pljava - # - # GitHub Actions will allow 'bash' as a shell choice, even on a Windows - # runner, in which case it's the bash from Git for Windows. 
That isn't the - # same as the msys64\usr\bin\bash that we want; what's more, while both - # rely on a cygwin DLL, they don't rely on the same one, and an attempt - # to exec one from the other leads to a "fatal error - cygheap base - # mismatch". So, the bash we want has to be started by something other - # than the bash we've got. In this case, set shell: to a command that - # will use cmd to start the right bash. - # - # Some of the MinGW magic is set up by the bash profile run at "login", so - # bash must be started with -l. That profile ends with a cd $HOME, so to - # avoid changing the current directory, set HOME=. first (credit for that: - # https://superuser.com/a/806371). As set above, . is really the pljava - # working-directory, so the bash script should start by resetting HOME to - # the path of its parent. - # - # The runner is provisioned with a very long PATH that includes separate - # bin directories for pre-provisioned packages. The MinGW profile replaces - # that with a much shorter path, so mvn and pg_config below must be given - # as absolute paths (using M2 and PGBIN supplied in the environment) or - # they won't be found. As long as mvn itself can be found, it is able - # to find java without difficulty, using the JAVA_HOME that is also in - # the environment. - # - # Those existing variables in the environment are all spelled in Windows - # style with drive letters, colons, and backslashes, rather than the MinGW - # unixy style, but the mingw bash doesn't seem to object. - # - # If you use the runner-supplied bash to examine the environment, you will - # see MSYSTEM=MINGW64 already in it, but that apparently is something the - # runner-supplied bash does. It must be set here before invoking the MinGW - # bash directly. - # - env: - HOME: . - MSYSTEM: MINGW64 - shell: 'cmd /C "c:\msys64\usr\bin\bash -l "{0}""' - run: | - HOME=$( (cd .. 
&& pwd) ) - "$M2"/mvn clean install --batch-mode \ - -Dpgsql.pgconfig="$PGBIN"'\pg_config' \ - -Psaxon-examples -Ppgjdbc-ng \ - -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn - - - name: Install and test PL/Java - if: ${{ '9' != matrix.java || 'Windows' != runner.os }} - working-directory: pljava - shell: bash - run: | - pgConfig=pg_config # runner-supplied, just get it from the PATH - - packageJar=$(find pljava-packaging -name pljava-pg*.jar -print) - - mavenRepo="$HOME/.m2/repository" - - saxonVer=$( - find "$mavenRepo/net/sf/saxon/Saxon-HE" \ - -name 'Saxon-HE-*.jar' -print | - sort | - tail -n 1 - ) - saxonVer=${saxonVer%/*} - saxonVer=${saxonVer##*/} - - jdbcJar=$( - find "$mavenRepo/com/impossibl/pgjdbc-ng/pgjdbc-ng-all" \ - -name 'pgjdbc-ng-all-*.jar' -print | - sort | - tail -n 1 - ) - - # - # The runner on a Unix-like OS is running as a non-privileged user, but - # has passwordless sudo available (needed to install the PL/Java files - # into the system directories where the supplied PostgreSQL lives). By - # contrast, on Windows the runner has admin privilege, and can install - # the files without any fuss (but later below, pg_ctl will have to be - # used when starting PostgreSQL; pg_ctl has a Windows-specific ability - # to drop admin privs so postgres will not refuse to start). - # - # The Windows runner seems to have an extra pg_config somewhere on the - # path, that reports it was built with MinGW and installed in paths - # containing Strawberry that don't really exist. $PGBIN\pg_config refers - # to a different build made with MSVC, and those directories really - # exist, so specify that one explicitly when running on Windows. - # - # The Git for Windows bash environment includes a find command, and the - # things found have unixy paths returned. Make them Windowsy here, with - # a hardcoded assumption they start with /c which should become c: (as - # appears to be the case in the Windows runner currently). 
- # - if [[ $RUNNER_OS == Windows ]] - then - pathSep=';' - pgConfig="$PGBIN"'\pg_config' - java -Dpgconfig="$pgConfig" -jar "$packageJar" - function toWindowsPath() { - local p - p="c:${1#/c}" - printf "%s" "${p//\//\\}" - } - jdbcJar="$(toWindowsPath "$jdbcJar")" - mavenRepo="$(toWindowsPath "$mavenRepo")" - else - pathSep=':' - sudo "$JAVA_HOME"/bin/java -Dpgconfig="$pgConfig" -jar "$packageJar" - fi - - jshell \ - -execution local \ - "-J--class-path=$packageJar$pathSep$jdbcJar" \ - "--class-path=$packageJar" \ - "-J--add-modules=java.sql.rowset" \ - "-J-Dpgconfig=$pgConfig" \ - "-J-Dcom.impossibl.shadow.io.netty.noUnsafe=true" \ - "-J-DmavenRepo=$mavenRepo" \ - "-J-DsaxonVer=$saxonVer" - <<\ENDJSHELL - - boolean succeeding = false; // begin pessimistic - - import static java.nio.file.Files.createTempFile - import static java.nio.file.Files.write - import java.nio.file.Path - import static java.nio.file.Paths.get - import java.sql.Connection - import java.sql.PreparedStatement - import java.sql.ResultSet - import org.postgresql.pljava.packaging.Node - import static org.postgresql.pljava.packaging.Node.q - import static org.postgresql.pljava.packaging.Node.stateMachine - import static org.postgresql.pljava.packaging.Node.isVoidResultSet - import static org.postgresql.pljava.packaging.Node.s_isWindows - - String javaHome = System.getProperty("java.home"); - - Path javaLibDir = get(javaHome, s_isWindows ? "bin" : "lib") - - Path libjvm = ( - "Mac OS X".equals(System.getProperty("os.name")) - ? Stream.of("libjli.dylib", "jli/libjli.dylib") - .map(s -> javaLibDir.resolve(s)) - .filter(Files::exists).findFirst().get() - : javaLibDir.resolve(s_isWindows ? "jvm.dll" : "server/libjvm.so") - ); - - String vmopts = "-enableassertions:org.postgresql.pljava... 
-Xcheck:jni" - - Node n1 = Node.get_new_node("TestNode1") - - if ( s_isWindows ) - n1.use_pg_ctl(true) - - /* - * Keep a tally of the three types of diagnostic notices that may be - * received, and, independently, how many represent no-good test results - * (error always, but also warning if seen from the tests in the - * examples.jar deployment descriptor). - */ - Map results = - Stream.of("info", "warning", "error", "ng").collect( - LinkedHashMap::new, - (m,k) -> m.put(k, 0), (r,s) -> {}) - - boolean isDiagnostic(Object o, Set whatIsNG) - { - if ( ! ( o instanceof Throwable ) ) - return false; - String[] parts = Node.classify((Throwable)o); - String type = parts[0]; - String message = parts[2]; - results.compute(type, (k,v) -> 1 + v); - if ( whatIsNG.contains(type) ) - if ( ! "warning".equals(type) - || ! message.startsWith("[JEP 411]") ) - results.compute("ng", (k,v) -> 1 + v); - return true; - } - - try ( - AutoCloseable t1 = n1.initialized_cluster(); - AutoCloseable t2 = n1.started_server(Map.of( - "client_min_messages", "info", - "pljava.vmoptions", vmopts, - "pljava.libjvm_location", libjvm.toString() - )); - ) - { - try ( Connection c = n1.connect() ) - { - succeeding = true; // become optimistic, will be using &= below - - succeeding &= stateMachine( - "create extension no result", - null, - - q(c, "CREATE EXTENSION pljava") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - // state 1: consume any diagnostics, or to state 2 with same item - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - - // state 2: must be end of input - (o,p,q) -> null == o - ); - } - - /* - * Get a new connection; 'create extension' always sets a near-silent - * logging level, and PL/Java only checks once at VM start time, so in - * the same session where 'create extension' was done, logging is - * somewhat suppressed. 
- */ - try ( Connection c = n1.connect() ) - { - succeeding &= stateMachine( - "saxon path examples path", - null, - - Node.installSaxonAndExamplesAndPath(c, - System.getProperty("mavenRepo"), - System.getProperty("saxonVer"), - true) - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - // states 1,2: diagnostics* then a void result set (saxon install) - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, - - // states 3,4: diagnostics* then a void result set (set classpath) - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 3 : -4, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 5 : false, - - // states 5,6: diagnostics* then void result set (example install) - (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 5 : -6, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 7 : false, - - // states 7,8: diagnostics* then a void result set (set classpath) - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 7 : -8, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 9 : false, - - // state 9: must be end of input - (o,p,q) -> null == o - ); - - /* - * Exercise TrialPolicy some. Need another connection to change - * vmoptions. Uses some example functions, so insert here before the - * test of undeploying the examples. 
- */ - try ( Connection c2 = n1.connect() ) - { - Path trialPolicy = - createTempFile(n1.data_dir().getParent(), "trial", "policy"); - - write(trialPolicy, List.of( - "grant {", - " permission", - " org.postgresql.pljava.policy.TrialPolicy$Permission;", - "};" - )); - - PreparedStatement setVmOpts = c2.prepareStatement( - "SELECT null::pg_catalog.void" + - " FROM pg_catalog.set_config('pljava.vmoptions', ?, false)" - ); - - setVmOpts.setString(1, vmopts + - " -Dorg.postgresql.pljava.policy.trial=" + trialPolicy.toUri()); - - succeeding &= stateMachine( - "change pljava.vmoptions", - null, - - q(setVmOpts, setVmOpts::execute) - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, - (o,p,q) -> null == o - ); - - PreparedStatement tryForbiddenRead = c2.prepareStatement( - "SELECT" + - " CASE WHEN javatest.java_getsystemproperty('java.home')" + - " OPERATOR(pg_catalog.=) ?" + - " THEN javatest.logmessage('INFO', 'trial policy test ok')" + - " ELSE javatest.logmessage('WARNING', 'trial policy test ng')" + - " END" - ); - - tryForbiddenRead.setString(1, javaHome); - - succeeding &= stateMachine( - "try to read a forbidden property", - null, - - q(tryForbiddenRead, tryForbiddenRead::execute) - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 1 : -2, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, - (o,p,q) -> null == o - ); - // done with connection c2 - } - - /* - * Also confirm that the generated undeploy actions work. - */ - succeeding &= stateMachine( - "remove jar void result", - null, - - q(c, "SELECT sqlj.remove_jar('examples', true)") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 
3 : false, - (o,p,q) -> null == o - ); - - /* - * Get another new connection and make sure the extension can be - * loaded in a non-superuser session. - */ - try ( Connection c2 = n1.connect() ) - { - succeeding &= stateMachine( - "become non-superuser", - null, - - q(c2, - "CREATE ROLE alice;" + - "GRANT USAGE ON SCHEMA sqlj TO alice;" + - "SET SESSION AUTHORIZATION alice") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> null == o - ); - - succeeding &= stateMachine( - "load as non-superuser", - null, - - q(c2, "SELECT null::pg_catalog.void" + - " FROM sqlj.get_classpath('public')") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, - (o,p,q) -> null == o - ); - // done with connection c2 again - } - - /* - * Make sure the extension drops cleanly and nothing - * is left in sqlj. - */ - succeeding &= stateMachine( - "drop extension and schema no result", - null, - - q(c, "DROP EXTENSION pljava;DROP SCHEMA sqlj") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> null == o - ); - } - - /* - * Get another new connection and confirm that the old, pre-extension, - * LOAD method of installing PL/Java works. It is largely obsolete in - * the era of extensions, but still covers the use case of installing - * PL/Java without admin access on the server filesystem to where - * CREATE EXTENSION requires the files to be; they can still be - * installed in some other writable location the server can read, and - * pljava.module_path set to the right locations of the jars, and the - * correct shared-object path given to LOAD. - * - * Also test the after-the-fact packaging up with CREATE EXTENSION - * FROM unpackaged. 
That officially goes away in PG 13, where the - * equivalent sequence - * CREATE EXTENSION pljava VERSION unpackaged - * \c - * ALTER EXTENSION pljava UPDATE - * should be tested instead. - */ - try ( Connection c = n1.connect() ) - { - int majorVersion = c.getMetaData().getDatabaseMajorVersion(); - - succeeding &= stateMachine( - "load as non-extension", - null, - - Node.loadPLJava(c) - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> null == o - ); - - if ( 13 <= majorVersion ) - { - succeeding &= stateMachine( - "create unpackaged (PG >= 13)", - null, - - q(c, "CREATE EXTENSION pljava VERSION unpackaged") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> null == o - ); - } - } - - /* - * CREATE EXTENSION FROM unpackaged (or the second half of the - * PG >= 13 CREATE EXTENSION VERSION unpackaged;ALTER EXTENSION UPDATE - * sequence) has to happen over a new connection. - */ - try ( Connection c = n1.connect() ) - { - int majorVersion = c.getMetaData().getDatabaseMajorVersion(); - - succeeding &= stateMachine( - "package after loading", - null, - - q(c, 13 > majorVersion - ? "CREATE EXTENSION pljava FROM unpackaged" - : "ALTER EXTENSION pljava UPDATE") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> null == o - ); - - /* - * Again make sure extension drops cleanly with nothing left behind. - */ - succeeding &= stateMachine( - "drop extension and schema no result", - null, - - q(c, "DROP EXTENSION pljava;DROP SCHEMA sqlj") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 
1 : -2, - (o,p,q) -> null == o - ); - } - } catch ( Throwable t ) - { - succeeding = false; - throw t; - } - - System.out.println(results); - succeeding &= (0 == results.get("ng")); - System.exit(succeeding ? 0 : 1) - ENDJSHELL diff --git a/.travis.yml b/.travis.yml index 3f3d9f36b..9af39d682 100644 --- a/.travis.yml +++ b/.travis.yml @@ -119,7 +119,7 @@ install: | "$mvn" clean install --batch-mode \ -Dpgsql.pgconfig="$pgConfig" \ -Dpljava.libjvmdefault="$libjvm" \ - -Psaxon-examples -Ppgjdbc-ng \ + -Psaxon-examples -Ppgjdbc \ -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn script: | @@ -136,8 +136,8 @@ script: | saxonVer=${saxonVer##*/} jdbcJar=$( - find "$mavenRepo/com/impossibl/pgjdbc-ng/pgjdbc-ng-all" \ - -name 'pgjdbc-ng-all-*.jar' -print | + find "$mavenRepo/org/postgresql/postgresql" \ + -name 'postgresql-*.jar' -print | sort | tail -n 1 ) @@ -150,349 +150,7 @@ script: | "--class-path=$packageJar" \ "-J--add-modules=java.sql.rowset" \ "-J-Dpgconfig=$pgConfig" \ - "-J-Dcom.impossibl.shadow.io.netty.noUnsafe=true" \ "-J-DmavenRepo=$mavenRepo" \ - "-J-DsaxonVer=$saxonVer" - <<\ENDJSHELL && # continues after here document - - boolean succeeding = false; // begin pessimistic - - import static java.nio.file.Files.createTempFile - import static java.nio.file.Files.write - import java.nio.file.Path - import static java.nio.file.Paths.get - import java.sql.Connection - import java.sql.PreparedStatement - import java.sql.ResultSet - import org.postgresql.pljava.packaging.Node - import static org.postgresql.pljava.packaging.Node.q - import static org.postgresql.pljava.packaging.Node.stateMachine - import static org.postgresql.pljava.packaging.Node.isVoidResultSet - - String vmopts = "-enableassertions:org.postgresql.pljava... 
-Xcheck:jni" - - Node n1 = Node.get_new_node("TestNode1") - - /* - * Keep a tally of the three types of diagnostic notices that may be received, - * and, independently, how many represent no-good test results (error always, - * but also warning if seen from the tests in the examples.jar deployment - * descriptor). - */ - Map results = - Stream.of("info", "warning", "error", "ng").collect( - LinkedHashMap::new, (m,k) -> m.put(k, 0), (r,s) -> {}) - - boolean isDiagnostic(Object o, Set whatIsNG) - { - if ( ! ( o instanceof Throwable ) ) - return false; - String[] parts = Node.classify((Throwable)o); - String type = parts[0]; - String message = parts[2]; - results.compute(type, (k,v) -> 1 + v); - if ( whatIsNG.contains(type) ) - if ( ! "warning".equals(type) || ! message.startsWith("[JEP 411]") ) - results.compute("ng", (k,v) -> 1 + v); - return true; - } - - try ( - AutoCloseable t1 = n1.initialized_cluster(); - AutoCloseable t2 = n1.started_server(Map.of( - "client_min_messages", "info", - "pljava.vmoptions", vmopts - )); - ) - { - try ( Connection c = n1.connect() ) - { - succeeding = true; // become optimistic, will be using &= below - - succeeding &= stateMachine( - "create extension no result", - null, - - q(c, "CREATE EXTENSION pljava") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - // state 1: consume any diagnostics, or go to state 2 without consuming - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - - // state 2: must be end of input - (o,p,q) -> null == o - ); - } - - /* - * Get a new connection; 'create extension' always sets a near-silent - * logging level, and PL/Java only checks once at VM start time, so in - * the same session where 'create extension' was done, logging is - * somewhat suppressed. 
- */ - try ( Connection c = n1.connect() ) - { - succeeding &= stateMachine( - "saxon path examples path", - null, - - Node.installSaxonAndExamplesAndPath(c, - System.getProperty("mavenRepo"), - System.getProperty("saxonVer"), - true) - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - // states 1,2: maybe diagnostics, then a void result set (saxon install) - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, - - // states 3,4: maybe diagnostics, then a void result set (set classpath) - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 3 : -4, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 5 : false, - - // states 5,6: maybe diagnostics, then void result set (example install) - (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 5 : -6, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 7 : false, - - // states 7,8: maybe diagnostics, then a void result set (set classpath) - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 7 : -8, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 9 : false, - - // state 9: must be end of input - (o,p,q) -> null == o - ); - - /* - * Exercise TrialPolicy some. Need another connection to change - * vmoptions. Uses some example functions, so insert here before the - * test of undeploying the examples. 
- */ - try ( Connection c2 = n1.connect() ) - { - Path trialPolicy = - createTempFile(n1.data_dir().getParent(), "trial", "policy"); - - write(trialPolicy, List.of( - "grant {", - " permission", - " org.postgresql.pljava.policy.TrialPolicy$Permission;", - "};" - )); - - PreparedStatement setVmOpts = c2.prepareStatement( - "SELECT null::pg_catalog.void" + - " FROM pg_catalog.set_config('pljava.vmoptions', ?, false)" - ); - - setVmOpts.setString(1, vmopts + - " -Dorg.postgresql.pljava.policy.trial=" + trialPolicy.toUri()); - - succeeding &= stateMachine( - "change pljava.vmoptions", - null, - - q(setVmOpts, setVmOpts::execute) - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, - (o,p,q) -> null == o - ); - - PreparedStatement tryForbiddenRead = c2.prepareStatement( - "SELECT" + - " CASE WHEN javatest.java_getsystemproperty('java.home')" + - " OPERATOR(pg_catalog.=) ?" + - " THEN javatest.logmessage('INFO', 'trial policy test ok')" + - " ELSE javatest.logmessage('WARNING', 'trial policy test ng')" + - " END" - ); - - tryForbiddenRead.setString(1, System.getProperty("java.home")); - - succeeding &= stateMachine( - "try to read a forbidden property", - null, - - q(tryForbiddenRead, tryForbiddenRead::execute) - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 1 : -2, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, - (o,p,q) -> null == o - ); - // done with connection c2 - } - - /* - * Also confirm that the generated undeploy actions work. - */ - succeeding &= stateMachine( - "remove jar void result", - null, - - q(c, "SELECT sqlj.remove_jar('examples', true)") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 
3 : false, - (o,p,q) -> null == o - ); - - /* - * Get another new connection and make sure the extension can be - * loaded in a non-superuser session. - */ - try ( Connection c2 = n1.connect() ) - { - succeeding &= stateMachine( - "become non-superuser", - null, - - q(c2, - "CREATE ROLE alice;" + - "GRANT USAGE ON SCHEMA sqlj TO alice;" + - "SET SESSION AUTHORIZATION alice") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> null == o - ); - - succeeding &= stateMachine( - "load as non-superuser", - null, - - q(c2, "SELECT null::pg_catalog.void" + - " FROM sqlj.get_classpath('public')") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, - (o,p,q) -> null == o - ); - // done with connection c2 again - } - - /* - * Make sure the extension drops cleanly and nothing - * is left in sqlj. - */ - succeeding &= stateMachine( - "drop extension and schema no result", - null, - - q(c, "DROP EXTENSION pljava;DROP SCHEMA sqlj") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> null == o - ); - } - - /* - * Get another new connection and confirm that the old, pre-extension, - * LOAD method of installing PL/Java works. It is largely obsolete in - * the era of extensions, but still covers the use case of installing - * PL/Java without admin access on the server filesystem to where - * CREATE EXTENSION requires the files to be; they can still be - * installed in some other writable location the server can read, and - * pljava.module_path set to the right locations of the jars, and the - * correct shared-object path given to LOAD. - * - * Also test the after-the-fact packaging up with CREATE EXTENSION - * FROM unpackaged. 
That officially goes away in PG 13, where the - * equivalent sequence - * CREATE EXTENSION pljava VERSION unpackaged - * \c - * ALTER EXTENSION pljava UPDATE - * should be tested instead. - */ - try ( Connection c = n1.connect() ) - { - int majorVersion = c.getMetaData().getDatabaseMajorVersion(); - - succeeding &= stateMachine( - "load as non-extension", - null, - - Node.loadPLJava(c) - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> null == o - ); - - if ( 13 <= majorVersion ) - { - succeeding &= stateMachine( - "create unpackaged (PG >= 13)", - null, - - q(c, "CREATE EXTENSION pljava VERSION unpackaged") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> null == o - ); - } - } - - /* - * CREATE EXTENSION FROM unpackaged (or the second half of the - * PG >= 13 CREATE EXTENSION VERSION unpackaged;ALTER EXTENSION UPDATE - * sequence) has to happen over a new connection. - */ - try ( Connection c = n1.connect() ) - { - int majorVersion = c.getMetaData().getDatabaseMajorVersion(); - - succeeding &= stateMachine( - "package after loading", - null, - - q(c, 13 > majorVersion - ? "CREATE EXTENSION pljava FROM unpackaged" - : "ALTER EXTENSION pljava UPDATE") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> null == o - ); - - /* - * Again make sure extension drops cleanly with nothing left behind. - */ - succeeding &= stateMachine( - "drop extension and schema no result", - null, - - q(c, "DROP EXTENSION pljava;DROP SCHEMA sqlj") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 
1 : -2, - (o,p,q) -> null == o - ); - } - } catch ( Throwable t ) - { - succeeding = false; - throw t; - } - - System.out.println(results); - succeeding &= (0 == results.get("ng")); - System.exit(succeeding ? 0 : 1) - ENDJSHELL - : travis wants something after the end of the here document + "-J-DsaxonVer=$saxonVer" \ + CI/integration + : travis wanted something here at the end once diff --git a/CI/integration b/CI/integration new file mode 100644 index 000000000..fd8da7676 --- /dev/null +++ b/CI/integration @@ -0,0 +1,614 @@ +/* + * Copyright (c) 2020-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + * Kartik Ohri + * + * This jshell script performs basic integration tests for PL/Java's CI. + * + * It must be executed with the built PL/Java packaged jar (produced by the + * pljava-packaging subproject) on the classpath, as well as a PGJDBC or + * pgjdbc-ng full jar. The PL/Java packaged jar includes a Node.class + * exporting functions not unlike the Perl module once called PostgresNode + * (and now called PostgreSQL::Test::Cluster) in the PostgreSQL distribution. + * The javadocs for Node.class explain the available functions. + * + * When jshell runs this script with -execution local, it needs both a + * --class-path and a -J--class-path argument. The former need only contain + * the PL/Java jar itself, so the contents are visible to jshell. The -J version + * passed to the underlying JVM needs both that jar and the PGJDBC or pgjdbc-ng + * driver jar. The driver classes need not be visible to jshell, but the JVM + * must be able to find them. + * + * Tests included in this script require + * -J--add-modules=java.sql.rowset,jdk.httpserver + * on the jshell command line. 
+ * + * These Java properties must be set (as with -J-Dpgconfig=...) on the jshell + * command line: + * + * pgconfig + * the path to the pg_config executable that will be used to locate + * the PostgreSQL installation to be used in the tests + * mavenRepo + * the topmost directory of the local Maven repository. The Saxon jar + * downloaded as a dependency (when -Psaxon-examples was used on the mvn + * command line for building) will be found in this repository + * saxonVer + * the version of the Saxon library to use (appears in the library jar + * file name and as the name of its containing directory in the repository) + * + * These properties are optional (their absence is equivalent to a setting + * of false): + * + * redirectError + * if true, the standard error stream from the tests will be merged into + * the standard output stream. This can be desirable if this script is + * invoked from Windows PowerShell, which believes a standard error stream + * should only carry Error Records and makes an awful mess of anything else. + * extractFiles + * if true, begin by extracting and installing the PL/Java files from the jar + * into the proper locations indicated by the pg_config executable. If false, + * extraction will be skipped, assumed to have been done in a separate step + * simply running java -jar on the PL/Java packaged jar. Doing the extraction + * here can be useful, if this script is run with the needed permissions to + * write in the PostgreSQL install locations, when combined with redirectError + * if running under PowerShell, which would otherwise mess up the output. + * + * The script does not (yet) produce output in any standardized format such as + * TAP. The output will include numerous , , , or + * elements. If it runs to completion there will be a line with counts + * for info, warning, error, and ng. The count of ng results includes errors + * and certain warnings. 
The tests that are run from the deployment descriptor + * of the pljava-examples jar report test failures as warnings (to avoid cutting + * short the test as an error would), so those warnings are counted in ng. + * + * jshell will exit with a nonzero status if ng > 0 or anything else was seen + * to go wrong or the script did not run to completion. + */ +boolean succeeding = false; // begin pessimistic + +boolean redirectError = Boolean.getBoolean("redirectError"); + +if ( redirectError ) + System.setErr(System.out); // PowerShell makes a mess of stderr output + +UnaryOperator tweaks = + redirectError ? p -> p.redirectErrorStream(true) : UnaryOperator.identity(); + +import static java.nio.file.Files.createTempFile; +import static java.nio.file.Files.write; +import java.nio.file.Path; +import static java.nio.file.Paths.get; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import org.postgresql.pljava.packaging.Node; +import static org.postgresql.pljava.packaging.Node.q; +import static org.postgresql.pljava.packaging.Node.stateMachine; +import static org.postgresql.pljava.packaging.Node.isVoidResultSet; +import static org.postgresql.pljava.packaging.Node.s_isWindows; +import static + org.postgresql.pljava.packaging.Node.NOTHING_OR_PGJDBC_ZERO_COUNT; +/* + * Imports that will be needed to serve a jar file over http + * when the time comes for testing that. 
+ */ +import static java.nio.charset.StandardCharsets.UTF_8; +import java.util.jar.Attributes; +import java.util.jar.Manifest; +import java.util.jar.JarOutputStream; +import java.util.zip.ZipEntry; +import com.sun.net.httpserver.BasicAuthenticator; +import com.sun.net.httpserver.HttpContext; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; + +if ( Boolean.getBoolean("extractFiles") ) + Node.main(new String[0]); // extract the files + +String javaHome = System.getProperty("java.home"); + +Path javaLibDir = get(javaHome, s_isWindows ? "bin" : "lib"); + +Path libjvm = ( + "Mac OS X".equals(System.getProperty("os.name")) + ? Stream.of("libjli.dylib", "jli/libjli.dylib") + .map(s -> javaLibDir.resolve(s)) + .filter(Files::exists).findFirst().get() + : javaLibDir.resolve(s_isWindows ? "server\\jvm.dll" : "server/libjvm.so") +); + +// Use deprecated major() here because feature() first appears in Java 10 +int jFeatureVersion = Runtime.version().major(); + +String vmopts = "-enableassertions:org.postgresql.pljava... 
-Xcheck:jni"; + +vmopts += " --limit-modules=org.postgresql.pljava.internal"; + +if ( 24 <= jFeatureVersion ) { + vmopts += " -Djava.security.manager=disallow"; // JEP 486 +} else if ( 18 <= jFeatureVersion ) + vmopts += " -Djava.security.manager=allow"; // JEP 411 + +if ( 23 <= jFeatureVersion ) + vmopts += " --sun-misc-unsafe-memory-access=deny"; // JEP 471 + +if ( 24 <= jFeatureVersion ) + vmopts += " --illegal-native-access=deny"; // JEP 472 + +Map serverOptions = new HashMap<>(Map.of( + "client_min_messages", "info", + "pljava.vmoptions", vmopts, + "pljava.libjvm_location", libjvm.toString() +)); +if ( 24 <= jFeatureVersion ) { + serverOptions.put("pljava.allow_unenforced", "java,java_tzset"); + serverOptions.put("pljava.allow_unenforced_udt", "on"); +} + +Node n1 = Node.get_new_node("TestNode1"); + +if ( s_isWindows ) + n1.use_pg_ctl(true); + +/* + * Keep a tally of the three types of diagnostic notices that may be + * received, and, independently, how many represent no-good test results + * (error always, but also warning if seen from the tests in the + * examples.jar deployment descriptor). + */ +Map results = + Stream.of("info", "warning", "error", "ng").collect( + LinkedHashMap::new, + (m,k) -> m.put(k, 0), (r,s) -> {}); + +boolean isDiagnostic(Object o, Set whatIsNG) +{ + if ( ! ( o instanceof Throwable ) ) + return false; + String[] parts = Node.classify((Throwable)o); + String type = parts[0]; + String message = parts[2]; + results.compute(type, (k,v) -> 1 + v); + if ( whatIsNG.contains(type) ) + if ( ! "warning".equals(type) || ! message.startsWith("[JEP 411]") ) + results.compute("ng", (k,v) -> 1 + v); + return true; +} + +/* + * Write a trial policy into a temporary file in n's data_dir, + * and set pljava.vmoptions accordingly over connection c. + * Returns the 'succeeding' flag from the state machine looking + * at the command results. 
+ */ +boolean useTrialPolicy(Node n, Connection c, List contents) +throws Exception +{ + Path trialPolicy = + createTempFile(n.data_dir().getParent(), "trial", "policy"); + + write(trialPolicy, contents); + + PreparedStatement setVmOpts = c.prepareStatement( + "SELECT null::pg_catalog.void" + + " FROM pg_catalog.set_config('pljava.vmoptions', ?, false)" + ); + + setVmOpts.setString(1, vmopts + + " -Dorg.postgresql.pljava.policy.trial=" + trialPolicy.toUri()); + + return stateMachine( + "change pljava.vmoptions", + null, + + q(setVmOpts, setVmOpts::execute) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + (o,p,q) -> null == o + ); +} + +try ( + AutoCloseable t1 = n1.initialized_cluster(tweaks); + AutoCloseable t2 = n1.started_server(serverOptions, tweaks); +) +{ + int pgMajorVersion; + + try ( Connection c = n1.connect() ) + { + pgMajorVersion = c.getMetaData().getDatabaseMajorVersion(); + + succeeding = true; // become optimistic, will be using &= below + + succeeding &= stateMachine( + "create extension no result", + null, + + q(c, "CREATE EXTENSION pljava") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + // state 1: consume any diagnostics, or to state 2 with same item + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + + NOTHING_OR_PGJDBC_ZERO_COUNT, // state 2 + + // state 3: must be end of input + (o,p,q) -> null == o + ); + } + + /* + * Get a new connection; 'create extension' always sets a near-silent + * logging level, and PL/Java only checks once at VM start time, so in + * the same session where 'create extension' was done, logging is + * somewhat suppressed. 
+ */ + try ( Connection c = n1.connect() ) + { + succeeding &= stateMachine( + "saxon path examples path", + null, + + Node.installSaxonAndExamplesAndPath(c, + System.getProperty("mavenRepo"), + System.getProperty("saxonVer"), + true) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + // states 1,2: diagnostics* then a void result set (saxon install) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + + // states 3,4: diagnostics* then a void result set (set classpath) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 3 : -4, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 5 : false, + + // states 5,6: diagnostics* then void result set (example install) + (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 5 : -6, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 7 : false, + + // states 7,8: diagnostics* then a void result set (set classpath) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 7 : -8, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 9 : false, + + // state 9: must be end of input + (o,p,q) -> null == o + ); + + /* + * Exercise TrialPolicy some. Need another connection to change + * vmoptions. Uses some example functions, so insert here before the + * test of undeploying the examples. + */ + try ( Connection c2 = n1.connect() ) + { + succeeding &= useTrialPolicy(n1, c2, List.of( + "grant {", + " permission", + " org.postgresql.pljava.policy.TrialPolicy$Permission;", + "};" + )); + + PreparedStatement tryForbiddenRead = c2.prepareStatement( + "SELECT" + + " CASE WHEN javatest.java_getsystemproperty('java.home')" + + " OPERATOR(pg_catalog.=) ?" 
+ + " THEN javatest.logmessage('INFO', 'trial policy test ok')" + + " ELSE javatest.logmessage('WARNING', 'trial policy test ng')" + + " END" + ); + + tryForbiddenRead.setString(1, javaHome); + + succeeding &= stateMachine( + "try to read a forbidden property", + null, + + q(tryForbiddenRead, tryForbiddenRead::execute) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + (o,p,q) -> null == o + ); + // done with connection c2 + } + + /* + * Spin up an http server with a little jar file to serve, and test + * that install_jar works with an http: url. + * + * First make a little jar empty but for a deployment descriptor. + */ + String ddrName = "foo.ddr"; + Attributes a = new Attributes(); + a.putValue("SQLJDeploymentDescriptor", "TRUE"); + Manifest m = new Manifest(); + m.getEntries().put(ddrName, a); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + JarOutputStream jos = new JarOutputStream(baos, m); + jos.putNextEntry(new ZipEntry(ddrName)); + jos.write( + ( + "SQLActions[]={\n\"BEGIN INSTALL\n" + + "SELECT javatest.logmessage('INFO'," + + " 'jar installed from http');\n" + + "END INSTALL\",\n\"BEGIN REMOVE\n" + + "BEGIN dummy\n" + + "END dummy;\n" + + "END REMOVE\"\n}\n" + ).getBytes(UTF_8) + ); + jos.closeEntry(); + jos.close(); + byte[] jar = baos.toByteArray(); + + /* + * Now an http server. 
+ */ + HttpServer hs = + HttpServer.create(new InetSocketAddress("localhost", 0), 0); + + try ( + Connection c2 = n1.connect(); + AutoCloseable t = ((Supplier)() -> + { + hs.start(); + return () -> hs.stop(0); + } + ).get() + ) + { + InetSocketAddress addr = hs.getAddress(); + + String id = "bar", pw = "baz"; + + URL u = new URI( + "http", id+':'+pw, addr.getHostString(), addr.getPort(), + "/foo.jar", null, null + ).toURL(); + + HttpContext hc = hs.createContext( + u.getPath(), + new HttpHandler() + { + @Override + public void handle(HttpExchange t) throws IOException + { + try ( InputStream is = t.getRequestBody() ) { + is.readAllBytes(); + } + t.getResponseHeaders().add( + "Content-Type", "application/java-archive"); + t.sendResponseHeaders(200, jar.length); + try ( OutputStream os = t.getResponseBody() ) { + os.write(jar); + } + } + } + ); + + hc.setAuthenticator( + new BasicAuthenticator("CI realm") + // ("CI realm", UTF_8) only available in Java 14 or later + { + @Override + public boolean checkCredentials(String c_id, String c_pw) + { + return id.equals(c_id) && pw.equals(c_pw); + } + } + ); + + succeeding &= useTrialPolicy(n1, c2, List.of( + "grant codebase \"${org.postgresql.pljava.codesource}\" {", + " permission", + " java.net.URLPermission \"http:*\", \"GET:Accept\";", + "};" + )); + + succeeding &= stateMachine( + "install a jar over http", + null, + + Node.installJar(c2, u.toString(), "foo", true) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + (o,p,q) -> null == o + ); + + // done with connection c2 again, and the http server + } + + /* + * Also confirm that the generated undeploy actions work. 
+ */ + succeeding &= stateMachine( + "remove jar void result", + null, + + q(c, "SELECT sqlj.remove_jar('examples', true)") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + (o,p,q) -> null == o + ); + + /* + * Get another new connection and make sure the extension can be + * loaded in a non-superuser session. + */ + try ( Connection c2 = n1.connect() ) + { + succeeding &= stateMachine( + "become non-superuser", + null, + + q(c2, + "CREATE ROLE alice;" + + "GRANT USAGE ON SCHEMA sqlj TO alice;" + + "SET SESSION AUTHORIZATION alice") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + NOTHING_OR_PGJDBC_ZERO_COUNT, + NOTHING_OR_PGJDBC_ZERO_COUNT, + NOTHING_OR_PGJDBC_ZERO_COUNT, + (o,p,q) -> null == o + ); + + succeeding &= stateMachine( + "load as non-superuser", + null, + + q(c2, "SELECT null::pg_catalog.void" + + " FROM sqlj.get_classpath('public')") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + (o,p,q) -> null == o + ); + // done with connection c2 again + } + + /* + * Make sure the extension drops cleanly and nothing + * is left in sqlj. + */ + succeeding &= stateMachine( + "drop extension and schema no result", + null, + + q(c, "DROP EXTENSION pljava;DROP SCHEMA sqlj") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + NOTHING_OR_PGJDBC_ZERO_COUNT, + NOTHING_OR_PGJDBC_ZERO_COUNT, + (o,p,q) -> null == o + ); + } + + /* + * Get another new connection and confirm that the old, pre-extension, + * LOAD method of installing PL/Java works. 
It is largely obsolete in + * the era of extensions, but still covers the use case of installing + * PL/Java without admin access on the server filesystem to where + * CREATE EXTENSION requires the files to be; they can still be + * installed in some other writable location the server can read, and + * pljava.module_path set to the right locations of the jars, and the + * correct shared-object path given to LOAD. + * + * Also test the after-the-fact packaging up with CREATE EXTENSION + * FROM unpackaged. That officially goes away in PG 13, where the + * equivalent sequence + * CREATE EXTENSION pljava VERSION unpackaged + * \c + * ALTER EXTENSION pljava UPDATE + * should be tested instead. + */ + try ( Connection c = n1.connect() ) + { + succeeding &= stateMachine( + "load as non-extension", + null, + + Node.loadPLJava(c) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + NOTHING_OR_PGJDBC_ZERO_COUNT, + (o,p,q) -> null == o + ); + + if ( 13 <= pgMajorVersion ) + { + succeeding &= stateMachine( + "create unpackaged (PG >= 13)", + null, + + q(c, "CREATE EXTENSION pljava VERSION unpackaged") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + NOTHING_OR_PGJDBC_ZERO_COUNT, + (o,p,q) -> null == o + ); + } + } + + /* + * CREATE EXTENSION FROM unpackaged (or the second half of the + * PG >= 13 CREATE EXTENSION VERSION unpackaged;ALTER EXTENSION UPDATE + * sequence) has to happen over a new connection. + */ + try ( Connection c = n1.connect() ) + { + succeeding &= stateMachine( + "package after loading", + null, + + q(c, 13 > pgMajorVersion + ? "CREATE EXTENSION pljava FROM unpackaged" + : "ALTER EXTENSION pljava UPDATE") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 
1 : -2, + NOTHING_OR_PGJDBC_ZERO_COUNT, + (o,p,q) -> null == o + ); + + /* + * Again make sure extension drops cleanly with nothing left behind. + */ + succeeding &= stateMachine( + "drop extension and schema no result", + null, + + q(c, "DROP EXTENSION pljava;DROP SCHEMA sqlj") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + NOTHING_OR_PGJDBC_ZERO_COUNT, + NOTHING_OR_PGJDBC_ZERO_COUNT, + (o,p,q) -> null == o + ); + } +} catch ( Throwable t ) +{ + succeeding = false; + throw t; +} + +System.out.println(results); +succeeding &= (0 == results.get("ng")); +System.exit(succeeding ? 0 : 1); diff --git a/appveyor.yml b/appveyor.yml index bf404ff0b..56524ef64 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,59 +1,44 @@ +# These only_commits and branches settings ought to pretty much suppress +# Appveyor, whose runs have all been failing lately because of Maven repository +# connection resets that don't seem reproducible locally. This can be revisited +# later to see if things might be working again. 
+only_commits: + message: /appveyor/ +branches: + only: + - appveyor image: Visual Studio 2019 environment: APPVEYOR_RDP_PASSWORD: MrRobot@2020 VCVARSALL: C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat matrix: -# - SYS: MINGW -# JDK: 9 -# PG: 12 -# - SYS: MINGW -# JDK: 10 -# PG: 12 - SYS: MINGW JDK: 11 - PG: 12 - - SYS: MINGW - JDK: 12 - PG: 12 + PG: pacman - SYS: MINGW - JDK: 13 - PG: 12 + JDK: 17 + PG: pacman - SYS: MINGW - JDK: 14 - PG: 12 + JDK: 19 + PG: pacman - SYS: MINGW - JDK: 15 - PG: 12 - - SYS: MSVC - JDK: 15 - PG: 12 - - SYS: MSVC - JDK: 14 - PG: 12 + JDK: 21 + PG: pacman - SYS: MSVC - JDK: 13 - PG: 12 + JDK: 21 + PG: 15 - SYS: MSVC - JDK: 12 - PG: 12 + JDK: 21 + PG: 14 - SYS: MSVC - JDK: 11 - PG: 12 + JDK: 21 + PG: 13 # - SYS: MSVC -# JDK: 10 +# JDK: 21 # PG: 12 # - SYS: MSVC -# JDK: 9 -# PG: 12 - - SYS: MSVC - JDK: 14 - PG: 11 - - SYS: MSVC - JDK: 14 - PG: 10 - - SYS: MSVC - JDK: 14 - PG: 9.6 +# JDK: 11 +# PG: 9.6 before_build: - ps: .appveyor/appveyor_download_java.ps1 - set JAVA_HOME=%ProgramFiles%\Java\jdk%JDK% @@ -80,366 +65,19 @@ test_script: Select-Object -Last 1 ).Directory.Name - $jdbcJar = (Join-Path $mavenRepo "com\impossibl\pgjdbc-ng\pgjdbc-ng-all" | - Get-ChildItem -Recurse -Filter pgjdbc-ng-all-*.jar | + $jdbcJar = (Join-Path $mavenRepo "org\postgresql\postgresql" | + Get-ChildItem -Recurse -Filter postgresql-*.jar | Select-Object -Last 1 ).FullName - @' - boolean succeeding = false; // begin pessimistic - - import static java.nio.file.Files.createTempFile - import static java.nio.file.Files.write - import java.nio.file.Path - import static java.nio.file.Paths.get - import java.sql.Connection - import java.sql.PreparedStatement - import java.sql.ResultSet - import org.postgresql.pljava.packaging.Node - import static org.postgresql.pljava.packaging.Node.q - import static org.postgresql.pljava.packaging.Node.stateMachine - import static org.postgresql.pljava.packaging.Node.isVoidResultSet - - System.setErr(System.out); 
// PowerShell makes a mess of stderr output - - Node.main(new String[0]); // Extract the files (with output to stdout) - - String vmopts = "-enableassertions:org.postgresql.pljava... -Xcheck:jni" - - Node n1 = Node.get_new_node("TestNode1") - - n1.use_pg_ctl(true) - - /* - * Keep a tally of the three types of diagnostic notices that may be - * received, and, independently, how many represent no-good test results - * (error always, but also warning if seen from the tests in the - * examples.jar deployment descriptor). - */ - Map results = - Stream.of("info", "warning", "error", "ng").collect( - LinkedHashMap::new, (m,k) -> m.put(k, 0), (r,s) -> {}) - - boolean isDiagnostic(Object o, Set whatIsNG) - { - if ( ! ( o instanceof Throwable ) ) - return false; - String[] parts = Node.classify((Throwable)o); - String type = parts[0]; - String message = parts[2]; - results.compute(type, (k,v) -> 1 + v); - if ( whatIsNG.contains(type) ) - if ( ! "warning".equals(type) || ! message.startsWith("[JEP 411]") ) - results.compute("ng", (k,v) -> 1 + v); - return true; - } - - try ( - AutoCloseable t1 = n1.initialized_cluster( - p->p.redirectErrorStream(true)); - AutoCloseable t2 = n1.started_server(Map.of( - "client_min_messages", "info", - "pljava.vmoptions", vmopts - ), p->p.redirectErrorStream(true)); - ) - { - try ( Connection c = n1.connect() ) - { - succeeding = true; // become optimistic, will be using &= below - - succeeding &= stateMachine( - "create extension no result", - null, - - q(c, "CREATE EXTENSION pljava") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - // state 1: consume any diagnostics, or show same item to state 2 - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 
1 : -2, - - // state 2: must be end of input - (o,p,q) -> null == o - ); - } - - /* - * Get a new connection; 'create extension' always sets a near-silent - * logging level, and PL/Java only checks once at VM start time, so in - * the same session where 'create extension' was done, logging is - * somewhat suppressed. - */ - try ( Connection c = n1.connect() ) - { - succeeding &= stateMachine( - "saxon path examples path", - null, - - Node.installSaxonAndExamplesAndPath(c, - System.getProperty("mavenRepo"), - System.getProperty("saxonVer"), - true) - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - // states 1,2: diagnostics* then a void result set (saxon install) - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, - - // states 3,4: diagnostics* then a void result set (set classpath) - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 3 : -4, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 5 : false, - - // states 5,6: diagnostics* then void result set (example install) - (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 5 : -6, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 7 : false, - - // states 7,8: diagnostics* then a void result set (set classpath) - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 7 : -8, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 9 : false, - - // state 9: must be end of input - (o,p,q) -> null == o - ); - - /* - * Exercise TrialPolicy some. Need another connection to change - * vmoptions. Uses some example functions, so insert here before the - * test of undeploying the examples. 
- */ - try ( Connection c2 = n1.connect() ) - { - Path trialPolicy = - createTempFile(n1.data_dir().getParent(), "trial", "policy"); - - write(trialPolicy, List.of( - "grant {", - " permission", - " org.postgresql.pljava.policy.TrialPolicy$Permission;", - "};" - )); - - PreparedStatement setVmOpts = c2.prepareStatement( - "SELECT null::pg_catalog.void" + - " FROM pg_catalog.set_config('pljava.vmoptions', ?, false)" - ); - - setVmOpts.setString(1, vmopts + - " -Dorg.postgresql.pljava.policy.trial=" + trialPolicy.toUri()); - - succeeding &= stateMachine( - "change pljava.vmoptions", - null, - - q(setVmOpts, setVmOpts::execute) - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, - (o,p,q) -> null == o - ); - - PreparedStatement tryForbiddenRead = c2.prepareStatement( - "SELECT" + - " CASE WHEN javatest.java_getsystemproperty('java.home')" + - " OPERATOR(pg_catalog.=) ?" + - " THEN javatest.logmessage('INFO', 'trial policy test ok')" + - " ELSE javatest.logmessage('WARNING', 'trial policy test ng')" + - " END" - ); - - tryForbiddenRead.setString(1, System.getProperty("java.home")); - - succeeding &= stateMachine( - "try to read a forbidden property", - null, - - q(tryForbiddenRead, tryForbiddenRead::execute) - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 1 : -2, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, - (o,p,q) -> null == o - ); - // done with connection c2 - } - - /* - * Also confirm that the generated undeploy actions work. - */ - succeeding &= stateMachine( - "remove jar void result", - null, - - q(c, "SELECT sqlj.remove_jar('examples', true)") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 
3 : false, - (o,p,q) -> null == o - ); - - /* - * Get another new connection and make sure the extension can be - * loaded in a non-superuser session. - */ - try ( Connection c2 = n1.connect() ) - { - succeeding &= stateMachine( - "become non-superuser", - null, - - q(c2, - "CREATE ROLE alice;" + - "GRANT USAGE ON SCHEMA sqlj TO alice;" + - "SET SESSION AUTHORIZATION alice") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> null == o - ); - - succeeding &= stateMachine( - "load as non-superuser", - null, - - q(c2, "SELECT null::pg_catalog.void" + - " FROM sqlj.get_classpath('public')") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, - (o,p,q) -> null == o - ); - // done with connection c2 again - } - - /* - * Make sure the extension drops cleanly and nothing - * is left in sqlj. - */ - succeeding &= stateMachine( - "drop extension and schema no result", - null, - - q(c, "DROP EXTENSION pljava;DROP SCHEMA sqlj") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> null == o - ); - } - - /* - * Get another new connection and confirm that the old, pre-extension, - * LOAD method of installing PL/Java works. It is largely obsolete in - * the era of extensions, but still covers the use case of installing - * PL/Java without admin access on the server filesystem to where - * CREATE EXTENSION requires the files to be; they can still be - * installed in some other writable location the server can read, and - * pljava.module_path set to the right locations of the jars, and the - * correct shared-object path given to LOAD. - * - * Also test the after-the-fact packaging up with CREATE EXTENSION - * FROM unpackaged. 
That officially goes away in PG 13, where the - * equivalent sequence - * CREATE EXTENSION pljava VERSION unpackaged - * \c - * ALTER EXTENSION pljava UPDATE - * should be tested instead. - */ - try ( Connection c = n1.connect() ) - { - int majorVersion = c.getMetaData().getDatabaseMajorVersion(); - - succeeding &= stateMachine( - "load as non-extension", - null, - - Node.loadPLJava(c) - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> null == o - ); - - if ( 13 <= majorVersion ) - { - succeeding &= stateMachine( - "create unpackaged (PG >= 13)", - null, - - q(c, "CREATE EXTENSION pljava VERSION unpackaged") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> null == o - ); - } - } - - /* - * CREATE EXTENSION FROM unpackaged (or the second half of the - * PG >= 13 CREATE EXTENSION VERSION unpackaged;ALTER EXTENSION UPDATE - * sequence) has to happen over a new connection. - */ - try ( Connection c = n1.connect() ) - { - int majorVersion = c.getMetaData().getDatabaseMajorVersion(); - - succeeding &= stateMachine( - "package after loading", - null, - - q(c, 13 > majorVersion - ? "CREATE EXTENSION pljava FROM unpackaged" - : "ALTER EXTENSION pljava UPDATE") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, - (o,p,q) -> null == o - ); - - /* - * Again make sure extension drops cleanly with nothing left behind. - */ - succeeding &= stateMachine( - "drop extension and schema no result", - null, - - q(c, "DROP EXTENSION pljava;DROP SCHEMA sqlj") - .flatMap(Node::semiFlattenDiagnostics) - .peek(Node::peek), - - (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 
1 : -2, - (o,p,q) -> null == o - ); - } - } catch ( Throwable t ) - { - succeeding = false; - throw t; - } - - System.out.println(results); - succeeding &= (0 == results.get("ng")); - System.exit(succeeding ? 0 : 1) - '@ | jshell ` -execution local ` "-J--class-path=$packageJar;$jdbcJar" ` "--class-path=$packageJar" ` - "-J--add-modules=java.sql.rowset" ` - "-J-Dcom.impossibl.shadow.io.netty.noUnsafe=true" ` + "-J--add-modules=java.sql.rowset,jdk.httpserver" ` "-J-Dpgconfig=$pgConfig" ` "-J-DmavenRepo=$mavenRepo" ` - "-J-DsaxonVer=$saxonVer" - + "-J-DsaxonVer=$saxonVer" ` + "-J-DredirectError=true" ` + "-J-DextractFiles=true" ` + CI\integration diff --git a/pljava-api/src/main/java/module-info.java b/pljava-api/src/main/java/module-info.java index fbfcb8bd4..d501d86a9 100644 --- a/pljava-api/src/main/java/module-info.java +++ b/pljava-api/src/main/java/module-info.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2020-2023 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -21,14 +21,21 @@ requires transitive java.compiler; exports org.postgresql.pljava; + exports org.postgresql.pljava.adt; + exports org.postgresql.pljava.adt.spi; exports org.postgresql.pljava.annotation; + exports org.postgresql.pljava.model; exports org.postgresql.pljava.sqlgen; exports org.postgresql.pljava.annotation.processing to org.postgresql.pljava.internal; + uses org.postgresql.pljava.Adapter.Service; + uses org.postgresql.pljava.Session; + uses org.postgresql.pljava.model.CatalogObject.Factory; + provides javax.annotation.processing.Processor with org.postgresql.pljava.annotation.processing.DDRProcessor; } diff --git a/pljava-api/src/main/java/org/postgresql/pljava/Adapter.java b/pljava-api/src/main/java/org/postgresql/pljava/Adapter.java new file mode 100644 index 000000000..6783d7275 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/Adapter.java @@ -0,0 +1,2040 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles.Lookup; +import static java.lang.invoke.MethodHandles.collectArguments; +import static java.lang.invoke.MethodHandles.dropArguments; +import static java.lang.invoke.MethodHandles.lookup; +import static java.lang.invoke.MethodHandles.permuteArguments; +import java.lang.invoke.MethodType; +import static java.lang.invoke.MethodType.methodType; + +import static java.lang.reflect.Array.newInstance; +import java.lang.reflect.Method; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.lang.reflect.TypeVariable; + +import java.security.Permission; +import java.security.PermissionCollection; + +import java.sql.SQLException; +import java.sql.SQLDataException; + +import java.util.Arrays; +import static java.util.Arrays.stream; +import static java.util.Collections.emptyEnumeration; +import static java.util.Collections.enumeration; +import java.util.Enumeration; +import java.util.List; +import static java.util.Objects.requireNonNull; +import java.util.ServiceConfigurationError; +import java.util.ServiceLoader; + +import java.util.function.Consumer; +import java.util.function.Predicate; + +import org.postgresql.pljava.adt.spi.AbstractType; +import org.postgresql.pljava.adt.spi.AbstractType.Bindings; +import org.postgresql.pljava.adt.spi.AbstractType.MultiArray; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.adt.spi.TwosComplement; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegType; +import org.postgresql.pljava.model.TupleTableSlot.Indexed; + +import org.postgresql.pljava.model.SlotTester.Visible; 
// temporary for test jig + +import static org.postgresql.pljava.adt.spi.AbstractType.erase; +import static org.postgresql.pljava.adt.spi.AbstractType.isSubtype; +import static org.postgresql.pljava.adt.spi.AbstractType.refine; +import static org.postgresql.pljava.adt.spi.AbstractType.specialization; +import static org.postgresql.pljava.adt.spi.AbstractType.substitute; + +/** + * Base for classes that implement data types over raw PostgreSQL datums. + *

+ * A PL/Java data type adapter is a concrete subclass of this class that knows + * the structure of one or more PostgreSQL data types and can convert between + * their raw {@code Datum} form and an appropriate Java class or primitive type + * for use in PL/Java code. It will use the {@code Via...} enum declared here + * (to indicate how it will access the PostgreSQL {@code Datum}), and extend + * an {@code As...} abstract class declared here (to indicate the supported + * Java reference or primitive type). + *

+ * An adapter should be stateless and thread-safe. There should be no need to + * instantiate more than one instance of an adapter for a given type mapping. + *

+ * An adapter has a "top" type T, indicating the type it will present to client + * code, and an "under" type U, which client code can generally wildcard and + * ignore; an implementing class that can be composed over another adapter uses + * U to indicate what that "under" adapter's "top" type must be. The Java + * compiler records enough information for both parameters to allow PL/Java to + * reconstruct the type relationships in a stack of composed adapters. + *

+ * An implementing leaf adapter (which will work directly on PostgreSQL Datum + * rather than being composed over another adapter) can declare {@code Void} + * for U by convention. An adapter meant to be composed over another, where the + * "under" adapter has a primitive type, can declare the primitive type's boxed + * counterpart as U. + *

+ * For a primitive-typed adapter, the "top" type is implicit in the class name + * {@code AsLong}, {@code AsInt}, and so on, and the "under" type follows as the + * parameter U. For ease of reading, the type parameters of the two-parameter + * classes like {@code As} are also in that order, T first. + *

+ * The precise meaning of the "top" type T depends on whether an adapter is + * an instance of {@code As} or of {@code Primitive}. In the + * {@code As} case, the top type is a reference type and is given by T directly. + * In the primitive case, T is the boxed counterpart of the actual top type. + *

+ * To preserve type safety, only recognized "leaf" adapters (those registered + * to {@link #configure configure} with a non-null {@link Via via}) + * will be able to manipulate raw {@code Datum}s. An adapter class + * should avoid leaking a {@code Datum} to other code. + */ +public abstract class Adapter implements Visible +{ + /** + * The full generic type returned by this adapter, as refined at the time + * of construction, making use of the type returned by an "under" adapter + * or array contract, if used. + */ + final Type m_topType; + + /** + * The erasure of the type to be returned. + */ + final Class m_topErased; + + /** + * The "under" adapter in the composed case; null in a leaf adapter. + */ + final Adapter m_underAdapter; + + /** + * Method handle constructed for this adapter's fetch operation. + */ + final MethodHandle m_fetchHandle; + + /** + * In this private constructor, witness is declared as + * {@code Type} rather than {@code Class}. + *

+ * It can be invoked that way from {@code As} for array adapters; otherwise, + * the subclass constructors all declare the parameter as {@code Class}. + *

+ * The adapter and contract here are raw types. The accessible subclass + * constructors will constrain their type arguments to be compatible. + */ + private Adapter( + Configuration configuration, Adapter over, Contract using, Type witness) + { + requireNonNull(configuration, + () -> getClass() + " instantiated without a Configuration object"); + if ( getClass() != configuration.m_class ) + throw new IllegalArgumentException( + getClass() + " instantiated with a Configuration object " + + "for the wrong class"); + + if ( configuration instanceof Configuration.Leaf ) + { + if ( null != over ) + throw new IllegalArgumentException( + getClass() + " instantiated with non-null 'over' but is " + + "a leaf adapter"); + + Configuration.Leaf leaf = (Configuration.Leaf)configuration; + + Type top = leaf.m_top; + /* + * If instantiated with a subclass of Contract, the type with + * which it specializes Contract may tell us more than our top + * type precomputed at configuration. + */ + if ( null != using ) + { + if ( witness instanceof TypeWrapper ) + { + top = ((TypeWrapper)witness).wrapped; + witness = null; + } + else + top = specialization(using.getClass(), Contract.class)[0]; + } + + MethodHandle mh = leaf.m_fetch.bindTo(this); + + @SuppressWarnings("unchecked") + Class erased = (Class)erase(top); + + if ( null == witness ) + { + if ( top instanceof TypeVariable + && 1 == ((TypeVariable)top).getBounds().length ) + top = erased; + } + else + { + if ( ! isSubtype(witness, erased) ) + throw new IllegalArgumentException( + "cannot instantiate " + getClass() + " as " + + "adapter producing " + witness); + top = witness; + mh = mh.asType(mh.type().changeReturnType(erase(witness))); + } + m_topType = top; + m_topErased = erased; + m_underAdapter = null; + m_fetchHandle = mh; + return; + } + + /* + * Very well then, it is not a leaf adapter. 
+ */ + + requireNonNull(over, + getClass() + " instantiated with null 'over' but is " + + "a non-leaf adapter"); + if ( null != using ) + throw new IllegalArgumentException( + getClass() + " instantiated with non-null 'using' but is " + + "not a leaf adapter"); + + Configuration.NonLeaf nonLeaf = (Configuration.NonLeaf)configuration; + + Type[] refined = refine(over.m_topType, nonLeaf.m_under, nonLeaf.m_top); + Type under = refined[0]; + Type top = refined[1]; + + if ( null != witness ) + { + if ( ! isSubtype(witness, top) ) + throw new IllegalArgumentException( + "cannot instantiate " + getClass() + " as " + + "adapter producing " + witness); + top = witness; + } + + m_topType = top; + + @SuppressWarnings("unchecked") + Class erased = (Class)erase(top); + m_topErased = erased; + + /* + * 'over' was declared as a raw type to make this constructor also + * usable from the Array subclass constructor. Here, being an ordinary + * composing adapter, we reassert that 'over' is parameterized , as + * the ordinary subclass constructor will have ensured. + */ + @SuppressWarnings("unchecked") + Adapter underAdapter = over; + m_underAdapter = underAdapter; + + MethodHandle producer = nonLeaf.m_adapt.bindTo(this); + MethodHandle fetcher = over.m_fetchHandle; + + MethodType mt = producer + .type() + .changeReturnType(erased) + .changeParameterType(1, erase(under)); + + producer = producer.asType(mt); + fetcher = fetcher.asType( + fetcher.type().changeReturnType(mt.parameterType(1))); + + mt = fetcher + .type() // this is the expected type of a fetcher, but it needs + .changeReturnType(erased); // new return type. 
After collect we will + fetcher = collectArguments(producer, 1, fetcher); // need 1st arg twice + fetcher = permuteArguments(fetcher, mt, 0, 0, 1, 2, 3, 4); // so do that + + m_fetchHandle = fetcher; + } + + /** + * Specifies, for a leaf adapter (one not composed over a lower adapter), + * the form in which the value fetched from PostgreSQL will be presented to + * it (or how it will produce a value to be stored to PostgreSQL). + *

+ * At this level, an adapter is free to use {@code Via.CHAR} and treat + * {@code char} internally as a 16-bit unsigned integral type with no other + * special meaning. If an adapter will return an unsigned 16-bit + * type, it should extend either {@code AsShort.Unsigned} or {@code AsChar}, + * based on whether the value it returns represents UTF-16 character data. + */ + protected enum Via + { + DATUM ( Datum.Input.class, "getDatum"), + INT64SX ( long.class, "getLongSignExtended"), + INT64ZX ( long.class, "getLongZeroExtended"), + DOUBLE ( double.class, "getDouble"), + INT32SX ( int.class, "getIntSignExtended"), + INT32ZX ( int.class, "getIntZeroExtended"), + FLOAT ( float.class, "getFloat"), + SHORT ( short.class, "getShort"), + CHAR ( char.class, "getChar"), + BYTE ( byte.class, "getByte"), + BOOLEAN ( boolean.class, "getBoolean"); + + Via(Class type, String method) + { + try + { + MethodHandle h; + h = lookup().findVirtual(Datum.Accessor.class, method, + type.isPrimitive() + ? methodType( + type, Object.class, int.class) + : methodType( + type, Object.class, int.class, Attribute.class)); + + if ( type.isPrimitive() ) + h = dropArguments(h, 3, Attribute.class); + + m_handle = h; + } + catch ( ReflectiveOperationException e ) + { + throw wrapped(e); + } + } + + MethodHandle m_handle; + } + + @Override + public String toString() + { + Class c = getClass(); + Module m = c.getModule(); + return + c.getModule().getName() + "/" + + c.getCanonicalName().substring(1 + c.getPackageName().length() ) + + " to produce " + topType(); + } + + /** + * Method that a leaf {@code Adapter} must implement to indicate whether it + * is capable of fetching a given PostgreSQL type. + *

+ * In a composing adapter, this default implementation delegates to + * the adapter beneath. + * @throws UnsupportedOperationException if called in a leaf adapter + */ + public boolean canFetch(RegType pgType) + { + if ( null != m_underAdapter ) + return m_underAdapter.canFetch(pgType); + throw new UnsupportedOperationException( + toString() + " is a leaf adapter and does not override canFetch"); + } + + /** + * Method that an {@code Adapter} may override to indicate whether it + * is capable of fetching a given PostgreSQL attribute. + *

+ * If not overridden, this implementation delegates to the adapter beneath, + * if composed; in a leaf adapter, it delegates to + * {@link #canFetch(RegType) canFetch} for the attribute's declared + * PostgreSQL type. + */ + public boolean canFetch(Attribute attr) + { + if ( null != m_underAdapter ) + return m_underAdapter.canFetch(attr); + return canFetch(attr.type()); + } + + /** + * Method that an {@code Adapter} must implement to indicate whether it + * is capable of returning some usable representation of SQL null values. + *

+ * An {@code Adapter} that cannot should only be used with values that + * are known never to be null; it will throw an exception if asked to fetch + * a value that is null. + *

+ * An adapter usable with null values can be formed by composing, for + * example, an adapter producing {@code Optional} over an adapter that + * cannot fetch nulls. + */ + public abstract boolean canFetchNull(); + + /** + * A static method to indicate the type returned by a given {@code Adapter} + * subclass, based only on the type information recorded for it by the Java + * compiler. + *

+ * The type returned could contain free type variables that may be given + * concrete values when the instance {@link #topType() topType} method is + * called on a particular instance of the class. + *

+ * When cls is a subclass of {@code Primitive}, this method + * returns the {@code Class} object for the actual primitive type, + * not the boxed type. + */ + public static Type topType(Class cls) + { + Type[] params = specialization(cls, Adapter.class); + if ( null == params ) + throw new IllegalArgumentException( + cls + " does not extend Adapter"); + Type top = params[0]; + if ( Primitive.class.isAssignableFrom(cls) ) + { + top = methodType((Class)top).unwrap().returnType(); + assert ((Class)top).isPrimitive(); + } + return top; + } + + /** + * The full generic {@link Type Type} this Adapter presents to Java. + *

+ * Unlike the static method, this instance method, on an adapter formed + * by composition, returns the actual type obtained by unifying + * the "under" adapter's top type with the top adapter's "under" type, then + * making the indicated substitutions in the top adapter's "top" type. + *

+ * Likewise, for an adapter constructed with an array contract and an + * adapter for the element type, the element adapter's "top" type is unified + * with the contract's element type, and this method returns the contract's + * result type with the same substitutions made. + */ + public Type topType() + { + return m_topType; + } + + /** + * A static method to indicate the "under" type expected by a given + * {@code Adapter} subclass that is intended for composition over another + * adapter, based only on the type information recorded for it by the Java + * compiler. + *

+ * The type returned could contain free type variables. + */ + public static Type underType(Class cls) + { + Type[] params = specialization(cls, Adapter.class); + if ( null == params ) + throw new IllegalArgumentException( + cls + " does not extend Adapter"); + return params[1]; + } + + /** + * A class that is returned by the {@link #configure configure} method, + * intended for use during an {@code Adapter} subclass's static + * initialization, and must be supplied to the constructor when instances + * of the class are created. + */ + protected static abstract class Configuration + { + final Class m_class; + /** + * In the case of a primitive-typed adapter, this will really be the + * primitive Class object, not the corresponding boxed class. + */ + final Type m_top; + + Configuration(Class cls, Type top) + { + m_class = cls; + m_top = top; + } + + static class Leaf extends Configuration + { + final MethodHandle m_fetch; + + Leaf(Class cls, Type top, MethodHandle fetcher) + { + super(cls, top); + m_fetch = fetcher; + } + } + + static class NonLeaf extends Configuration + { + /** + * For an adapter meant to compose over a primitive-typed one, this + * is the actual primitive class object for the under-adapter's + * expected return type, not the boxed counterpart. + */ + final Type m_under; + final MethodHandle m_adapt; + + NonLeaf( + Class cls, Type top, Type under, + MethodHandle fetcher) + { + super(cls, top); + m_under = under; + m_adapt = fetcher; + } + } + } + + /** + * Throws a security exception if permission to configure an adapter + * isn't held. + *

+ * For the time being, there is only Permission("*", "fetch"), so this needs + * no parameters and can use a static instance of the permission. + */ + @SuppressWarnings("removal") // JEP 411 + private static void checkAllowed() + { + Service.CHECKER.accept(Permission.INSTANCE); + } + + /** + * Method that must be called in static initialization of an {@code Adapter} + * subclass, producing a {@code Configuration} object that must be passed + * to the constructor when creating an instance. + *

+ * If the adapter class is in a named module, its containing package must be + * exported to at least {@code org.postgresql.pljava}. + *

+ * When a leaf adapter (one that does not compose over some other adapter, + * but acts directly on PostgreSQL datums) is configured, the necessary + * {@link Permission Permission} is checked. + * @param cls The Adapter subclass being configured. + * @param via null for a composing (non-leaf) adapter; otherwise a value + * of the {@link Via} enumeration, indicating how the underlying PostgreSQL + * datum will be presented to the adapter. + * @throws SecurityException if the class being configured represents a leaf + * adapter and the necessary permission is not held. + */ + protected static Configuration configure( + Class cls, Via via) + { + Adapter.class.getModule().addReads(cls.getModule()); + Type top = topType(cls); + Type under = underType(cls); + Class topErased = erase(top); + Class underErased = erase(under); + + MethodHandle underFetcher = null; + String fetchName; + Predicate fetchPredicate; + + if ( Void.class == underErased ) + { + checkAllowed(); + requireNonNull(via, "a leaf Adapter must have a non-null Via"); + underFetcher = via.m_handle; + underErased = underFetcher.type().returnType(); + Class[] params = { Attribute.class, underErased }; + final String fn = fetchName = "fetch"; + fetchPredicate = m -> fn.equals(m.getName()) + && Arrays.equals(m.getParameterTypes(), params); + } + else + { + if ( null != via ) + throw new IllegalArgumentException( + "a non-leaf (U is not Void) adapter must have null Via"); + final String fn = fetchName = "adapt"; + MethodType mt = methodType(underErased); + if ( mt.hasWrappers() ) // Void, handled above, won't be seen here + { + Class underOrig = underErased; + Class underPrim = mt.unwrap().returnType(); + fetchPredicate = m -> + { + if ( ! 
fn.equals(m.getName()) ) + return false; + Class[] ptypes = m.getParameterTypes(); + return + 2 == ptypes.length && Attribute.class == ptypes[0] && + ( underOrig == ptypes[1] || underPrim == ptypes[1] ); + }; + } + else + { + Class[] params = { Attribute.class, underErased }; + fetchPredicate = m -> fn.equals(m.getName()) + && Arrays.equals(m.getParameterTypes(), params); + } + } + + Method[] fetchCandidates = stream(cls.getMethods()) + .filter(fetchPredicate).toArray(Method[]::new); + if ( 1 < fetchCandidates.length ) + fetchCandidates = stream(fetchCandidates) + .filter(m -> ! m.isBridge()).toArray(Method[]::new); + if ( 1 != fetchCandidates.length ) + throw new IllegalArgumentException( + cls + " lacks " + fetchName + " method with the " + + "expected signature"); + if ( ! topErased.isAssignableFrom(fetchCandidates[0].getReturnType()) ) + throw new IllegalArgumentException( + cls + " lacks " + fetchName + " method with the " + + "expected return type"); + + MethodHandle fetcher; + + try + { + fetcher = lookup().unreflect(fetchCandidates[0]); + } + catch ( IllegalAccessException e ) + { + throw new IllegalArgumentException( + cls + " has " + fetchName + " method that is inaccessible", + e); + } + + /* + * Adjust the return type. isAssignableFrom was already checked, so + * this can only be a no-op or a widening, to make sure the handle + * will fit invokeExact with the expected return type. 
+ */ + fetcher = fetcher.asType(fetcher.type().changeReturnType(topErased)); + + if ( null != via ) + { + fetcher = collectArguments(fetcher, 2, underFetcher); + return new Configuration.Leaf(cls, top, fetcher); + } + + // unbound virtual handle's type includes receiver; 2nd param is index 2 + Class asFound = fetcher.type().parameterType(2); + if ( asFound.isPrimitive() ) + under = underErased = asFound; + + return new Configuration.NonLeaf(cls, top, under, fetcher); + } + + /** + * Provided to serve as a superclass for a 'container' class that is used + * to group several related adapters without being instantiable + * as an adapter itself. + *

+ * By being technically a subclass of {@code Adapter}, the container class + * will have access to the protected {@code Configuration} class and + * {@code configure} method. + */ + public static abstract class Container extends Adapter + { + protected Container() + { + super(null, null, null, null); + } + } + + /** + * An {@code Adapter} that reports it can be used on any type, but cannot + * fetch anything. + *

+ * Can be useful when constructing a {@link Contract.Array Contract.Array} + * that will inspect metadata for an array (its element type or dimensions) + * without fetching any elements. + */ + public static final class Opaque extends As + { + /** + * Instance of the {@code Opaque} adapter. + */ + public static final Opaque INSTANCE; + + /** + * Returns true unconditionally, so the {@code Opaque} adapter can be + * applied to any type or when type is unknown. + *

+ * However, any actual attempt to fetch a non-null value + * using the {@code Opaque} adapter will incur + * an {@code UnsupportedOperationException}. + */ + @Override + public boolean canFetch(RegType pgType) + { + return true; + } + + private Void fetch( + Attribute a, Datum.Accessor acc, B buffer, int offset, + Attribute aa) + { + throw new UnsupportedOperationException( + "Adapter.Opaque cannot fetch anything"); + } + + private Opaque(Configuration c) + { + super(c, null, null); + } + + static + { + try + { + Lookup lup = lookup(); + MethodHandle fetcher = lup.findVirtual( + Opaque.class, "fetch", methodType(Void.class, + Attribute.class, Datum.Accessor.class, Object.class, + int.class, Attribute.class)); + Configuration c = + new Configuration.Leaf(Opaque.class, Void.class, fetcher); + + INSTANCE = new Opaque(c); + } + catch ( ReflectiveOperationException e ) + { + throw new ExceptionInInitializerError(e); + } + } + } + + /** + * Superclass for adapters that fetch something and return it as a reference + * type T. + *

+ * The type variable U for the thing consumed gets no enforcement from + * the compiler, because any extending adapter class provides its own + * {@code T fetch(Attribute,something)} method, with no abstract version + * inherited from this class to constrain it. The method will be found + * reflectively by name and parameter types, so the "something" only has to + * match the type of the accessor method specified with {@code Via}, or the + * type returned by an underlying adapter that this one will be composed + * over. + *

+ * In particular, that means this is the class to extend even if using a + * primitive accessor method, or composing over an adapter that returns a + * primitive type, as long as this adapter will return a reference type T. + * Such an adapter simply declares that it extends {@code As} when + * based on a primitive accessor method, or {@code As} when + * composed over another adapter of primitive type, where boxed-class is the + * boxed counterpart of the other adapter's primitive type. + *

+ * When Java's reflection methods on generic types are used to compute + * the (non-erased) result type of a stack of composed adapters, the type + * variable U can be used in relating the input to the output type of each. + */ + public abstract static class As + extends Adapter + implements ArrayProto + { + private final MethodHandle m_fetchHandleErased; + + /** + * Constructor for a simple leaf {@code Adapter}, or a composing + * (non-leaf) {@code Adapter} when passed another adapter over which + * it should be composed. + * @param c Configuration instance generated for this class + * @param over null for a leaf Adapter, otherwise another Adapter + * to compose this one over + * @param witness if not null, the top type the resulting + * adapter will produce, if a Class object can specify that more + * precisely than the default typing rules. + */ + protected As(Configuration c, Adapter over, Class witness) + { + super(c, over, null, witness); + + MethodHandle mh = m_fetchHandle; + m_fetchHandleErased = + mh.asType(mh.type().changeReturnType(Object.class)); + } + + /** + * Constructor for a leaf {@code Adapter} that is based on + * a {@code Contract}. + * @param using the scalar Contract that will be used to produce + * the value returned + * @param witness if not null, the top type the resulting + * adapter will produce, if a Class object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected As( + Contract.Scalar using, Class witness, Configuration c) + { + super(c, null, using, witness); + + MethodHandle mh = m_fetchHandle; + m_fetchHandleErased = + mh.asType(mh.type().changeReturnType(Object.class)); + } + + /** + * Used only by the {@code Array} subclass below. + *

+ * The contract and element adapter here are raw types. The accessible + * subclass constructors will permit only compatible combinations of + * parameterized types. + */ + private As( + Contract.Array using, Adapter adapter, Type witness, + Configuration c) + { + super(c, null, using, + witness != null ? witness : refinement(using, adapter)); + + MethodHandle mh = m_fetchHandle; + m_fetchHandleErased = + mh.asType(mh.type().changeReturnType(Object.class)); + } + + /** + * Returns the type that will be produced by the array contract + * using when applied to the element-type adapter + * adapter. + *

+ * Determined by unifying the contract's element type with + * the result type of adapter, then repeating any resulting + * substitutions in the contract's result type. + */ + private static Type refinement(Contract.Array using, Adapter adapter) + { + Type[] unrefined = + specialization(using.getClass(), Contract.Array.class); + Type result = unrefined[0]; + Type element = unrefined[1]; + /* + * A Contract that expects a primitive-typed adapter must already be + * specialized to one primitive type, so there is nothing to refine. + */ + if ( adapter instanceof Primitive ) + return result; + return refine(adapter.topType(), element, result)[1]; + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final T fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (T) + m_fetchHandleErased.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * A default implementation of {@code canFetchNull} that unconditionally + * returns true. + *

+ * An adapter that extends this class, if it does not override + * {@link #fetchNull fetchNull}, will simply map any SQL null value + * to a Java null. + */ + @Override + public boolean canFetchNull() + { + return true; + } + + /** + * Determines the value to which SQL null should be mapped. + *

+ * If not overridden, this implementation returns Java null. + */ + public T fetchNull(Attribute a) + { + return null; + } + + /** + * Allocate an array of the given length with this adapter's + * result type as its component type. + */ + @SuppressWarnings("unchecked") + public T[] arrayOf(int length) + { + return (T[])newInstance(m_topErased, length); + } + } + + /** + * Abstract supertype of array adapters. + *

+ * Instantiating an array adapter requires supplying an array contract + * and a compatible adapter for the element type, to be stored in the + * corresponding final fields here, which are declared with raw types. + * The several accessible constructors enforce the various compatible + * parameterizations for the two arguments. + */ + public abstract static class Array extends As + { + /** + * Returns an {@code Adapter.Array} that simply returns the element type + * of the fetched array. + *

+ * Can be used when the only statically-known type for an array + * is the polymorphic {@link RegType#ANYARRAY ANYARRAY} type, + * to determine the actual element type of a given array. A suitable + * {@code Adapter} for that type can then be chosen, and used + * to construct an array adapter that can access the content + * of the array. + */ + public static Array elementType() + { + return Service.INSTANCE.elementTypeAdapter(); + } + + /** + * The {@code Contract.Array} that this array adapter will use, + * together with the supplied element-type adapter. + *

+ * Declared here as the raw type. The accessible constructors enforce + * the compatibility requirements between this and the supplied + * element adapter. + */ + protected final Contract.Array m_contract; + + /** + * The {@code Adapter} that this array adapter will use for the array's + * element type, together with the supplied contract. + *

+ * Declared here as the raw type. The accessible constructors enforce + * the compatibility requirements between this and the supplied + * contract. + */ + protected final Adapter m_elementAdapter; + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a reference-returning {@code Adapter} + * for the element type. + * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, As adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a long-returning {@code Adapter} + * for the element type. + * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsLong adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a double-returning {@code Adapter} + * for the element type. 
+ * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsDouble adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and an int-returning {@code Adapter} + * for the element type. + * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsInt adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a float-returning {@code Adapter} + * for the element type. + * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. 
+ * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsFloat adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a short-returning {@code Adapter} + * for the element type. + * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsShort adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a char-returning {@code Adapter} + * for the element type. + * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsChar adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a byte-returning {@code Adapter} + * for the element type. 
+ * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsByte adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + + /** + * Constructor for a leaf array {@code Adapter} that is based on + * a {@code Contract.Array} and a boolean-returning {@code Adapter} + * for the element type. + * @param using the array Contract that will be used to produce + * the value returned + * @param adapter an Adapter producing a representation of the array's + * element type + * @param witness if not null, the top type the resulting + * adapter will produce, if a Type object can specify that more + * precisely than the default typing rules. + * @param c Configuration instance generated for this class + */ + protected Array( + Contract.Array> using, AsBoolean adapter, + Type witness, Configuration c) + { + super(using, adapter, witness, c); + m_contract = using; + m_elementAdapter = adapter; + } + } + + /** + * Ancestor class for adapters that fetch something and return it as + * a Java primitive type. + *

+ * Subclasses for integral types, namely {@code AsLong}, {@code asInt}, + * and {@code AsShort}, cannot be extended directly, but only via their + * {@code Signed} or {@code Unsigned} nested subclasses, according to how + * the value is meant to be used. Nothing can change how Java treats the + * primitive types (always as signed), but the {@code Signed} and + * {@code Unsigned} subclasses here offer methods for the operations that + * differ, allowing the right behavior to be achieved if those methods + * are used. + *

+ * Whether an adapter extends {@code AsShort.Unsigned} or {@code AsChar} + * (also an unsigned 16-bit type) should be determined based on whether + * the resulting value is meant to have a UTF-16 character meaning. + */ + public abstract static class Primitive + extends Adapter + implements ArrayProto + { + private > Primitive(Configuration c, A over) + { + super(c, over, null, null); + } + + /** + * Implementation of {@code canFetchNull} that unconditionally returns + * false, as primitive adapters have no reliably distinguishable values + * to which SQL null can be mapped. + */ + @Override + public boolean canFetchNull() + { + return false; + } + } + + /** + * Abstract superclass of signed and unsigned primitive {@code long} + * adapters. + */ + public abstract static class AsLong extends Primitive + implements TwosComplement + { + private > AsLong(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final long fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (long) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public long fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java long", "22002")); + } + + /** + * Abstract superclass of signed primitive {@code long} adapters. + */ + public abstract static class Signed extends AsLong + implements TwosComplement.Signed + { + protected > Signed(Configuration c, A over) + { + super(c, over); + } + } + + /** + * Abstract superclass of unsigned primitive {@code long} adapters. + */ + public abstract static class Unsigned extends AsLong + implements TwosComplement.Unsigned + { + protected > Unsigned( + Configuration c, A over) + { + super(c, over); + } + } + } + + /** + * Abstract superclass of primitive {@code double} adapters. + */ + public abstract static class AsDouble + extends Primitive + { + protected > AsDouble(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final double fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (double) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public double fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java double", "22002")); + } + } + + /** + * Abstract superclass of signed and unsigned primitive {@code int} + * adapters. + */ + public abstract static class AsInt extends Primitive + implements TwosComplement + { + private > AsInt(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final int fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (int) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public int fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java int", "22002")); + } + + /** + * Abstract superclass of signed primitive {@code int} adapters. + */ + public abstract static class Signed extends AsInt + implements TwosComplement.Signed + { + protected > Signed(Configuration c, A over) + { + super(c, over); + } + } + + /** + * Abstract superclass of unsigned primitive {@code int} adapters. + */ + public abstract static class Unsigned extends AsInt + implements TwosComplement.Unsigned + { + protected > Unsigned( + Configuration c, A over) + { + super(c, over); + } + } + } + + /** + * Abstract superclass of primitive {@code float} adapters. + */ + public abstract static class AsFloat extends Primitive + { + protected > AsFloat(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final float fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (float) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public float fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java float", "22002")); + } + } + + /** + * Abstract superclass of signed and unsigned primitive {@code short} + * adapters. + */ + public abstract static class AsShort extends Primitive + implements TwosComplement + { + private > AsShort(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final short fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (short) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public short fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java short", "22002")); + } + + /** + * Abstract superclass of signed primitive {@code short} adapters. + */ + public abstract static class Signed extends AsShort + implements TwosComplement.Signed + { + protected > Signed(Configuration c, A over) + { + super(c, over); + } + } + + /** + * Abstract superclass of unsigned primitive {@code short} adapters. + */ + public abstract static class Unsigned extends AsShort + implements TwosComplement.Unsigned + { + protected > Unsigned( + Configuration c, A over) + { + super(c, over); + } + } + } + + /** + * Abstract superclass of primitive {@code char} adapters. + */ + public abstract static class AsChar extends Primitive + { + protected > AsChar(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final char fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (char) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public char fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java char", "22002")); + } + } + + /** + * Abstract superclass of signed and unsigned primitive {@code byte} + * adapters. + */ + public abstract static class AsByte extends Primitive + implements TwosComplement + { + private > AsByte(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final byte fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (byte) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public byte fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java byte", "22002")); + } + + /** + * Abstract superclass of signed primitive {@code byte} adapters. + */ + public abstract static class Signed extends AsByte + implements TwosComplement.Signed + { + protected > Signed(Configuration c, A over) + { + super(c, over); + } + } + + /** + * Abstract superclass of unsigned primitive {@code byte} adapters. + */ + public abstract static class Unsigned extends AsByte + implements TwosComplement.Unsigned + { + protected > Unsigned( + Configuration c, A over) + { + super(c, over); + } + } + } + + /** + * Abstract superclass of primitive {@code boolean} adapters. + */ + public abstract static class AsBoolean + extends Primitive + { + protected > AsBoolean(Configuration c, A over) + { + super(c, over); + } + + /** + * Method invoked internally when this {@code Adapter} is used to fetch + * a value; not intended for use in application code. + */ + public final boolean fetch( + Datum.Accessor acc, B buffer, int offset, Attribute a) + { + try + { + return (boolean) + m_fetchHandle.invokeExact(a, acc, buffer, offset, a); + } + catch ( Throwable t ) + { + throw wrapped(t); + } + } + + /** + * Determines the mapping of SQL null. + *

+ * If not overridden, this implementation throws an + * {@code SQLDataException} with {@code SQLSTATE 22002}, + * {@code null_value_no_indicator_parameter}. + */ + public boolean fetchNull(Attribute a) + { + throw wrapped(new SQLDataException( + "SQL NULL cannot be returned as Java boolean", "22002")); + } + } + + /** + * A marker interface to be extended by functional interfaces that + * serve as ADT contracts. + *

+ * It facilitates the declaration of "dispenser" interfaces by which + * one contract can rely on others. + * @param the type to be returned by an instance of the contract + */ + public interface Contract + { + /** + * Marker interface for contracts for simple scalar types. + */ + interface Scalar extends Contract + { + } + + /** + * Base for functional interfaces that serve as contracts + * for array-like types. + *

+ * The distinguishing feature is an associated {@code Adapter} handling + * the element type of the array-like type. This form of contract may + * be useful for range and multirange types as well as for arrays. + * @param the type to be returned by an instance of the contract. + * @param the type returned by an associated {@code Adapter} for + * the element type (or the boxed type, if the adapter returns + * a primitive type). + * @param The subtype of {@code Adapter} that the contract requires; + * reference-returning ({@code As}) and all of the primitive-returning + * types must be distinguished. + */ + public interface Array> extends Contract + { + /** + * Constructs a representation T representing + * a PostgreSQL array. + * @param nDims the number of array dimensions (always one half of + * {@code dimsAndBounds.length}, but passed separately for + * convenience) + * @param dimsAndBounds the first nDims elements + * represent the total number of valid indices for each dimension, + * and the next nDims elements represent the first valid index for each + * dimension. For example, if nDims is 3, dimsAndBounds[1] is 6, and + * dimsAndBounds[4] is -2, then the array's second dimension uses + * indices in [-2,4). The array is a copy and may be used freely. + * @param adapter an Adapter producing a representation of + * the array's element type. + * @param slot A TupleTableSlot with multiple components accessible + * by a (single, flat) index, all of the same type, described by + * a one-element TupleDescriptor. + */ + T construct( + int nDims, int[] dimsAndBounds, A adapter, Indexed slot) + throws SQLException; + } + } + + /** + * Functional interface able to dispense one instance of an ADT by passing + * its constituent values to a supplied {@code Contract} and returning + * whatever that returns. 
+ */ + @FunctionalInterface + public interface Dispenser> + { + T get(U constructor); + } + + /** + * Functional interface able to dispense multiple instances of an ADT + * identified by a zero-based index, passing the its constituent values + * to a supplied {@code Contract} and returning whatever that returns. + */ + @FunctionalInterface + public interface PullDispenser> + { + T get(int index, U constructor); + } + + private static RuntimeException wrapped(Throwable t) + { + if ( t instanceof RuntimeException ) + return (RuntimeException)t; + if ( t instanceof Error ) + throw (Error)t; + return new AdapterException(t.getMessage(), t); + } + + /** + * A lightweight unchecked exception used to wrap checked ones + * (often {@link SQLException}) in settings where checked ones are a bother. + *

+ * The idea may or may not be worth keeping, and either way, this particular + * exception might not be part of any final API. + */ + public static class AdapterException extends RuntimeException + { + AdapterException(String message, Throwable cause) + { + super(message, cause, true, false); + } + + /** + * Unwraps this wrapper's cause and returns it, if it is an instance of + * the exception type declared; otherwise, just throws this + * wrapper again. + */ + public X unwrap(Class declared) + { + Throwable t = getCause(); + if ( declared.isInstance(t) ) + return declared.cast(t); + throw this; + } + } + + /** + * A permission allowing the creation of a leaf {@code Adapter}. + *

+ * The proper spelling in a policy file is + * {@code org.postgresql.pljava.Adapter$Permission}. + *

+ * For the time being, only {@code "*"} is allowed as the name, + * and only {@code "fetch"} as the actions. + *

+ * Only a "leaf" adapter (one that will interact with PostgreSQL datum + * values directly) requires permission. Definition of composing adapters + * (those that can be applied over another adapter and transform the Java + * values somehow) is unrestricted. + */ + public static final class Permission extends java.security.Permission + { + private static final long serialVersionUID = 1L; + + /** + * An instance of this permission (not a singleton, merely one among + * possible others). + */ + static final Permission INSTANCE = new Permission("*", "fetch"); + + public Permission(String name, String actions) + { + super("*"); + if ( ! ( "*".equals(name) && "fetch".equals(actions) ) ) + throw new IllegalArgumentException( + "the only currently-allowed name and actions are " + + "* and fetch, not " + name + " and " + actions); + } + + @Override + public boolean equals(Object other) + { + return other instanceof Permission; + } + + @Override + public int hashCode() + { + return 131129; + } + + @Override + public String getActions() + { + return "fetch"; + } + + @Override + public PermissionCollection newPermissionCollection() + { + return new Collection(); + } + + @Override + public boolean implies(java.security.Permission p) + { + return p instanceof Permission; + } + + static class Collection extends PermissionCollection + { + private static final long serialVersionUID = 1L; + + Permission the_permission = null; + + @Override + public void add(java.security.Permission p) + { + if ( isReadOnly() ) + throw new SecurityException( + "attempt to add a Permission to a readonly " + + "PermissionCollection"); + + if ( ! 
(p instanceof Permission) ) + throw new IllegalArgumentException( + "invalid in homogeneous PermissionCollection: " + p); + + if ( null == the_permission ) + the_permission = (Permission) p; + } + + @Override + public boolean implies(java.security.Permission p) + { + if ( null == the_permission ) + return false; + return the_permission.implies(p); + } + + @Override + public Enumeration elements() + { + if ( null == the_permission ) + return emptyEnumeration(); + return enumeration(List.of(the_permission)); + } + } + } + + /** + * Specification of a service supplied by the internals module for certain + * operations, such as specially instantiating array adapters based on + * {@code ArrayBuilder}s constructed here. + */ + public static abstract class Service + { + static final Service INSTANCE; + static final Consumer CHECKER; + + static + { + INSTANCE = ServiceLoader.load( + Service.class.getModule().getLayer(), Service.class) + .findFirst().orElseThrow(() -> new ServiceConfigurationError( + "could not load PL/Java Adapter.Service")); + CHECKER = INSTANCE.permissionChecker(); + } + + static + Array buildArrayAdapter( + ArrayBuilder builder, TypeWrapper w) + { + return INSTANCE.buildArrayAdapterImpl(builder, w); + } + + /** + * Builds an array adapter, given an {@code ArrayBuilder} (which wraps + * this {@code Adapter} and can describe the resulting array type), and + * an {@code TypeWrapper}. + *

+ * The {@code TypeWrapper} is a contrivance so that the computed array + * type can be passed back up through the constructors in a non-racy + * way. + */ + protected abstract + Array buildArrayAdapterImpl( + ArrayBuilder builder, TypeWrapper w); + + /** + * Returns a permission checker appropriate to whether PL/Java is + * running with enforcement or not. + */ + protected abstract + Consumer permissionChecker(); + + protected abstract + Array elementTypeAdapter(); + + /** + * An upcall from the implementation layer to obtain the + * {@code MultiArray} from an {@code ArrayBuilder} without cluttering + * the latter's exposed API. + */ + protected MultiArray multiArray(ArrayBuilder builder) + { + return builder.multiArray(); + } + + /** + * An upcall from the implementation layer to obtain the + * {@code Adapter} wrapped by an {@code ArrayBuilder} without cluttering + * the latter's exposed API. + */ + protected Adapter adapter(ArrayBuilder builder) + { + return builder.m_adapter; + } + } + + /** + * A class that sneakily implements {@link Type} just so it can be passed + * up through the witness parameter of existing constructors, + * and carry the computed type of an array adapter to be constructed. + *

+ * Can only be instantiated here, to limit the ability for arbitrary code + * to supply computed (or miscomputed) types. + *

+ * The implementation layer will call {@link #setWrappedType setWrappedType} + * and then pass the wrapper to the appropriate adapter constructor. + * @hidden + */ + public static class TypeWrapper implements Type + { + @Override + public String getTypeName() + { + return "(a PL/Java TypeWrapper)"; + } + + private Type wrapped; + + private TypeWrapper() { } + + public void setWrappedType(Type t) + { + wrapped = t; + } + } + + /** + * Mixin allowing properly-typed array adapters of various dimensionalities + * to be derived from an adapter for the array component type. + *

+ * If a is an adapter producing type T, then + * {@code a.a4().a2()} is an {@code ArrayBuilder} that can build a + * six-dimensional array adapter producing type T[][][][][][]. + * + * @param Type of a one-dimension array of the component type; the type + * a builder obtained with a1() would build. + */ + public interface ArrayProto + { + /** + * Returns a builder that will make an array adapter returning + * a one-dimension Java array of this {@code Adapter}'s Java type. + */ + default ArrayBuilder a1() + { + return new ArrayBuilder(this, 1); + } + + /** + * Returns a builder that will make an array adapter returning + * a two-dimension Java array of this {@code Adapter}'s Java type. + */ + default ArrayBuilder a2() + { + return new ArrayBuilder(this, 2); + } + + /** + * Returns a builder that will make an array adapter returning + * a four-dimension Java array of this {@code Adapter}'s Java type. + */ + default ArrayBuilder a4() + { + return new ArrayBuilder(this, 4); + } + } + + /** + * Builder to derive properly-typed array adapters of various + * dimensionalities, first obtained from an {@link ArrayProto}. + * + * @param The array type represented by this builder. a1() will produce + * a builder for TA[], and so on. + * @param The type of a one-dimension array of the original component + * type; remains unchanged by increases to the dimensionality of TA. + */ + @SuppressWarnings("unchecked") + public static final class ArrayBuilder + { + final Adapter m_adapter; + private int m_dimensions; + + /** + * Records the adapter for the component type (necessarily an instance + * of {@code Adapter} but here typed as {@code ArrayProto} to simplify + * call sites), and the dimensionality of array to be built. 
+ */ + ArrayBuilder(ArrayProto adapter, int dimensions) + { + m_adapter = (Adapter)requireNonNull(adapter); + m_dimensions = dimensions; + } + + /** + * Returns an array adapter that will produce arrays with the chosen + * number of dimensions, and the original adapter's + * {@link #topType() topType} as the component type. + */ + public Array build() + { + return Service.buildArrayAdapter(this, new TypeWrapper()); + } + + MultiArray multiArray() + { + return new MultiArray(m_adapter.topType(), m_dimensions); + } + + /** + * Adds one to the result-array dimensions of the {@code Adapter} this + * builder will build. + * @return this builder, with dimensions increased, and a sneaky + * unchecked cast to the corresponding generic type. + */ + public ArrayBuilder a1() + { + m_dimensions += 1; + return (ArrayBuilder)this; + } + + /** + * Adds two to the result-array dimensions of the {@code Adapter} this + * builder will build. + * @return this builder, with dimensions increased, and a sneaky + * unchecked cast to the corresponding generic type. + */ + public ArrayBuilder a2() + { + m_dimensions += 2; + return (ArrayBuilder)this; + } + + /** + * Adds four to the result-array dimensions of the {@code Adapter} this + * builder will build. + * @return this builder, with dimensions increased, and a sneaky + * unchecked cast to the corresponding generic type. + */ + public ArrayBuilder a4() + { + m_dimensions += 4; + return (ArrayBuilder)this; + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/Adjusting.java b/pljava-api/src/main/java/org/postgresql/pljava/Adjusting.java index 9a2d379b7..341348ca8 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/Adjusting.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/Adjusting.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2019-2024 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -14,6 +14,9 @@ import java.io.Reader; import java.sql.SQLException; import java.sql.SQLXML; +import java.util.List; +import static java.util.Objects.requireNonNull; +import java.util.function.Consumer; import javax.xml.stream.XMLInputFactory; // for javadoc import javax.xml.stream.XMLResolver; // for javadoc import javax.xml.stream.XMLStreamReader; @@ -126,22 +129,189 @@ public static final class XML { private XML() { } // no instances + /** + * Attempts a given action (typically to set something) using a given + * value, trying one or more supplied keys in order until the action + * succeeds with no exception. + *

+ * This logic is common to the + * {@link Parsing#setFirstSupportedFeature setFirstSupportedFeature} + * and + * {@link Parsing#setFirstSupportedProperty setFirstSupportedProperty} + * methods, and is exposed here because it may be useful for other + * tasks in Java's XML APIs, such as configuring {@code Transformer}s. + *

+ * If any attempt succeeds, null is returned. If no attempt + * succeeds, the first exception caught is returned, with any + * exceptions from the subsequent attempts retrievable from it with + * {@link Exception#getSuppressed getSuppressed}. The return is + * immediate, without any remaining names being tried, if an exception + * is caught that is not assignable to a class in the + * expected list. Such an exception is returned (or added to + * the suppressed list of an exception already to be returned) only if + * the onUnexpected handler is null; otherwise, it is passed + * to the handler and does not affect the method's return. + *

+ * For some purposes, a single call of this method may not suffice: if
+ * alternate means to establish a desired configuration have existed and
+ * are not simply alternate property names that will accept the same
+ * value. For such a case, this method may be called more than once. The
+ * caller abandons the sequence of calls after the first call that
+ * returns null (indicating that it either succeeded, or incurred an
+ * unexpected exception and passed it to the onUnexpected
+ * handler). Otherwise, the exception returned by the first call can be
+ * passed as caught to the next call, instead of passing the
+ * usual null. (When a non-null caught is passed, it will be
+ * returned on failure, even if an unexpected exception has been caught;
+ * therefore, should it ever be necessary to chain more than two of
+ * these calls, the caller should abandon the sequence as soon as a call
+ * returns null or returns its caught argument with
+ * no growth of its suppressed list.)
+ * @param setter typically a method reference for a method that
+ * takes a string key and some value.
+ * @param value the value to pass to the setter
+ * @param expected a list of exception classes that can be foreseen
+ * to indicate that a key was not recognized, and the operation
+ * should be retried with the next possible key.
+ * @param caught null, or an exception returned by a preceding call if
+ * an operation cannot be implemented with one call of this method
+ * @param onUnexpected invoked, if non-null, on an {@code Exception}
+ * that is caught and matches nothing in the expected list, instead
+ * of returning it. If this parameter is null, such an exception is
+ * returned (or added to the suppressed list of the exception to be
+ * returned), just as for expected exceptions, but the return is
+ * immediate, without trying remaining names, if any.
+ * @param names one or more String keys to be tried in order until
+ * the action succeeds. 
+ * @return null if any attempt succeeded, or if the first exception + * caught was passed to the onUnexpected handler; otherwise the first + * exception caught (if the caller supplied a non-null + * caught, then that exception), which may have further + * exceptions in its suppressed list. + */ + public static Exception setFirstSupported( + SetMethod setter, V value, + List> expected, + Exception caught, + Consumer onUnexpected, String... names) + { + requireNonNull(expected); + for ( String name : names ) + { + try + { + setter.set(name, value); + return null; + } + catch ( Exception e ) + { + boolean benign = + expected.stream().anyMatch(c -> c.isInstance(e)); + + if ( benign || null == onUnexpected ) + { + if ( null == caught ) + caught = e; + else + caught.addSuppressed(e); + } + else + onUnexpected.accept(e); + + if ( ! benign ) + break; + } + } + return caught; + } + + /** + * Calls the six-argument overload passing null for caught. + */ + public static Exception setFirstSupported( + SetMethod setter, V value, + List> expected, + Consumer onUnexpected, String... names) + { + return setFirstSupported( + setter, value, expected, null, onUnexpected, names); + } + + /** + * A functional interface fitting various {@code setFeature} or + * {@code setProperty} methods in Java XML APIs. + *

+ * The XML APIs have a number of methods on various interfaces that can + * be used to set some property or feature, and can generally be + * assigned to this functional interface by bound method reference, and + * used with {@link #setFirstSupported setFirstSupported}. + */ + @FunctionalInterface + public interface SetMethod + { + void set(String key, T value) throws Exception; + } + /** * Interface with methods to adjust the restrictions on XML parsing * that are commonly considered when XML content might be from untrusted * sources. *

- * The adjusting methods are best-effort and do not provide an - * indication of whether the requested adjustment was made. Not all of + * The adjusting methods are best-effort; not all of * the adjustments are available for all flavors of {@code Source} or * {@code Result} or for all parser implementations or versions the Java - * runtime may supply. + * runtime may supply. Cases where a requested adjustment has not been + * made are handled as follows: + *

+ * Any sequence of adjustment calls will ultimately be followed by a + * {@code get}. During the sequence of adjustments, exceptions caught + * are added to a signaling list or to a quiet list, where "added to" + * means that if either list has a first exception, any caught later are + * attached to that exception with + * {@link Exception#addSuppressed addSuppressed}. + *

+ * For each adjustment (and depending on the type of underlying + * {@code Source} or {@code Result}), one or more exception types will + * be 'expected' as indications that an identifying key or value for + * that adjustment was not recognized. This implementation may continue + * trying to apply the adjustment, using other keys that have at times + * been used to identify it. Expected exceptions caught during these + * attempts form a temporary list (a first exception and those attached + * to it by {@code addSuppressed}). Once any such attempt succeeds, the + * adjustment is considered made, and any temporary expected exceptions + * list from the adjustment is discarded. If no attempt succeeded, the + * temporary list is retained, by adding its head exception to the quiet + * list. + *

+ * Any exceptions caught that are not instances of any of the 'expected' + * types are added to the signaling list. + *

+ * When {@code get} is called, the head exception on the signaling list, + * if any, is thrown. Otherwise, the head exception on the quiet list, + * if any, is logged at {@code WARNING} level. + *

+ * During a chain of adjustments, {@link #lax lax()} can be called to + * tailor the handling of the quiet list. A {@code lax()} call applies + * to whatever exceptions have been added to the quiet list up to that + * point. To discard them, call {@code lax(true)}; to move them to the + * signaling list, call {@code lax(false)}. */ public interface Parsing> { /** Whether to allow a DTD at all. */ T allowDTD(boolean v); + /** + * Specifies that any DTD should be ignored (neither processed nor + * rejected as an error). + *

+ * This treatment is available in Java 22 and later. + * In earlier Java versions, this will not succeed. Where it is + * supported, the most recent call of this method or of + * {@link #allowDTD allowDTD} will be honored. + */ + T ignoreDTD(); + /** * Whether to retrieve external "general" entities (those * that can be used in the document body) declared in the DTD. @@ -173,14 +343,14 @@ public interface Parsing> /** * For a feature that may have been identified by more than one URI - * in different parsers or versions, try passing the supplied + * in different parsers or versions, tries passing the supplied * value with each URI from names in order until * one is not rejected by the underlying parser. */ T setFirstSupportedFeature(boolean value, String... names); /** - * Make a best effort to apply the recommended, restrictive + * Makes a best effort to apply the recommended, restrictive * defaults from the OWASP cheat sheet, to the extent they are * supported by the underlying parser, runtime, and version. *

@@ -196,7 +366,7 @@ public interface Parsing> /** * For a parser property (in DOM parlance, attribute) that may have * been identified by more than one URI in different parsers or - * versions, try passing the supplied value with each URI + * versions, tries passing the supplied value with each URI * from names in order until one is not rejected by the * underlying parser. *

@@ -278,7 +448,7 @@ public interface Parsing> T accessExternalSchema(String protocols); /** - * Set an {@link EntityResolver} of the type used by SAX and DOM + * Sets an {@link EntityResolver} of the type used by SAX and DOM * (optional operation). *

* This method only succeeds for a {@code SAXSource} or @@ -297,7 +467,7 @@ public interface Parsing> T entityResolver(EntityResolver resolver); /** - * Set a {@link Schema} to be applied during SAX or DOM parsing + * Sets a {@link Schema} to be applied during SAX or DOM parsing *(optional operation). *

* This method only succeeds for a {@code SAXSource} or @@ -316,6 +486,31 @@ public interface Parsing> * already. */ T schema(Schema schema); + + /** + * Tailors the treatment of 'quiet' exceptions during a chain of + * best-effort adjustments. + *

+ * See {@link Parsing the class description} for an explanation of + * the signaling and quiet lists. + *

+ * This method applies to whatever exceptions may have been added to + * the quiet list by best-effort adjustments made up to that point. + * They can be moved to the signaling list with {@code lax(false)}, + * or simply discarded with {@code lax(true)}. In either case, the + * quiet list is left empty when {@code lax} returns. + *

+ * At the time a {@code get} method is later called, any exception + * at the head of the signaling list will be thrown (possibly + * wrapped in an exception permitted by {@code get}'s {@code throws} + * clause), with any later exceptions on that list retrievable from + * the head exception with + * {@link Exception#getSuppressed getSuppressed}. Otherwise, any + * exception at the head of the quiet list (again with any later + * ones attached as its suppressed list) will be logged at + * {@code WARNING} level. + */ + T lax(boolean discard); } /** @@ -347,12 +542,17 @@ public interface Source extends Parsing>, javax.xml.transform.Source { /** - * Return an object of the expected {@code Source} subtype + * Returns an object of the expected {@code Source} subtype * reflecting any adjustments made with the other methods. + *

+ * Refer to {@link Parsing the {@code Parsing} class description} + * and the {@link Parsing#lax lax()} method for how any exceptions + * caught while applying best-effort adjustments are handled. * @return an implementing object of the expected Source subtype * @throws SQLException for any reason that {@code getSource} might * have thrown when supplying the corresponding non-Adjusting - * subtype of Source. + * subtype of Source, or for reasons saved while applying + * adjustments. */ T get() throws SQLException; } @@ -392,12 +592,16 @@ public interface Result extends Parsing>, javax.xml.transform.Result { /** - * Return an object of the expected {@code Result} subtype + * Returns an object of the expected {@code Result} subtype * reflecting any adjustments made with the other methods. + * Refer to {@link Parsing the {@code Parsing} class description} + * and the {@link Parsing#lax lax()} method for how any exceptions + * caught while applying best-effort adjustments are handled. * @return an implementing object of the expected Result subtype * @throws SQLException for any reason that {@code getResult} might * have thrown when supplying the corresponding non-Adjusting - * subtype of Result. + * subtype of Result, or for reasons saved while applying + * adjustments. */ T get() throws SQLException; } @@ -428,7 +632,7 @@ public interface Result public interface SourceResult extends Result { /** - * Supply the {@code Source} instance that is the source of the + * Supplies the {@code Source} instance that is the source of the * content. *

* This method must be called before any of the inherited adjustment @@ -484,7 +688,8 @@ SourceResult set(javax.xml.transform.stax.StAXSource source) throws SQLException; /** - * Provide the content to be copied in the form of a {@code String}. + * Provides the content to be copied in the form of a + * {@code String}. *

* An exception from the pattern of {@code Source}-typed arguments, * this method simplifies retrofitting adjustments into code that @@ -507,11 +712,14 @@ SourceResult set(javax.xml.transform.dom.DOMSource source) throws SQLException; /** - * Return the result {@code SQLXML} instance ready for handing off + * Returns the result {@code SQLXML} instance ready for handing off * to PostgreSQL. *

- * This method must be called after any of the inherited adjustment - * methods. + * The handling/logging of exceptions normally handled in a + * {@code get} method happens here for a {@code SourceResult}. + *

+ * Any necessary calls of the inherited adjustment methods must be + * made before this method is called. */ SQLXML getSQLXML() throws SQLException; } diff --git a/pljava-api/src/main/java/org/postgresql/pljava/Lifespan.java b/pljava-api/src/main/java/org/postgresql/pljava/Lifespan.java new file mode 100644 index 000000000..672079d5c --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/Lifespan.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import org.postgresql.pljava.model.MemoryContext; // javadoc +import org.postgresql.pljava.model.ResourceOwner; // javadoc + +/** + * Model of any notional object in PostgreSQL or PL/Java that has a definite + * temporal existence, with a detectable end, and so can be used to scope the + * lifetime of any PL/Java object that has corresponding native resources. + *

+ * A {@code Lifespan} generalizes over assorted classes that can play that role, + * such as PostgreSQL's {@link ResourceOwner ResourceOwner} and + * {@link MemoryContext MemoryContext}. {@code MemoryContext} may see the most + * use in PL/Java, as the typical reason to scope the lifetime of some PL/Java + * object is that it refers to some allocation of native memory. + *

+ * The invocation of a PL/Java function is also usefully treated as a resource + * owner. It is reasonable to depend on the objects passed in the function call + * to remain usable as long as the call is on the stack, if no other explicit + * lifespan applies. + *

+ * Java's incubating foreign function and memory API will bring a + * {@code ResourceScope} object for which some relation to a PL/Java + * {@code Lifespan} can probably be defined. + *

+ * The history of PostgreSQL MemoryContexts
+ * (the older mechanism, appearing in PostgreSQL 7.1), and ResourceOwners
+ * (introduced in 8.0) is interesting. As the latter's {@code README} puts it,
+ * "The design of the ResourceOwner API is modeled on our MemoryContext API,
+ * which has proven very flexible and successful ... It is tempting to consider
+ * unifying ResourceOwners and MemoryContexts into a single object type, but
+ * their usage patterns are sufficiently different ...."
+ *

+ * Only later, in PostgreSQL 9.5, did {@code MemoryContext} gain a callback + * mechanism for detecting reset or delete, with which it also becomes usable + * as a kind of lifespan under PL/Java's broadened view of the concept. + * While not unifying ResourceOwners and MemoryContexts into a single + * object type, PL/Java here makes them both available as subtypes of a + * common interface, so either can be chosen to place an appropriate temporal + * scope on a PL/Java object. + */ +public interface Lifespan +{ +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/PLJavaBasedLanguage.java b/pljava-api/src/main/java/org/postgresql/pljava/PLJavaBasedLanguage.java new file mode 100644 index 000000000..890b8a5cf --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/PLJavaBasedLanguage.java @@ -0,0 +1,792 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import java.sql.SQLException; +import java.sql.SQLSyntaxErrorException; + +import java.util.List; + +import org.postgresql.pljava.model.ProceduralLanguage; // javadoc +import org.postgresql.pljava.model.ProceduralLanguage.PLJavaBased; +import org.postgresql.pljava.model.RegProcedure; +import org.postgresql.pljava.model.RegProcedure.Call; +import org.postgresql.pljava.model.RegProcedure.Call.Context.TriggerData; // jvd +import org.postgresql.pljava.model.RegProcedure.Lookup; +import org.postgresql.pljava.model.RegType; +import org.postgresql.pljava.model.Transform; +import org.postgresql.pljava.model.Trigger; +import org.postgresql.pljava.model.Trigger.ForTrigger; +import org.postgresql.pljava.model.TupleTableSlot; // javadoc + +import 
org.postgresql.pljava.annotation.Trigger.Called; // javadoc +import org.postgresql.pljava.annotation.Trigger.Event; // javadoc +import org.postgresql.pljava.annotation.Trigger.Scope; // javadoc + +/** + * Interface for a procedural language on PL/Java infrastructure. + *

+ * An implementing class does not implement this interface directly, but rather + * implements one or both of the subinterfaces {@link InlineBlocks InlineBlocks} + * and {@link Routines Routines}. A language that implements {@code Routines} + * may also implement one or more of: {@link Triggers Triggers}, + * {@link UsingTransforms UsingTransforms}. The implementing class must + * have a public constructor with a + * {@link ProceduralLanguage ProceduralLanguage} parameter, which it may ignore, + * or use to determine the name, oid, accessibility, or other details of the + * declared PostgreSQL language the handler class has been instantiated for. + */ +public interface PLJavaBasedLanguage +{ + /** + * To be implemented by a language that supports routines (that is, + * functions and/or procedures). + *

+ * Whether a routine is a function or procedure can be determined at
+ * validation time ({@code subject.}{@link RegProcedure#kind() kind()} in
+ * {@link #essentialChecks essentialChecks} or
+ * {@link #additionalChecks additionalChecks}) or at
+ * {@link #prepare prepare} time
+ * ({@code target.}{@link RegProcedure#kind() kind()}).
+ * A procedure can also be distinguished from a function in that,
+ * at {@link Routine#call Routine.call(fcinfo)} time, if a procedure is
+ * being called, {@code fcinfo.}{@link Call#context() context()} returns
+ * an instance of {@link Call.Context.CallContext CallContext}.
+ *

Transaction control

+ *

+ * A function is always called within an existing transaction; while it may + * use subtransactions / savepoints, it can never commit, roll back, or + * start a new top-level transaction. + *

+ * A procedure is allowed to start, commit, and roll back top-level
+ * transactions, provided it was not called inside an existing explicit
+ * transaction. That condition can be checked by consulting
+ * {@link Call.Context.CallContext#atomic() CallContext.atomic()} when
+ * {@code fcinfo.context()} returns an instance of {@code CallContext}.
+ * When {@code atomic()} returns {@code true}, transaction control is not
+ * allowed. (If {@code fcinfo.context()} returns anything other than an
+ * instance of {@code CallContext}, this is not a procedure call, and
+ * transaction control is never allowed.)
+ *

+ * A handler may use this information to impose its own (for example, + * compile-time) limits on a routine's access to transaction-control + * operations. Any use of SPI by the routine will be appropriately limited + * with no need for attention from the handler, as PL/Java propagates the + * atomic/nonatomic flag to SPI always. + */ + public interface Routines extends PLJavaBasedLanguage + { + /** + * Performs the essential validation checks on a proposed + * PL/Java-based routine. + *

+ * This method should check (when checkBody is true) all the + * essential conditions that {@link #prepare prepare} may assume have + * been checked. Because there is no guarantee that validation at + * routine-creation time always occurred, PL/Java's dispatcher will not + * only call this method at validation time, but also will never call + * {@code prepare} without making sure this method (passing true for + * checkBody) has been called first. + *

+ * This method should throw an informative exception for any check that + * fails, otherwise returning normally. Unless there is a more-specific + * choice, {@link SQLSyntaxErrorException} with {@code SQLState} + * {@code 42P13} corresponds to PostgreSQL's + * {@code invalid_function_definition}. + *

+ * Checks that are helpful at routine-creation time, but not essential + * to correctness of {@code prepare}, can be made in + * {@link #additionalChecks additionalChecks}. + *

+ * The dispatcher will never invoke this method for a subject + * with {@link RegProcedure#returnsSet returnsSet()} true, so this + * method may assume that property is false, unless the language also + * implements {@link ReturningSets ReturningSets} and the + * {@link ReturningSets#essentialSRFChecks essentialSRFChecks} method + * delegates to this one (as its default implementation does). + *

+ * If checkBody is false, less-thorough checks may be + * needed. The details are left to the language implementation; + * in general, basic checks of syntax, matching parameter counts, and + * so on are ok, while checks that load or compile user code or depend + * on other database state may be better avoided. The validator may be + * invoked with checkBody false at times when not all + * expected state may be in place, such as during {@code pg_restore} + * or {@code pg_upgrade}. + *

+ * This method is invoked with checkBody false only if the + * JVM has been started and PL/Java has already loaded and instantiated + * this language-handler class, or succeeds in doing so. If not, and + * checkBody is false, PL/Java simply treats the validation + * as successful. + *

+ * This default implementation checks nothing. + */ + default void essentialChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + } + + /** + * Performs additional validation checks on a proposed PL/Java-based + * routine. + *

+ * This method should be used for checks that may give helpful feedback + * at routine-creation time, but can be skipped at run time because the + * correct behavior of {@link #prepare prepare} does not depend on them. + * PL/Java calls this method only at routine-creation time, just after + * {@link #essentialChecks essentialChecks} has completed normally. + *

+ * This method should throw an informative exception for any check that + * fails, otherwise returning normally. Unless there is a more-specific + * choice, {@link SQLSyntaxErrorException} with {@code SQLState} + * {@code 42P13} corresponds to PostgreSQL's + * {@code invalid_function_definition}. + *

+ * Checks of conditions essential to correctness of {@code prepare} + * must be made in {@code essentialChecks}. + *

+ * The dispatcher will never invoke this method for a subject + * with {@link RegProcedure#returnsSet returnsSet()} true, so this + * method may assume that property is false, unless the language also + * implements {@link ReturningSets ReturningSets} and the + * {@link ReturningSets#additionalSRFChecks additionalSRFChecks} method + * delegates to this one (as its default implementation does). + *
<p>
+ * If checkBody is false, less-thorough checks may be + * needed. The details are left to the language implementation; + * in general, basic checks of syntax, matching parameter counts, and + * so on are ok, while checks that load or compile user code or depend + * on other database state may be better avoided. The validator may be + * invoked with checkBody false at times when not all + * expected state may be in place, such as during {@code pg_restore} + * or {@code pg_upgrade}. + *
<p>
+ * This default implementation checks nothing. + */ + default void additionalChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + } + + /** + * Prepares a template for a call of the routine target. + *
<p>
+ * This method is never called without + * {@link #essentialChecks essentialChecks} having been called + * immediately prior and completing normally. + *
<p>
+ * The information available at this stage comes from the system + * catalogs, reflecting the static declaration of the target routine. + * The methods of target can be used to examine that + * catalog information; the {@link PLJavaBased PLJavaBased} + * memo holds additional derived information, + * including tuple descriptors for the inputs and outputs. + * (All routines, including those treated by PostgreSQL as + * returning a scalar result, are presented to a PL/Java handler with + * the inputs and outputs represented by {@link TupleTableSlot}.) + * The tuple descriptors seen at this stage may include attributes with + * polymorphic types, not resolvable to specific types until the + * {@code Template} instance this method returns is later applied at + * an actual call site. + *
<p>
+ * This method is never called for a target with + * {@link RegProcedure#returnsSet returnsSet()} true. If the language + * also implements {@link ReturningSets ReturningSets}, any such + * target will be passed to the + * {@link ReturningSets#prepareSRF prepareSRF} method instead; + * otherwise, it will incur an exception stating the language does not + * support returning sets. + *
<p>
+ * This method should return a {@link Template Template}, which may + * encapsulate any useful precomputed values based on the catalog + * information this method consulted. + *
<p>
+ * The template, when its {@link Template#specialize specialize} method + * is invoked on an actual {@link Lookup Lookup} instance, should return + * a {@link Routine Routine} able to apply the target function's logic + * when invoked any number of times on {@link Call Call} instances + * associated with the same {@code Lookup}. + *
<p>
+ * When there is no polymorphic or variadic-"any" funny business in + * target's declaration, this method may return a + * {@code Template} that ignores its argument and always returns the + * same {@code Routine}. It could even do so in all cases, if + * implementing a language where those dynamic details are left to user + * code. + */ + Template prepare(RegProcedure target, PLJavaBased memo) + throws SQLException; + } + + /** + * To be implemented by a language that can be used to write functions + * returning sets (that is, more than a single result or row). + */ + public interface ReturningSets extends PLJavaBasedLanguage + { + /** + * Performs the essential validation checks on a proposed + * PL/Java-based set-returning function. + *
<p>
+ * See {@link Routines#essentialChecks essentialChecks} for + * the explanation of what to consider 'essential' checks. + *
<p>
+ * This default implementation simply delegates to the + * {@link Routines#essentialChecks essentialChecks} method, which must + * therefore be prepared for subject to have either value of + * {@link RegProcedure#returnsSet returnsSet()}. + */ + default void essentialSRFChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + /* + * A cast, because the alternative of having SetReturning extend + * Routines would allow Routines to be omitted from the implements + * clause of a language handler, which I would rather not encourage + * as a matter of style. + */ + ((Routines)this).essentialChecks(subject, memo, checkBody); + } + + /** + * Performs additional validation checks on a proposed + * PL/Java-based set-returning function. + *
<p>
+ * See {@link Routines#additionalChecks additionalChecks} for + * the explanation of what to consider 'additional' checks. + *
<p>
+ * This default implementation simply delegates to the + * {@link Routines#additionalChecks additionalChecks} method, which must + * therefore be prepared for subject to have either value of + * {@link RegProcedure#returnsSet returnsSet()}. + */ + default void additionalSRFChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + ((Routines)this).additionalChecks(subject, memo, checkBody); + } + + /** + * Prepares a template for a call of the set-returning function + * target. + *
<p>
+ * This method is never called without + * {@link #essentialSRFChecks essentialSRFChecks} having been called + * immediately prior and completing normally. + *
<p>
+ * This method is analogous to the + * {@link Routines#prepare prepare} method, but is called only for + * a target with {@link RegProcedure#returnsSet returnsSet()} + * true, and must return {@link SRFTemplate SRFTemplate} rather than + * {@link Template Template}. + *
<p>
+ * The documentation of the {@link Routines#prepare prepare} method + * further describes what is expected of an implementation. + */ + SRFTemplate prepareSRF(RegProcedure target, PLJavaBased memo) + throws SQLException; + } + + /** + * To be implemented by a language that supports triggers. + *
<p>
+ * The methods of this interface will be called, instead of those declared + * in {@link Routines Routines}, for any function declared with return type + * {@link RegType#TRIGGER TRIGGER}. If a language does not implement + * this interface, any attempt to validate or use such a function will incur + * an exception. + */ + public interface Triggers extends PLJavaBasedLanguage + { + /** + * Performs the essential validation checks on a proposed + * trigger function. + *
<p>
+ * See {@link Routines#essentialChecks Routines.essentialChecks} for + * details on what to check here. + *
<p>
+ * Any subject passed to this method is already known to be + * a function with no declared parameters and a non-set return type of + * {@link RegType#TRIGGER TRIGGER}. + *
<p>
+ * This default implementation checks nothing further. + */ + default void essentialTriggerChecks( + RegProcedure subject, PLJavaBased memo, + boolean checkBody) + throws SQLException + { + } + + /** + * Performs additional validation checks on a proposed trigger function. + *
<p>
+ * See {@link Routines#additionalChecks Routines.additionalChecks} for + * details on what to check here. + *
<p>
+ * Any subject passed to this method is already known to be + * a function with no declared parameters and a non-set return type of + * {@link RegType#TRIGGER TRIGGER}, and to have passed the + * {@link #essentialTriggerChecks essentialTriggerChecks}. + *
<p>
+ * This default implementation checks nothing further. + */ + default void additionalTriggerChecks( + RegProcedure subject, PLJavaBased memo, + boolean checkBody) + throws SQLException + { + } + + /** + * Prepares a template for a call of the trigger function + * target. + *
<p>
+ * This method is never called without + * {@link #essentialTriggerChecks essentialTriggerChecks} having + * been called immediately prior and completing normally. + *
<p>
+ * See {@link Routines#prepare Routines.prepare} for background on + * what to do here. + *
<p>
+ * This method should return a {@link TriggerTemplate TriggerTemplate}, + * which may encapsulate any useful precomputed values based on + * the catalog information this method consulted. + *
<p>
+ * Any target passed to this method is already known to be + * a function with no declared parameters and a non-set return type of + * {@link RegType#TRIGGER TRIGGER}, and to have passed the + * {@link #essentialTriggerChecks essentialTriggerChecks}. + *
<p>
+ * The template, when its {@link TriggerTemplate#specialize specialize} + * method is invoked on a {@link Trigger Trigger} instance, should + * return a {@link TriggerFunction TriggerFunction} that can be invoked + * on a {@link TriggerData TriggerData} instance. + *
<p>
+ * The template may generate a {@code TriggerFunction} that encapsulates + * specifics of the {@code Trigger} such as its target table, name, + * arguments, enabled events, scope, and columns of interest. + * A {@code TriggerFunction} will not be invoked for any trigger except + * the one passed to the {@code specialize} call that returned it. + */ + TriggerTemplate prepareTrigger( + RegProcedure target, PLJavaBased memo) + throws SQLException; + } + + /** + * To be implemented by a language that supports routines declared with + * {@code TRANSFORM FOR TYPE}. + *
<p>
+ * In addition to implementing the abstract method declared here, a language + * that implements this interface takes up full responsibility for doing + * whatever must be done to give effect to any such transforms declared on + * routines that use the language. PostgreSQL itself provides nothing but + * a way to declare transforms and associate them with routine declarations. + *
<p>
+ * PL/Java will reject, at validation time when possible, any routine + * declared with {@code TRANSFORM FOR TYPE} if the language does not + * implement this interface. + *
<p>
+ * A language that does implement this interface can learn + * what transforms are to be applied by calling + * {@link PLJavaBased#transforms() memo.transforms()} in its + * {@link Routines#prepare prepare} and/or + * {@link Triggers#prepareTrigger prepareTrigger} methods, and perhaps + * also in its validation methods to detect configuration issues as early + * as possible. + */ + public interface UsingTransforms extends PLJavaBasedLanguage + { + /** + * Performs validation checks on a {@link Transform} that purports to be + * usable with this language. + *
<p>
+ * PL/Java will already have checked that t's + * {@link Transform#language() language()} refers to this language. + * This method should use best effort to make sure that t's + * {@link Transform#fromSQL() fromSQL()} and + * {@link Transform#toSQL() toSQL()} functions are, in fact, functions + * that this language implementation can use to transform values between + * t's target PostgreSQL {@link Transform#type() type()} and + * a data type available to this language. See documentation of the + * {@link Transform#fromSQL() fromSQL()} and + * {@link Transform#toSQL() toSQL()} methods for more detail on what may + * need to be checked. + *
<p>
+ * It is possible for {@link Transform#fromSQL() fromSQL()} + * or {@link Transform#toSQL() toSQL()} to return + * a {@code RegProcedure} instance for which + * {@link RegProcedure#isValid() isValid()} is false, which indicates + * that this language's default from-SQL or to-SQL handling, + * respectively, is to be used for the transform's + * {@linkplain Transform#type() type}. In such cases, this method should + * check that this language has a usable default conversion in the + * indicated direction for that type. + *
<p>
+ * This method should return normally on success, otherwise throwing + * an informative exception. Unless there is a more-specific + * choice, {@link SQLSyntaxErrorException} with {@code SQLState} + * {@code 42P17} corresponds to PostgreSQL's + * {@code invalid_object_definition}. + */ + void essentialTransformChecks(Transform t) throws SQLException; + } + + /** + * To be implemented by a language that supports inline code blocks. + *
<h2>
Transaction control
</h2>
+ *
<p>
+ * A {@code DO} block is allowed to start, commit, and roll back top-level + * transactions, as long as it was not invoked inside an existing explicit + * transaction. The atomic parameter passed to + * {@link #execute execute} will be {@code true} if transaction control + * is disallowed. + *
<p>
+ * A handler may use this information to impose its own (for example, + * compile-time) limits on the availability of transaction-control + * operations. Any use of SPI by the code block will be appropriately + * limited with no need for attention from the handler, as PL/Java + * propagates the atomic/nonatomic flag to SPI always. + */ + public interface InlineBlocks extends PLJavaBasedLanguage + { + /** + * Parses and executes an inline code block. + * @param source_text the inline code to be parsed and executed + * @param atomic true if transaction control actions must be disallowed + * within the code block + */ + void execute(String source_text, boolean atomic) throws SQLException; + } + + /** + * The result of a {@link Template#specialize specialize} call on + * a {@link Template Template}. + *
<p>
+ * An instance can incorporate whatever can be precomputed based on the + * resolved parameter types and other information available to + * {@code specialize}. Its {@link #call call} method will then be invoked to + * supply the arguments and produce the results for each call made + * at that call site. + */ + @FunctionalInterface + public interface Routine + { + /** + * Actually executes the prepared and specialized {@code Routine}, using + * the arguments and other call-specific information passed in + * {@code fcinfo}. + *
<p>
+ * Various special cases of routine calls (triggers, procedure calls, + * and so on) can be distinguished by the specific subtypes of + * {@link Call.Context} that may be returned by + * {@code fcinfo.}{@link Call#context() context()}. + */ + void call(Call fcinfo) throws SQLException; + } + + /** + * The result of a {@link TriggerTemplate#specialize specialize} call on + * a {@link TriggerTemplate TriggerTemplate}. + *
<p>
+ * An instance can incorporate whatever can be precomputed based on the + * specific {@link Trigger Trigger} that was passed to {@code specialize}. + * Its {@link #apply apply} method will then be invoked to act on + * the {@link TriggerData TriggerData} and produce the results each time + * that trigger fires. + */ + @FunctionalInterface + public interface TriggerFunction + { + /** + * Actually executes the prepared and specialized + * {@code TriggerFunction}, with the triggering data available in + * triggerData. + *
<p>
+ * The return value, ignored for an {@link Called#AFTER AFTER} trigger, + * and restricted to null for any + * {@link Called#BEFORE BEFORE} {@link Scope#STATEMENT STATEMENT} + * trigger, can influence the triggering operation for other types + * of triggers. To permit the operation with no changes by the trigger, + * return exactly {@link TriggerData#triggerTuple triggerTuple} (for + * a trigger on {@link Event#INSERT INSERT} or + * {@link Event#DELETE DELETE}), or exactly + * {@link TriggerData#newTuple newTuple} (for a trigger on + * {@link Event#UPDATE UPDATE}). To suppress the triggering operation, + * return null. + * @return a TupleTableSlot, or null + */ + TupleTableSlot apply(TriggerData triggerData) throws SQLException; + } + + /** + * The result of a {@link Routines#prepare prepare} call on a PL/Java-based + * routine. + *
<p>
+ * An instance should depend only on the static catalog information for the + * routine as passed to {@code prepare}, and may encapsulate any values that + * can be precomputed from that information alone. Its + * {@link #specialize specialize} method will be called, passing information + * specific to a call site, to obtain a {@link Routine Routine}. + */ + @FunctionalInterface + public interface Template + { + /** + * Given the information present at a particular call site, specialize + * this template into a {@link Routine Routine} that will handle calls + * through this call site. + *
<p>
+ * Typical activities for {@code specialize} would be to consult + * flinfo's {@link Lookup#inputsDescriptor inputsDescriptor} + * and {@link Lookup#outputsDescriptor outputsDescriptor} for the number + * and types of the expected input and output parameters, though it is + * unnecessary if the tuple descriptors obtained at + * {@link Routines#prepare prepare} time included no unresolved types. + * The {@link Lookup#inputsAreSpread inputsAreSpread} method should be + * consulted if the routine has a variadic parameter of the wildcard + * {@code "any"} type. + */ + Routine specialize(Lookup flinfo) throws SQLException; + } + + /** + * Superinterface for the result of a + * {@link ReturningSets#prepareSRF prepareSRF} call on a PL/Java-based + * set-returning function. + *
<p>
+ * An instance returned by {@link ReturningSets#prepareSRF prepareSRF} must + * implement at least one of the member subinterfaces. If it implements + * more than one, it will need to override the {@link #negotiate negotiate} + * method to select the behavior to be used at a given call site. + */ + public interface SRFTemplate + { + /** + * Returns the index of a preferred subinterface of {@code SRFTemplate} + * among a list of those the caller supports. + *
<p>
+ * The list is ordered with a caller's more-preferred choices early. + *
<p>
+ * An implementation could simply return the first index of an + * allowed class C such that + * {@code this instanceof C} to use the caller's preferred method + * always, or could make a choice informed by characteristics of + * the template. + * @return the index within allowed of the interface to be + * used at this call site, or -1 if no interface in allowed + * is supported. + */ + int negotiate(List> allowed); + + /** + * An {@code SRFTemplate} subinterface that can generate + * a specialization returning the set result materialized in + * a {@code Tuplestore}. + */ + interface Materialize extends SRFTemplate + { + /** + * {@inheritDoc} + *
<p>
+ * This default implementation simply returns + * {@code allowed.indexOf(Materialize.class)}. + */ + @Override + default int negotiate(List> allowed) + { + return allowed.indexOf(Materialize.class); + } + } + + /** + * An {@code SRFTemplate} subinterface that can generate + * a specialization returning the set result in a series of calls + * each returning one value or row. + */ + interface ValuePerCall extends SRFTemplate + { + /** + * {@inheritDoc} + *
<p>
+ * This default implementation simply returns + * {@code allowed.indexOf(ValuePerCall.class)}. + */ + @Override + default int negotiate(List> allowed) + { + return allowed.indexOf(ValuePerCall.class); + } + + SRFFirst specializeValuePerCall(Lookup flinfo) throws SQLException; + } + } + + /** + * The result of a {@link SRFTemplate.ValuePerCall#specializeValuePerCall + * specializeValuePerCall} call on an {@link SRFTemplate SRFTemplate}. + *
<p>
+ * An instance can incorporate whatever can be precomputed based on the + * resolved parameter types and other information available to + * {@code specializeValuePerCall}. Its {@link #firstCall firstCall} method + * will then be invoked, for each call made at that call site, to supply the + * arguments and obtain an instance of {@link SRFNext SRFNext} whose + * {@link SRFNext#nextResult nextResult} method will be called, as many + * times as needed, to retrieve all rows of the result. + */ + @FunctionalInterface + public interface SRFFirst + { + /** + * Executes the prepared and specialized {@code SRFFirst} code, using + * the arguments and other call-specific information passed in + * {@code fcinfo} and returns an instance of {@link SRFNext SRFNext} + * to produce a result set row by row. + *
<p>
+ * This method should not access fcinfo's + * {@link RegProcedure.Call#result result} or + * {@link RegProcedure.Call#isNull isNull} methods to return any value, + * but should return an instance of {@code SRFNext} that will do so. + */ + SRFNext firstCall(Call fcinfo) throws SQLException; + } + + /** + * The result of a {@link SRFFirst#firstCall firstCall} call on an instance + * of {@link SRFFirst SRFFirst}. + *
<p>
+ * The {@link #nextResult nextResult} method will be called repeatedly + * as long as its return value indicates another row may follow, unless + * PostgreSQL earlier determines no more rows are needed. + *
<p>
+ * The {@link #close close} method will be called after the last call of + * {@code nextResult}, whether because all rows have been read or because + * PostgreSQL has read all it needs. It is not called, however, + * if {@code nextResult} has returned {@link Result#SINGLE Result.SINGLE}. + * + */ + public interface SRFNext extends AutoCloseable + { + /** + * Called when PostgreSQL will be making no further calls of + * {@link #nextResult nextResult} for this result set, which may be + * before all results have been fetched. + *
<p>
+ * When a degenerate single-row set is returned (as indicated by + * {@link #nextResult nextResult} returning + * {@link Result#SINGLE Result.SINGLE}), this method is not called. + */ + void close(); + + /** + * Called to return a single result. + *
<p>
+ * As with non-set-returning routines, this method should store result + * values into {@link RegProcedure.Call#result fcinfo.result()} or set + * {@link RegProcedure.Call#isNull fcinfo.isNull(true)} (which, in this + * context, produces a row of all nulls). If there is no result + * to store, the method should return {@link Result#END Result.END}: + * no row will be produced, and the result set is considered complete. + *
<p>
+ * If the method has exactly one row to return, it may store the values + * and return {@link Result#SINGLE Result.SINGLE}: the result will be + * considered to be just that one row. None of the rest of the + * set-returning protocol will be involved, and + * {@link SRFNext#close close()} will not be called. + *
<p>
+ * Otherwise, the method should return + * {@link Result#MULTIPLE Result.MULTIPLE} after storing each row, and + * conclude by returning {@link Result#END Result.END} from the final + * call (without storing anything). + *
<p>
+ * It is a protocol violation to return + * {@link Result#SINGLE Result.SINGLE} from any but the very first call. + *
<p>
+ * The arguments in + * {@link RegProcedure.Call#arguments fcinfo.arguments()} will not be + * changing as the rows of a single result are retrieved. Any argument + * values that will be referred to repeatedly may be worth fetching once + * in the {@link SRFFirst#firstCall firstCall} method and their Java + * representations captured in this object, rather than fetching and + * converting them repeatedly. + */ + Result nextResult(Call fcinfo) throws SQLException; + + /** + * Used to indicate the state of the result sequence on return from + * a single call in the {@code ValuePerCall} protocol. + */ + enum Result + { + /** + * There is exactly one row and this call has returned it. + *
<p>
+ * None of the rest of the set-returning protocol will be involved, + * and {@link SRFNext#close close()} will not be called. + */ + SINGLE, + + /** + * This call has returned one of possibly multiple rows, and + * another call should be made to retrieve the next row if any. + */ + MULTIPLE, + + /** + * This call has no row to return and the result sequence + * is complete. + */ + END + } + } + + /** + * The result of a {@link Triggers#prepareTrigger prepareTrigger} call on + * a PL/Java-based trigger function. + *
<p>
+ * An instance should depend only on the static catalog information for the + * function as passed to {@code prepareTrigger}, and may encapsulate any + * values that can be precomputed from that information alone. Its + * {@link #specialize specialize} method will be called, passing information + * specific to one trigger, to obtain a + * {@link TriggerFunction TriggerFunction}. + */ + @FunctionalInterface + public interface TriggerTemplate + { + /** + * Given the specifics of one {@link Trigger Trigger}, specialize + * this template into a {@link TriggerFunction TriggerFunction} that + * will handle calls through this trigger. + *
<p>
+ * Typical activities for {@code specialize} would be to consult + * trigger's {@link Trigger#name name}, + * {@link Trigger#relation relation}, {@link Trigger#called called}, + * {@link Trigger#events events}, {@link Trigger#scope scope}, + * {@link Trigger#arguments arguments}, and + * {@link Trigger#columns columns} to + * determine the kind of trigger it is, and fold those values into + * the returned {@code TriggerFunction}. + *
<p>
+ * This stage is well suited for checking that the characteristics of + * the trigger (events, scope, when called, arguments, column types of + * the target table) conform to what the trigger function can handle. + */ + TriggerFunction specialize(Trigger trigger) throws SQLException; + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/RolePrincipal.java b/pljava-api/src/main/java/org/postgresql/pljava/RolePrincipal.java new file mode 100644 index 000000000..cd68813de --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/RolePrincipal.java @@ -0,0 +1,234 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import java.io.InvalidObjectException; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectStreamException; +import java.io.Serializable; + +import java.nio.file.attribute.GroupPrincipal; + +import java.util.function.Function; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Pseudo; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +public abstract class RolePrincipal extends BasePrincipal +{ + private static final long serialVersionUID = 5650953533699613976L; + + RolePrincipal(String name) + { + super(name); + constrain(IllegalArgumentException::new); + } + + RolePrincipal(Simple name) + { + super(name); + constrain(IllegalArgumentException::new); + /* + * Ensure the subclasses' PUBLIC singletons really are, by rejecting the + * Pseudo.PUBLIC identifier in this constructor. 
The subclasses use + * private constructors that call the specialized one below when + * initializing their singletons. + */ + if ( s_public == name ) + throw new IllegalArgumentException( + "attempt to create non-singleton PUBLIC RolePrincipal"); + } + + RolePrincipal(Pseudo name) + { + super(name); + constrain(IllegalArgumentException::new); + } + + private final void constrain(Function exc) + throws E + { + Class c = getClass(); + if ( c != Authenticated.class && c != Session.class + && c != Outer.class && c != Current.class ) + throw exc.apply( + "forbidden to create unknown RolePrincipal subclass: " + + c.getName()); + + /* + * Unlike many cases where a delimited identifier can be used whose + * regular-identifier form is a reserved word, PostgreSQL in fact + * forbids giving any role a name that the regular identifier public + * would match, even if the name is quoted. + */ + if ( ( "public".equals(m_name.nonFolded()) + || "public".equals(m_name.pgFolded()) ) && m_name != s_public ) + throw exc.apply( + "forbidden to create a RolePrincipal with name " + + "that matches \"public\" by PostgreSQL rules"); + } + + private void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException + { + in.defaultReadObject(); + constrain(InvalidObjectException::new); + } + + static final Pseudo s_public = Pseudo.PUBLIC; + + /** + * Compare two {@code RolePrincipal}s for equality, with special treatment + * for the {@code PUBLIC} ones. + *
<p>
+ * Each concrete subclass of {@code RolePrincipal} has a singleton + * {@code PUBLIC} instance, which will only compare equal to itself (this + * method is not the place to say everything matches {@code PUBLIC}, because + * {@code equals} should be symmetric, and security checks should not be). + * Otherwise, the result is that of + * {@link Identifier#equals(Object) Identifier.equals}. + *
<p>
+ * Note that these {@code PUBLIC} instances are distinct from the wild-card + * principal names that can appear in the Java policy file: those are + * handled without ever instantiating the class, and simply match any + * principal with the identically-spelled class name. + */ + @Override + public final boolean equals(Object other) + { + if ( this == other ) + return true; + /* + * Because the pseudo "PUBLIC" instances are restricted to being + * singletons (one per RolePrincipal subclass), the above test will have + * already handled the matching case for those. Below, if either one is + * a PUBLIC instance, its m_name won't match anything else, which is ok + * because of the PostgreSQL rule that no role can have a potentially + * matching name anyway. + */ + if ( ! getClass().isInstance(other) ) + return false; + RolePrincipal o = (RolePrincipal)other; + return m_name.equals(o.m_name); + } + + public static final class Authenticated extends RolePrincipal + { + private static final long serialVersionUID = -4558155344619605758L; + + public static final Authenticated PUBLIC = new Authenticated(s_public); + + public Authenticated(String name) + { + super(name); + } + + public Authenticated(Simple name) + { + super(name); + } + + private Authenticated(Pseudo name) + { + super(name); + } + + private Object readResolve() throws ObjectStreamException + { + return m_name == s_public ? PUBLIC : this; + } + } + + public static final class Session extends RolePrincipal + { + private static final long serialVersionUID = -598305505864518470L; + + public static final Session PUBLIC = new Session(s_public); + + public Session(String name) + { + super(name); + } + + public Session(Simple name) + { + super(name); + } + + private Session(Pseudo name) + { + super(name); + } + + private Object readResolve() throws ObjectStreamException + { + return m_name == s_public ? 
PUBLIC : this; + } + } + + public static final class Outer extends RolePrincipal + { + private static final long serialVersionUID = 2177159367185354785L; + + public static final Outer PUBLIC = new Outer(s_public); + + public Outer(String name) + { + super(name); + } + + public Outer(Simple name) + { + super(name); + } + + private Outer(Pseudo name) + { + super(name); + } + + private Object readResolve() throws ObjectStreamException + { + return m_name == s_public ? PUBLIC : this; + } + } + + public static final class Current extends RolePrincipal + implements GroupPrincipal + { + private static final long serialVersionUID = 2816051825662188997L; + + public static final Current PUBLIC = new Current(s_public); + + public Current(String name) + { + super(name); + } + + public Current(Simple name) + { + super(name); + } + + private Current(Pseudo name) + { + super(name); + } + + private Object readResolve() throws ObjectStreamException + { + return m_name == s_public ? PUBLIC : this; + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/Session.java b/pljava-api/src/main/java/org/postgresql/pljava/Session.java index 4dbc96947..2169bb644 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/Session.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/Session.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -17,6 +17,8 @@ import java.sql.Connection; import java.sql.SQLException; +import java.util.Properties; + /** * A Session brings together some useful methods and data for the current * database session. 
It provides a set of attributes (a @@ -35,6 +37,21 @@ */ public interface Session { + /** + * Returns an unmodifiable defensive copy of the Java + * {@link System#getProperties() system properties} taken early in PL/Java + * startup before user code has an opportunity to write them. + *
<p>
+ * When PL/Java is running without security policy enforcement, as on stock + * Java 24 and later, using the frozen properties can simplify defensive + * coding against the possibility of arbitrary property modifications. + * + * @return a {@link Properties} object that departs from the API spec by + * throwing {@link UnsupportedOperationException} from any method if the + * properties would otherwise be modified. + */ + Properties frozenSystemProperties(); + /** * Adds the specified {@code listener} to the list of listeners that will * receive savepoint events. An {@link AccessControlContext} saved by this diff --git a/pljava-api/src/main/java/org/postgresql/pljava/SessionManager.java b/pljava-api/src/main/java/org/postgresql/pljava/SessionManager.java index 5a8c0ed77..3211e4db0 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/SessionManager.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/SessionManager.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -23,21 +23,41 @@ */ public class SessionManager { - private static Session s_session; - /** * Returns the current session. 
*/ public static Session current() throws SQLException { - if(s_session == null) + try + { + return Holder.s_session; + } + catch ( ExceptionInInitializerError e ) { - s_session = load( - Session.class.getModule().getLayer(), Session.class) - .findFirst().orElseThrow(() -> new SQLException( - "could not obtain PL/Java Session object")); + Throwable c = e.getCause(); + if ( c instanceof SQLException ) + throw (SQLException)c; + throw e; + } + } + + private static class Holder + { + private static final Session s_session; + + static { + try + { + s_session = load( + Session.class.getModule().getLayer(), Session.class) + .findFirst().orElseThrow(() -> new SQLException( + "could not obtain PL/Java Session object")); + } + catch ( SQLException e ) + { + throw new ExceptionInInitializerError(e); + } } - return s_session; } } diff --git a/pljava-api/src/main/java/org/postgresql/pljava/TargetList.java b/pljava-api/src/main/java/org/postgresql/pljava/TargetList.java new file mode 100644 index 000000000..5e643a2af --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/TargetList.java @@ -0,0 +1,971 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import java.sql.SQLException; // for javadoc +import java.sql.SQLXML; // for javadoc + +import java.util.Arrays; +import java.util.BitSet; +import java.util.Iterator; +import java.util.List; + +import java.util.stream.Stream; + +import org.postgresql.pljava.Adapter.As; +import org.postgresql.pljava.Adapter.AsBoolean; +import org.postgresql.pljava.Adapter.AsByte; +import org.postgresql.pljava.Adapter.AsChar; +import org.postgresql.pljava.Adapter.AsDouble; +import org.postgresql.pljava.Adapter.AsFloat; +import org.postgresql.pljava.Adapter.AsInt; +import org.postgresql.pljava.Adapter.AsLong; +import org.postgresql.pljava.Adapter.AsShort; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.Portal; // for javadoc +import org.postgresql.pljava.model.TupleDescriptor; // for javadoc +import org.postgresql.pljava.model.TupleTableSlot; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Identifies attributes to be retrieved from a set of tuples. + *

+ * {@code TargetList} is more general than {@link Projection Projection}: in a + * {@code Projection}, no attribute can appear more than once, but repetition + * is possible in a {@code TargetList}. + *

+ * In general, it will be more efficient, if processing logic requires more than + * one copy of some attribute's value, to simply mention the attribute once in a + * {@code Projection}, and have the Java logic then copy the value, rather than + * fetching and converting it twice from the database native form. But there + * may be cases where that isn't workable, such as when the value is needed in + * different Java representations from different {@link Adapter}s, or when the + * Java representation is a type like {@link SQLXML} that can only be used once. + * Such cases call for a {@code TargetList} in which the attribute is mentioned + * more than once, to be separately fetched. + *

+ * Given a {@code TargetList}, query results can be processed by supplying a + * lambda body to {@link #applyOver(Iterable,Cursor.Function) applyOver}. The + * lambda will be supplied a {@link Cursor Cursor} whose {@code apply} methods + * can be used to break out the wanted values on each row, in the + * {@code TargetList} order. + */ +public interface TargetList extends List +{ + /** + * A {@code TargetList} in which no one attribute may appear more than once. + *

+ * The prime example of a {@code Projection} is a {@link TupleDescriptor} as + * obtained, for example, from the {@link Portal} for a query result. + *

+ * To preserve the "no attribute appears more than once" property, the only + * new {@code Projection}s derivable from an existing one involve selecting + * a subset of its attributes, and possibly changing their order. The + * {@code project} methods taking attribute names, attribute indices, or the + * attributes themselves can be used to do so, as can the {@code subList} + * method. + */ + interface Projection extends TargetList + { + /** + * From this {@code Projection}, returns a {@code Projection} containing + * only the attributes matching the supplied names and in the + * order of the argument list. + * @throws IllegalArgumentException if more names are supplied than this + * Projection has attributes, or if any remain unmatched after matching + * each attribute in this Projection at most once. + */ + Projection project(Simple... names); + + /** + * From this {@code Projection}, returns a {@code Projection} containing + * only the attributes matching the supplied names and in the + * order of the argument list. + *

+ * The names will be converted to {@link Simple Identifier.Simple} by + * its {@link Simple#fromJava fromJava} method before comparison. + * @throws IllegalArgumentException if more names are supplied than this + * Projection has attributes, or if any remain unmatched after matching + * each attribute in this Projection at most once. + */ + default Projection project(CharSequence... names) + { + return project( + Arrays.stream(names) + .map(CharSequence::toString) + .map(Simple::fromJava) + .toArray(Simple[]::new) + ); + } + + /** + * Returns a {@code Projection} containing only the attributes found + * at the supplied indices in this {@code Projection}, and in + * the order of the argument list. + *

+ * The index of the first attribute is zero. + * @throws IllegalArgumentException if more indices are supplied than + * this Projection has attributes, if any index is negative or beyond + * the last index in this Projection, or if any index appears more than + * once. + */ + Projection project(int... indices); + + /** + * Returns a {@code Projection} containing only the attributes found + * at the supplied indices in this {@code Projection}, and in + * the order of the argument list. + *

+ * The index of the first attribute is zero. + * @throws IllegalArgumentException if more indices are supplied than + * this Projection has attributes, if any index is negative or beyond + * the last index in this Projection, or if any index appears more than + * once. + */ + Projection project(short... indices); + + /** + * Returns a {@code Projection} containing only the attributes whose + * indices in this {@code Projection} are set (true) in the supplied + * {@link BitSet BitSet}, and in the same order. + *

+ * The index of the first attribute is zero. + * @throws IllegalArgumentException if more bits are set than this + * Projection has attributes, or if any bit is set beyond the last index + * in this Projection. + */ + Projection project(BitSet indices); + + /** + * Returns a {@code Projection} containing only the attributes whose + * indices in this {@code Projection} are set (true) in the supplied + * {@link BitSet BitSet}, and in the same order. + *

+ * The index of the first attribute is one. + * @throws IllegalArgumentException if more bits are set than this + * Projection has attributes, or if any bit is set before the first or + * beyond the last corresponding to a 1-based index in this Projection. + */ + Projection sqlProject(BitSet indices); + + /** + * Like {@link #project(int...) project(int...)} but using SQL's 1-based + * indexing convention. + *

+ * The index of the first attribute is 1. + * @throws IllegalArgumentException if more indices are supplied than + * this Projection has attributes, if any index is nonpositive or beyond + * the last 1-based index in this Projection, or if any index appears + * more than once. + */ + Projection sqlProject(int... indices); + + /** + * Like {@link #project(int...) project(int...)} but using SQL's 1-based + * indexing convention. + *

+ * The index of the first attribute is 1. + * @throws IllegalArgumentException if more indices are supplied than + * this Projection has attributes, if any index is nonpositive or beyond + * the last 1-based index in this Projection, or if any index appears + * more than once. + */ + Projection sqlProject(short... indices); + + /** + * Returns a {@code Projection} containing only attributes + * and in the order of the argument list. + *

+ * The attributes must be found in this {@code Projection} by exact + * reference identity. + * @throws IllegalArgumentException if more attributes are supplied than + * this Projection has, or if any remain unmatched after matching + * each attribute in this Projection at most once. + */ + Projection project(Attribute... attributes); + + @Override + Projection subList(int fromIndex, int toIndex); + } + + @Override + TargetList subList(int fromIndex, int toIndex); + + /** + * Like {@link #get(int) get} but following the SQL convention where the + * first element has index 1. + */ + default Attribute sqlGet(int oneBasedIndex) + { + try + { + return get(oneBasedIndex - 1); + } + catch ( IndexOutOfBoundsException e ) + { + throw (IndexOutOfBoundsException) + new IndexOutOfBoundsException(String.format( + "sqlGet() one-based index %d should be > 0 and <= %d", + oneBasedIndex, size() + )) + .initCause(e); + } + } + + /** + * Executes the function f, once, supplying a + * {@link Cursor Cursor} that can be iterated over the supplied + * tuples and used to process each tuple. + * @return whatever f returns. + */ + R applyOver( + Iterable tuples, Cursor.Function f) + throws X, SQLException; + + /** + * Executes the function f, once, supplying a + * {@link Cursor Cursor} that can be used to process the tuple. + *

+ * The {@code Cursor} can be iterated, just as if a one-row + * {@code Iterable} had been passed to + * {@link #applyOver(Iterable,Cursor.Function) applyOver(tuples, f)}, but it + * need not be; it will already have the single supplied tuple as + * its current row, ready for its {@code apply} methods to be used. + * @return whatever f returns. + */ + R applyOver( + TupleTableSlot tuple, Cursor.Function f) + throws X, SQLException; + + /** + * A {@code TargetList} that has been bound to a source of tuples and can + * execute code with the wanted attribute values available. + *

+ * Being derived from a {@link TargetList}, a {@code Cursor} serves directly + * as an {@code Iterator}, supplying the attributes in the + * {@code TargetList} order. + *

+ * Being bound to a source of tuples, a {@code Cursor} also implements + * {@code Iterable}, and can supply an iterator over the bound tuples in + * order. The {@code Cursor} is mutated during the iteration, having a + * current row that becomes each tuple in turn. The object returned by that + * iterator is the {@code Cursor} itself, so the caller has no need for the + * iteration variable, and can use the "unnamed variable" {@code _} for it, + * in Java versions including that feature (which appears in Java 21 but + * only with {@code --enable-preview}). In older Java versions it can be + * given some other obviously throwaway name. + *

+ * When a {@code Cursor} has a current row, its {@code apply} methods can be + * used to execute a lambda body with its parameters mapped to the row's + * values, in {@code TargetList} order, or to a prefix of those, should + * a lambda with fewer parameters be supplied. + *

+ * Each overload of {@code apply} takes some number of + * {@link Adapter Adapter} instances, each of which must be suited to the + * PostgreSQL type at its corresponding position, followed by a lambda body + * with the same number of parameters, each of which will receive the value + * from the corresponding {@code Adapter}, and have an inferred type + * matching what that {@code Adapter} produces. + *

+ * Within a lambda body with fewer parameters than the length of the + * {@code TargetList}, the {@code Cursor}'s attribute iterator has been + * advanced by the number of columns consumed. It can be used again to apply + * an inner lambda body to remaining columns. This "curried" style can be + * useful when the number or types of values to be processed will not + * directly fit any available {@code apply} signature. + *

+	 *  overall_result = targetlist.applyOver(tuples, c ->
+	 *  {
+	 *      var resultCollector = ...;
+	 *      for ( Cursor _ : c )
+	 *      {
+	 *          var oneResult = c.apply(
+	 *              adap0, adap1,
+	 *             ( val0,  val1 ) -> c.apply(
+	 *                  adap2, adap3,
+	 *                 ( val2,  val3 ) -> process(val0, val1, val2, val3)));
+     *          resultCollector.collect(oneResult);
+	 *      }
+	 *      return resultCollector;
+	 *  });
+	 *
+ *

+ * As the {@code apply} overloads for reference-typed values and those for + * primitive values are separate, currying must be used when processing a + * mix of reference and primitive types. + *

+ * The {@code Cursor}'s attribute iterator is reset each time the tuple + * iterator moves to a new tuple. It is also reset on return (normal or + * exceptional) from an outermost {@code apply}, in case another function + * should then be applied to the row. + *

+ * The attribute iterator is not reset on return from an inner (curried) + * {@code apply}. Therefore, it is possible to process a tuple having + * repeating groups of attributes with matching types, reusing an inner + * lambda and its matching adapters for each occurrence of the group. + *

+ * If the tuple is nothing but repeating groups, the effect can still be + * achieved by using the zero-parameter {@code apply} overload as the + * outermost. + */ + interface Cursor extends Iterator, Iterable + { + /** + * Returns an {@link Iterator} that will return this {@code Cursor} + * instance itself, repeatedly, mutated each time to represent the next + * of the bound list of tuples. + *

+ * Because the {@code Iterator} will produce the same {@code Cursor} + * instance on each iteration, and the instance is mutated, saving + * values the iterator returns will not have effects one might expect, + * and no more than one iteration should be in progress at a time. + *

+ * The {@code Iterator} that this {@code Cursor} represents + * will be reset to the first attribute each time a new tuple is + * presented by the {@code Iterator}. + * @throws IllegalStateException within the code body passed to any + * {@code apply} method. Within any such code body, the cursor simply + * represents its current tuple. Only outside of any {@code apply()} may + * {@code iterator()} be called. + */ + @Override // Iterable + Iterator iterator(); + + /** + * Returns a {@link Stream} that will present this {@code Cursor} + * instance itself, repeatedly, mutated each time to represent the next + * of the bound list of tuples. + *

+ * The stream should be used within the scope of the + * {@link #applyOver(Iterable,Function) applyOver} that has made + * this {@code Cursor} available. + *

+ * Because the {@code Stream} will produce the same {@code Cursor} + * instance repeatedly, and the instance is mutated, saving instances + * will not have effects one might expect, and no more than one + * stream should be in progress at a time. Stateful operations such as + * {@code distinct} or {@code sorted} will make no sense applied to + * these instances. Naturally, this method does not return a parallel + * {@code Stream}. + *

+ * These restrictions do not satisfy all expectations of a + * {@code Stream}, and may be topics for future work as this API is + * refined. + *

+ * The {@code Iterator} that this {@code Cursor} represents + * will be reset to the first attribute each time a new tuple is + * presented by the {@code Stream}. + * @throws IllegalStateException within the code body passed to any + * {@code apply} method. Within any such code body, the cursor simply + * represents its current tuple. Only outside of any {@code apply()} may + * {@code stream()} be called. + */ + Stream stream(); + + R apply( + L0 f) + throws X; + + R apply( + As a0, + L1 f) + throws X; + + R apply( + As a0, As a1, + L2 f) + throws X; + + R apply( + As a0, As a1, As a2, + L3 f) + throws X; + + R apply( + As a0, As a1, As a2, As a3, + L4 f) + throws X; + + R apply( + As a0, As a1, As a2, As a3, + As a4, + L5 f) + throws X; + + R apply( + As a0, As a1, As a2, As a3, + As a4, As a5, + L6 f) + throws X; + + R apply( + As a0, As a1, As a2, As a3, + As a4, As a5, As a6, + L7 f) + throws X; + + R apply( + As a0, As a1, As a2, As a3, + As a4, As a5, As a6, As a7, + L8 f) + throws X; + + R apply( + As a0, As a1, As a2, As a3, + As a4, As a5, As a6, As a7, + As a8, As a9, As aa, As ab, + As ac, As ad, As ae, As af, + L16 f) + throws X; + + R apply( + AsLong a0, + J1 f) + throws X; + + R apply( + AsLong a0, AsLong a1, + J2 f) + throws X; + + R apply( + AsLong a0, AsLong a1, AsLong a2, + J3 f) + throws X; + + R apply( + AsLong a0, AsLong a1, AsLong a2, AsLong a3, + J4 f) + throws X; + + R apply( + AsDouble a0, + D1 f) + throws X; + + R apply( + AsDouble a0, AsDouble a1, + D2 f) + throws X; + + R apply( + AsDouble a0, AsDouble a1, AsDouble a2, + D3 f) + throws X; + + R apply( + AsDouble a0, AsDouble a1, AsDouble a2, AsDouble a3, + D4 f) + throws X; + + R apply( + AsInt a0, + I1 f) + throws X; + + R apply( + AsInt a0, AsInt a1, + I2 f) + throws X; + + R apply( + AsInt a0, AsInt a1, AsInt a2, + I3 f) + throws X; + + R apply( + AsInt a0, AsInt a1, AsInt a2, AsInt a3, + I4 f) + throws X; + + R apply( + AsFloat a0, + F1 f) + throws X; + + R apply( + AsFloat a0, 
AsFloat a1, + F2 f) + throws X; + + R apply( + AsFloat a0, AsFloat a1, AsFloat a2, + F3 f) + throws X; + + R apply( + AsFloat a0, AsFloat a1, AsFloat a2, AsFloat a3, + F4 f) + throws X; + + R apply( + AsShort a0, + S1 f) + throws X; + + R apply( + AsShort a0, AsShort a1, + S2 f) + throws X; + + R apply( + AsShort a0, AsShort a1, AsShort a2, + S3 f) + throws X; + + R apply( + AsShort a0, AsShort a1, AsShort a2, AsShort a3, + S4 f) + throws X; + + R apply( + AsChar a0, + C1 f) + throws X; + + R apply( + AsChar a0, AsChar a1, + C2 f) + throws X; + + R apply( + AsChar a0, AsChar a1, AsChar a2, + C3 f) + throws X; + + R apply( + AsChar a0, AsChar a1, AsChar a2, AsChar a3, + C4 f) + throws X; + + R apply( + AsByte a0, + B1 f) + throws X; + + R apply( + AsByte a0, AsByte a1, + B2 f) + throws X; + + R apply( + AsByte a0, AsByte a1, AsByte a2, + B3 f) + throws X; + + R apply( + AsByte a0, AsByte a1, AsByte a2, AsByte a3, + B4 f) + throws X; + + R apply( + AsBoolean a0, + Z1 f) + throws X; + + R apply( + AsBoolean a0, AsBoolean a1, + Z2 f) + throws X; + + R apply( + AsBoolean a0, AsBoolean a1, AsBoolean a2, + Z3 f) + throws X; + + R apply( + AsBoolean a0, AsBoolean a1, AsBoolean a2, AsBoolean a3, + Z4 f) + throws X; + + @FunctionalInterface + interface Function + { + R apply(Cursor c); + } + + @FunctionalInterface + interface L0 + { + R apply() throws X; + } + + @FunctionalInterface + interface L1 + { + R apply(A v0) throws X; + } + + @FunctionalInterface + interface L2 + { + R apply(A v0, B v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface L3 + { + R apply(A v0, B v1, C v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface L4 + { + R apply(A v0, B v1, C v2, D v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface L5 + { + R apply(A v0, B v1, C v2, D v3, E v4) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface L6 + { + R apply(A v0, B v1, C v2, D v3, E v4, F v5) throws X; + } + 
+ /** + * @hidden + */ + @FunctionalInterface + interface L7 + { + R apply(A v0, B v1, C v2, D v3, E v4, F v5, G v6) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface L8 + { + R apply(A v0, B v1, C v2, D v3, E v4, F v5, G v6, H v7) + throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface L16 + { + R apply( + A v0, B v1, C v2, D v3, E v4, F v5, G v6, H v7, + I v8, J v9, K va, L vb, M vc, N vd, O ve, P vf) + throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface J1 + { + R apply(long v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface J2 + { + R apply(long v0, long v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface J3 + { + R apply(long v0, long v1, long v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface J4 + { + R apply(long v0, long v1, long v2, long v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface D1 + { + R apply(double v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface D2 + { + R apply(double v0, double v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface D3 + { + R apply(double v0, double v1, double v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface D4 + { + R apply(double v0, double v1, double v2, double v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface I1 + { + R apply(int v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface I2 + { + R apply(int v0, int v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface I3 + { + R apply(int v0, int v1, int v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface I4 + { + R apply(int v0, int v1, int v2, int v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface F1 + { + R apply(float v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface 
F2 + { + R apply(float v0, float v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface F3 + { + R apply(float v0, float v1, float v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface F4 + { + R apply(float v0, float v1, float v2, float v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface S1 + { + R apply(short v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface S2 + { + R apply(short v0, short v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface S3 + { + R apply(short v0, short v1, short v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface S4 + { + R apply(short v0, short v1, short v2, short v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface C1 + { + R apply(char v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface C2 + { + R apply(char v0, char v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface C3 + { + R apply(char v0, char v1, char v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface C4 + { + R apply(char v0, char v1, char v2, char v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface B1 + { + R apply(byte v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface B2 + { + R apply(byte v0, byte v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface B3 + { + R apply(byte v0, byte v1, byte v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface B4 + { + R apply(byte v0, byte v1, byte v2, byte v3) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface Z1 + { + R apply(boolean v0) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface Z2 + { + R apply(boolean v0, boolean v1) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface Z3 + { + R apply(boolean v0, boolean v1, 
boolean v2) throws X; + } + + /** + * @hidden + */ + @FunctionalInterface + interface Z4 + { + R apply(boolean v0, boolean v1, boolean v2, boolean v3) throws X; + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Array.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Array.java new file mode 100644 index 000000000..63cee9dbc --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Array.java @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import java.sql.SQLException; + +import java.util.List; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.Adapter.Contract; + +import org.postgresql.pljava.model.TupleTableSlot.Indexed; + +/** + * Container for functional interfaces presenting a PostgreSQL array. + */ +public interface Array +{ + /** + * A contract whereby an array is returned flattened into a Java list, + * with no attention to its specified dimensionality or index bounds. + */ + @FunctionalInterface + interface AsFlatList extends Contract.Array,E,Adapter.As> + { + /** + * Shorthand for a cast of a suitable method reference to this + * functional interface type. + */ + static AsFlatList of(AsFlatList instance) + { + return instance; + } + + /** + * An implementation that produces a Java list eagerly copied from the + * PostgreSQL array, which is then no longer needed; null elements in + * the array are included in the list. 
+ */ + static List nullsIncludedCopy( + int nDims, int[] dimsAndBounds, Adapter.As adapter, + Indexed slot) + throws SQLException + { + int n = slot.elements(); + E[] result = adapter.arrayOf(n); + for ( int i = 0; i < n; ++ i ) + result[i] = slot.get(i, adapter); + return List.of(result); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Bitstring.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Bitstring.java new file mode 100644 index 000000000..8b43b3b5a --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Bitstring.java @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import java.nio.ByteBuffer; + +import java.util.OptionalInt; + +import org.postgresql.pljava.Adapter.Contract; + +/** + * Container for abstract-type functional interfaces in PostgreSQL's + * {@code BITSTRING} type category. + */ +public interface Bitstring +{ + /** + * The {@code BIT} and {@code VARBIT} types' PostgreSQL semantics: the + * number of bits, and the sequence of bytes they're packed into. + */ + @FunctionalInterface + public interface Bit extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + * @param nBits the actual number of bits in the value, not necessarily + * a multiple of 8. For type BIT, must equal the modifier nBits if + * specified; for VARBIT, must be equal or smaller. + * @param bytes a buffer of ceiling(nBits/8) bytes, not aliasing any + * internal storage, so safely readable (and writable, if useful for + * format conversion). 
Before accessing it in wider units, its byte + * order should be explicitly set. Within each byte, the logical order + * of the bits is from MSB to LSB; beware that this within-byte bit + * order is the reverse of what java.util.BitSet.valueOf(...) expects. + * When nBits is not a multiple of 8, the unused low-order bits of + * the final byte must be zero. + */ + T construct(int nBits, ByteBuffer bytes); + + /** + * Functional interface to obtain information from the PostgreSQL type + * modifier applied to the type. + */ + @FunctionalInterface + interface Modifier + { + /** + * Returns a {@code Bit} function possibly tailored ("curried") + * with the values from a PostgreSQL type modifier on the type. + * @param nBits for the BIT type, the exact number of bits the + * value must have; for VARBIT, the maximum. When not specified, + * the meaning is 1 for BIT, and unlimited for VARBIT. + */ + Bit modify(OptionalInt nBits); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Datetime.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Datetime.java new file mode 100644 index 000000000..0ac42e41d --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Datetime.java @@ -0,0 +1,596 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import java.sql.SQLException; +import java.sql.SQLDataException; + +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.ZoneOffset; + +import static java.time.ZoneOffset.UTC; +import static java.time.temporal.ChronoUnit.DAYS; +import static java.time.temporal.ChronoUnit.MICROS; +import static java.time.temporal.JulianFields.JULIAN_DAY; + +import java.util.OptionalInt; + +import org.postgresql.pljava.Adapter.Contract; + +/** + * Container for abstract-type functional interfaces in PostgreSQL's + * {@code DATETIME} type category. + */ +public interface Datetime +{ + /** + * PostgreSQL "infinitely early" date, as a value of what would otherwise be + * days from the PostgreSQL epoch. + */ + int DATEVAL_NOBEGIN = Integer.MIN_VALUE; + + /** + * PostgreSQL "infinitely late" date, as a value of what would otherwise be + * days from the PostgreSQL epoch. + */ + int DATEVAL_NOEND = Integer.MAX_VALUE; + + /** + * PostgreSQL "infinitely early" timestamp, as a value of what would + * otherwise be microseconds from the PostgreSQL epoch. + */ + long DT_NOBEGIN = Long.MIN_VALUE; + + /** + * PostgreSQL "infinitely late" timestamp, as a value of what would + * otherwise be microseconds from the PostgreSQL epoch. + */ + long DT_NOEND = Long.MAX_VALUE; + + /** + * The PostgreSQL "epoch", 1 January 2000, as a Julian day; the date + * represented by a {@code DATE}, {@code TIMESTAMP}, or {@code TIMESTAMPTZ} + * with a stored value of zero. 
+ */ + int POSTGRES_EPOCH_JDATE = 2451545; + + /** + * Maximum value allowed for a type modifier specifying the seconds digits + * to the right of the decimal point for a {@code TIME} or {@code TIMETZ}. + */ + int MAX_TIME_PRECISION = 6; + + /** + * Maximum value allowed for a type modifier specifying the seconds digits + * to the right of the decimal point for a {@code TIMESTAMP} or + * {@code TIMESTAMPTZ}. + */ + int MAX_TIMESTAMP_PRECISION = 6; + + /** + * The maximum allowed value, inclusive, for a {@code TIME} or the time + * portion of a {@code TIMETZ}. + *

+ * The limit is inclusive; PostgreSQL officially accepts 24:00:00 + * as a valid time value. + */ + long USECS_PER_DAY = 86400000000L; + + /** + * The {@code DATE} type's PostgreSQL semantics: a signed number of days + * since the "Postgres epoch". + */ + @FunctionalInterface + public interface Date extends Contract.Scalar + { + /** + * The PostgreSQL "epoch" as a {@code java.time.LocalDate}. + */ + LocalDate POSTGRES_EPOCH = + LocalDate.EPOCH.with(JULIAN_DAY, POSTGRES_EPOCH_JDATE); + + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * The argument represents days since + * {@link #POSTGRES_EPOCH POSTGRES_EPOCH}, unless it is one of + * the special values {@link #DATEVAL_NOBEGIN DATEVAL_NOBEGIN} or + * {@link #DATEVAL_NOEND DATEVAL_NOEND}. + *

+ * When constructing a representation that lacks notions of positive or + * negative "infinity", one option is to simply map the above special + * values no differently than ordinary ones, and remember the two + * resulting representations as the "infinite" ones. If that is done + * without wraparound, the resulting "-infinity" value will precede all + * other PostgreSQL-representable dates and the resulting "+infinity" + * will follow them. + *

+ * The older {@code java.util.Date} cannot represent those values + * without wraparound; the two resulting values can still be saved as + * representing -infinity and +infinity, but will not have the expected + * ordering with respect to other values. They will both be quite far + * from the present. + */ + T construct(int daysSincePostgresEpoch); + + /** + * A reference implementation that maps to {@link LocalDate LocalDate}. + *

+ * The PostgreSQL "-infinity" and "+infinity" values are mapped to + * {@code LocalDate} instances matching (by {@code equals}) the special + * instances {@code NOBEGIN} and {@code NOEND} here, respectively. + */ + static class AsLocalDate implements Date + { + private AsLocalDate() // I am a singleton + { + } + + public static final AsLocalDate INSTANCE = new AsLocalDate(); + + /** + * {@code LocalDate} representing PostgreSQL's "infinitely early" + * date. + */ + public static final LocalDate NOBEGIN = + INSTANCE.construct(DATEVAL_NOBEGIN); + + /** + * {@code LocalDate} representing PostgreSQL's "infinitely late" + * date. + */ + public static final LocalDate NOEND = + INSTANCE.construct(DATEVAL_NOEND); + + @Override + public LocalDate construct(int daysSincePostgresEpoch) + { + return POSTGRES_EPOCH.plusDays(daysSincePostgresEpoch); + } + + public T store(LocalDate d, Date f) throws SQLException + { + if ( NOBEGIN.isAfter(d) || d.isAfter(NOEND) ) + throw new SQLDataException(String.format( + "date out of range: \"%s\"", d), "22008"); + + return f.construct((int)POSTGRES_EPOCH.until(d, DAYS)); + } + } + } + + /** + * The {@code TIME} type's PostgreSQL semantics: microseconds since + * midnight. + */ + @FunctionalInterface + public interface Time extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * The argument represents microseconds since midnight, nonnegative + * and not exceeding {@code USECS_PER_DAY}. + *

+ * PostgreSQL does allow the value to exactly equal + * {@code USECS_PER_DAY}. 24:00:00 is considered a valid value. That + * may need extra attention if the representation to be constructed + * doesn't allow that. + */ + T construct(long microsecondsSinceMidnight); + + /** + * Functional interface to obtain information from the PostgreSQL type + * modifier applied to the type. + */ + @FunctionalInterface + interface Modifier + { + /** + * Returns a {@code Time} function possibly tailored ("curried") + * with the values from a PostgreSQL type modifier on the type. + *

+ * The precision indicates the number of seconds digits desired + * to the right of the decimal point, and must be positive and + * no greater than {@code MAX_TIME_PRECISION}. + */ + Time modify(OptionalInt precision); + } + + /** + * A reference implementation that maps to {@link LocalTime LocalTime}. + *

+ * While PostgreSQL allows 24:00:00 as a valid time, {@code LocalTime} + * maxes out at the preceding nanosecond. That is still a value that + * can be distinguished, because PostgreSQL's time resolution is only + * to microseconds, so the PostgreSQL 24:00:00 value will be mapped + * to that. + *

+ * In the other direction, nanoseconds will be rounded to microseconds, + * so any value within the half-microsecond preceding {@code HOUR24} + * will become the PostgreSQL 24:00:00 value. + */ + static class AsLocalTime implements Time + { + private AsLocalTime() // I am a singleton + { + } + + public static final AsLocalTime INSTANCE = new AsLocalTime(); + + /** + * {@code LocalTime} representing the 24:00:00 time that PostgreSQL + * accepts but {@code LocalTime} does not. + *

+ * This {@code LocalTime} represents the immediately preceding + * nanosecond. That is still distinguishable from any other + * PostgreSQL time, because those have only microsecond + * resolution. + */ + public static final LocalTime HOUR24 = + LocalTime.ofNanoOfDay(1000L * USECS_PER_DAY - 1L); + + @Override + public LocalTime construct(long microsecondsSinceMidnight) + { + if ( USECS_PER_DAY == microsecondsSinceMidnight ) + return HOUR24; + + return LocalTime.ofNanoOfDay(1000L * microsecondsSinceMidnight); + } + + public T store(LocalTime t, Time f) + { + long nanos = t.toNanoOfDay(); + + return f.construct((500L + nanos) / 1000L); + } + } + } + + /** + * The {@code TIMETZ} type's PostgreSQL semantics: microseconds since + * midnight, accompanied by a time zone offset expressed in seconds. + */ + @FunctionalInterface + public interface TimeTZ extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * The first argument represents microseconds since midnight, + * nonnegative and not exceeding {@code USECS_PER_DAY}, and + * the second is a time zone offset expressed in seconds, positive + * for locations west of the prime meridian. + *

+ * It should be noted that other common conventions, such as ISO 8601 + * and {@code java.time.ZoneOffset}, use positive offsets for locations + * east of the prime meridian, requiring a sign flip. + *

+ * Also noteworthy, as with {@link Time Time}, is that the first + * argument may exactly equal {@code USECS_PER_DAY}; 24:00:00 + * is a valid value to PostgreSQL. That may need extra attention if + * the representation to be constructed doesn't allow that. + * @param microsecondsSinceMidnight the time of day, in the zone + * indicated by the second argument + * @param secondsWestOfPrimeMeridian note that the sign of this time + * zone offset will be the opposite of that used in other common systems + * using positive values for offsets east of the prime meridian. + */ + T construct( + long microsecondsSinceMidnight, int secondsWestOfPrimeMeridian); + + /** + * Functional interface to obtain information from the PostgreSQL type + * modifier applied to the type. + */ + @FunctionalInterface + interface Modifier + { + /** + * Returns a {@code TimeTZ} function possibly tailored ("curried") + * with the values from a PostgreSQL type modifier on the type. + *

+ * The precision indicates the number of seconds digits desired + * to the right of the decimal point, and must be positive and + * no greater than {@code MAX_TIME_PRECISION}. + */ + TimeTZ modify(OptionalInt precision); + } + + /** + * A reference implementation that maps to + * {@link OffsetTime OffsetTime}. + *

+ * While PostgreSQL allows 24:00:00 as a valid time, Java's rules + * max out at the preceding nanosecond. That is still a value that + * can be distinguished, because PostgreSQL's time resolution is only + * to microseconds, so the PostgreSQL 24:00:00 value will be mapped + * to a value whose {@code LocalTime} component matches (with + * {@code equals}) {@link Time.AsLocalTime#HOUR24 AsLocalTime.HOUR24}, + * which is really one nanosecond shy of 24 hours. + *

+ * In the other direction, nanoseconds will be rounded to microseconds, + * so any value within the half-microsecond preceding {@code HOUR24} + * will become the PostgreSQL 24:00:00 value. + */ + static class AsOffsetTime implements TimeTZ + { + private AsOffsetTime() // I am a singleton + { + } + + public static final AsOffsetTime INSTANCE = new AsOffsetTime(); + + @Override + public OffsetTime construct( + long microsecondsSinceMidnight, int secondsWestOfPrimeMeridian) + { + ZoneOffset offset = + ZoneOffset.ofTotalSeconds( - secondsWestOfPrimeMeridian); + + LocalTime local = Time.AsLocalTime.INSTANCE + .construct(microsecondsSinceMidnight); + + return OffsetTime.of(local, offset); + } + + public T store(OffsetTime t, TimeTZ f) + { + int secondsWest = - t.getOffset().getTotalSeconds(); + + LocalTime local = t.toLocalTime(); + + return Time.AsLocalTime.INSTANCE + .store(local, micros -> f.construct(micros, secondsWest)); + } + } + } + + /** + * The {@code TIMESTAMP} type's PostgreSQL semantics: microseconds since + * midnight of the PostgreSQL epoch, without an assumed time zone. + */ + @FunctionalInterface + public interface Timestamp extends Contract.Scalar + { + /** + * The PostgreSQL "epoch" as a {@code java.time.LocalDateTime}. + */ + LocalDateTime POSTGRES_EPOCH = Date.POSTGRES_EPOCH.atStartOfDay(); + + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * The argument represents microseconds since midnight on + * {@link #POSTGRES_EPOCH POSTGRES_EPOCH}. + *

+ * Because no particular time zone is understood to apply, the exact + * corresponding point on a standard timeline cannot be identified, + * absent outside information. It is typically used to represent + * a timestamp in the local zone, whatever that is. + *

+ * The argument represents microseconds since + * {@link #POSTGRES_EPOCH POSTGRES_EPOCH}, unless it is one of + * the special values {@link #DT_NOBEGIN DT_NOBEGIN} or + * {@link #DT_NOEND DT_NOEND}. + *

+ * When constructing a representation that lacks notions of positive or + * negative "infinity", one option is to simply map the above special + * values no differently than ordinary ones, and remember the two + * resulting representations as the "infinite" ones. If that is done + * without wraparound, the resulting "-infinity" value will precede all + * other PostgreSQL-representable dates and the resulting "+infinity" + * will follow them. + *

+ * The older {@code java.util.Date} cannot represent those values + * without wraparound; the two resulting values can still be saved as + * representing -infinity and +infinity, but will not have the expected + * ordering with respect to other values. They will both be quite far + * from the present. + */ + T construct(long microsecondsSincePostgresEpoch); + + /** + * Functional interface to obtain information from the PostgreSQL type + * modifier applied to the type. + */ + @FunctionalInterface + interface Modifier + { + /** + * Returns a {@code Timestamp} function possibly tailored + * ("curried") with the values from a PostgreSQL type modifier + * on the type. + *

+ * The precision indicates the number of seconds digits desired + * to the right of the decimal point, and must be positive and + * no greater than {@code MAX_TIMESTAMP_PRECISION}. + */ + Timestamp modify(OptionalInt precision); + } + + /** + * A reference implementation that maps to + * {@link LocalDateTime LocalDateTime}. + *

+ * The PostgreSQL "-infinity" and "+infinity" values are mapped to + * {@code LocalDateTime} instances matching (by {@code equals}) + * the special instances {@code NOBEGIN} and {@code NOEND} here, + * respectively. + */ + static class AsLocalDateTime implements Timestamp + { + private AsLocalDateTime() // I am a singleton + { + } + + public static final AsLocalDateTime INSTANCE = + new AsLocalDateTime(); + + /** + * {@code LocalDateTime} representing PostgreSQL's "infinitely + * early" timestamp. + */ + public static final LocalDateTime NOBEGIN = + INSTANCE.construct(DT_NOBEGIN); + + /** + * {@code LocalDateTime} representing PostgreSQL's "infinitely + * late" timestamp. + */ + public static final LocalDateTime NOEND = + INSTANCE.construct(DT_NOEND); + + @Override + public LocalDateTime construct(long microsecondsSincePostgresEpoch) + { + return + POSTGRES_EPOCH.plus(microsecondsSincePostgresEpoch, MICROS); + } + + public T store(LocalDateTime d, Timestamp f) + throws SQLException + { + try + { + return f.construct(POSTGRES_EPOCH.until(d, MICROS)); + } + catch ( ArithmeticException e ) + { + throw new SQLDataException(String.format( + "timestamp out of range: \"%s\"", d), "22008", e); + } + } + } + } + + /** + * The {@code TIMESTAMPTZ} type's PostgreSQL semantics: microseconds since + * midnight UTC of the PostgreSQL epoch. + */ + @FunctionalInterface + public interface TimestampTZ extends Contract.Scalar + { + /** + * The PostgreSQL "epoch" as a {@code java.time.OffsetDateTime}. + */ + OffsetDateTime POSTGRES_EPOCH = + OffsetDateTime.of(Timestamp.POSTGRES_EPOCH, UTC); + + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * The argument represents microseconds since midnight UTC on + * {@link #POSTGRES_EPOCH POSTGRES_EPOCH}. + *

+ * Given any desired local time zone, conversion to/from this value + * is possible if the rules for that time zone as of the represented + * date are available. + *

+ * The argument represents microseconds since + * {@link #POSTGRES_EPOCH POSTGRES_EPOCH}, unless it is one of + * the special values {@link #DT_NOBEGIN DT_NOBEGIN} or + * {@link #DT_NOEND DT_NOEND}. + *

+ * When constructing a representation that lacks notions of positive or + * negative "infinity", one option is to simply map the above special + * values no differently than ordinary ones, and remember the two + * resulting representations as the "infinite" ones. If that is done + * without wraparound, the resulting "-infinity" value will precede all + * other PostgreSQL-representable dates and the resulting "+infinity" + * will follow them. + *

+ * The older {@code java.util.Date} cannot represent those values + * without wraparound; the two resulting values can still be saved as + * representing -infinity and +infinity, but will not have the expected + * ordering with respect to other values. They will both be quite far + * from the present. + */ + T construct(long microsecondsSincePostgresEpochUTC); + + /** + * Functional interface to obtain information from the PostgreSQL type + * modifier applied to the type. + */ + @FunctionalInterface + interface Modifier + { + /** + * Returns a {@code TimestampTZ} function possibly tailored + * ("curried") with the values from a PostgreSQL type modifier + * on the type. + *

+ * The precision indicates the number of seconds digits desired + * to the right of the decimal point, and must be positive and + * no greater than {@code MAX_TIMESTAMP_PRECISION}. + */ + TimestampTZ modify(OptionalInt precision); + } + + /** + * A reference implementation that maps to + * {@link OffsetDateTime OffsetDateTime}. + *

+ * A value from PostgreSQL is always understood to be at UTC, and + * will be mapped always to an {@code OffsetDateTime} with UTC as + * its offset. + *

+ * A value from Java is adjusted by its offset so that PostgreSQL will + * always be passed {@code microsecondsSincePostgresEpochUTC}. + *

+ * The PostgreSQL "-infinity" and "+infinity" values are mapped to + * instances whose corresponding {@code LocalDateTime} at UTC will match + * (by {@code equals}) the constants {@code NOBEGIN} and {@code NOEND} + * of {@code AsLocalDateTime}, respectively. + */ + static class AsOffsetDateTime implements TimestampTZ + { + private AsOffsetDateTime() // I am a singleton + { + } + + public static final AsOffsetDateTime INSTANCE = + new AsOffsetDateTime(); + + @Override + public OffsetDateTime construct(long microsecondsSincePostgresEpoch) + { + return + POSTGRES_EPOCH.plus(microsecondsSincePostgresEpoch, MICROS); + } + + public T store(OffsetDateTime d, TimestampTZ f) + throws SQLException + { + try + { + return f.construct(POSTGRES_EPOCH.until(d, MICROS)); + } + catch ( ArithmeticException e ) + { + throw new SQLDataException(String.format( + "timestamp out of range: \"%s\"", d), "22008", e); + } + } + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Geometric.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Geometric.java new file mode 100644 index 000000000..a07052d6b --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Geometric.java @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import org.postgresql.pljava.Adapter.Contract; +import org.postgresql.pljava.Adapter.Dispenser; +import org.postgresql.pljava.Adapter.PullDispenser; + +/** + * Container for abstract-type functional interfaces in PostgreSQL's + * {@code GEOMETRIC} type category. 
+ */ +public interface Geometric +{ + /** + * The {@code POINT} type's PostgreSQL semantics: a pair of {@code float8} + * coordinates. + */ + @FunctionalInterface + public interface Point extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + */ + T construct(double x, double y); + } + + /** + * The {@code LSEG} type's PostgreSQL semantics: two endpoints. + * @param the type returned by the constructor + * @param internal parameter that consumers of this interface should + * wildcard; an implementor may bound this parameter to get stricter type + * checking of the {@code Dispenser} uses within the implementing body. + */ + @FunctionalInterface + public interface LSeg extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + * @param endpoints a dispenser that will dispense a {@code Point} for + * index 0 and index 1. + */ + T construct(PullDispenser> endpoints); + } + + /** + * The {@code PATH} type's PostgreSQL semantics: vertex points and whether + * closed. + * @param the type returned by the constructor + * @param internal parameter that consumers of this interface should + * wildcard; an implementor may bound this parameter to get stricter type + * checking of the {@code Dispenser} uses within the implementing body. + */ + @FunctionalInterface + public interface Path extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + * @param nPoints the number of points on the path + * @param closed whether the path should be understood to include + * a segment joining the last point to the first one. + * @param points a dispenser that will dispense a {@code Point} for + * each index 0 through nPoint - 1. 
+ */ + T construct( + int nPoints, boolean closed, PullDispenser> points); + } + + /** + * The {@code LINE} type's PostgreSQL semantics: coefficients of its + * general equation Ax+By+C=0. + */ + @FunctionalInterface + public interface Line extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + */ + T construct(double A, double B, double C); + } + + /** + * The {@code BOX} type's PostgreSQL semantics: two corner points. + * @param the type returned by the constructor + * @param internal parameter that consumers of this interface should + * wildcard; an implementor may bound this parameter to get stricter type + * checking of the {@code Dispenser} uses within the implementing body. + */ + @FunctionalInterface + public interface Box extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * As stored, the corner point at index 0 is never below or to the left + * of that at index 1. This may be achieved by permuting the points + * or their coordinates obtained as input, in any way that preserves + * the box. + * @param corners a dispenser that will dispense a {@code Point} for + * index 0 and at index 1. + */ + T construct(PullDispenser> corners); + } + + /** + * The {@code POLYGON} type's PostgreSQL semantics: vertex points and + * a bounding box. + * @param the type returned by the constructor + * @param internal parameter that consumers of this interface should + * wildcard; an implementor may bound this parameter to get stricter type + * checking of the boundingBox dispenser used within + * the implementing body. + * @param internal parameter that consumers of this interface should + * wildcard; an implementor may bound this parameter to get stricter type + * checking of the vertices dispenser used within + * the implementing body. + */ + @FunctionalInterface + public interface Polygon extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + * @param nVertices the number of vertices in the polygon + * @param boundingBox a dispenser from which the bounding box may be + * obtained. + * @param vertices a dispenser from which a vertex {@code Point} may be + * obtained for each index 0 through nVertices - 1. + */ + T construct( + int nVertices, Dispenser> boundingBox, + PullDispenser> vertices); + } + + /** + * The {@code CIRCLE} type's PostgreSQL semantics: center point and radius. + * @param the type returned by the constructor + * @param internal parameter that consumers of this interface should + * wildcard; an implementor may bound this parameter to get stricter type + * checking of the {@code Dispenser} uses within the implementing body. 
+ */ + @FunctionalInterface + public interface Circle extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + */ + T construct(Dispenser> center, double radius); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Internal.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Internal.java new file mode 100644 index 000000000..49e344471 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Internal.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import org.postgresql.pljava.Adapter.Contract; + +/** + * Container for abstract-type functional interfaces, not quite exactly + * corresponding to PostgreSQL's {@code INTERNAL} category; there are some + * fairly "internal" types that ended up in the {@code USER} category too, + * for whatever reason. + */ +public interface Internal +{ + /** + * The {@code tid} type's PostgreSQL semantics: a block ID and + * a row index within that block. + */ + @FunctionalInterface + public interface Tid extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. 
+ * @param blockId (treat as unsigned) identifies the block in a table + * containing the target row + * @param offsetNumber (treat as unsigned) the index of the target row + * within the identified block + */ + T construct(int blockId, short offsetNumber); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Money.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Money.java new file mode 100644 index 000000000..d4f35ef19 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Money.java @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import java.math.BigDecimal; // javadoc + +import java.text.NumberFormat; // javadoc + +import java.util.Currency; // javadoc +import java.util.Locale; // javadoc + +import org.postgresql.pljava.Adapter.Contract; + +/** + * The {@code MONEY} type's PostgreSQL semantics: an integer value, whose + * scaling, display format, and currency are all determined by a + * user-settable configuration setting. + *

+ * This type is a strange duck in PostgreSQL. It is stored + * as a (64 bit) integer, and must have a scaling applied on input and + * output to the appropriate number of decimal places. + *

+ * The appropriate scaling, the symbols for decimal point and grouping + * separators, how the sign is shown, and even what currency it + * represents and the currency symbol to use, are all determined + * from the locale specified by the {@code lc_monetary} configuration + * setting, which can be changed within any session with no special + * privilege at any time. That may make {@code MONEY} the only data type + * in PostgreSQL where a person can use a single {@code SET} command to + * instantly change what an entire table of data means. + *

+ * For example, this little catalog of products: + *

+ * => SELECT * FROM products;
+ *  product |       price
+ * ---------+--------------------
+ *  widget  |             $19.00
+ *  tokamak | $19,000,000,000.00
+ *
+ *

+ * can be instantly marked down by about 12 percent (at the exchange + * rates looked up at this writing): + *

+ * => SET lc_monetary TO 'ja_JP';
+ * SET
+ * => SELECT * FROM products;
+ *  product |        price
+ * ---------+---------------------
+ *  widget  |             ￥1,900
+ *  tokamak | ￥1,900,000,000,000
+ *
+ *

+ * or marked up by roughly the same amount: + *

+ * => SET lc_monetary TO 'de_DE@euro';
+ * SET
+ * => SELECT * FROM products;
+ *  product |        price
+ * ---------+---------------------
+ *  widget  |             19,00 €
+ *  tokamak | 19.000.000.000,00 €
+ *
+ *

+ * or marked up even further (as of this writing, 26%): + *

+ * => SET lc_monetary TO 'en_GB';
+ * SET
+ * => SELECT * FROM products;
+ *  product |       price
+ * ---------+--------------------
+ *  widget  |             £19.00
+ *  tokamak | £19,000,000,000.00
+ *
+ *

+ * Obtaining the locale information in Java + *

+ * Before the integer value provided here can be correctly scaled or + * interpreted, the locale-dependent information must be obtained. + * In Java, that can be done in six steps: + *

    + *
  1. Obtain the string value of PostgreSQL's {@code lc_monetary} + * configuration setting. + *
  2. Let's not talk about step 2 just yet. + *
  3. Obtain a {@code Locale} object by passing the BCP 47 tag to + * {@link Locale#forLanguageTag Locale.forLanguageTag}. + *
  4. Pass the {@code Locale} object to + * {@link NumberFormat#getCurrencyInstance(Locale) + NumberFormat.getCurrencyInstance}. + *
  5. From that, obtain an actual instance of {@code Currency} with + * {@link NumberFormat#getCurrency NumberFormat.getCurrency}. + *
  6. Obtain the correct power of ten for scaling from + * {@link Currency#getDefaultFractionDigits + Currency.getDefaultFractionDigits}. + *
+ *

+ * The {@code NumberFormat} obtained in step 4 knows all the appropriate + * formatting details, but will not automatically scale the integer + * value here by the proper power of ten. That must be done explicitly, + * and to avoid compromising the precision objectives of the + * {@code MONEY} type, should be done with something like a + * {@link BigDecimal BigDecimal}. If fmt was obtained + * in step 4 above and scale is the value from step 6: + *

+ * BigDecimal bd =
+ *     BigDecimal.valueOf(scaledToInteger).movePointLeft(scale);
+ * String s = fmt.format(bd);
+ *
+ *

+ * would produce the correctly-formatted value, where + * scaledToInteger is the parameter supplied to this interface + * method. + *

+ * If the format is not needed, the scale can be obtained in fewer steps + * by passing the {@code Locale} from step 3 directly to + * {@link Currency#getInstance(Locale) Currency.getInstance}. + * That would be enough to build a simple reference implementation for + * this data type that would return a {@code BigDecimal} with its point + * moved left by the scale. + *

+ * Now let's talk about step 2. + *

+ * Java's locale support is based on BCP 47, a format for identifiers + * standardized by + * IETF to ensure that they are reliable and specific. + *

+ * The string obtained from the {@code lc_monetary} setting in step 1 + * above is, most often, a string that makes sense to the underlying + * operating system's C library, using some syntax that predated BCP 47, + * and likely demonstrates all of the problems BCP 47 was created to + * overcome. + *

+ * From a first glance at a few simple examples, it can appear that + * replacing some underscores with hyphens could turn some simple + * OS-library strings into BCP 47 tags, but that is far from the general + * case, which is full of nonobvious rules, special cases, and + * grandfather clauses. + *

+ * A C library, {@code liblangtag}, is available to perform exactly that + * mapping, and weighs in at about two and a half megabytes. The library + * might be present on the system where PostgreSQL is running, in which + * case it could be used in step 2, at the cost of a native call. + *

+ * If PostgreSQL was built with ICU, a native method could accomplish + * the same (as nearly as practical) thing by calling + * {@code uloc_canonicalize} followed by {@code uloc_toLanguageTag}; or, + * if the ICU4J Java library is available, + * {@code ULocale.createCanonical}could be used to the same effect. + *

+ * It might be simplest to just use a native call to obtain the + * scaling and other needed details from the underlying operating system + * library. + *

+ * Because of step 2's complexity, PL/Java does not here supply the + * simple reference implementation to {@code BigDecimal} proposed above. + */ +@FunctionalInterface +public interface Money extends Contract.Scalar +{ + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * It might be necessary to extend this interface with extra parameters + * (or to use the {@code Modifier} mechanism) to receive the needed + * scaling and currency details, and require the corresponding + * {@code Adapter} (which could no longer be pure Java) to make + * the needed native calls to obtain those. + * @param scaledToInteger integer value that must be scaled according + * to the setting of the lc_monetary configuration setting, + * and represents a value in the currency also determined by that + * setting. + */ + T construct(long scaledToInteger); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Network.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Network.java new file mode 100644 index 000000000..9bfb2e542 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Network.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import java.net.StandardProtocolFamily; + +import org.postgresql.pljava.Adapter.Contract; + +/** + * Container for abstract-type functional interfaces in PostgreSQL's + * {@code NETWORK} type category (and MAC addresses, which, for arcane reasons, + * are not in that category). + */ +public interface Network +{ + /** + * The {@code INET} and {@code CIDR} types' PostgreSQL semantics: the + * family ({@code INET} or {@code INET6}), the number of network prefix + * bits, and the address bytes in network byte order. + */ + @FunctionalInterface + public interface Inet extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. 
+ * @param addressFamily INET or INET6 + * @param networkPrefixBits nonnegative, not greater than 32 for INET + * or 128 for INET6 (either maximum value indicates the address is for + * a single host rather than a network) + * @param networkOrderAddress the address bytes in network order. When + * the type is CIDR, only the leftmost networkPrefixBits bits are + * allowed to be nonzero. The array does not alias any internal storage + * and may be used as desired. + */ + T construct( + StandardProtocolFamily addressFamily, int networkPrefixBits, + byte[] networkOrderAddress); + } + + /** + * The {@code macaddr} and {@code macaddr8} types' PostgreSQL semantics: + * a byte array (6 or 8 bytes, respectively)., of which byte 0 is the one + * appearing first in the text representation (and stored in the member + * named a of the C struct). + */ + @FunctionalInterface + public interface MAC extends Contract.Scalar + { + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + * @param address array of 6 (macaddr) or 8 (macaddr8) bytes, of which + * byte 0 is the one appearing first in the text representation (and + * stored in the member named a of the C struct). The array + * does not alias any internal storage and may be used as desired. + */ + T construct(byte[] address); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Numeric.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Numeric.java new file mode 100644 index 000000000..4abaa6eaf --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Numeric.java @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import static java.lang.Math.multiplyExact; + +import java.math.BigDecimal; + +import java.sql.SQLException; +import java.sql.SQLDataException; + +import org.postgresql.pljava.Adapter.Contract; + +/** + * The {@code NUMERIC} type's PostgreSQL semantics: a sign (or indication + * that the value is NaN, + infinity, or - infinity), a display scale, + * a weight, and zero or more base-ten-thousand digits. + *

+ * This data type can have a type modifier that specifies a maximum + * precision (total number of base-ten digits to retain) and a maximum scale + * (how many of those base-ten digits are right of the decimal point). + *

+ * A curious feature of the type is that, when a type modifier is specified, + * the value becomes "anchored" to the decimal point: all of its decimal + * digits must be within precision places of the decimal point, + * or an error is reported. This rules out the kind of values that can crop + * up in physics, for example, where there might be ten digits of precision + * but those are twenty places away from the decimal point. This limitation + * apparently follows from the ISO SQL definitions of the precision and + * scale. + *

+ * However, when PostgreSQL {@code NUMERIC} is used with no type modifier, + * such values are not rejected, and are stored efficiently, just as you + * would expect, keeping only the digits that are needed and adjusting + * weight for the distance to the decimal point. + *

+ * In mapping to and from a Java representation, extra care may be needed + * if that capability is to be preserved. + */ +@FunctionalInterface +public interface Numeric extends Contract.Scalar +{ + /** + * The maximum precision that may be specified in a {@code numeric} type + * modifier. + *

+ * Without a modifier, the type is subject only to its implementation + * limits, which are much larger. + */ + int NUMERIC_MAX_PRECISION = 1000; + + /** + * The minimum 'scale' that may be specified in a {@code numeric} type + * modifier in PostgreSQL 15 or later. + *

+ * Negative scale indicates rounding left of the decimal point. A scale of + * -1000 indicates rounding to a multiple of 101000. + *

+ * Prior to PostgreSQL 15, a type modifier did not allow a negative + * scale. + *

+ * Without a modifier, the type is subject only to its implementation + * limits. + */ + int NUMERIC_MIN_SCALE = -1000; + + /** + * The maximum 'scale' that may be specified in a {@code numeric} type + * modifier in PostgreSQL 15 or later. + *

+ * When scale is positive, the digits string represents a value smaller by + * the indicated power of ten. When scale exceeds precision, the digits + * string represents digits that appear following (scale - precision) zeros + * to the right of the decimal point. + *

+ * Prior to PostgreSQL 15, a type modifier did not allow a scale greater + * than the specified precision. + *

+ * Without a modifier, the type is subject only to its implementation + * limits. + */ + int NUMERIC_MAX_SCALE = 1000; + + /** + * The base of the 'digit' elements supplied by PostgreSQL. + *

+ * This is also built into the parameter name base10000Digits and + * is highly unlikely to change; a comment in the PostgreSQL code since 2015 + * confirms "values of {@code NBASE} other than 10000 are considered of + * historical interest only and are no longer supported in any sense". + */ + int NBASE = 10000; + + /** + * Decimal digits per {@code NBASE} digit. + */ + int DEC_DIGITS = 4; + + /** + * Label to distinguish positive, negative, and three kinds of special + * values. + */ + enum Kind { POSITIVE, NEGATIVE, NAN, POSINFINITY, NEGINFINITY } + + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * A note about displayScale: when positive, it is information, + * stored with the PostgreSQL value, that conveys how far (right of the + * units place) the least significant decimal digit of the intended value + * falls. + *

+ * An apparentScale can also be computed: + *

+	 *  apparentScale = (1 + weight - base10000Digits.length) * (- DEC_DIGITS)
+	 *
+ * This computation has a simple meaning, and gives the distance, right of + * the units place, of the least-significant decimal digit in the stored + * representation. When negative, of course, it means that least stored + * digit falls left of the units place. + *

+ * Because of the {@code DEC_DIGITS} factor, apparentScale + * computed this way will always be a multiple of four, the next such (in + * the direction of more significant digits) from the position of the + * actual least significant digit in the value. So apparentScale + * may exceed displayScale by as much as three, and, if so, + * displayScale should be used in preference, to avoid + * overstating the value's significant figures. + *

+ * Likewise, if displayScale is positive, it should be used even + * if it exceeds apparentScale. In that case, it conveys that + * PostgreSQL knows additional digits are significant, even though they were + * zero and it did not store them. + *

+ * However, the situation when displayScale is zero is less + * clear-cut, because PostgreSQL simply disallows it ever to be negative. + * This clamping of displayScale loses information, such that a + * value with displayScale zero and apparentScale + * negative may represent any of: + *

    + *
  • A limited-precision value with non-significant trailing zeros (from + * -apparentScale to as many as -apparentScale+3 of + * them)
  • + *
  • A precise integer, all of whose -apparentScale non-stored + * significant digits just happened to be zeros
  • + *
  • or anything in between.
  • + *
+ *

+ * That these cases can't be distinguished is inherent in PostgreSQL's + * representation of the type, and any implementation of this interface will + * need to make and document a choice of how to proceed. If the choice is + * to rely on apparentScale, then the fact that it is a multiple + * of four and may overstate, by up to three, the number of significant + * digits (as known, perhaps, to a human who assigned the value) has to be + * lived with; when displayScale is clamped to zero there simply + * isn't enough information to do better. + *

+ * For example, consider this adapter applied to the result of: + *

+	 * SELECT 6.62607015e-34 AS planck, 6.02214076e23 AS avogadro;
+	 *
+ *

+ * Planck's constant (a small number defined with nine significant places) + * will be presented with displayScale=42, weight=-9, + * and base10000Digits=[662, 6070, 1500]. + * Because apparentScale works out to 44 (placing the least + * stored digit 44 places right of the decimal point, a multiple of 4) but + * displayScale is only 42, it is clear that the two trailing + * zeroes in the last element are non-significant, and the value has not + * eleven but only nine significant figures. + *

+ * In contrast, Avogadro's number (a large one, defined also with nine + * significant places) will arrive with weight=5 and + * base10000Digits=[6022, 1407, 6000], but + * displayScale will not be -15; it is clamped to zero instead. + * If an implementation of this contract chooses to compute + * apparentScale, that will be -12 (the next larger multiple of + * four) and the value will seem to have gained three extra significant + * figures. On the other hand, in an implementation that takes the + * clamped-to-zero displayScale at face value, the number will + * seem to have gained fifteen extra significant figures. + * @param kind POSITIVE, NEGATIVE, POSINFINITY, NEGINFINITY, or NAN + * @param displayScale nominal precision, nonnegative; the number of + * base ten digits right of the decimal point. If this exceeds + * the number of right-of-decimal digits determined by the stored value, + * the excess represents a number of trailing decimal zeroes that are + * significant but trimmed from storage. + * @param weight indicates the power of ten thousand which the first + * base ten-thousand digit is taken to represent. If the array + * base10000Digits has length one, and that one digit has the + * value 3, and weight is zero, the value is 3. If + * weight is 1, the value is 30000, and if weight + * is -1, the value is 0.0003. + * @param base10000Digits each array element is a nonnegative value not + * above 9999, representing a single digit of a base-ten-thousand + * number. The element at index zero is the most significant. The caller + * may pass a zero-length array, but may not pass null. The array is + * unshared and may be used as desired. + */ + T construct(Kind kind, int displayScale, int weight, + short[] base10000Digits) throws SQLException; + + /** + * Functional interface to obtain information from the PostgreSQL type + * modifier applied to the type.
+ */ + @FunctionalInterface + interface Modifier + { + /** + * Returns a {@code Numeric} function possibly tailored + * ("curried") with the values from a PostgreSQL type modifier + * on the type. + *

+ * If specified, precision must be at least one and + * not greater than {@code NUMERIC_MAX_PRECISION}, and scale + * must be not less than {@code NUMERIC_MIN_SCALE} nor more than + * {@code NUMERIC_MAX_SCALE}. + * @param specified true if a type modifier was specified, false if + * omitted + * @param precision the maximum number of base-ten digits to be + * retained, counting those on both sides of the decimal point + * @param scale maximum number of base-ten digits to be retained + * to the right of the decimal point. + */ + Numeric modify(boolean specified, int precision, int scale); + } + + /** + * A reference implementation that maps to {@link BigDecimal BigDecimal} + * (but cannot represent all possible values). + *

+ * A Java {@code BigDecimal} cannot represent the not-a-number or positive + * or negative infinity values possible for a PostgreSQL {@code NUMERIC}. + */ + static class AsBigDecimal implements Numeric + { + private AsBigDecimal() // I am a singleton + { + } + + public static final AsBigDecimal INSTANCE = new AsBigDecimal(); + + /** + * Produces a {@link BigDecimal} representation of the {@code NUMERIC} + * value, or throws an exception if the value is not-a-number or + * positive or negative infinity. + *

+ * In resolving the ambiguity when displayScale is zero, + * this implementation constructs a {@code BigDecimal} with significant + * figures inferred from the base10000Digits array's length, + * where decimal digits are grouped in fours, and therefore the + * {@code BigDecimal}'s {@link BigDecimal#scale() scale} method will + * always return a multiple of four in such cases. Therefore, from the + * query + *

+		 * SELECT 6.62607015e-34 AS planck, 6.02214076e23 AS avogadro;
+		 *
+ * this conversion will produce the {@code BigDecimal} 6.62607015E-34 + * for planck ({@code scale} will return 42, as expected), + * but will produce 6.02214076000E+23 for avogadro, showing + * three unexpected trailing zeros; {@code scale()} will not return -15 + * as expected, but the next larger multiple of four, -12. + * @throws SQLException 22000 if the value is NaN or +/- infinity. + */ + @Override + public BigDecimal construct( + Kind kind, int displayScale, int weight, short[] base10000Digits) + throws SQLException + { + switch ( kind ) + { + case NAN: + case POSINFINITY: + case NEGINFINITY: + throw new SQLDataException( + "cannot represent PostgreSQL numeric " + kind + + " as Java BigDecimal", "22000"); + default: + } + + int scale = multiplyExact(weight, - DEC_DIGITS); + + if ( 0 == base10000Digits.length ) + return BigDecimal.valueOf(0L, scale); + + // check that the final value also won't wrap around + multiplyExact(1 + weight - base10000Digits.length, - DEC_DIGITS); + + BigDecimal bd = BigDecimal.valueOf(base10000Digits[0], scale); + + for ( int i = 1 ; i < base10000Digits.length ; ++ i ) + { + scale += DEC_DIGITS; + bd = bd.add(BigDecimal.valueOf(base10000Digits[i], scale)); + } + + /* + * The final value of scale from the loop above is + * (1 + weight - base10000Digits.length) * (- DEC_DIGITS), so + * will always be a multiple of DEC_DIGITS (i.e. 4). It's also + * the scale of the BigDecimal constructed so far, and represents + * the position, right of the decimal point, of the least stored + * digit. Because of that DEC-DIGITS granularity, though, it may + * reflect up to three trailing zeros from the last element of + * base10000Digits that are not really significant. When scale and + * displayScale are positive (the value extends right of the decimal + * point), we can use displayScale to correct the scale of the + * BigDecimal. 
(This 'correction' applies even when displayScale + * is greater than scale; that means PostgreSQL knows even more + * trailing zeros are significant, and simply avoided storing them.) + * + * When scale ends up negative, though (the least stored digit falls + * somewhere left of the units place), and displayScale is zero, + * we get no such help, because PostgreSQL simply clamps that value + * to zero. We are on our own to decide whether we are looking at + * + * a) a value of limited precision, with (- scale) non-significant + * trailing zeros (and possibly up to three more) + * b) a precise integer value, all of whose (- scale) trailing + * digits happen to be zero (figure the odds...) + * c) anything in between. + * + * The Java BigDecimal will believe whatever we tell it and use the + * corresponding amount of memory, so on efficiency as well as + * plausibility grounds, we'll tell it (a). The scale will still be + * that multiple of four, though, so we may still have bestowed + * significance upon up to three trailing zeros, compared to what a + * human who assigned the value might think. That cannot affect + * roundtripping of the value back to PostgreSQL, because indeed the + * corresponding PostgreSQL forms are identical, so PostgreSQL can't + * notice any difference; that's how we got into this mess. + */ + if ( displayScale > 0 || scale > displayScale ) + { + assert displayScale >= 1 + scale - DEC_DIGITS; + bd = bd.setScale(displayScale); + } + + return Kind.POSITIVE == kind ? 
bd : bd.negate(); + } + + public T store(BigDecimal bd, Numeric f) + throws SQLException + { + throw new UnsupportedOperationException( + "no BigDecimal->NUMERIC store for now"); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/Timespan.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/Timespan.java new file mode 100644 index 000000000..862f8319a --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/Timespan.java @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt; + +import java.util.EnumSet; +import static java.util.Collections.unmodifiableSet; +import static java.util.EnumSet.of; +import static java.util.EnumSet.noneOf; +import static java.util.EnumSet.range; +import java.util.OptionalInt; +import java.util.Set; + +import org.postgresql.pljava.Adapter.Contract; + +/* + * For the javadoc: + */ +import java.time.Duration; +import java.time.Period; +import java.time.chrono.ChronoPeriod; +import java.time.temporal.ChronoUnit; +import java.time.temporal.TemporalAmount; + +/** + * Container for abstract-type functional interfaces in PostgreSQL's + * {@code TIMESPAN} type category (which, at present, includes the single + * type {@code INTERVAL}). + */ +public interface Timespan +{ + /** + * The {@code INTERVAL} type's PostgreSQL semantics: separate microseconds, + * days, and months components, independently signed. + *

+ * A type modifier can specify field-presence bits, and precision (number of + * seconds digits to the right of the decimal point). An empty fields set + * indicates that fields were not specified. + *

Infinitely negative or positive intervals

+ *

+ * Starting with PostgreSQL 17, intervals whose three components are + * {@code (Long.MIN_VALUE, Integer.MIN_VALUE, Integer.MIN_VALUE)} or + * {@code (Long.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE)} have the + * semantics of infinitely negative or positive intervals, respectively. + * In PostgreSQL versions before 17, they are simply the most negative or + * positive representable finite intervals. + *

Why no reference implementation?

+ *

+ * The types in the {@link Datetime Datetime} interface come with reference + * implementations returning Java's JSR310 {@code java.time} types. + *

+ * For PostgreSQL {@code INTERVAL}, there are two candidate JSR310 concrete + * types, {@link Period Period} and {@link Duration Duration}, each of which + * would be appropriate for a different subset of PostgreSQL + * {@code INTERVAL} values. + *

+ * {@code Period} is appropriate for the months and days components. + * A {@code Period} treats the length of a day as subject to daylight + * adjustments following time zone rules, as does PostgreSQL. + *

+ * {@code Duration} is suitable for the sub-day components. It also allows + * access to a "day" field, but treats that field, unlike PostgreSQL, as + * having invariant 24-hour width. + *

+ * Both share the superinterface {@link TemporalAmount TemporalAmount}. That + * interface itself is described as "a framework-level interface that should + * not be widely used in application code", recommending instead that new + * concrete types can be created that implement it. + *

+ * PostgreSQL's {@code INTERVAL} could be represented by a concrete type + * that implements {@code TemporalAmount} or, preferably (because its days + * and months components are subject to rules of a chronology), its + * subinterface {@link ChronoPeriod ChronoPeriod}. The most natural such + * implementation would have {@link TemporalAmount#getUnits getUnits} return + * {@link ChronoUnit#MONTHS MONTHS}, {@link ChronoUnit#DAYS DAYS}, and + * {@link ChronoUnit#MICROS MICROS}, except for instances representing the + * infinitely negative or positive intervals and using the unit + * {@link ChronoUnit#FOREVER FOREVER} with a negative or positive value. + *

+ * In the datatype library that comes with the PGJDBC-NG driver, there is + * a class {@code com.impossibl.postgres.api.data.Interval} that does + * implement {@code TemporalAmount} (but not {@code ChronoPeriod}) and + * internally segregates the PostgreSQL {@code INTERVAL} components into + * a {@code Period} and a {@code Duration}. An application with that library + * available could use an implementation of this functional interface that + * would return instances of that class. As of PGJDBC-NG 0.8.9, the class + * does not seem to have a representation for the PostgreSQL 17 infinite + * intervals. Its {@code getUnits} method returns a longer list of units + * than needed to naturally represent the PostgreSQL type. + *

+ * The PGJDBC driver includes the {@code org.postgresql.util.PGInterval} + * class for the same purpose; that one does not derive from any JSR310 + * type. As of PGJDBC 42.7.5, it does not explicitly represent infinite + * intervals, and also has an internal state split into more units than the + * natural representation would require. + *

Related notes from the ISO SQL/XML specification

+ *

+ * SQL/XML specifies how to map SQL {@code INTERVAL} types and values to + * the XML Schema types {@code xs:yearMonthDuration} and + * {@code xs:dayTimeDuration}, which were added in XML Schema 1.1 as + * distinct subtypes of the broader {@code xs:duration} type from XML Schema + * 1.0. That Schema 1.0 supertype has a corresponding class in the standard + * Java library, + * {@link javax.xml.datatype.Duration javax.xml.datatype.Duration}, so + * an implementation of this functional interface to return that type would + * also be easy. It would not, however, represent PostgreSQL 17 infinitely + * negative or positive intervals. + *

+ * These XML Schema types do not perfectly align with the PostgreSQL + * {@code INTERVAL} type, because they group the day with the sub-day + * components and treat it as having invariant width. (The only time zone + * designations supported in XML Schema are fixed offsets, for which no + * daylight rules apply). The XML Schema types allow one overall sign, + * positive or negative, but do not allow the individual components to have + * signs that differ, as PostgreSQL does. + *

+ * Java's JSR310 types can be used with equal convenience in the PostgreSQL + * way (by assigning days to the {@code Period} and the smaller + * components to the {@code Duration}) or in the XML Schema way (by storing + * days in the {@code Duration} along with the smaller + * components), but of course those choices have different implications. + *

+ * A related consideration is, in a scheme like SQL/XML's where the SQL + * {@code INTERVAL} can be mapped to a choice of types, whether that choice + * is made statically (i.e. by looking at the declared type modifier such as + * {@code YEAR TO MONTH} or {@code HOUR TO SECOND} for a column) or + * per-value (by looking at which fields are nonzero in each value + * encountered). + *

+ * The SQL/XML rule is to choose a static mapping at analysis time according + * to the type modifier. {@code YEAR}, {@code MONTH}, or + * {@code YEAR TO MONTH} call for a mapping to {@code xs:yearMonthDuration}, + * while any of the finer modifiers call for mapping to + * {@code xs:dayTimeDuration}, and no mapping is defined for an + * {@code INTERVAL} lacking a type modifier to constrain its fields in one + * of those ways. Again, those specified mappings assume that days are not + * subject to daylight rules, contrary to the behavior of the PostgreSQL + * type. + *

+ * In view of those considerations, there seems to be no single mapping of + * PostgreSQL {@code INTERVAL} to a common Java type that is sufficiently + * free of caveats to stand as a reference implementation. An application + * ought to choose an implementation of this functional interface to create + * whatever representation of an {@code INTERVAL} will suit that + * application's purposes. + */ + @FunctionalInterface + public interface Interval<T> extends Contract.Scalar<T> + { + enum Field + { + YEAR, MONTH, DAY, HOUR, MINUTE, SECOND + } + + Set<Field> YEAR = unmodifiableSet(of(Field.YEAR)); + Set<Field> MONTH = unmodifiableSet(of(Field.MONTH)); + Set<Field> DAY = unmodifiableSet(of(Field.DAY)); + Set<Field> HOUR = unmodifiableSet(of(Field.HOUR)); + Set<Field> MINUTE = unmodifiableSet(of(Field.MINUTE)); + Set<Field> SECOND = unmodifiableSet(of(Field.SECOND)); + + Set<Field> YEAR_TO_MONTH = + unmodifiableSet(range(Field.YEAR, Field.MONTH)); + Set<Field> DAY_TO_HOUR = + unmodifiableSet(range(Field.DAY, Field.HOUR)); + Set<Field> DAY_TO_MINUTE = + unmodifiableSet(range(Field.DAY, Field.MINUTE)); + Set<Field> DAY_TO_SECOND = + unmodifiableSet(range(Field.DAY, Field.SECOND)); + Set<Field> HOUR_TO_MINUTE = + unmodifiableSet(range(Field.HOUR, Field.MINUTE)); + Set<Field> HOUR_TO_SECOND = + unmodifiableSet(range(Field.HOUR, Field.SECOND)); + Set<Field> MINUTE_TO_SECOND = + unmodifiableSet(range(Field.MINUTE, Field.SECOND)); + + Set<Set<Field>> ALLOWED_FIELDS = + Set.of( + unmodifiableSet(noneOf(Field.class)), + YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, + YEAR_TO_MONTH, DAY_TO_HOUR, DAY_TO_MINUTE, DAY_TO_SECOND, + HOUR_TO_MINUTE, HOUR_TO_SECOND, MINUTE_TO_SECOND); + + int MAX_INTERVAL_PRECISION = 6; + + /** + * Constructs a representation T from the components + * of the PostgreSQL data type. + *

+ * PostgreSQL allows the three components to have independent signs. + * They are stored separately because the results of combining them with + * a date or a timestamp cannot be precomputed without knowing the other + * operand. + *

+ * In arithmetic involving an interval and a timestamp, the width of one + * unit in days can depend on the other operand if a timezone + * applies and has daylight savings rules: + *

+		 * SELECT (t + i) - t
+		 * FROM (VALUES (interval '1' DAY)) AS s(i),
+		 * (VALUES (timestamptz '12 mar 2022'), ('13 mar 2022'), ('6 nov 2022')) AS v(t);
+		 * ----------------
+		 *  1 day
+		 *  23:00:00
+		 *  1 day 01:00:00
+		 *
+ *

+ * In arithmetic involving an interval and a date or timestamp, the + * width of one unit in months can depend on the calendar + * month of the other operand, as well as on timezone shifts as for + * days: + *

+		 * SELECT (t + i) - t
+		 * FROM (VALUES (interval '1' MONTH)) AS s(i),
+		 * (VALUES (timestamptz '1 feb 2022'), ('1 mar 2022'), ('1 nov 2022')) AS v(t);
+		 * ------------------
+		 *  28 days
+		 *  30 days 23:00:00
+		 *  30 days 01:00:00
+		 *
+ */ + T construct(long microseconds, int days, int months); + + /** + * Functional interface to obtain information from the PostgreSQL type + * modifier applied to the type. + */ + @FunctionalInterface + interface Modifier + { + /** + * Returns an {@code Interval} function possibly tailored + * ("curried") with the values from a PostgreSQL type modifier + * applied to the type. + *

+ * The notional fields to be present in the interval are indicated + * by fields; the SQL standard defines more than three of + * these, which PostgreSQL combines into the three components + * actually stored. In a valid type modifier, the fields + * set must equal one of the members of {@code ALLOWED_FIELDS}: one + * of the named constants in this interface or the empty set. If it + * is empty, the type modifier does not constrain the fields that + * may be present. In practice, it is the finest field allowed in + * the type modifier that matters; PostgreSQL rounds away portions + * of an interval finer than that, but applies no special treatment + * based on the coarsest field the type modifier mentions. + *

+ * The desired number of seconds digits to the right of the decimal + * point is indicated by precision if present, which must + * be between 0 and {@code MAX_INTERVAL_PRECISION} inclusive. In + * a valid type modifier, when this is specified, fields + * must either include {@code SECONDS}, or be unspecified. + */ + Interval modify(Set fields, OptionalInt precision); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/package-info.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/package-info.java new file mode 100644 index 000000000..d97b70340 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/package-info.java @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +/** + * Package containing functional interfaces that document and present + * PostgreSQL data types abstractly, but clearly enough for faithful mapping. + *

+ * Interfaces in this package are meant to occupy a level between a PL/Java + * {@link Adapter Adapter} (responsible for PostgreSQL internal details that + * properly remain encapsulated) and some intended Java representation class + * (which may encapsulate details of its own). + *

Example

+ *

+ * Suppose an application would like to manipulate + * a PostgreSQL {@code TIME WITH TIME ZONE} in the form of a Java + * {@link OffsetTime OffsetTime} instance. + *

+ * The application selects a PL/Java {@link Adapter Adapter} that handles the + * PostgreSQL {@code TIME WITH TIME ZONE} type and presents it via the + * functional interface {@link Datetime.TimeTZ Datetime.TimeTZ} in this package. + *

+ * The application can instantiate that {@code Adapter} with some implementation + * (possibly just a lambda) of that functional interface, which will construct + * an {@code OffsetTime} instance. That {@code Adapter} instance now maps + * {@code TIME WITH TIME ZONE} to {@code OffsetTime}, as desired. + *

+ * The PostgreSQL internal details are handled by the {@code Adapter}. The + * internal details of {@code OffsetTime} are {@code OffsetTime}'s business. + * In between those two sits the {@link Datetime.TimeTZ Datetime.TimeTZ} + * interface in this package, with its one simple role: it presents the value + * in a clear, documented form as consisting of: + *

    + *
  • microseconds since midnight, and + *
  • a time zone offset in seconds west of the prime meridian + *
+ *

+ * It serves as a contract for the {@code Adapter} and as a clear starting point + * for constructing the wanted Java representation. + *

+ * It is important that the interfaces here serve as documentation as + * well as code, as it turns out that {@code OffsetTime} expects its + * time zone offsets to be positive east of the prime meridian, + * so a sign flip is needed. Interfaces in this package must be + * documented with enough detail to allow a developer to make correct + * use of the exposed values. + *

+ * The division of labor between what is exposed in these interfaces and what + * is encapsulated within {@code Adapter}s calls for a judgment of which + * details are semantically significant. If PostgreSQL somehow changes the + * internal details needed to retrieve a {@code timetz} value, it should be the + * {@code Adapter}'s job to make that transparent. If PostgreSQL ever changes + * the fact that a {@code timetz} is microseconds since midnight with + * seconds-west as a zone offset, that would require versioning the + * corresponding interface here; it is something a developer would need to know. + *

Reference implementations

+ * A few simple reference implementations (including the + * {@code timetz}-as-{@code OffsetTime} used as the example) can also be found + * in this package, and {@code Adapter} instances using them are available, + * so an application would not really have to follow the steps of the example + * to obtain one. + * @author Chapman Flack + */ +package org.postgresql.pljava.adt; + +import java.time.OffsetTime; + +import org.postgresql.pljava.Adapter; diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/AbstractType.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/AbstractType.java new file mode 100644 index 000000000..0feece20c --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/AbstractType.java @@ -0,0 +1,1168 @@ +/* + * Copyright (c) 2020-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt.spi; + +import static java.lang.System.identityHashCode; + +import java.lang.reflect.Array; +import java.lang.reflect.Type; +import java.lang.reflect.GenericArrayType; +import java.lang.reflect.GenericDeclaration; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.TypeVariable; +import java.lang.reflect.WildcardType; + +import static java.util.Arrays.stream; +import static java.util.Collections.addAll; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Set; +import static java.util.Objects.requireNonNull; + +import static java.util.stream.Stream.concat; +import static java.util.stream.Collectors.joining; + +/** + * 
Custom implementations of Java's {@link Type Type} interfaces, with methods + * for a couple useful manipulations. + *

+ * The implementations returned from Java reflection methods are internal, with + * no way to instantiate arbitrary new ones to represent the results of + * computations with them. + *

+ * Note: the implementations here do not override {@code equals} and + * {@code hashCode} inherited from {@code Object}. The JDK internal ones do, + * but not with documented behaviors, so it didn't seem worthwhile to try + * to match them. (The API specifies an {@code equals} behavior only for + * {@code ParameterizedType}, and no corresponding {@code hashCode} even for + * that, so good luck matching it.) Results from methods in this class can + * include new objects (instances of these classes) and original ones + * constructed by Java; don't assume anything sane will happen using + * {@code equals} or {@code hashCode} between them. There is a + * {@code typesEqual} static method defined here to do that job. + */ +public abstract class AbstractType implements Type +{ + enum TypeKind + { + ARRAY(GenericArrayType.class), + PT(ParameterizedType.class), + TV(TypeVariable.class), + WILDCARD(WildcardType.class), + CLASS(Class.class); + + private Class m_class; + + TypeKind(Class cls) + { + m_class = cls; + } + + static TypeKind of(Class cls) + { + for ( TypeKind k : values() ) + if ( k.m_class.isAssignableFrom(cls) ) + return k; + throw new AssertionError("TypeKind nonexhaustive: " + cls); + } + } + + /** + * Compare two Types for equality without relying on their own + * {@code equals} methods. + */ + static boolean typesEqual(Type a, Type b) + { + if ( a == b ) + return true; + + if ( null == a || null == b ) + return false; + + TypeKind ak = TypeKind.of(a.getClass()); + TypeKind bk = TypeKind.of(b.getClass()); + + if ( ak != bk ) + return false; + + switch ( ak ) + { + case ARRAY: + GenericArrayType gaa = (GenericArrayType)a; + GenericArrayType gab = (GenericArrayType)b; + return typesEqual(gaa, gab); + case PT: + ParameterizedType pta = (ParameterizedType)a; + ParameterizedType ptb = (ParameterizedType)b; + if ( ! 
typesEqual(pta.getRawType(), ptb.getRawType()) ) + return false; + Type[] taa = pta.getActualTypeArguments(); + Type[] tab = ptb.getActualTypeArguments(); + if ( taa.length != tab.length ) + return false; + for ( int i = 0; i < taa.length; ++ i ) + if ( ! typesEqual(taa[i], tab[i]) ) + return false; + return true; + case TV: + TypeVariable tva = (TypeVariable)a; + TypeVariable tvb = (TypeVariable)b; + return tva.getGenericDeclaration() == tvb.getGenericDeclaration() + && tva.getName().equals(tvb.getName()); + case WILDCARD: + WildcardType wa = (WildcardType)a; + WildcardType wb = (WildcardType)b; + Type[] ua = wa.getUpperBounds(); + Type[] ub = wb.getUpperBounds(); + Type[] la = wa.getLowerBounds(); + Type[] lb = wb.getLowerBounds(); + if ( ua.length != ub.length || la.length != lb.length ) + return false; + for ( int i = 0; i < ua.length; ++ i ) + if ( ! typesEqual(ua[i], ub[i]) ) + return false; + for ( int i = 0; i < la.length; ++ i ) + if ( ! typesEqual(la[i], lb[i]) ) + return false; + return true; + case CLASS: + return false; // they failed the == test at the very top + } + + return false; // unreachable, but tell that to javac + } + + /** + * Refines some {@code Type}s in by unifying the first of them + * with using. + *

+ * The variadic array of in arguments is returned, modified + * in place. + *

+ * The type using is unified with {@code in[0]} and then used to + * replace {@code in[0]}, while any variable substitutions made in + * the unification are repeated in the remaining in elements. + */ + public static Type[] refine(Type using, Type... in) + { + Map bindings = new HashMap<>(); + unify(bindings, using, in[0]); + + TypeVariable[] vars = new TypeVariable[bindings.size()]; + Type [] args = new Type [bindings.size()]; + + int i = 0; + for ( Map.Entry e : bindings.entrySet() ) + { + vars[i] = e.getKey().get(); + args[i] = e.getValue(); + ++ i; + } + Bindings b = new Bindings(vars, args); + + in[0] = using; + for ( i = 1; i < in.length; ++ i ) + in[i] = substitute(b, in[i]); + + return in; + } + + /** + * A simpleminded unify that assumes one argument is always + * the more-specific one, should resolve type variables found in the other, + * and that this can be done for cases of interest without generating and + * then solving constraints. + */ + static void unify(Map bindings, Type specific, Type general) + { + Type element1; + Type element2; + + while ( null != (element1 = toElementIfArray(specific)) + && null != (element2 = toElementIfArray(general)) ) + { + specific = element1; + general = element2; + } + + if ( general instanceof TypeVariable ) + { + // XXX verify here that specific satisfies the variable's bounds + Type wasBound = + bindings.put(new VKey((TypeVariable)general), specific); + if ( null != wasBound && ! 
typesEqual(specific, wasBound) ) + throw new UnsupportedOperationException( + "unimplemented case in AbstractType.unify: binding again"); + return; + } + + if ( general instanceof ParameterizedType ) + { + ParameterizedType t = (ParameterizedType)general; + Type[] oldActuals = t.getActualTypeArguments(); + Class raw = (Class)t.getRawType(); + Type[] newActuals = specialization(specific, raw); + if ( null != newActuals ) + { + for ( int i = 0; i < oldActuals.length; ++ i ) + unify(bindings, newActuals[i], oldActuals[i]); + return; + } + } + else if ( general instanceof Class ) + { + Class c = (Class)general; + TypeVariable[] formals = c.getTypeParameters(); + Type[] actuals = specialization(specific, c); + if ( null != actuals ) + { + for ( int i = 0; i < formals.length; ++ i ) + unify(bindings, actuals[i], formals[i]); + return; + } + } + + throw new IllegalArgumentException( + "failed to unify " + specific + " with " + general); + } + + /** + * Returns the component type of either a {@code GenericArrayType} or + * an array {@code Class}, otherwise null. + */ + private static Type toElementIfArray(Type possibleArray) + { + if ( possibleArray instanceof GenericArrayType ) + return ((GenericArrayType)possibleArray).getGenericComponentType(); + if ( ! (possibleArray instanceof Class) ) + return null; + return ((Class)possibleArray).getComponentType(); // null if !array + } + + /** + * Needed: test whether sub is a subtype of sup. + *

+ * XXX For the time being, this is nothing but a test of + * erased subtyping, hastily implemented by requiring that + * {@code specialization(sub, erase(sup))} does not return null. + *

+ * This must sooner or later be replaced with an implementation of + * the subtyping rules from Java Language Specification 4.10, taking + * also type parameterization into account. + */ + public static boolean isSubtype(Type sub, Type sup) + { + return null != specialization(sub, erase(sup)); + } + + /** + * Equivalent to {@code specialization(candidate, expected, null)}. + */ + public static Type[] specialization(Type candidate, Class expected) + { + return specialization(candidate, expected, null); + } + + /** + * Test whether the type {@code candidate} is, directly or indirectly, + * a specialization of generic type {@code expected}. + *

+ * For example, the Java type T of a particular adapter A that extends + * {@code Adapter.As} can be retrieved with + * {@code specialization(A.class, As.class)[0]}. + *

+ * More generally, this method can retrieve the generic type information + * from any "super type token", as first proposed by Neal Gafter in 2006, + * where a super type token is generally an instance of an anonymous + * subclass that specializes a certain generic type. Although the idea has + * been often used, the usages have not settled on one agreed name for the + * generic type. This method will work with any of them, by supplying the + * expected generic type itself as the second parameter. For example, a + * super type token {@code foo} derived from Gafter's suggested class + * {@code TypeReference} can be unpacked with + * {@code specialization(foo.getClass(), TypeReference.class)}. + * @param candidate a type to be checked + * @param expected known (normally generic) type to check for + * @param rtype array to receive (if non-null) the corresponding + * (parameterized or raw) type if the result is non-null. + * @return null if candidate does not extend expected, + * otherwise the array of type arguments with which it specializes + * expected + * @throws IllegalArgumentException if passed a Type that is not a + * Class or a ParameterizedType + * @throws NullPointerException if either argument is null + * @throws UnsupportedOperationException if candidate does extend + * expected but does not carry the needed parameter bindings (such as + * when the raw expected Class itself is passed) + */ + public static Type[] specialization( + Type candidate, Class expected, Type[] rtype) + { + Type t = requireNonNull(candidate, "candidate is null"); + requireNonNull(expected, "expected is null"); + boolean superinterfaces = expected.isInterface(); + Class c; + ParameterizedType pt = null; + Bindings latestBindings = null; + boolean ptFound = false; + boolean rawTypeFound = false; + + if ( t instanceof Class ) + { + c = (Class)t; + if ( ! 
expected.isAssignableFrom(c) ) + return null; + if ( expected == c ) + rawTypeFound = true; + else + latestBindings = // trivial, non-null initial value + new Bindings(new TypeVariable[0], new Type[0]); + } + else if ( t instanceof ParameterizedType ) + { + pt = (ParameterizedType)t; + c = (Class)pt.getRawType(); + if ( ! expected.isAssignableFrom(c) ) + return null; + if ( expected == c ) + ptFound = true; + else + latestBindings = new Bindings(latestBindings, pt); + } + else + throw new IllegalArgumentException( + "expected Class or ParameterizedType, got: " + t); + + if ( ! ptFound && ! rawTypeFound ) + { + List pending = new LinkedList<>(); + pending.add(c.getGenericSuperclass()); + if ( superinterfaces ) + addAll(pending, c.getGenericInterfaces()); + + while ( ! pending.isEmpty() ) + { + t = pending.remove(0); + if ( null == t ) + continue; + if ( t instanceof Class ) + { + c = (Class)t; + if ( expected == c ) + { + rawTypeFound = true; + break; + } + if ( ! expected.isAssignableFrom(c) ) + continue; + pending.add(latestBindings); + } + else if ( t instanceof ParameterizedType ) + { + pt = (ParameterizedType)t; + c = (Class)pt.getRawType(); + if ( expected == c ) + { + ptFound = true; + break; + } + if ( ! 
expected.isAssignableFrom(c) ) + continue; + pending.add(new Bindings(latestBindings, pt)); + } + else if ( t instanceof Bindings ) + { + latestBindings = (Bindings)t; + continue; + } + else + throw new AssertionError( + "expected Class or ParameterizedType, got: " + t); + + pending.add(c.getGenericSuperclass()); + if ( superinterfaces ) + addAll(pending, c.getGenericInterfaces()); + } + } + + Type[] actualArgs = null; + + if ( ptFound ) + { + if ( null != latestBindings ) + pt = (ParameterizedType) + AbstractType.substitute(latestBindings, pt); + actualArgs = pt.getActualTypeArguments(); + if ( null != rtype ) + rtype[0] = pt; + } + else if ( rawTypeFound ) + { + actualArgs = new Type[0]; + if ( null != rtype ) + rtype[0] = expected; + } + + if ( null == actualArgs + || actualArgs.length != expected.getTypeParameters().length ) + throw new UnsupportedOperationException( + "failed checking whether " + candidate + + " specializes " + expected); + + return actualArgs; + } + + /** + * Returns the erasure of a type. + *

+ * If t is a {@code Class}, it is returned unchanged. + */ + public static Class erase(Type t) + { + if ( t instanceof Class ) + { + return (Class)t; + } + else if ( t instanceof GenericArrayType ) + { + int dims = 0; + do + { + ++ dims; + GenericArrayType a = (GenericArrayType)t; + t = a.getGenericComponentType(); + } while ( t instanceof GenericArrayType ); + Class c = (Class)erase(t); + // in Java 12+ see TypeDescriptor.ofField.arrayType(int) + return Array.newInstance(c, new int [ dims ]).getClass(); + } + else if ( t instanceof ParameterizedType ) + { + return (Class)((ParameterizedType)t).getRawType(); + } + else if ( t instanceof WildcardType ) + { + throw new UnsupportedOperationException("erase on wildcard type"); + /* + * Probably just resolve all the lower and/or upper bounds, as long + * as b is known to be the right set of bindings for the type that + * contains the member declaration, but I'm not convinced at present + * that wouldn't require more work keeping track of bindings. + */ + } + else if ( t instanceof TypeVariable ) + { + return erase(((TypeVariable)t).getBounds()[0]); + } + else + throw new UnsupportedOperationException( + "erase on unknown Type " + t.getClass()); + } + + /** + * Recursively descend t substituting any occurrence of a type variable + * found in b, returning a new object, or t unchanged if no substitutions + * were made. + *

+ * Currently throws {@code UnsupportedOperationException} if t is + * a wildcard, as that case shouldn't be needed for the analysis of + * class/interface inheritance hierarchies that {@code specialization} + * is concerned with. + *

+ */ + public static Type substitute(Bindings b, Type t) + { + if ( t instanceof GenericArrayType ) + { + GenericArrayType a = (GenericArrayType)t; + Type oc = a.getGenericComponentType(); + Type nc = substitute(b, oc); + if ( nc == oc ) + return t; + return new GenericArray(nc); + } + else if ( t instanceof ParameterizedType ) + { + ParameterizedType p = (ParameterizedType)t; + Type[] as = p.getActualTypeArguments(); + Type oown = p.getOwnerType(); + Type oraw = p.getRawType(); + assert oraw instanceof Class; + + boolean changed = substituted(b, as); + + if ( null != oown ) + { + Type nown = substitute(b, oown); + if ( nown != oown ) + { + oown = nown; + changed = true; + } + } + + if ( changed ) + return new Parameterized(as, oown, oraw); + return t; + } + else if ( t instanceof WildcardType ) + { + WildcardType w = (WildcardType)t; + Type[] lbs = w.getLowerBounds(); + Type[] ubs = w.getUpperBounds(); + + boolean changed = substituted(b, lbs) | substituted(b, ubs); + + if ( changed ) + return new Wildcard(lbs, ubs); + return t; + } + else if ( t instanceof TypeVariable ) + { + /* + * First the bad news: there isn't a reimplementation of + * TypeVariable here, to handle returning a changed version with + * substitutions in its bounds. Doesn't seem worth the effort, as + * the classes that hold/supply TypeVariables are Class/Method/ + * Constructor, and we're not going to be reimplementing *them*. + * + * Next the good news: TypeVariable bounds are the places where + * a good story for terminating recursion would be needed, so + * if we can't substitute in them anyway, that's a non-concern. + */ + return b.substitute((TypeVariable)t); + } + else if ( t instanceof Class ) + { + return t; + } + else + throw new UnsupportedOperationException( + "substitute on unknown Type " + t.getClass()); + } + + /** + * Applies substitutions in b to each type in types, + * updating them in place, returning true if any change resulted. 
+ */ + private static boolean substituted(Bindings b, Type[] types) + { + boolean changed = false; + for ( int i = 0; i < types.length; ++ i ) + { + Type ot = types[i]; + Type nt = substitute(b, ot); + if ( nt == ot ) + continue; + types[i] = nt; + changed = true; + } + return changed; + } + + static String toString(Type t) + { + if ( t instanceof Class ) + return ((Class)t).getCanonicalName(); + return t.toString(); + } + + /** + * A key class for entering {@code TypeVariable}s in hash structures, + * without relying on the undocumented behavior of the Java implementation. + *

+ * Assumes that object identity is significant for + * {@code GenericDeclaration} instances ({@code Class} instances are chiefly + * what will be of interest here), just as {@code typesEqual} does. + */ + static final class VKey + { + private final TypeVariable m_tv; + + VKey(TypeVariable tv) + { + m_tv = tv; + } + + @Override + public int hashCode() + { + return + m_tv.getName().hashCode() + ^ identityHashCode(m_tv.getGenericDeclaration()); + } + + @Override + public boolean equals(Object other) + { + if ( this == other ) + return true; + if ( ! (other instanceof VKey) ) + return false; + return typesEqual(m_tv, ((VKey)other).m_tv); + } + + TypeVariable get() + { + return m_tv; + } + } + + public static TypeVariable[] freeVariables(Type t) + { + Set result = new HashSet<>(); + freeVariables(result, t); + return result.stream().map(VKey::get).toArray(TypeVariable[]::new); + } + + private static void freeVariables(Set s, Type t) + { + if ( t instanceof Class ) + return; + if ( t instanceof GenericArrayType ) + { + GenericArrayType a = (GenericArrayType)t; + freeVariables(s, a.getGenericComponentType()); + return; + } + if ( t instanceof ParameterizedType ) + { + ParameterizedType p = (ParameterizedType)t; + freeVariables(s, p.getOwnerType()); + stream(p.getActualTypeArguments()) + .forEach(tt -> freeVariables(s, tt)); + return; + } + if ( t instanceof TypeVariable ) + { + TypeVariable v = (TypeVariable)t; + if ( s.add(new VKey(v)) ) + stream(v.getBounds()).forEach(tt -> freeVariables(s, tt)); + return; + } + if ( t instanceof WildcardType ) + { + WildcardType w = (WildcardType)t; + concat(stream(w.getUpperBounds()), stream(w.getLowerBounds())) + .forEach(tt -> freeVariables(s, tt)); + return; + } + } + + @Override + public String getTypeName() + { + return toString(); + } + + static class GenericArray extends AbstractType implements GenericArrayType + { + private final Type component; + + GenericArray(Type component) + { + this.component = component; + } + + 
@Override + public Type getGenericComponentType() + { + return component; + } + + @Override + public String toString() + { + return toString(component) + "[]"; + } + } + + static class Parameterized extends AbstractType implements ParameterizedType + { + private final Type[] arguments; + private final Type owner; + private final Type raw; + + Parameterized(Type[] arguments, Type owner, Type raw) + { + this.arguments = arguments; + this.owner = owner; + this.raw = raw; + } + + @Override + public Type[] getActualTypeArguments() + { + return arguments; + } + + @Override + public Type getOwnerType() + { + return owner; + } + + @Override + public Type getRawType() + { + return raw; + } + + @Override + public String toString() + { + if ( 0 == arguments.length ) + return toString(raw); + return toString(raw) + stream(arguments) + .map(AbstractType::toString).collect(joining(",", "<", ">")); + } + } + + static class Wildcard extends AbstractType implements WildcardType + { + private final Type[] lbounds; + private final Type[] ubounds; + + Wildcard(Type[] lbounds, Type[] ubounds) + { + this.lbounds = lbounds; + this.ubounds = ubounds; + } + + @Override + public Type[] getLowerBounds() + { + return lbounds; + } + + @Override + public Type[] getUpperBounds() + { + return ubounds; + } + + @Override + public String toString() + { + if ( 0 < lbounds.length ) + return "? super " + stream(lbounds) + .map(AbstractType::toString).collect(joining(" & ")); + else if ( 0 < ubounds.length && Object.class != ubounds[0] ) + return "? extends " + stream(ubounds) + .map(AbstractType::toString).collect(joining(" & ")); + else + return "?"; + } + } + + /** + * A class recording the bindings made in a ParameterizedType to the type + * parameters in a GenericDeclaration<Class>. Implements {@code Type} + * so it can be added to the {@code pending} queue in + * {@code specialization}. + *

+ * In {@code specialization}, the tree of superclasses/superinterfaces will + * be searched breadth-first, with all of a node's immediate supers enqueued + * before any from the next level. By recording a node's type variable to + * type argument bindings in an object of this class, and enqueueing it + * before any of the node's supers, any type variables encountered as actual + * type arguments to any of those supers should be resolvable in the object + * of this class most recently dequeued. + */ + public static class Bindings implements Type + { + private final TypeVariable[] formalTypeParams; + private final Type[] actualTypeArgs; + + public Bindings(TypeVariable[] formalParams, Type[] actualArgs) + { + actualTypeArgs = actualArgs; + formalTypeParams = formalParams; + if ( actualTypeArgs.length != formalTypeParams.length ) + throw new IllegalArgumentException( + "formalParams and actualArgs differ in length"); + // XXX check actualTypeArgs against bounds of the formalParams + } + + Bindings(Bindings prior, ParameterizedType pt) + { + actualTypeArgs = pt.getActualTypeArguments(); + formalTypeParams = + ((GenericDeclaration)pt.getRawType()).getTypeParameters(); + assert actualTypeArgs.length == formalTypeParams.length; + + if ( 0 == prior.actualTypeArgs.length ) + return; + + for ( int i = 0; i < actualTypeArgs.length; ++ i ) + actualTypeArgs[i] = + AbstractType.substitute(prior, actualTypeArgs[i]); + } + + Type substitute(TypeVariable v) + { + for ( int i = 0; i < formalTypeParams.length; ++ i ) + if ( typesEqual(formalTypeParams[i], v) ) + return actualTypeArgs[i]; + return v; + } + } + + /** + * A class dedicated to manipulating the types of multidimensional Java + * arrays, and their instances, that conform to PostgreSQL array constraints + * (non-'jagged', each dimension's arrays all equal size, no intermediate + * nulls). + *

+ * Construct a {@code MultiArray} by supplying a component {@link Type} and + * a number of dimensions. The resulting {@code MultiArray} represents the + * Java array type, and has a number of bracket pairs equal to the supplied + * dimensions argument plus those of the component type if it is itself a + * Java array. (There could be an {@code Adapter} for some PostgreSQL scalar + * type that presents it as a Java array, and then there could be a + * PostgreSQL array of that type.) So the type reported by + * {@link #arrayType arrayType} may have more bracket pairs than the + * {@code MultiArray}'s dimensions. Parentheses are used by + * {@link #toString toString} to help see what's going on. + *

+ * When converting a {@code MultiArray} to a {@link Sized Sized}, only as + * many sizes are supplied as the multiarray's dimensions, and when + * converting that to an {@link Sized.Allocated Allocated}, only that much + * allocation is done. Populating the arrays at that last allocated level + * with the converted elements of the PostgreSQL array is the work left + * for the caller. + */ + public static class MultiArray + { + public final Type component; + public final int dimensions; + + /** + * Constructs a description of a multiarray with a given component type + * and dimensions. + * @param component the type of the component (which may itself be an + * array) + * @param dimensions dimensions of the multiarray (if the component type + * is an array, the final resulting type will have the sum of its + * dimensions and these) + */ + public MultiArray(Type component, int dimensions) + { + if ( 1 > dimensions ) + throw new IllegalArgumentException( + "dimensions must be positive: " + dimensions); + this.component = component; + this.dimensions = dimensions; + } + + /** + * Returns a representation of the resulting Java array type, with + * parentheses around the component type (which may itself be an array + * type) and around the array brackets corresponding to this + * multiarray's dimensions. + */ + @Override + public String toString() + { + return "MultiArray: (" + component + ")([])*" + dimensions; + } + + /** + * Returns the resulting Java array type (which, if the component type + * is also an array, does not distinguish between its dimensions and + * those of this multiarray). 
+ */ + public Type arrayType() + { + Type t = component; + + if ( t instanceof Class ) + t = Array.newInstance((Class)t, new int[dimensions]) + .getClass(); + else + for ( int i = 0 ; i < dimensions ; ++ i ) + t = new GenericArray(t); + + return t; + } + + /** + * Returns a {@code MultiArray} representing an array type t + * in a canonical form, with its ultimate non-array type as the + * component type, and all of its array dimensions belonging to the + * multiarray. + */ + public static MultiArray canonicalize(Type t) + { + Type t1 = requireNonNull(t); + int dims = 0; + + for ( ;; ) + { + t1 = toElementIfArray(t1); + if ( null == t1 ) + break; + t = t1; + ++ dims; + } + + if ( 0 == dims ) + throw new IllegalArgumentException("not an array type: " + t); + + return new MultiArray(t, dims); + } + + /** + * Returns a new {@code MultiArray} with the same Java array type but + * where {@link #component} is a non-array type and {@link #dimensions} + * holds the total number of dimensions. + */ + public MultiArray canonicalize() + { + if ( null == toElementIfArray(component) ) + return this; + + MultiArray a = canonicalize(component); + return new MultiArray(a.component, dimensions + a.dimensions); + } + + /** + * Returns this {@code MultiArray} as a 'prefix' of suffix + * (which must have the same ultimate non-array type but a smaller + * number of dimensions). + *

+ * The result will have the array type of suffix as its + * component type, and the dimensions required to have the same overall + * Java {@link #arrayType arrayType} as the receiver. + */ + public MultiArray asPrefixOf(MultiArray suffix) + { + MultiArray pfx = canonicalize(); + MultiArray sfx = suffix.canonicalize(); + + if ( 1 + sfx.dimensions > pfx.dimensions ) + throw new IllegalArgumentException( + "suffix too long: ("+ this +").asPrefixOf("+ suffix +")"); + + if ( ! typesEqual(pfx.component, sfx.component) ) + throw new IllegalArgumentException( + "asPrefixOf with different component types: " + + pfx.component + ", " + sfx.component); + + Type c = sfx.arrayType(); + + return new MultiArray(c, pfx.dimensions - sfx.dimensions); + } + + /** + * Returns a new {@code MultiArray} with this one's type (possibly a + * raw, or parameterized type) refined according to the known type of + * model. + */ + public MultiArray refine(Type model) + { + int modelDims = 0; + + if ( null != toElementIfArray(model) ) + { + MultiArray cmodel = canonicalize(model); + modelDims = cmodel.dimensions; + model = cmodel.component; + } + + MultiArray canon = canonicalize(); + + Type[] rtype = new Type[1]; + if ( null == specialization(model, erase(canon.component), rtype) ) + throw new IllegalArgumentException( + "refine: " + model + " does not specialize " + + canon.component); + + MultiArray result = new MultiArray(rtype[0], canon.dimensions); + + if ( 0 < modelDims ) + { + MultiArray suffix = new MultiArray(rtype[0], modelDims); + result = result.asPrefixOf(suffix); + } + + return result; + } + + /** + * Returns a {@link Sized Sized} representing this {@code MultiArray} + * with a size for each of its dimensions. + */ + public Sized size(int... dims) + { + return new Sized(dims); + } + + /** + * Represents a {@code MultiArray} for which sizes for its dimensions + * have been specified, so that an instance can be allocated. 
+ */ + public class Sized + { + private final int[] lengths; + + private Sized(int[] dims) + { + if ( dims.length != dimensions ) + throw new IllegalArgumentException( + "("+ this +").size(passed " + + dims.length +" dimensions)"); + lengths = dims.clone(); + } + + @Override + public String toString() + { + return MultiArray.this.toString(); + } + + /** + * Returns an {@link Allocated Allocated} that wraps a + * freshly-allocated array having the sizes recorded here. + *

+ * The result is returned with wildcard types. If the caller code + * has been written so as to have type variables with the proper + * types at compile time, it may do an unchecked cast on the result, + * which may make later operations more concise. + */ + public Allocated allocate() + { + Class c = erase(component); + Object a = Array.newInstance(c, lengths); + + return new Allocated(a); + } + + /** + * Wraps an existing instance of the multiarray type in question. + * + * @param the overall Java type of the whole array, which + * can be retrieved with array() + * @param the type of the arrays at the final level + * (one-dimensional arrays of the component type) that can be + * iterated, in order, to be populated or read out. <TI> is + * always an array type, but can be a reference array or any + * primitive array type, and therefore not as convenient as it might + * be, because the least upper bound of those types is + * {@code Object}. + */ + public class Allocated implements Iterable + { + final Object array; + + private Allocated(Object a) + { + array = requireNonNull(a); + } + + /** + * Returns the resulting array. + */ + public TA array() + { + @SuppressWarnings("unchecked") + TA result = (TA)array; + return result; + } + + @Override + public String toString() + { + return MultiArray.this.toString(); + } + + /** + * Returns an {@code Iterator} over the array(s) at the bottom + * level of this multiarray, the ones that are one-dimensional + * arrays of the component type. + *

+ * They are returned in order, so that a simple loop to copy the + * component values into or out of each array in turn will + * amount to a row-major traversal (same as PostgreSQL's storage + * order) of the whole array. + */ + @Override + public Iterator iterator() + { + final Object[][] arrays = new Object [ dimensions ] []; + final int[] indices = new int [ dimensions ]; + final int rightmost = dimensions - 1; + + arrays[0] = new Object[] { array }; + + for ( int i = 1; i < arrays.length; ++ i ) + { + Object[] a = arrays[i-1]; + if ( 0 == a.length ) + { + ++ indices[0]; + break; + } + arrays[i] = (Object[])requireNonNull(a[0]); + } + + return new Iterator() + { + @Override + public boolean hasNext() + { + return 0 == indices[0]; + } + + @Override + public TI next() + { + if ( 0 < indices[0] ) + throw new NoSuchElementException(); + + @SuppressWarnings("unchecked") + TI o = (TI)arrays[rightmost][indices[rightmost]++]; + + if (indices[rightmost] >= arrays[rightmost].length) + { + int i = rightmost - 1; + while ( 0 <= i ) + { + if ( ++ indices[i] < arrays[i].length ) + break; + -- i; + } + if ( 0 <= i ) + { + while ( i < rightmost ) + { + Object a = arrays[i][indices[i]]; + ++ i; + arrays[i] = (Object[])requireNonNull(a); + indices[i] = 0; + } + } + } + + return o; + } + }; + } + } + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/Datum.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/Datum.java new file mode 100644 index 000000000..84fbd669c --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/Datum.java @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt.spi; + +import java.io.Closeable; +import java.io.InputStream; + +import java.nio.ByteBuffer; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; // for javadoc +import org.postgresql.pljava.model.Attribute; + +/** + * Raw access to the contents of a PostgreSQL datum. + *

+ * For type safety, only {@link Adapter Adapter} implementations should be + * able to obtain a {@code Datum}, and should avoid leaking it to other code. + */ +public interface Datum extends Closeable +{ + /** + * Use the given {@link Verifier} to confirm that the {@code Datum} content + * is well-formed, throwing an exception if not. + */ + void verify(Verifier.OfBuffer v) throws SQLException; + + /** + * Use the given {@link Verifier} to confirm that the {@code Datum} content + * is well-formed, throwing an exception if not. + */ + void verify(Verifier.OfStream v) throws SQLException; + + /** + * Interface through which PL/Java code reads the content of an existing + * PostgreSQL datum. + */ + interface Input extends Datum + { + default void pin() throws SQLException + { + } + + default boolean pinUnlessReleased() + { + return false; + } + + default void unpin() + { + } + + /** + * Returns a read-only {@link ByteBuffer} covering the content of the + * datum. + *
+ *<p>
+ * When the datum is a {@code varlena}, the "content" does not include + * the four-byte header. When implementing an adapter for a varlena + * datatype, note carefully whether offsets used in the PostgreSQL C + * code are relative to the start of the content or the start of the + * varlena overall. If the latter, they will need adjustment when + * indexing into the {@code ByteBuffer}. + *
+ *<p>
+ * If the byte order of the buffer will matter, it should be explicitly + * set. + *
+ *<p>
+ * The buffer may window native memory allocated by PostgreSQL, so + * {@link #pin pin()} and {@link #unpin unpin()} should surround + * accesses through it. Like {@code Datum} itself, the + * {@code ByteBuffer} should be used only within an {@code Adapter}, and + * not exposed to other code. + */ + ByteBuffer buffer() throws SQLException; + + /** + * Returns an {@link InputStream} that presents the same bytes contained + * in the buffer returned by {@link #buffer buffer()}. + *
+ *<p>
+ * When necessary, the {@code InputStream} will handle pinning the + * buffer when reading, so the {@code InputStream} can safely be exposed + * to other code, if it is a reasonable way to present the contents of + * the datatype in question. + *
+ *<p>
+ * The stream supports {@code mark} and {@code reset}. + */ + T inputStream() throws SQLException; + } + + /** + * Empty superinterface of {@code Accessor.Deformed} and + * {@code Accessor.Heap}, which are erased at run time but help distinguish, + * in source code, which memory layout convention an {@code Accessor} + * is tailored for. + */ + interface Layout + { + } + + /** + * Accessor for a {@code Datum} located, at some offset, in + * memory represented by a {@code } object. + *
+ *<p>
+ * {@code } is a type variable to anticipate future memory abstractions + * like the incubating {@code MemorySegment} from JEP 412. The present + * implementation will work with any {@code } that you want as long + * as it is {@code java.nio.ByteBuffer}. + *
+ *<p>
+ * Given an {@code Accessor} instance properly selected for the memory + * layout, datum width, type length, and by-value/by-reference passing + * convention declared for a given {@link Attribute Attribute}, methods on + * the {@code Accessor} are available to retrieve the individual datum + * in {@code Datum} form (essentially another {@code } of exactly + * the length of the datum, wrapped with methods to avoid access outside + * of its lifetime), or as any Java primitive type appropriate to + * the datum's width. A {@code get} method of the datum's exact width or + * wider may be used (except for {@code float} and {@code double}, which + * only work for width exactly 4 or 8 bytes, respectively). + *
+ *<p>
+ * PostgreSQL only allows power-of-two widths up to {@code SIZEOF_DATUM} for + * a type that specifies the by-value convention, and so an {@code Accessor} + * for the by-value case only supports those widths. An {@code Accessor} for + * the by-reference case supports any size, with direct access as a Java + * primitive supported for any size up to the width of a Java long. + *
+ *<p>
+ * {@code getBoolean} can be used for any width the {@code Accessor} + * supports up to the width of Java long, and the result will be true + * if the value has any 1 bits. + *
+ *<p>
+ * Java {@code long} and {@code int} are always treated as + * signed by the language (though unsigned operations are available as + * methods), but have paired methods here to explicitly indicate which + * treatment is intended. The choice can affect the returned value when + * fetching a value as a primitive type that is wider than its type's + * declared length. Paired methods for {@code byte} are not provided because + * a byte is not wider than any type's length. When a type narrower than + * {@code SIZEOF_DATUM} is stored (in the {@code Deformed} layout), unused + * high bits are stored as zero. This should not strictly matter, as + * PostgreSQL strictly ignores the unused high bits, but it is consistent + * with the way PostgreSQL declares {@code Datum} as an unsigned integral + * type. + * + * @param type of the memory abstraction used. Accessors will be + * available supporting {@code ByteBuffer}, and may be available supporting + * a newer abstraction like {@code MemorySegment}. + * @param a subinterface of {@code Layout}, either {@code Deformed} or + * {@code Heap}, indicating which {@code TupleTableSlot} layout the + * {@code Accessor} is intended for, chiefly as a tool for compile-time + * checking that they haven't been mixed up. + */ + interface Accessor + { + Datum.Input getDatum(B buffer, int offset, Attribute a); + + long getLongSignExtended(B buffer, int offset); + + long getLongZeroExtended(B buffer, int offset); + + double getDouble(B buffer, int offset); + + int getIntSignExtended(B buffer, int offset); + + int getIntZeroExtended(B buffer, int offset); + + float getFloat(B buffer, int offset); + + short getShort(B buffer, int offset); + + char getChar(B buffer, int offset); + + byte getByte(B buffer, int offset); + + boolean getBoolean(B buffer, int offset); + + /** + * An accessor for use with a 'deformed' (array-of-{@code Datum}) + * memory layout. + *
+ *<p>
+ * When using a 'deformed' accessor, the caller is responsible for + * passing an {@code offset} value that is an integral multiple of + * {@code SIZEOF_DATUM} from where the array-of-{@code Datum} starts. + */ + interface Deformed extends Layout + { + } + + /** + * An accessor for use with a heap-tuple styled, flattened, + * memory layout. + *
+ *<p>
+ * When using a heap accessor, the caller is responsible for passing an + * {@code offset} value properly computed from the sizes of preceding + * members and the alignment of the member to be accessed. + */ + interface Heap extends Layout + { + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/TwosComplement.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/TwosComplement.java new file mode 100644 index 000000000..d2323ed96 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/TwosComplement.java @@ -0,0 +1,560 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt.spi; + +/** + * Methods that have variants on twos-complement Java types that might be signed + * or unsigned. + *
+ *<p>
+ * The {@code Signed} or {@code Unsigned} subinterface below, as appropriate, + * can be used as a mixin on a class where the right treatment of a Java + * {@code long}, {@code int}, {@code short}, or {@code byte} might be + * class-specific. + *
+ *<p>
+ * The semantic difference between a {@code short} treated as unsigned and a + * {@code char} (also an unsigned 16-bit type) is whether the value is expected + * to mean what UTF-16 says it means. + */ +public interface TwosComplement +{ + boolean unsigned(); + + /* + * Methods for long + */ + + int compare(long x, long y); + + long divide(long dividend, long divisor); + + long remainder(long dividend, long divisor); + + long parseLong(CharSequence s, int beginIndex, int endIndex, int radix); + + String deparse(long i, int radix); + + default long parseLong(CharSequence s, int radix) + { + return parseLong(s, 0, s.length(), radix); + } + + default long parseLong(CharSequence s) + { + return parseLong(s, 0, s.length(), 10); + } + + default String deparse(long i) + { + return deparse(i, 10); + } + + /* + * Methods for int + */ + + int compare(int x, int y); + + int divide(int dividend, int divisor); + + int remainder(int dividend, int divisor); + + long toLong(int i); + + int parseInt(CharSequence s, int beginIndex, int endIndex, int radix); + + String deparse(int i, int radix); + + default int parseInt(CharSequence s, int radix) + { + return parseInt(s, 0, s.length(), radix); + } + + default int parseInt(CharSequence s) + { + return parseInt(s, 0, s.length(), 10); + } + + default String deparse(int i) + { + return deparse(i, 10); + } + + /* + * Methods for short + */ + + int compare(short x, short y); + + short divide(short dividend, short divisor); + + short remainder(short dividend, short divisor); + + long toLong(short i); + + int toInt(short i); + + short parseShort(CharSequence s, int beginIndex, int endIndex, int radix); + + String deparse(short i, int radix); + + default short parseShort(CharSequence s, int radix) + { + return parseShort(s, 0, s.length(), radix); + } + + default short parseShort(CharSequence s) + { + return parseShort(s, 0, s.length(), 10); + } + + default String deparse(short i) + { + return deparse(i, 10); + } + + /* + * Methods for byte + 
*/ + + int compare(byte x, byte y); + + byte divide(byte dividend, byte divisor); + + byte remainder(byte dividend, byte divisor); + + long toLong(byte i); + + int toInt(byte i); + + short toShort(byte i); + + byte parseByte(CharSequence s, int beginIndex, int endIndex, int radix); + + String deparse(byte i, int radix); + + default byte parseByte(CharSequence s, int radix) + { + return parseByte(s, 0, s.length(), radix); + } + + default byte parseByte(CharSequence s) + { + return parseByte(s, 0, s.length(), 10); + } + + default String deparse(byte i) + { + return deparse(i, 10); + } + + /** + * Mixin with default signed implementations of the interface methods. + */ + interface Signed extends TwosComplement + { + @Override + default boolean unsigned() + { + return false; + } + + /* + * Methods for long + */ + + @Override + default int compare(long x, long y) + { + return Long.compare(x, y); + } + + @Override + default long divide(long dividend, long divisor) + { + return dividend / divisor; + } + + @Override + default long remainder(long dividend, long divisor) + { + return dividend % divisor; + } + + @Override + default long parseLong( + CharSequence s, int beginIndex, int endIndex, int radix) + { + return Long.parseLong(s, beginIndex, endIndex, radix); + } + + @Override + default String deparse(long i, int radix) + { + return Long.toString(i, radix); + } + + /* + * Methods for int + */ + + @Override + default int compare(int x, int y) + { + return Integer.compare(x, y); + } + + @Override + default int divide(int dividend, int divisor) + { + return dividend / divisor; + } + + @Override + default int remainder(int dividend, int divisor) + { + return dividend % divisor; + } + + @Override + default long toLong(int i) + { + return i; + } + + @Override + default int parseInt( + CharSequence s, int beginIndex, int endIndex, int radix) + { + return Integer.parseInt(s, beginIndex, endIndex, radix); + } + + @Override + default String deparse(int i, int radix) + { + return 
Integer.toString(i, radix); + } + + /* + * Methods for short + */ + + @Override + default int compare(short x, short y) + { + return Short.compare(x, y); + } + + @Override + default short divide(short dividend, short divisor) + { + return (short)(dividend / divisor); + } + + @Override + default short remainder(short dividend, short divisor) + { + return (short)(dividend % divisor); + } + + @Override + default long toLong(short i) + { + return i; + } + + @Override + default int toInt(short i) + { + return i; + } + + @Override + default short parseShort( + CharSequence s, int beginIndex, int endIndex, int radix) + { + int i = Integer.parseInt(s, beginIndex, endIndex, radix); + if ( Short.MIN_VALUE <= i && i <= Short.MAX_VALUE ) + return (short)i; + throw new NumberFormatException(String.format( + "Value out of range. Value:\"%s\" Radix:%d", + s.subSequence(beginIndex, endIndex), radix)); + } + + @Override + default String deparse(short i, int radix) + { + return Integer.toString(i, radix); + } + + /* + * Methods for byte + */ + + @Override + default int compare(byte x, byte y) + { + return Byte.compare(x, y); + } + + @Override + default byte divide(byte dividend, byte divisor) + { + return (byte)(dividend / divisor); + } + + @Override + default byte remainder(byte dividend, byte divisor) + { + return (byte)(dividend % divisor); + } + + @Override + default long toLong(byte i) + { + return i; + } + + @Override + default int toInt(byte i) + { + return i; + } + + @Override + default short toShort(byte i) + { + return i; + } + + @Override + default byte parseByte( + CharSequence s, int beginIndex, int endIndex, int radix) + { + int i = Integer.parseInt(s, beginIndex, endIndex, radix); + if ( Byte.MIN_VALUE <= i && i <= Byte.MAX_VALUE ) + return (byte)i; + throw new NumberFormatException(String.format( + "Value out of range. 
Value:\"%s\" Radix:%d", + s.subSequence(beginIndex, endIndex), radix)); + } + + @Override + default String deparse(byte i, int radix) + { + return Integer.toString(i, radix); + } + } + + /** + * Mixin with default unsigned implementations of the interface methods. + */ + interface Unsigned extends TwosComplement + { + @Override + default boolean unsigned() + { + return true; + } + + /* + * Methods for long + */ + + @Override + default int compare(long x, long y) + { + return Long.compareUnsigned(x, y); + } + + @Override + default long divide(long dividend, long divisor) + { + return Long.divideUnsigned(dividend, divisor); + } + + @Override + default long remainder(long dividend, long divisor) + { + return Long.remainderUnsigned(dividend, divisor); + } + + @Override + default long parseLong( + CharSequence s, int beginIndex, int endIndex, int radix) + { + return Long.parseUnsignedLong(s, beginIndex, endIndex, radix); + } + + @Override + default String deparse(long i, int radix) + { + return Long.toUnsignedString(i, radix); + } + + /* + * Methods for int + */ + + @Override + default int compare(int x, int y) + { + return Integer.compareUnsigned(x, y); + } + + @Override + default int divide(int dividend, int divisor) + { + return Integer.divideUnsigned(dividend, divisor); + } + + @Override + default int remainder(int dividend, int divisor) + { + return Integer.remainderUnsigned(dividend, divisor); + } + + @Override + default long toLong(int i) + { + return Integer.toUnsignedLong(i); + } + + @Override + default int parseInt( + CharSequence s, int beginIndex, int endIndex, int radix) + { + return Integer.parseUnsignedInt(s, beginIndex, endIndex, radix); + } + + @Override + default String deparse(int i, int radix) + { + return Integer.toUnsignedString(i, radix); + } + + /* + * Methods for short + */ + + @Override + default int compare(short x, short y) + { + return Short.compareUnsigned(x, y); + } + + @Override + default short divide(short dividend, short divisor) + { + 
return (short) + Integer.divideUnsigned(toInt(dividend), toInt(divisor)); + } + + @Override + default short remainder(short dividend, short divisor) + { + return (short) + Integer.remainderUnsigned(toInt(dividend), toInt(divisor)); + } + + @Override + default long toLong(short i) + { + return Short.toUnsignedLong(i); + } + + @Override + default int toInt(short i) + { + return Short.toUnsignedInt(i); + } + + @Override + default short parseShort( + CharSequence s, int beginIndex, int endIndex, int radix) + { + int i = + Integer.parseUnsignedInt(s, beginIndex, endIndex, radix); + if ( 0 <= i && i <= 0xffff ) + return (short)i; + throw new NumberFormatException(String.format( + "Value out of range. Value:\"%s\" Radix:%d", + s.subSequence(beginIndex, endIndex), radix)); + } + + @Override + default String deparse(short i, int radix) + { + return Integer.toUnsignedString(toInt(i), radix); + } + + /* + * Methods for byte + */ + + @Override + default int compare(byte x, byte y) + { + return Byte.compareUnsigned(x, y); + } + + @Override + default byte divide(byte dividend, byte divisor) + { + return (byte) + Integer.divideUnsigned(toInt(dividend), toInt(divisor)); + } + + @Override + default byte remainder(byte dividend, byte divisor) + { + return (byte) + Integer.remainderUnsigned(toInt(dividend), toInt(divisor)); + } + + @Override + default long toLong(byte i) + { + return Byte.toUnsignedLong(i); + } + + @Override + default int toInt(byte i) + { + return Byte.toUnsignedInt(i); + } + + @Override + default short toShort(byte i) + { + return (short)Byte.toUnsignedInt(i); + } + + @Override + default byte parseByte( + CharSequence s, int beginIndex, int endIndex, int radix) + { + int i = + Integer.parseUnsignedInt(s, beginIndex, endIndex, radix); + if ( 0 <= i && i <= 0xff ) + return (byte)i; + throw new NumberFormatException(String.format( + "Value out of range. 
Value:\"%s\" Radix:%d", + s.subSequence(beginIndex, endIndex), radix)); + } + + @Override + default String deparse(byte i, int radix) + { + return Integer.toUnsignedString(toInt(i), radix); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/Verifier.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/Verifier.java new file mode 100644 index 000000000..7ee581a4a --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/Verifier.java @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.adt.spi; + +import java.io.InputStream; + +import java.nio.ByteBuffer; + +/** + * A {@code Verifier} verifies the proper form of content written to a + * {@code Datum}. + *
+ *<p>
+ * This is necessary only when the correctness of the written stream may be + * doubtful, as when an API spec requires exposing a method for client code + * to write arbitrary bytes. If a type implementation exposes only + * type-appropriate operations to client code, and always controls the byte + * stream written to the varlena, the {@code NOOP} verifier can be used. + *
+ *<p>
+ * There are no methods accepting an unextended {@code Verifier}, only those + * accepting one of its contained functional interfaces + * {@link OfBuffer OfBuffer} and {@link OfStream OfStream}. + *
+ *<p>
+ * A type-specific verifier must supply a {@code verify} method that reads all + * of the content and completes normally if it is a complete and well-formed + * representation of the type. Otherwise, it must throw an exception. + *
+ *<p>
+ * An {@code OfBuffer} verifier must leave the buffer's position equal to the + * value of the buffer's limit when the verifier was entered. An + * {@code OfStream} verifier must leave the stream at end of input. An + * {@code OfStream} verifier may assume that the supplied {@code InputStream} + * supports {@code mark} and {@code reset} efficiently. + *
+ *<p>
+ * An {@code OfStream} verifier may execute in another thread concurrently with + * the writing of the content by the adapter. + * Its {@code verify} method must not interact with PostgreSQL. + */ +public interface Verifier +{ + /** + * A verifier interface to be used when the {@code ByteBuffer} API provides + * the most natural interface for manipulating the content. + *
+ *<p>
+ * Such a verifier will be run only when the content has been completely + * produced. + */ + @FunctionalInterface + interface OfBuffer extends Verifier + { + /** + * Completes normally if the verification succeeds, otherwise throwing + * an exception. + *
+ *<p>
+ * The buffer's {@code position} when this method returns must equal the + * value of the buffer's {@code limit} when the method was called. + */ + void verify(ByteBuffer b) throws Exception; + } + + /** + * A verifier interface to be used when the {@code InputStream} API provides + * the most natural interface for manipulating the content. + *
+ *<p>
+ * Such a verifier may be run concurrently in another thread while the + * data type adapter is writing the content. It must therefore be able to + * verify the content without interacting with PostgreSQL. + */ + @FunctionalInterface + interface OfStream extends Verifier + { + /** + * Completes normally if the verification succeeds, otherwise throwing + * an exception. + *
+ *<p>
+ * The method must leave the stream at end-of-input. It may assume that + * the stream supports {@code mark} and {@code reset} efficiently. + * It must avoid interacting with PostgreSQL, in case it is run in + * another thread concurrently with the production of the content. + */ + void verify(InputStream s) throws Exception; + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/package-info.java b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/package-info.java new file mode 100644 index 000000000..de2fe153f --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/adt/spi/package-info.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +/** + * Types that will be of interest in the implementation of {@code Adapter}s. + *
+ *<p>
+ * First-class PL/Java support for a new PostgreSQL data type entails + * implementation of an {@link Adapter Adapter}. Unlike non-{@code Adapter} + * code, an {@code Adapter} implementation may have to concern itself with + * the facilities in this package, {@code Datum} in particular. An + * {@code Adapter} should avoid leaking a {@code Datum} to non-{@code Adapter} + * code. + *
+ *<h2>Adapter manager</h2>
+ *<p>
+ * There needs to be an {@code Adapter}-manager service to accept application + * requests to connect x PostgreSQL type with y Java type + * and find or compose available {@code Adapter}s (built-in or by service + * loader) to do so. There is some work in that direction (the methods in + * {@link AbstractType AbstractType} should be helpful), but no such manager + * yet. + * @author Chapman Flack + */ +package org.postgresql.pljava.adt.spi; + +import org.postgresql.pljava.Adapter; diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/BaseUDT.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/BaseUDT.java index e416f2afc..3798c0a3c 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/BaseUDT.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/BaseUDT.java @@ -56,13 +56,15 @@ *
 *<p>
* Other static methods in the class may be exported as SQL functions by * marking them with {@code @Function} in the usual way, and will not have any - * special treatment on account of being in a UDT class. If those function - * declarations will depend on the existence of this type, or the type must + * special treatment on account of being in a UDT class. Those function + * declarations will be correctly ordered before or after this type's, in common + * cases such as when this type appears in their signatures, or the type must * refer to the functions (as it must for * {@link #typeModifierInput typeModifierInput} or - * {@link #typeModifierOutput typeModifierOutput} functions, for example), - * appropriate {@link #provides provides}/{@link #requires requires} labels must - * be used in their {@code @Function} annotations and this annotation, to make + * {@link #typeModifierOutput typeModifierOutput} functions, for example). + * In a case that the automatic ordering does not handle correctly, + * appropriate {@link #provides provides}/{@link #requires requires} labels can + * be used in the {@code @Function} annotations and this annotation, to make * the order come out right. */ @Target(ElementType.TYPE) @Retention(RetentionPolicy.CLASS) @Documented @@ -254,9 +256,8 @@ public interface Code *
 *<p>
* Even if the method is defined on the UDT class marked by this annotation, * it is not automatically found or used. It will need its own - * {@link Function} annotation giving it a name and a {@code provides} - * label, and this annotation must refer to it by that name and include the - * label in {@code requires} to ensure the SQL is generated in the right + * {@link Function} annotation giving it a name, and this annotation must + * refer to it by that name to ensure the SQL is generated in the right * order. */ String typeModifierInput() default ""; @@ -274,9 +275,8 @@ public interface Code *
 *<p>
* Even if the method is defined on the UDT class marked by this annotation, * it is not automatically found or used. It will need its own - * {@link Function} annotation giving it a name and a {@code provides} - * label, and this annotation must refer to it by that name and include the - * label in {@code requires} to ensure the SQL is generated in the right + * {@link Function} annotation giving it a name, and this annotation must + * refer to it by that name to ensure the SQL is generated in the right * order. */ String typeModifierOutput() default ""; @@ -288,6 +288,12 @@ public interface Code * The details of the necessary API are in {@code vacuum.h}. + *
+ *<p>
+ * Even if the method is defined on the UDT class marked by this annotation, + * it is not automatically found or used. It will need its own + * {@link Function} annotation giving it a name, and this annotation must + * refer to it by that name to ensure the SQL is generated in the right + * order. */ String analyze() default ""; diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Operator.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Operator.java index 7e7fe0df3..0b60808bb 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Operator.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Operator.java @@ -115,6 +115,16 @@ * different, so the two functions can be distinguished by overloading). A * typical case would be the twin of a cross-type function like {@code add} * that is commutative, so using the same name makes sense. + *
+ *<p>
+ * When derived by commutation, the synthetic function simply calls the + * base function with the parameters swapped. For negation, the base + * function must return {@code boolean} or {@code Boolean}, and the + * synthetic function returns true for false, false for true, and null + * for null. This will give familiar SQL behavior in many cases. For a base + * function with {@code onNullInput=CALLED}, if it can return non-null + * boolean results on some null inputs, it may be necessary to code + * a negator or commutator by hand if the synthetic one would not have + * the intended semantics. */ String[] synthetic() default {}; @@ -129,16 +139,6 @@ * (which must be different) reversed. A typical case would be the twin of a * cross-type operator like {@code +} that is commutative, so using the same * name makes sense. - *
- *<p>
- * When derived by commutation, the synthetic function simply calls the - * base function with the parameters swapped. For negation, the base - * function must return {@code boolean} or {@code Boolean}, and the - * synthetic function returns true for false, false for true, and null - * for null. This will give familiar SQL behavior in many cases. For a base - * function with {@code onNullInput=CALLED}, if it can return non-null - * boolean results on some null inputs, it may be necessary to code - * a negator or commutator by hand if the synthetic one would not have - * the intended semantics. */ String[] commutator() default {}; diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLAction.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLAction.java index face77719..a1ff47377 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLAction.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLAction.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -22,14 +22,33 @@ /** * Annotation that supplies verbatim commands to be copied into the * deployment descriptor. - * - * Strings supplied within a single SQLAction annotation will be copied - * in the order supplied. Strings from different SQLAction annotations, and - * generated code for functions, will be assembled in an order that can be - * influenced by 'provides' and 'requires' labels. No snippet X will be - * emitted ahead of any snippets that provide what X requires. The "remove" - * actions will be assembled in the reverse of that order. - * + *
+ *<p>
+ * Strings supplied to {@link #install install} or {@link #remove remove} within + * a single {@code SQLAction} annotation become code snippets emitted into the + * deployment descriptor's {@code INSTALL} or {@code REMOVE} section, + * respectively, in the order supplied. + *
+ *<p>
+ * Snippets from different {@code SQLAction} annotations, + * and snippets generated by annotations on functions, types, and such, will be + * assembled in an order that can be influenced by {@link #provides provides} + * and {@link #requires requires} labels. No snippet X will be emitted as an + * {@code INSTALL} action ahead of any snippets that provide what X requires. + * The sense of that dependency is reversed when ordering {@code REMOVE} + * snippets. + *
+ *<h2>Conditional execution</h2>
+ *<p>
+ * An {@code SQLAction} may supply an {@code install} snippet that tests some + * condition at the time of deployment and adjusts the + * {@code pljava.implementors} setting to include or not include a specific + * {@code }, controlling whether actions later in + * the deployment descriptor that are annotated with that + * {@code } will be executed. The {@code SQLAction} that + * controls whether an {@code } will be recognized should use + * {@link #provides provides} with exactly that name, which is implicitly + * 'required' by statements that use that name as + * {@link #implementor implementor}. For details on this usage, which involves + * a different ordering rule, see "conditional execution" in + * {@link org.postgresql.pljava.annotation the package documentation}. * @author Thomas Hallgren - pre-Java6 version * @author Chapman Flack (Purdue Mathematics) - updated to Java6, * added SQLAction @@ -58,6 +77,10 @@ * generated in such an order that other objects that 'require' labels * 'provided' by this come later in the output for install actions, and * earlier for remove actions. + *
+ *<p>
+ * For use of this element on an {@code SQLAction} that tests a condition + * to control conditional execution, see "conditional execution" in + * {@link SQLAction the class description}. */ String[] provides() default {}; diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/package-info.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/package-info.java index de7309e5b..932113bdd 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/package-info.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/package-info.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016 Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -13,6 +13,7 @@ /** * Annotations for use in Java code to generate the SQLJ Deployment Descriptor * automatically. + *
+ *<h2>Eliminating error-prone hand-maintained SQL scripts</h2>
*
 *<p>
* To define functions or types in PL/Java requires more than one step. The * Java code must be written, compiled to a jar, and made available to the @@ -22,56 +23,65 @@ * version that undoes it when uninstalling the jar) can be written in a * prescribed form and stored inside the jar itself as an "SQLJ Deployment * Descriptor", and processed automatically when the jar is installed in or - * removed from the backend. + * removed from the DBMS. *

* To write the deployment descriptor by hand can be tedious and error-prone, * as it must largely duplicate the method and type declarations in the * Java code, but using SQL's syntax and types in place of Java's. Instead, * when the annotations in this package are used in the Java code, the Java - * compiler itself will generate a deployment descriptor file, ready to include - * with the compiled classes to make a complete SQLJ jar. + * compiler itself will generate a deployment descriptor (DDR) file, ready to + * include with the compiled classes to make a complete SQLJ jar. *

* Automatic descriptor generation requires attention to a few things. *

    - *
  • A Java 6 or later Java compiler is required, and at least the - * pljava-api jar must be on its class path. (The full - * pljava.jar would also work, but only pljava-api - * is required.) The jar must be on the class path in any case in order to - * compile PL/Java code. + *
  • The {@code pljava-api} jar must be on the Java compiler's class path. + * (All but the simplest PL/Java functions probably refer to some class in + * PL/Java's API anyway, in which case the jar would already have to be on + * the class path.) + *
  • Java compilers older than Java 23 will automatically find and use + * PL/Java's DDR processor as long as the {@code pljava-api} jar is on the class + * path. Starting in Java 23, the compiler will not do so automatically, and a + * {@code -processor org.postgresql.pljava.annotation.processing.DDRProcessor} + * option is also needed on the {@code javac} command line. (Warnings about this + * are issued starting in Java 21, though the processor is still used + * automatically, with the warnings, until Java 23.) *
  • When recompiling after changing only a few sources, it is possible the * Java compiler will only process a subset of the source files containing * annotations. If so, it may generate an incomplete deployment descriptor, * and a clean build may be required to ensure the complete descriptor is * written. - *
  • Additional options are available when invoking the Java compiler, and - * can be specified with -Aoption=value on the command line: + *
+ *

New compiler options when generating the deployment descriptor

+ *

Additional options are available when invoking the Java compiler, and + * can be specified with {@code -Aoption=value} on the command line: *

- *
ddr.output + *
{@code ddr.output} *
The file name to be used for the generated deployment descriptor. * If not specified, the file will be named pljava.ddr and found * in the top directory of the tree where the compiled class files are written. - *
ddr.name.trusted + *
{@code ddr.name.trusted} *
The language name that will be used to declare methods that are * annotated to have {@link org.postgresql.pljava.annotation.Function.Trust#SANDBOXED} behavior. If not - * specified, the name java will be used. It must match the name + * specified, the name {@code java} will be used. It must match the name * used for the "trusted" language declaration when PL/Java was installed. - *
ddr.name.untrusted + *
{@code ddr.name.untrusted} *
The language name that will be used to declare methods that are * annotated to have {@link org.postgresql.pljava.annotation.Function.Trust#UNSANDBOXED} behavior. If not - * specified, the name javaU will be used. It must match the name + * specified, the name {@code javaU} will be used. It must match the name * used for the "untrusted" language declaration when PL/Java was installed. - *
ddr.implementor + *
{@code ddr.implementor} *
The identifier (defaulting to {@code PostgreSQL} if not specified here) * that will be used in the {@code <implementor block>}s wrapping any SQL * generated from elements that do not specify their own. If this is set to a * single hyphen (-), elements that specify no implementor will produce plain * {@code <SQL statement>}s not wrapped in {@code <implementor block>}s. - *
ddr.reproducible + *
{@code ddr.reproducible} *
When {@code true} (the default), SQL statements are written to the * deployment descriptor in an order meant to be consistent across successive * compilations of the same sources. This option is further discussed below. *
- *
  • The deployment descriptor may contain statements that cannot succeed if + *

    Controlling order of statements in the deployment descriptor

    + *

    The deployment descriptor may contain statements that cannot succeed if * placed in the wrong order, and to keep a manually-edited script in a workable * order while adding and modifying code can be difficult. Most of the * annotations in this package accept arbitrary {@code requires} and @@ -81,12 +91,13 @@ * compiler, except that it will make sure not to write anything that * {@code requires} some string X into the generated script * before whatever {@code provides} it. - *

  • There can be multiple ways to order the statements in the deployment + *

    Effect of {@code ddr.reproducible}

    + *

    There can be multiple ways to order the statements in the deployment * descriptor to satisfy the given {@code provides} and {@code requires} * relationships. While the compiler will always write the descriptor in an * order that satisfies those relationships, when the {@code ddr.reproducible} * option is {@code false}, the precise order may differ between successive - * compilations of the same sources, which should not affect successful + * compilations of the same sources, which should not affect successful * loading and unloading of the jar with {@code install_jar} and * {@code remove_jar}. In testing, this can help to confirm that all of the * needed {@code provides} and {@code requires} relationships have been @@ -95,6 +106,74 @@ * orders, chosen arbitrarily but consistently between multiple compilations as * long as the sources are unchanged. This can be helpful in software * distribution when reproducible output is wanted. + *

    Conditional execution in the deployment descriptor

    + *

The deployment-descriptor syntax fixed by the ISO SQL/JRT standard has + * a rudimentary conditional-inclusion feature based on + * {@code <implementor block>}s. + * SQL statements wrapped in {@code BEGIN}/{@code END} with an + * {@code <implementor name>} are executed only if that name is recognized + * by the DBMS when installing or removing the jar. Statements in the deployment + * descriptor that are not wrapped in an {@code <implementor block>} are + * executed unconditionally. + *

PL/Java's descriptor generator normally emits statements + * as {@code <implementor block>}s, using the name {@code PostgreSQL} + * (or the value of the {@code ddr.implementor} option if present on + * the compiler command line) by default, or a specific name supplied + * with {@code implementor=} to one of the annotations in this package. + *

    When loading or unloading a jar file and processing its deployment + * descriptor, PL/Java 'recognizes' any implementor name listed in the runtime + * setting {@code pljava.implementors}, which contains only {@code PostgreSQL} + * by default. + *

    The {@code pljava.implementors} setting can be changed, even by SQL + * statements within a deployment descriptor, to affect which subsequent + * statements will be executed. An SQL statement may test some condition and + * set {@code pljava.implementors} accordingly. In PL/Java's supplied examples, + * ConditionalDDR illustrates this approach to conditional execution. + *

Naturally, this scheme requires the SQL generator to emit the statement + * that tests the condition earlier in the deployment descriptor than + * the statements relying on the {@code <implementor name>} being set. + * Building on the existing ability to control the order of statements + * using {@code provides} and {@code requires} elements, an {@code implementor} + * element specified in the annotation for a statement is treated also as + * an implicit {@code requires} for that name, so the programmer only needs + * to place an explicit {@code provides} element on whatever + * {@link SQLAction SQLAction} tests the condition and determines if the name + * will be recognized. + *

    The {@code provides}/{@code requires} relationship so created differs + * in three ways from other {@code provides}/{@code requires} relationships: + *

      + *
    • It does not reverse for generating {@code remove} actions. + * Normal dependencies must be reversed for that case, so dependent objects + * are removed before those they depend on. By contrast, a condition determining + * the setting of an implementor name must be evaluated before the name + * is needed, whether the jar is being installed or removed. + *
    • If it does not have an explicit {@code remove} action (the usual case), + * its {@code install} action (the condition test and setting of the name) + * is used both when installing and removing. + *
    • It is weak. The SQL generator does not flag an error if the implicit + * {@code requires} for an implementor name is not satisfied by any annotation's + * {@code provides} in the visible Java sources. It is possible the name may be + * set some other way in the DBMS environment where the jar is to be deployed. + * Faced with statements that require such 'unprovided' implementor names, + * the SQL generator just falls back to emitting them as late in the deployment + * descriptor as possible, after all other statements that do not depend + * on them. *
    + *

    Matching {@code implementor} and {@code provides}

    + *

Given the 'weak' nature of the {@code implementor}/{@code provides} + * relationship, an error will not be reported if a spelling or upper/lower case + * difference prevents identifying an {@code <implementor name>} with the + * {@code provides} string of an annotated statement intended to match it. + * The resulting deployment descriptor may have a workable order + * as a result of the fallback ordering rules, or may have a mysteriously + * unworkable order, particularly of the {@code remove} actions. + *

    According to the ISO SQL/JRT standard, an {@code } is + * an SQL identifier, having a case-insensitive matching behavior unless quoted. + * PL/Java, however, treats a {@code provides} value as an arbitrary Java string + * that can only match exactly, and so PL/Java's SQL generator will successfully + * match up {@code implementor} and {@code provides} strings only when + * they are identical in spelling and case. */ package org.postgresql.pljava.annotation; diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Commentable.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Commentable.java new file mode 100644 index 000000000..d0320386e --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Commentable.java @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2016-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import javax.lang.model.element.Element; + +interface Commentable +{ + public String comment(); + public void setComment( Object o, boolean explicit, Element e); + public String derivedComment( Element e); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DBType.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DBType.java new file mode 100644 index 000000000..ac9d49682 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DBType.java @@ -0,0 +1,638 @@ +/* + * Copyright (c) 2020-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.util.AbstractMap; +import java.util.Map; +import static java.util.Objects.requireNonNull; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import static java.util.regex.Pattern.compile; + +import javax.annotation.processing.Messager; + +import static org.postgresql.pljava.sqlgen.Lexicals + .ISO_AND_PG_IDENTIFIER_CAPTURING; +import static org.postgresql.pljava.sqlgen.Lexicals.ISO_REGULAR_IDENTIFIER_PART; +import static org.postgresql.pljava.sqlgen.Lexicals.PG_REGULAR_IDENTIFIER_PART; +import static org.postgresql.pljava.sqlgen.Lexicals.SEPARATOR; +import static org.postgresql.pljava.sqlgen.Lexicals.identifierFrom; +import static org.postgresql.pljava.sqlgen.Lexicals.separator; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; +import static org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple.pgFold; + +/** + * Abstraction of a database type, which is usually specified by an + * {@code Identifier.Qualified}, but sometimes by reserved SQL syntax. + */ +abstract class DBType +{ + DBType withModifier(String modifier) + { + return new Modified(this, modifier); + } + + DBType asArray(String notated) + { + return new Array(this, notated); + } + + DBType withDefault(String suffix) + { + return new Defaulting(this, suffix); + } + + String toString(boolean withDefault) + { + return toString(); + } + + abstract DependTag dependTag(); + + /** + * Return the original underlying (leaf) type, either a {@code Named} or + * a {@code Reserved}. + *

    + * Override in non-leaf classes (except {@code Array}). + */ + DBType leaf() + { + return this; + } + + boolean isArray() + { + return false; + } + + @Override + public final boolean equals(Object o) + { + return equals(o, null); + } + + /** + * True if the underlying (leaf) types compare equal (overridden for + * {@code Array}). + *

    + * The assumption is that equality checking will be done for function + * signature equivalence, for which defaults and typmods don't matter + * (but arrayness does). + */ + public final boolean equals(Object o, Messager msgr) + { + if ( this == o ) + return true; + if ( ! (o instanceof DBType) ) + return false; + DBType dt1 = this.leaf(); + DBType dt2 = ((DBType)o).leaf(); + if ( dt1.getClass() != dt2.getClass() ) + return false; + if ( dt1 instanceof Array ) + { + dt1 = ((Array)dt1).m_component.leaf(); + dt2 = ((Array)dt2).m_component.leaf(); + if ( dt1.getClass() != dt2.getClass() ) + return false; + } + if ( dt1 instanceof Named ) + return ((Named)dt1).m_ident.equals(((Named)dt2).m_ident, msgr); + return pgFold(((Reserved)dt1).m_reservedName) + .equals(pgFold(((Reserved)dt2).m_reservedName)); + } + + /** + * Pattern to match type names that are special in SQL, if they appear as + * regular (unquoted) identifiers and without a schema qualification. + *

    + * This list does not include {@code DOUBLE} or {@code NATIONAL}, as the + * reserved SQL form for each includes a following keyword + * ({@code PRECISION} or {@code CHARACTER}/{@code CHAR}, respectively). + * There is a catch-all test in {@code fromSQLTypeAnnotation} that will fall + * back to 'reserved' treatment if the name is followed by anything that + * isn't a parenthesized type modifier, so the fallback will naturally catch + * these two cases. + */ + static final Pattern s_reservedTypeFirstWords = compile( + "(?i:" + + "INT|INTEGER|SMALLINT|BIGINT|REAL|FLOAT|DECIMAL|DEC|NUMERIC|" + + "BOOLEAN|BIT|CHARACTER|CHAR|VARCHAR|TIMESTAMP|TIME|INTERVAL" + + ")" + ); + + /** + * Parse a string, representing an optional parameter/column name followed + * by a type, into an {@code Identifier.Simple}, possibly null, and a + * {@code DBType}. + *

    + * Whitespace (or, strictly, separator; comments would be accepted) must + * separate the name from the type, if the name is not quoted. To omit a + * name and supply only the type, the string must begin with whitespace + * (ahem, separator). + */ + static Map.Entry fromNameAndType(String nandt) + { + Identifier.Simple name = null; + Matcher m = ISO_AND_PG_IDENTIFIER_CAPTURING.matcher(nandt); + if ( m.lookingAt() ) + { + nandt = nandt.substring(m.end()); + name = identifierFrom(m); + } + return + new AbstractMap.SimpleImmutableEntry<>( + name, fromSQLTypeAnnotation(nandt)); + } + + /** + * Make a {@code DBType} from whatever might appear in an {@code SQLType} + * annotation. + *

    + * The possibilities are numerous, as that text used to be dumped rather + * blindly into the descriptor and thus could be whatever PostgreSQL would + * make sense of. The result could be a {@code DBType.Named} if the start of + * the text parses as a (possibly schema-qualified) identifier, or a + * {@code DBType.Reserved} if it doesn't (or it parses as a non-schema- + * qualified regular identifier and matches one of SQL's grammatically + * reserved type names). It could be either of those wrapped in a + * {@code DBType.Modified} if a type modifier was parsed out. It could be + * any of those wrapped in a {@code DBType.Array} if the text ended with any + * of the recognized forms of array dimension notation. The one thing it + * can't be (as a result from this method) is a {@code DBType.Defaulting}; + * that wrapping can be applied to the result later, to carry a default + * value that has been specified at a particular site of use. + *

    + * The parsing strategy is a bit heuristic. An attempt is made to parse a + * (possibly schema-qualified) identifier at the start of the string. + * An attempt is made to find a match for array-dimension notation that runs + * to the end of the string. Whatever lies between gets to be a typmod if it + * looks enough like one, or gets rolled with the front of the string into a + * {@code DBType.Reserved}, which is not otherwise scrutinized; the + * {@code Reserved} case is still more or less a catch-all that will be + * dumped blindly into the descriptor in the hope that PostgreSQL will make + * sense of it. + *

    + * This strategy is used because compared to what can appear in a typmod + * (which could require arbitrary constant expression parsing), the array + * grammar depends on much less. + */ + static DBType fromSQLTypeAnnotation(String value) + { + Identifier.Qualified qname = null; + + Matcher m = SEPARATOR.matcher(value); + separator(m, false); + int postSeparator = m.regionStart(); + + if ( m.usePattern(ISO_AND_PG_IDENTIFIER_CAPTURING).lookingAt() ) + { + Identifier.Simple id1 = identifierFrom(m); + m.region(m.end(), m.regionEnd()); + + separator(m, false); + if ( value.startsWith(".", m.regionStart()) ) + { + m.region(m.regionStart() + 1, m.regionEnd()); + separator(m, false); + if ( m.usePattern(ISO_AND_PG_IDENTIFIER_CAPTURING).lookingAt() ) + { + Identifier.Simple id2 = identifierFrom(m); + qname = id2.withQualifier(id1); + m.region(m.end(), m.regionEnd()); + separator(m, false); + } + } + else + qname = id1.withQualifier(null); + } + + /* + * At this point, qname may have a local name and qualifier, or it may + * have a local name and null qualifier (if a single identifier was + * successfully matched but not followed by a dot). It is also possible + * for qname to be null, either because the start of the string didn't + * look like an identifier at all, or because it did, but was followed + * by a dot, and what followed the dot could not be parsed as another + * identifier. Probably both of those cases are erroneous, but they can + * also be handled by simply treating the content as Reserved and hoping + * PostgreSQL can make sense of it. + * + * Search from here to the end of the string for possible array notation + * that can be stripped off the end, leaving just the middle (if any) to + * be dealt with. + */ + + String arrayNotation = arrayNotationIfPresent(m, value); + + /* + * If arrayNotation is not null, m's region end has been adjusted to + * exclude the array notation. 
+ */ + + boolean reserved; + + if ( null == qname ) + reserved = true; + else if ( null != qname.qualifier() ) + reserved = false; + else + { + Identifier.Simple local = qname.local(); + if ( ! local.folds() ) + reserved = false; + else + { + Matcher m1 = + s_reservedTypeFirstWords.matcher(local.nonFolded()); + reserved = m1.matches(); + } + } + + /* + * If this is a reserved type, just wrap up everything from its start to + * the array notation (if any) as a Reserved; there is no need to try to + * tease out a typmod separately. (The reserved syntax can be quite + * unlike the generic typename(typmod) pattern; there could be what + * looks like a (typmod) between TIME and WITH TIME ZONE, or the moral + * equivalent of a typmod could look like HOUR TO MINUTE, and so on.) + * + * If we think this is a non-reserved type, and there is anything left + * in the matching region (preceding the array notation, if any), then + * it had better be a typmod in the generic form starting with a (. We + * will capture whatever is there and call it a typmod as long as it + * does start that way. (More elaborate checking, such as balancing the + * parens, would require ability to parse an expr_list.) This can allow + * malformed syntax to be uncaught until deployment time when PostgreSQL + * sees it, but that's unchanged from when the entire SQLType string was + * passed along verbatim. The 'threat' model here is just that the + * legitimate developer may get an error later when earlier would be + * more helpful, not a malicious adversary bent on injection. + * + * On the other hand, if what's left doesn't start with a ( then we + * somehow don't know what we're looking at, so fall back and treat it + * as reserved. This will naturally catch the two-token reserved names + * DOUBLE PRECISION, NATIONAL CHARACTER or NATIONAL CHAR, which were + * therefore left out of the s_reservedTypeFirstWords pattern. + */ + + if ( ! reserved && m.regionStart() < m.regionEnd() ) + if ( ! 
value.startsWith("(", m.regionStart()) ) + reserved = true; + + DBType result; + + if ( reserved ) + result = new DBType.Reserved( + value.substring(postSeparator, m.regionEnd())); + else + { + result = new DBType.Named(qname); + if ( m.regionStart() < m.regionEnd() ) + result = result.withModifier( + value.substring(m.regionStart(), m.regionEnd())); + } + + if ( null != arrayNotation ) + result = result.asArray(arrayNotation); + + return result; + } + + private static final Pattern s_arrayDimStart = compile(String.format( + "(?i:(? + * If a non-null string is returned, the matcher's region-end has been + * adjusted to exclude it. + *

    + * The matcher's associated pattern may have been changed, and the region + * transiently changed, but on return the region will either be the same as + * on entry (if no array notation was found), or have only the region end + * adjusted to exclude the notation. + *

    + * The returned string can include a {@code separator} that followed the + * array notation. + */ + private static String arrayNotationIfPresent(Matcher m, String s) + { + int originalRegionStart = m.regionStart(); + int notationStart; + int dims; + boolean atMostOneDimAllowed; // true after ARRAY keyword + +restart:for ( ;; ) + { + notationStart = -1; + dims = 0; + atMostOneDimAllowed = false; + + m.usePattern(s_arrayDimStart); + if ( ! m.find() ) + break restart; // notationStart is -1 indicating not found + + notationStart = m.start(); + if ( ! "[".equals(m.group()) ) // saw ARRAY + { + atMostOneDimAllowed = true; + m.region(m.end(), m.regionEnd()); + separator(m, false); + if ( ! s.startsWith("[", m.regionStart()) ) + { + if ( m.regionStart() == m.regionEnd() ) + { + dims = 1; // ARRAY separator $ --ok (means 1 dim) + break restart; + } + /* + * ARRAY separator something-other-than-[ + * This is not the match we're looking for. The regionStart + * already points here, so restart the loop to look for + * another potential array notation start beyond this point. + */ + continue restart; + } + m.region(m.regionStart() + 1, m.regionEnd()); + } + + /* + * Invariant: have seen [ and regionStart still points to it. + * Accept optional digits, then ] + * Repeat if followed by a [ + */ + for ( ;; ) + { + m.region(m.regionStart() + 1, m.regionEnd()); + separator(m, false); + + if ( m.usePattern(s_digits).lookingAt() ) + { + m.region(m.end(), m.regionEnd()); + separator(m, false); + } + + if ( ! s.startsWith("]", m.regionStart()) ) + continue restart; + + ++ dims; // have seen a complete [ (\d+)? ] + m.region(m.regionStart() + 1, m.regionEnd()); + separator(m, false); + if ( s.startsWith("[", m.regionStart()) ) + continue; + if ( m.regionStart() == m.regionEnd() ) + if ( ! 
atMostOneDimAllowed || 1 == dims ) + break restart; + continue restart; // not at end, not at [ --start over + } + } + + if ( -1 == notationStart ) + { + m.region(originalRegionStart, m.regionEnd()); + return null; + } + + m.region(originalRegionStart, notationStart); + return s.substring(notationStart); + } + + static final class Reserved extends DBType + { + private final String m_reservedName; + + Reserved(String name) + { + m_reservedName = name; + } + + @Override + public String toString() + { + return m_reservedName; + } + + @Override + DependTag dependTag() + { + return null; + } + + @Override + public int hashCode() + { + return pgFold(m_reservedName).hashCode(); + } + } + + static final class Named extends DBType + { + private final Identifier.Qualified m_ident; + + Named(Identifier.Qualified ident) + { + m_ident = ident; + } + + @Override + public String toString() + { + return m_ident.toString(); + } + + @Override + DependTag dependTag() + { + return new DependTag.Type(m_ident); + } + + @Override + public int hashCode() + { + return m_ident.hashCode(); + } + } + + static final class Modified extends DBType + { + private final DBType m_raw; + private final String m_modifier; + + Modified(DBType raw, String modifier) + { + m_raw = raw; + m_modifier = modifier; + } + + @Override + public String toString() + { + return m_raw.toString() + m_modifier; + } + + @Override + DBType withModifier(String modifier) + { + throw new UnsupportedOperationException( + "withModifier on a Modified"); + } + + @Override + DependTag dependTag() + { + return m_raw.dependTag(); + } + + @Override + public int hashCode() + { + return m_raw.hashCode(); + } + + @Override + DBType leaf() + { + return m_raw.leaf(); + } + } + + static final class Array extends DBType + { + private final DBType m_component; + private final int m_dims; + private final String m_notated; + + Array(DBType component, String notated) + { + assert component instanceof Named + || component instanceof Reserved + 
|| component instanceof Modified; + int dims = 0; + for ( int pos = 0; -1 != (pos = notated.indexOf('[', pos)); ++ pos ) + ++ dims; + m_dims = 0 == dims ? 1 : dims; // "ARRAY" with no [ has dimension 1 + m_notated = notated; + m_component = requireNonNull(component); + } + + @Override + Array asArray(String notated) + { + /* Implementable in principle, but may never be needed */ + throw new UnsupportedOperationException("asArray on an Array"); + } + + @Override + public String toString() + { + return m_component.toString() + m_notated; + } + + @Override + DependTag dependTag() + { + return m_component.dependTag(); + } + + @Override + boolean isArray() + { + return true; + } + + @Override + public int hashCode() + { + return m_component.hashCode(); + } + } + + static final class Defaulting extends DBType + { + private final DBType m_raw; + private final String m_suffix; + + Defaulting(DBType raw, String suffix) + { + assert ! (raw instanceof Defaulting); + m_raw = requireNonNull(raw); + m_suffix = suffix; + } + + @Override + Modified withModifier(String notated) + { + throw new UnsupportedOperationException( + "withModifier on a Defaulting"); + } + + @Override + Array asArray(String notated) + { + throw new UnsupportedOperationException("asArray on a Defaulting"); + } + + @Override + Array withDefault(String suffix) + { + /* Implementable in principle, but may never be needed */ + throw new UnsupportedOperationException( + "withDefault on a Defaulting"); + } + + @Override + public String toString() + { + return m_raw.toString() + " " + m_suffix; + } + + @Override + String toString(boolean withDefault) + { + return withDefault ? 
toString() : m_raw.toString(); + } + + @Override + DependTag dependTag() + { + return m_raw.dependTag(); + } + + @Override + boolean isArray() + { + return m_raw.isArray(); + } + + @Override + public int hashCode() + { + return m_raw.hashCode(); + } + + @Override + DBType leaf() + { + return m_raw.leaf(); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessor.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessor.java index b78e321f3..a289bec49 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessor.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessor.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -9,133 +9,23 @@ * Contributors: * Tada AB * Purdue University + * Chapman Flack */ package org.postgresql.pljava.annotation.processing; -import java.io.IOException; - import java.lang.annotation.Annotation; -import java.lang.reflect.Array; -import java.lang.reflect.Field; -import java.lang.reflect.InvocationTargetException; - -import java.math.BigDecimal; -import java.math.BigInteger; - -import java.sql.ResultSet; -import java.sql.SQLData; -import java.sql.SQLInput; -import java.sql.SQLOutput; -import java.sql.Time; -import java.sql.Timestamp; - -import java.text.BreakIterator; - -import java.time.LocalDate; -import java.time.LocalTime; -import java.time.OffsetTime; -import java.time.LocalDateTime; -import java.time.OffsetDateTime; - -import java.util.AbstractMap; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import static java.util.Collections.unmodifiableSet; -import 
java.util.Comparator; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.HashSet; -import java.util.IdentityHashMap; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.ListIterator; -import java.util.Locale; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.Objects; -import static java.util.Objects.hash; -import static java.util.Objects.requireNonNull; -import java.util.PriorityQueue; -import java.util.Queue; import java.util.Set; -import java.util.function.BiConsumer; -import java.util.function.Supplier; -import static java.util.function.UnaryOperator.identity; - -import java.util.stream.Stream; -import static java.util.stream.Collectors.groupingBy; -import static java.util.stream.Collectors.joining; -import static java.util.stream.Collectors.mapping; -import static java.util.stream.Collectors.toList; -import static java.util.stream.Collectors.toSet; - -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import static java.util.regex.Pattern.compile; - -import javax.annotation.processing.*; +import javax.annotation.processing.AbstractProcessor; +import javax.annotation.processing.ProcessingEnvironment; +import javax.annotation.processing.RoundEnvironment; +import javax.annotation.processing.SupportedAnnotationTypes; +import javax.annotation.processing.SupportedOptions; import javax.lang.model.SourceVersion; -import javax.lang.model.element.AnnotationMirror; -import javax.lang.model.element.AnnotationValue; -import javax.lang.model.element.Element; -import javax.lang.model.element.ElementKind; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Modifier; -import javax.lang.model.element.ModuleElement; -import javax.lang.model.element.NestingKind; -import javax.lang.model.element.Name; import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; - 
-import javax.lang.model.type.ArrayType; -import javax.lang.model.type.DeclaredType; -import javax.lang.model.type.ExecutableType; -import javax.lang.model.type.NoType; -import javax.lang.model.type.PrimitiveType; -import javax.lang.model.type.TypeKind; -import javax.lang.model.type.TypeMirror; - -import javax.lang.model.util.Elements; -import javax.lang.model.util.Types; - -import static javax.lang.model.util.ElementFilter.constructorsIn; -import static javax.lang.model.util.ElementFilter.methodsIn; - -import static javax.tools.Diagnostic.Kind; - -import org.postgresql.pljava.ResultSetHandle; -import org.postgresql.pljava.ResultSetProvider; -import org.postgresql.pljava.TriggerData; - -import org.postgresql.pljava.annotation.Aggregate; -import org.postgresql.pljava.annotation.Cast; -import org.postgresql.pljava.annotation.Function; -import org.postgresql.pljava.annotation.Operator; -import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; -import org.postgresql.pljava.annotation.SQLType; -import org.postgresql.pljava.annotation.Trigger; -import org.postgresql.pljava.annotation.BaseUDT; -import org.postgresql.pljava.annotation.MappedUDT; - -import org.postgresql.pljava.sqlgen.Lexicals; -import static org.postgresql.pljava.sqlgen.Lexicals - .ISO_AND_PG_IDENTIFIER_CAPTURING; -import static org.postgresql.pljava.sqlgen.Lexicals.ISO_REGULAR_IDENTIFIER_PART; -import static org.postgresql.pljava.sqlgen.Lexicals.PG_REGULAR_IDENTIFIER_PART; -import static org.postgresql.pljava.sqlgen.Lexicals.SEPARATOR; -import static org.postgresql.pljava.sqlgen.Lexicals.identifierFrom; -import static org.postgresql.pljava.sqlgen.Lexicals.separator; -import org.postgresql.pljava.sqlgen.Lexicals.Identifier; -import static org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple.pgFold; /** * Annotation processor invoked by the annotations framework in javac for @@ -158,10 +48,31 @@ "ddr.implementor", // implementor when not annotated, default 
"PostgreSQL" "ddr.output" // name of ddr file to write }) -@SupportedSourceVersion(SourceVersion.RELEASE_9) public class DDRProcessor extends AbstractProcessor { private DDRProcessorImpl impl; + + @Override + public SourceVersion getSupportedSourceVersion() + { + /* + * Because this must compile on Java versions back to 9, it must not + * mention by name any SourceVersion constant later than RELEASE_9. + * + * Update latest_tested to be the latest Java release on which this + * annotation processor has been tested without problems. + */ + int latest_tested = 24; + int ordinal_9 = SourceVersion.RELEASE_9.ordinal(); + int ordinal_latest = latest_tested - 9 + ordinal_9; + + SourceVersion latestSupported = SourceVersion.latestSupported(); + + if ( latestSupported.ordinal() <= ordinal_latest ) + return latestSupported; + + return SourceVersion.values()[ordinal_latest]; + } @Override public void init( ProcessingEnvironment processingEnv) @@ -180,7041 +91,3 @@ public boolean process( Set tes, RoundEnvironment re) return impl.process( tes, re); } } - -/** - * Where the work happens. - */ -class DDRProcessorImpl -{ - // Things supplied by the calling framework in ProcessingEnvironment, - // used enough that it makes sense to break them out here with - // short names that all nested classes below will inherit. - // - final Elements elmu; - final Filer filr; - final Locale loca; - final Messager msgr; - final Map opts; - final SourceVersion srcv; - final Types typu; - - // Similarly, the TypeMapper should be easily available to code below. 
- // - final TypeMapper tmpr; - final SnippetTiebreaker snippetTiebreaker; - - // Options obtained from the invocation - // - final Identifier.Simple nameTrusted; - final Identifier.Simple nameUntrusted; - final String output; - final Identifier.Simple defaultImplementor; - final boolean reproducible; - - // Certain known types that need to be recognized in the processed code - // - final DeclaredType TY_ITERATOR; - final DeclaredType TY_OBJECT; - final DeclaredType TY_RESULTSET; - final DeclaredType TY_RESULTSETPROVIDER; - final DeclaredType TY_RESULTSETHANDLE; - final DeclaredType TY_SQLDATA; - final DeclaredType TY_SQLINPUT; - final DeclaredType TY_SQLOUTPUT; - final DeclaredType TY_STRING; - final DeclaredType TY_TRIGGERDATA; - final NoType TY_VOID; - - // Our own annotations - // - final TypeElement AN_FUNCTION; - final TypeElement AN_SQLTYPE; - final TypeElement AN_TRIGGER; - final TypeElement AN_BASEUDT; - final TypeElement AN_MAPPEDUDT; - final TypeElement AN_SQLACTION; - final TypeElement AN_SQLACTIONS; - final TypeElement AN_CAST; - final TypeElement AN_CASTS; - final TypeElement AN_AGGREGATE; - final TypeElement AN_AGGREGATES; - final TypeElement AN_OPERATOR; - final TypeElement AN_OPERATORS; - - // Certain familiar DBTypes (capitalized as this file historically has) - // - final DBType DT_BOOLEAN = new DBType.Reserved("boolean"); - final DBType DT_INTEGER = new DBType.Reserved("integer"); - final DBType DT_RECORD = new DBType.Named( - Identifier.Qualified.nameFromJava("pg_catalog.RECORD")); - final DBType DT_TRIGGER = new DBType.Named( - Identifier.Qualified.nameFromJava("pg_catalog.trigger")); - final DBType DT_VOID = new DBType.Named( - Identifier.Qualified.nameFromJava("pg_catalog.void")); - final DBType DT_ANY = new DBType.Named( - Identifier.Qualified.nameFromJava("pg_catalog.\"any\"")); - final DBType DT_BYTEA = new DBType.Named( - Identifier.Qualified.nameFromJava("pg_catalog.bytea")); - final DBType DT_INTERNAL = new DBType.Named( - 
Identifier.Qualified.nameFromJava("pg_catalog.internal")); - - // Function signatures for certain known functions - // - final DBType[] SIG_TYPMODIN = - { DBType.fromSQLTypeAnnotation("pg_catalog.cstring[]") }; - final DBType[] SIG_TYPMODOUT = { DT_INTEGER }; - final DBType[] SIG_ANALYZE = { DT_INTERNAL }; - - DDRProcessorImpl( ProcessingEnvironment processingEnv) - { - elmu = processingEnv.getElementUtils(); - filr = processingEnv.getFiler(); - loca = processingEnv.getLocale(); - msgr = processingEnv.getMessager(); - opts = processingEnv.getOptions(); - srcv = processingEnv.getSourceVersion(); - typu = processingEnv.getTypeUtils(); - - tmpr = new TypeMapper(); - - String optv; - - optv = opts.get( "ddr.name.trusted"); - if ( null != optv ) - nameTrusted = Identifier.Simple.fromJava(optv); - else - nameTrusted = Identifier.Simple.fromJava("java"); - - optv = opts.get( "ddr.name.untrusted"); - if ( null != optv ) - nameUntrusted = Identifier.Simple.fromJava(optv); - else - nameUntrusted = Identifier.Simple.fromJava("javaU"); - - optv = opts.get( "ddr.implementor"); - if ( null != optv ) - defaultImplementor = "-".equals( optv) ? null : - Identifier.Simple.fromJava(optv); - else - defaultImplementor = Identifier.Simple.fromJava("PostgreSQL"); - - optv = opts.get( "ddr.output"); - if ( null != optv ) - output = optv; - else - output = "pljava.ddr"; - - optv = opts.get( "ddr.reproducible"); - if ( null != optv ) - reproducible = Boolean.parseBoolean( optv); - else - reproducible = true; - - snippetTiebreaker = reproducible ? 
new SnippetTiebreaker() : null; - - TY_ITERATOR = declaredTypeForClass(java.util.Iterator.class); - TY_OBJECT = declaredTypeForClass(Object.class); - TY_RESULTSET = declaredTypeForClass(java.sql.ResultSet.class); - TY_RESULTSETPROVIDER = declaredTypeForClass(ResultSetProvider.class); - TY_RESULTSETHANDLE = declaredTypeForClass(ResultSetHandle.class); - TY_SQLDATA = declaredTypeForClass(SQLData.class); - TY_SQLINPUT = declaredTypeForClass(SQLInput.class); - TY_SQLOUTPUT = declaredTypeForClass(SQLOutput.class); - TY_STRING = declaredTypeForClass(String.class); - TY_TRIGGERDATA = declaredTypeForClass(TriggerData.class); - TY_VOID = typu.getNoType(TypeKind.VOID); - - AN_FUNCTION = elmu.getTypeElement( Function.class.getName()); - AN_SQLTYPE = elmu.getTypeElement( SQLType.class.getName()); - AN_TRIGGER = elmu.getTypeElement( Trigger.class.getName()); - AN_BASEUDT = elmu.getTypeElement( BaseUDT.class.getName()); - AN_MAPPEDUDT = elmu.getTypeElement( MappedUDT.class.getName()); - - // Repeatable annotations and their containers. - // - AN_SQLACTION = elmu.getTypeElement( SQLAction.class.getName()); - AN_SQLACTIONS = elmu.getTypeElement( SQLActions.class.getName()); - AN_CAST = elmu.getTypeElement( Cast.class.getName()); - AN_CASTS = elmu.getTypeElement( - Cast.Container.class.getCanonicalName()); - AN_AGGREGATE = elmu.getTypeElement( Aggregate.class.getName()); - AN_AGGREGATES = elmu.getTypeElement( - Aggregate.Container.class.getCanonicalName()); - AN_OPERATOR = elmu.getTypeElement( Operator.class.getName()); - AN_OPERATORS = elmu.getTypeElement( - Operator.Container.class.getCanonicalName()); - } - - void msg( Kind kind, String fmt, Object... args) - { - msgr.printMessage( kind, String.format( fmt, args)); - } - - void msg( Kind kind, Element e, String fmt, Object... args) - { - msgr.printMessage( kind, String.format( fmt, args), e); - } - - void msg( Kind kind, Element e, AnnotationMirror a, - String fmt, Object... 
args) - { - msgr.printMessage( kind, String.format( fmt, args), e, a); - } - - void msg( Kind kind, Element e, AnnotationMirror a, AnnotationValue v, - String fmt, Object... args) - { - msgr.printMessage( kind, String.format( fmt, args), e, a, v); - } - - /** - * Map a {@code Class} to a {@code TypeElement} and from there to a - * {@code DeclaredType}. - *

    - * This needs to work around some weird breakage in javac 10 and 11 when - * given a {@code --release} option naming an earlier release, as described - * in commit c763cee. The version of of {@code getTypeElement} with a module - * parameter is needed then, because the other version will go bonkers and - * think it found the class in every module that transitively requires - * its actual module and then return null because the result wasn't - * unique. That got fixed in Java 12, but because 11 is the LTS release and - * there won't be another for a while yet, it is better to work around the - * issue here. - *

    - * If not supporting Java 10 or 11, this could be simplified to - * {@code typu.getDeclaredType(elmu.getTypeElement(className))}. - */ - private DeclaredType declaredTypeForClass(Class clazz) - { - String className = clazz.getName(); - String moduleName = clazz.getModule().getName(); - - TypeElement e; - - if ( null == moduleName ) - e = elmu.getTypeElement(className); - else - { - ModuleElement m = elmu.getModuleElement(moduleName); - if ( null == m ) - e = elmu.getTypeElement(className); - else - e = elmu.getTypeElement(m, className); - } - - requireNonNull(e, - () -> "unexpected failure to resolve TypeElement " + className); - - DeclaredType t = typu.getDeclaredType(e); - - requireNonNull(t, - () -> "unexpected failure to resolve DeclaredType " + e); - - return t; - } - - /** - * Key usable in a mapping from (Object, Snippet-subtype) to Snippet. - * Because there's no telling in which order a Map implementation will - * compare two keys, the class matches if either one is assignable to - * the other. That's ok as long as the Snippet-subtype is never Snippet - * itself, no Object ever has two Snippets hung on it where one extends - * the other, and getSnippet is always called for the widest of any of - * the types it may retrieve. - */ - static final class SnippetsKey - { - final Object o; - final Class c; - SnippetsKey(Object o, Class c) - { - assert Snippet.class != c : "Snippet key must be a subtype"; - this.o = o; - this.c = c; - } - public boolean equals(Object oth) - { - if ( ! (oth instanceof SnippetsKey) ) - return false; - SnippetsKey osk = (SnippetsKey)oth; - return o.equals( osk.o) - && ( c.isAssignableFrom( osk.c) || osk.c.isAssignableFrom( c) ); - } - public int hashCode() - { - return o.hashCode(); // must not depend on c (subtypes will match) - } - } - - /** - * Collection of code snippets being accumulated (possibly over more than - * one round), keyed by the object for which each snippet has been - * generated. 
- */ - /* - * This is a LinkedHashMap so that the order of handling annotation types - * in process() below will be preserved in calling their characterize() - * methods at end-of-round, and so, for example, characterize() on a Cast - * can use values set by characterize() on an associated Function. - */ - Map snippets = new LinkedHashMap<>(); - - S getSnippet(Object o, Class c, Supplier ctor) - { - return - c.cast(snippets - .computeIfAbsent(new SnippetsKey( o, c), k -> ctor.get())); - } - - void putSnippet( Object o, Snippet s) - { - snippets.put( new SnippetsKey( o, s.getClass()), s); - } - - /** - * Queue on which snippets are entered in preparation for topological - * ordering. Has to be an instance field because populating the queue - * (which involves invoking the snippets' characterize methods) cannot - * be left to generateDescriptor, which runs in the final round. This is - * (AFAICT) another workaround for javac 7's behavior of throwing away - * symbol tables between rounds; when characterize was invoked in - * generateDescriptor, any errors reported were being shown with no source - * location info, because it had been thrown away. - */ - List> snippetVPairs = new ArrayList<>(); - - /** - * Map from each arbitrary provides/requires label to the snippet - * that 'provides' it (snippets, in some cases). Has to be out here as an - * instance field for the same reason {@code snippetVPairs} does. - *

    - * Originally limited each tag to have only one provider; that is still - * enforced for implicitly-generated tags, but relaxed for explicit ones - * supplied in annotations, hence the list. - */ - Map>> provider = new HashMap<>(); - - /** - * Find the elements in each round that carry any of the annotations of - * interest and generate code snippets accordingly. On the last round, with - * all processing complete, generate the deployment descriptor file. - */ - boolean process( Set tes, RoundEnvironment re) - { - boolean functionPresent = false; - boolean sqlActionPresent = false; - boolean baseUDTPresent = false; - boolean mappedUDTPresent = false; - boolean castPresent = false; - boolean aggregatePresent = false; - boolean operatorPresent = false; - - boolean willClaim = true; - - for ( TypeElement te : tes ) - { - if ( AN_FUNCTION.equals( te) ) - functionPresent = true; - else if ( AN_BASEUDT.equals( te) ) - baseUDTPresent = true; - else if ( AN_MAPPEDUDT.equals( te) ) - mappedUDTPresent = true; - else if ( AN_SQLTYPE.equals( te) ) - ; // these are handled within FunctionImpl - else if ( AN_SQLACTION.equals( te) || AN_SQLACTIONS.equals( te) ) - sqlActionPresent = true; - else if ( AN_CAST.equals( te) || AN_CASTS.equals( te) ) - castPresent = true; - else if ( AN_AGGREGATE.equals( te) || AN_AGGREGATES.equals( te) ) - aggregatePresent = true; - else if ( AN_OPERATOR.equals( te) || AN_OPERATORS.equals( te) ) - operatorPresent = true; - else - { - msg( Kind.WARNING, te, - "PL/Java annotation processor version may be older than " + - "this annotation:\n%s", te.toString()); - willClaim = false; - } - } - - if ( baseUDTPresent ) - for ( Element e : re.getElementsAnnotatedWith( AN_BASEUDT) ) - processUDT( e, UDTKind.BASE); - - if ( mappedUDTPresent ) - for ( Element e : re.getElementsAnnotatedWith( AN_MAPPEDUDT) ) - processUDT( e, UDTKind.MAPPED); - - if ( functionPresent ) - for ( Element e : re.getElementsAnnotatedWith( AN_FUNCTION) ) - processFunction( e); - - 
if ( sqlActionPresent ) - for ( Element e - : re.getElementsAnnotatedWithAny( AN_SQLACTION, AN_SQLACTIONS) ) - processRepeatable( - e, AN_SQLACTION, AN_SQLACTIONS, SQLActionImpl.class, null); - - if ( castPresent ) - for ( Element e - : re.getElementsAnnotatedWithAny( AN_CAST, AN_CASTS) ) - processRepeatable( - e, AN_CAST, AN_CASTS, CastImpl.class, null); - - if ( operatorPresent ) - for ( Element e - : re.getElementsAnnotatedWithAny( AN_OPERATOR, AN_OPERATORS) ) - processRepeatable( - e, AN_OPERATOR, AN_OPERATORS, OperatorImpl.class, - this::operatorPreSynthesize); - - if ( aggregatePresent ) - for ( Element e - : re.getElementsAnnotatedWithAny( AN_AGGREGATE, AN_AGGREGATES) ) - processRepeatable( - e, AN_AGGREGATE, AN_AGGREGATES, AggregateImpl.class, null); - - tmpr.workAroundJava7Breakage(); // perhaps to be fixed in Java 9? nope. - - if ( ! re.processingOver() ) - defensiveEarlyCharacterize(); - else if ( ! re.errorRaised() ) - generateDescriptor(); - - return willClaim; - } - - /** - * Iterate over collected snippets, characterize them, and enter them - * (if no error) in the data structures for topological ordering. Was - * originally the first part of {@code generateDescriptor}, but that is - * run in the final round, which is too late for javac 7 anyway, which - * throws symbol tables away between rounds. Any errors reported from - * characterize were being shown without source locations, because the - * information was gone. This may now be run more than once, so the - * {@code snippets} map is cleared before returning. - */ - void defensiveEarlyCharacterize() - { - for ( Snippet snip : snippets.values() ) - { - Set ready = snip.characterize(); - for ( Snippet readySnip : ready ) - { - VertexPair v = new VertexPair<>( readySnip); - snippetVPairs.add( v); - for ( DependTag t : readySnip.provideTags() ) - { - List> ps = - provider.computeIfAbsent(t, k -> new ArrayList<>()); - /* - * Explicit tags are allowed more than one provider. 
- */ - if ( t instanceof DependTag.Explicit || ps.isEmpty() ) - ps.add(v); - else - msg(Kind.ERROR, "tag %s has more than one provider", t); - } - } - } - snippets.clear(); - } - - /** - * Arrange the collected snippets into a workable sequence (nothing with - * requires="X" can come before whatever has provides="X"), then create - * a deployment descriptor file in proper form. - */ - void generateDescriptor() - { - boolean errorRaised = false; - Set fwdConsumers = new HashSet<>(); - Set revConsumers = new HashSet<>(); - - for ( VertexPair v : snippetVPairs ) - { - List> ps; - - /* - * First handle the implicit requires(implementor()). This is unlike - * the typical provides/requires relationship, in that it does not - * reverse when generating the 'remove' actions. Conditions that - * determined what got installed must also be evaluated early and - * determine what gets removed. - */ - Identifier.Simple impName = v.payload().implementorName(); - DependTag imp = v.payload().implementorTag(); - if ( null != imp ) - { - ps = provider.get( imp); - if ( null != ps ) - { - fwdConsumers.add( imp); - revConsumers.add( imp); - - ps.forEach(p -> - { - p.fwd.precede( v.fwd); - p.rev.precede( v.rev); - - /* - * A snippet providing an implementor tag probably has - * no undeployStrings, because its deployStrings should - * be used on both occasions; if so, replace it with a - * proxy that returns deployStrings for undeployStrings. - */ - if ( 0 == p.rev.payload.undeployStrings().length ) - p.rev.payload = new ImpProvider( p.rev.payload); - }); - } - else if ( ! defaultImplementor.equals( impName, msgr) ) - { - /* - * Don't insist that every implementor tag have a provider - * somewhere in the code. Perhaps the environment will - * provide it at load time. 
If this is not the default - * implementor, bump the relying vertices' indegree anyway - * so the snippet won't be emitted until the cycle-breaker - * code (see below) sets it free after any others that - * can be handled first. - */ - ++ v.fwd.indegree; - ++ v.rev.indegree; - } - } - for ( DependTag s : v.payload().requireTags() ) - { - ps = provider.get( s); - if ( null != ps ) - { - fwdConsumers.add( s); - revConsumers.add( s); - ps.forEach(p -> - { - p.fwd.precede( v.fwd); - v.rev.precede( p.rev); // these relationships do reverse - }); - } - else if ( s instanceof DependTag.Explicit ) - { - msg( Kind.ERROR, - "tag \"%s\" is required but nowhere provided", s); - errorRaised = true; - } - } - } - - if ( errorRaised ) - return; - - Queue> fwdBlocked = new LinkedList<>(); - Queue> revBlocked = new LinkedList<>(); - - Queue> fwdReady; - Queue> revReady; - if ( reproducible ) - { - fwdReady = new PriorityQueue<>( 11, snippetTiebreaker); - revReady = new PriorityQueue<>( 11, snippetTiebreaker); - } - else - { - fwdReady = new LinkedList<>(); - revReady = new LinkedList<>(); - } - - for ( VertexPair vp : snippetVPairs ) - { - Vertex v = vp.fwd; - if ( 0 == v.indegree ) - fwdReady.add( v); - else - fwdBlocked.add( v); - v = vp.rev; - if ( 0 == v.indegree ) - revReady.add( v); - else - revBlocked.add( v); - } - - Snippet[] fwdSnips = order( fwdReady, fwdBlocked, fwdConsumers, true); - Snippet[] revSnips = order( revReady, revBlocked, revConsumers, false); - - if ( null == fwdSnips || null == revSnips ) - return; // error already reported - - try - { - DDRWriter.emit( fwdSnips, revSnips, this); - } - catch ( IOException ioe ) - { - msg( Kind.ERROR, "while writing %s: %s", output, ioe.getMessage()); - } - } - - /** - * Given a Snippet DAG, either the forward or reverse one, return the - * snippets in a workable order. - * @return Array of snippets in order, or null if no suitable order could - * be found. 
- */ - Snippet[] order( - Queue> ready, Queue> blocked, - Set consumer, boolean deploying) - { - ArrayList snips = new ArrayList<>(ready.size()+blocked.size()); - Vertex cycleBreaker = null; - -queuerunning: - for ( ; ; ) - { - while ( ! ready.isEmpty() ) - { - Vertex v = ready.remove(); - snips.add(v.payload); - v.use(ready, blocked); - for ( DependTag p : v.payload.provideTags() ) - consumer.remove(p); - } - if ( blocked.isEmpty() ) - break; // all done - - /* - * There are snippets remaining to output but they all have - * indegree > 0, normally a 'cycle' error. But some may have - * breakCycle methods that can help. Add any vertices they return - * onto the ready queue (all at once, so that for reproducible - * builds, the ready queue's ordering constraints will take effect). - */ - boolean cycleBroken = false; - for ( Iterator> it = blocked.iterator(); - it.hasNext(); ) - { - Vertex v = it.next(); - cycleBreaker = v.payload.breakCycle(v, deploying); - if ( null == cycleBreaker ) - continue; - /* - * If v supplied another vertex to go on the ready queue, leave - * v on the blocked queue; it should become ready in due course. - * If v nominated itself as cycle breaker, remove from blocked. - */ - if ( cycleBreaker == v ) - it.remove(); - ready.add(cycleBreaker); - cycleBroken = true; - } - if ( cycleBroken ) - continue; - - /* - * A cycle was detected and no snippet's breakCycle method broke it, - * but there may yet be a way. Somewhere there may be a vertex - * with indegree exactly 1 and an implicit requirement of its - * own implementor tag, with no snippet on record to provide it. - * That's allowed (maybe the installing/removing environment will - * be "providing" that tag anyway), so set one such snippet free - * and see how much farther we get. 
- */ - for ( Iterator> it = blocked.iterator(); - it.hasNext(); ) - { - Vertex v = it.next(); - if ( 1 < v.indegree ) - continue; - Identifier.Simple impName = v.payload.implementorName(); - if ( null == impName - || defaultImplementor.equals( impName, msgr) ) - continue; - if ( provider.containsKey( v.payload.implementorTag()) ) - continue; - if ( reproducible ) - { - if (null == cycleBreaker || - 0 < snippetTiebreaker.compare(cycleBreaker, v)) - cycleBreaker = v; - } - else - { - -- v.indegree; - it.remove(); - ready.add( v); - continue queuerunning; - } - } - if ( null != cycleBreaker ) - { - blocked.remove( cycleBreaker); - -- cycleBreaker.indegree; - ready.add( cycleBreaker); - cycleBreaker = null; - continue; - } - /* - * Got here? It's a real cycle ... nothing to be done. - */ - for ( DependTag s : consumer ) - msg( Kind.ERROR, "requirement in a cycle: %s", s); - return null; - } - return snips.toArray(new Snippet[snips.size()]); - } - - void putRepeatableSnippet(Element e, T snip) - { - if ( null != snip ) - putSnippet( snip, (Snippet)snip); - } - - /** - * Process an element carrying a repeatable annotation, the container - * of that repeatable annotation, or both. - *

    - * Snippets corresponding to repeatable annotations might not be entered in the - * {@code snippets} map keyed by the target element, as that might not be - * unique. Each populated snippet is passed to putter along with - * the element it annotates, and putter determines what to do with - * it. If putter is null, the default enters the snippet with a key - * made from its class and itself, as typical repeatable snippets are are - * not expected to be looked up, only processed when all of the map entries - * are enumerated. - *

    - * After all snippets of the desired class have been processed for a given - * element, a final call to putter is made passing the element and - * null for the snippet. - */ - void processRepeatable( - Element e, TypeElement annot, TypeElement container, Class clazz, - BiConsumer putter) - { - if ( null == putter ) - putter = this::putRepeatableSnippet; - - for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) - { - Element asElement = am.getAnnotationType().asElement(); - if ( asElement.equals( annot) ) - { - T snip; - try - { - snip = clazz.getDeclaredConstructor( DDRProcessorImpl.class, - Element.class, AnnotationMirror.class) - .newInstance( DDRProcessorImpl.this, e, am); - } - catch ( ReflectiveOperationException re ) - { - throw new RuntimeException( - "Incorrect implementation of annotation processor", re); - } - populateAnnotationImpl( snip, e, am); - putter.accept( e, snip); - } - else if ( asElement.equals( container) ) - { - Container c = new Container<>(clazz); - populateAnnotationImpl( c, e, am); - for ( T snip : c.value() ) - putter.accept( e, snip); - } - } - - putter.accept( e, null); - } - - static enum UDTKind { BASE, MAPPED } - - /** - * Process a single element annotated with @BaseUDT or @MappedUDT, as - * indicated by the UDTKind k. - */ - void processUDT( Element e, UDTKind k) - { - /* - * The allowed target type for the UDT annotations is TYPE, which can - * be a class, interface (including annotation type) or enum, of which - * only CLASS is valid here. If it is anything else, just return, as - * that can only mean a source error prevented the compiler making sense - * of it, and the compiler will have its own messages about that. - */ - switch ( e.getKind() ) - { - case CLASS: - break; - case ANNOTATION_TYPE: - case ENUM: - case INTERFACE: - msg( Kind.ERROR, e, "A PL/Java UDT must be a class"); - default: - return; - } - Set mods = e.getModifiers(); - if ( ! 
mods.contains( Modifier.PUBLIC) ) - { - msg( Kind.ERROR, e, "A PL/Java UDT must be public"); - } - if ( mods.contains( Modifier.ABSTRACT) ) - { - msg( Kind.ERROR, e, "A PL/Java UDT must not be abstract"); - } - if ( ! ((TypeElement)e).getNestingKind().equals( - NestingKind.TOP_LEVEL) ) - { - if ( ! mods.contains( Modifier.STATIC) ) - { - msg( Kind.ERROR, e, - "When nested, a PL/Java UDT must be static (not inner)"); - } - for ( Element ee = e; null != ( ee = ee.getEnclosingElement() ); ) - { - if ( ! ee.getModifiers().contains( Modifier.PUBLIC) ) - msg( Kind.ERROR, ee, - "A PL/Java UDT must not have a non-public " + - "enclosing class"); - if ( ((TypeElement)ee).getNestingKind().equals( - NestingKind.TOP_LEVEL) ) - break; - } - } - - switch ( k ) - { - case BASE: - BaseUDTImpl bu = getSnippet( e, BaseUDTImpl.class, () -> - new BaseUDTImpl( (TypeElement)e)); - for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) - { - if ( am.getAnnotationType().asElement().equals( AN_BASEUDT) ) - populateAnnotationImpl( bu, e, am); - } - bu.registerFunctions(); - break; - - case MAPPED: - MappedUDTImpl mu = getSnippet( e, MappedUDTImpl.class, () -> - new MappedUDTImpl( (TypeElement)e)); - for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) - { - if ( am.getAnnotationType().asElement().equals( AN_MAPPEDUDT) ) - populateAnnotationImpl( mu, e, am); - } - mu.registerMapping(); - break; - } - } - - ExecutableElement huntFor(List ees, String name, - boolean isStatic, TypeMirror retType, TypeMirror... paramTypes) - { - ExecutableElement quarry = null; -hunt: for ( ExecutableElement ee : ees ) - { - if ( null != name && ! ee.getSimpleName().contentEquals( name) ) - continue; - if ( ee.isVarArgs() ) - continue; - if ( null != retType - && ! 
typu.isSameType( ee.getReturnType(), retType) ) - continue; - List pts = - ((ExecutableType)ee.asType()).getParameterTypes(); - if ( pts.size() != paramTypes.length ) - continue; - for ( int i = 0; i < paramTypes.length; ++i ) - if ( ! typu.isSameType( pts.get( i), paramTypes[i]) ) - continue hunt; - Set mods = ee.getModifiers(); - if ( ! mods.contains( Modifier.PUBLIC) ) - continue; - if ( isStatic && ! mods.contains( Modifier.STATIC) ) - continue; - if ( null == quarry ) - quarry = ee; - else - { - msg( Kind.ERROR, ee, - "Found more than one candidate " + - (null == name ? "constructor" : (name + " method"))); - } - } - return quarry; - } - - /** - * Process a single element annotated with @Function. After checking that - * it has the right modifiers to be called via PL/Java, analyze its type - * information and annotations and register an appropriate SQL code snippet. - */ - void processFunction( Element e) - { - /* - * METHOD is the only target type allowed for the Function annotation, - * so the only way for e to be anything else is if some source error has - * prevented the compiler making sense of it. In that case just return - * silently on the assumption that the compiler will have its own - * message about the true problem. - */ - if ( ! ElementKind.METHOD.equals( e.getKind()) ) - return; - - Set mods = e.getModifiers(); - if ( ! mods.contains( Modifier.PUBLIC) ) - { - msg( Kind.ERROR, e, "A PL/Java function must be public"); - } - - for ( Element ee = e; null != ( ee = ee.getEnclosingElement() ); ) - { - if ( ElementKind.CLASS.equals( ee.getKind()) ) - { - if ( ! 
ee.getModifiers().contains( Modifier.PUBLIC) ) - msg( Kind.ERROR, ee, - "A PL/Java function must not have a non-public " + - "enclosing class"); - if ( ((TypeElement)ee).getNestingKind().equals( - NestingKind.TOP_LEVEL) ) - break; - } - } - - FunctionImpl f = getSnippet( e, FunctionImpl.class, () -> - new FunctionImpl( (ExecutableElement)e)); - for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) - { - if ( am.getAnnotationType().asElement().equals( AN_FUNCTION) ) - populateAnnotationImpl( f, e, am); - } - } - - /** - * Populate an array of specified type from an annotation value - * representing an array. - * - * AnnotationValue's getValue() method returns Object, where the - * object is known to be an instance of one of a small set of classes. - * Populating an array when that value represents one is a common - * operation, so it is factored out here. - */ - static T[] avToArray( Object o, Class k) - { - boolean isEnum = k.isEnum(); - - @SuppressWarnings({"unchecked"}) - List vs = (List)o; - - @SuppressWarnings({"unchecked"}) - T[] a = (T[])Array.newInstance( k, vs.size()); - - int i = 0; - for ( AnnotationValue av : vs ) - { - Object v = getValue( av); - if ( isEnum ) - { - @SuppressWarnings({"unchecked"}) - T t = (T)Enum.valueOf( k.asSubclass( Enum.class), - ((VariableElement)v).getSimpleName().toString()); - a[i++] = t; - } - else - a[i++] = k.cast( v); - } - return a; - } - - /** - * Abstract superclass for synthetic implementations of annotation - * interfaces; these can be populated with element-value pairs from - * an AnnotationMirror and then used in the natural way for access to - * the values. Each subclass of this should implement the intended - * annotation interface, and should also have a - * setFoo(Object,boolean,Element) method for each foo() method in the - * interface. Rather than longwindedly using the type system to enforce - * that the needed setter methods are all there, they will be looked - * up using reflection. 
- */ - class AbstractAnnotationImpl implements Annotation - { - private Set m_provideTags = new HashSet<>(); - private Set m_requireTags = new HashSet<>(); - - @Override - public Class annotationType() - { - throw new UnsupportedOperationException(); - } - - /** - * Supply the required implementor() method for those subclasses - * that will implement {@link Snippet}. - */ - public String implementor() - { - return null == _implementor ? null : _implementor.pgFolded(); - } - - /** - * Supply the required implementor() method for those subclasses - * that will implement {@link Snippet}. - */ - public Identifier.Simple implementorName() - { - return _implementor; - } - - Identifier.Simple _implementor = defaultImplementor; - String _comment; - boolean commentDerived; - - public void setImplementor( Object o, boolean explicit, Element e) - { - if ( explicit ) - _implementor = "".equals( o) ? null : - Identifier.Simple.fromJava((String)o, msgr); - } - - @Override - public String toString() - { - return String.format( - "(%s)%s", getClass().getSimpleName(), _comment); - } - - public String comment() { return _comment; } - - public void setComment( Object o, boolean explicit, Element e) - { - if ( explicit ) - { - _comment = (String)o; - if ( "".equals( _comment) ) - _comment = null; - } - else - { - _comment = ((Commentable)this).derivedComment( e); - commentDerived = true; - } - } - - protected void replaceCommentIfDerived( String comment) - { - if ( ! 
commentDerived ) - return; - commentDerived = false; - _comment = comment; - } - - public String derivedComment( Element e) - { - String dc = elmu.getDocComment( e); - if ( null == dc ) - return null; - return firstSentence( dc); - } - - public String firstSentence( String s) - { - BreakIterator bi = BreakIterator.getSentenceInstance( loca); - bi.setText( s); - int start = bi.first(); - int end = bi.next(); - if ( BreakIterator.DONE == end ) - return null; - return s.substring( start, end).trim(); - } - - /** - * Called by a snippet's {@code characterize} method to install its - * explicit, annotation-supplied 'provides' / 'requires' strings, if - * any, into the {@code provideTags} and {@code requireTags} sets, then - * making those sets immutable. - */ - protected void recordExplicitTags(String[] provides, String[] requires) - { - if ( null != provides ) - for ( String s : provides ) - m_provideTags.add(new DependTag.Explicit(s)); - if ( null != requires ) - for ( String s : requires ) - m_requireTags.add(new DependTag.Explicit(s)); - m_provideTags = unmodifiableSet(m_provideTags); - m_requireTags = unmodifiableSet(m_requireTags); - } - - /** - * Return the set of 'provide' tags, mutable before - * {@code recordExplicitTags} has been called, immutable thereafter. - */ - public Set provideTags() - { - return m_provideTags; - } - - /** - * Return the set of 'require' tags, mutable before - * {@code recordExplicitTags} has been called, immutable thereafter. - */ - public Set requireTags() - { - return m_requireTags; - } - } - - class Repeatable extends AbstractAnnotationImpl - { - final Element m_targetElement; - final AnnotationMirror m_origin; - - Repeatable(Element e, AnnotationMirror am) - { - m_targetElement = e; - m_origin = am; - } - } - - /** - * Populate an AbstractAnnotationImpl-derived Annotation implementation - * from the element-value pairs in an AnnotationMirror. 
For each element - * foo in the annotation interface, the implementation is assumed to have - * a method setFoo(Object o, boolean explicit, element e) where o is the - * element's value as obtained from AnnotationValue.getValue(), explicit - * indicates whether the element was explicitly present in the annotation - * or filled in from a default value, and e is the element carrying the - * annotation (chiefly for use as a location hint in diagnostic messages). - * - * Some of the annotation implementations below will leave certain elements - * null if they were not given explicit values, in order to have a clear - * indication that they were defaulted, even though that is not the way - * normal annotation objects behave. - * - * If a setFoo(Object o, boolean explicit, element e) method is not found - * but there is an accessible field _foo it will be set directly, but only - * if the value was explicitly present in the annotation or the field value - * is null. By this convention, an implementation can declare a field - * initially null and let its default value be filled in from what the - * annotation declares, or initially some non-null value distinct from - * possible annotation values, and be able to tell whether it was explicitly - * set. Note that a field of primitive type will never be seen as null. - */ - void populateAnnotationImpl( - AbstractAnnotationImpl inst, Element e, AnnotationMirror am) - { - Map explicit = - am.getElementValues(); - Map defaulted = - elmu.getElementValuesWithDefaults( am); - - // Astonishingly, even though JLS3 9.7 clearly says "annotations must - // contain an element-value pair for every element of the corresponding - // annotation type, except for those elements with default values, or a - // compile-time error occurs" - in Sun 1.6.0_39 javac never flags - // the promised error, and instead allows us to NPE on something that - // ought to be guaranteed to be there! 
>:[ - // - // If you want something done right, you have to do it yourself.... - // - - Element anne = am.getAnnotationType().asElement(); - List keys = methodsIn( anne.getEnclosedElements()); - for ( ExecutableElement k : keys ) - if ( ! defaulted.containsKey( k) ) - msg( Kind.ERROR, e, am, - "annotation missing required element \"%s\"", - k.getSimpleName()); - - for ( - Map.Entry me - : defaulted.entrySet() - ) - { - ExecutableElement k = me.getKey(); - AnnotationValue av = me.getValue(); - boolean isExplicit = explicit.containsKey( k); - String name = k.getSimpleName().toString(); - Class kl = inst.getClass(); - try - { - Object v = getValue( av); - kl.getMethod( // let setter for foo() be setFoo() - "set"+name.substring( 0, 1).toUpperCase() + - name.substring( 1), - Object.class, boolean.class, Element.class) - .invoke(inst, v, isExplicit, e); - } - catch (AnnotationValueException ave) - { - msg( Kind.ERROR, e, am, - "unresolved value for annotation member \"%s\"" + - " (check for missing/misspelled import, etc.)", - name); - } - catch (NoSuchMethodException nsme) - { - Object v = getValue( av); - try - { - Field f = kl.getField( "_"+name); - Class fkl = f.getType(); - if ( ! 
isExplicit && null != f.get( inst) ) - continue; - if ( fkl.isArray() ) - { - try { - f.set( inst, avToArray( v, fkl.getComponentType())); - } - catch (AnnotationValueException ave) - { - msg( Kind.ERROR, e, am, - "unresolved value for an element of annotation" + - " member \"%s\" (check for missing/misspelled" + - " import, etc.)", - name); - } - } - else if ( fkl.isEnum() ) - { - @SuppressWarnings("unchecked") - Object t = Enum.valueOf( fkl.asSubclass( Enum.class), - ((VariableElement)v).getSimpleName().toString()); - f.set( inst, t); - } - else - f.set( inst, v); - nsme = null; - } - catch (NoSuchFieldException | IllegalAccessException ex) { } - if ( null != nsme ) - throw new RuntimeException( - "Incomplete implementation in annotation processor", - nsme); - } - catch (IllegalAccessException iae) - { - throw new RuntimeException( - "Incorrect implementation of annotation processor", iae); - } - catch (InvocationTargetException ite) - { - String msg = ite.getCause().getMessage(); - msg( Kind.ERROR, e, am, av, "%s", msg); - } - } - } - - // It could be nice to have another annotation-driven tool that could just - // generate these implementations of some annotation types.... 
- - class SQLTypeImpl extends AbstractAnnotationImpl implements SQLType - { - public String value() { return _value; } - public String[] defaultValue() { return _defaultValue; } - public boolean optional() { return Boolean.TRUE.equals(_optional); } - public String name() { return _name; } - - String _value; - String[] _defaultValue; - String _name; - Boolean _optional; // boxed so it can be null if not explicit - - public void setValue( Object o, boolean explicit, Element e) - { - if ( explicit ) - _value = (String)o; - } - - public void setDefaultValue( Object o, boolean explicit, Element e) - { - if ( explicit ) - _defaultValue = avToArray( o, String.class); - } - - public void setOptional( Object o, boolean explicit, Element e) - { - if ( explicit ) - _optional = (Boolean)o; - } - - public void setName( Object o, boolean explicit, Element e) - { - if ( ! explicit ) - return; - - _name = (String)o; - if ( _name.startsWith( "\"") - && ! Lexicals.ISO_DELIMITED_IDENTIFIER.matcher( _name).matches() - ) - msg( Kind.WARNING, e, "malformed parameter name: %s", _name); - } - } - - class Container - extends AbstractAnnotationImpl - { - public T[] value() { return _value; } - - T[] _value; - final Class _clazz; - - Container(Class clazz) - { - _clazz = clazz; - } - - public void setValue( Object o, boolean explicit, Element e) - { - AnnotationMirror[] ams = avToArray( o, AnnotationMirror.class); - - @SuppressWarnings("unchecked") - T[] t = (T[])Array.newInstance( _clazz, ams.length); - _value = t; - - int i = 0; - for ( AnnotationMirror am : ams ) - { - try - { - T a = _clazz.getDeclaredConstructor(DDRProcessorImpl.class, - Element.class, AnnotationMirror.class) - .newInstance(DDRProcessorImpl.this, e, am); - populateAnnotationImpl( a, e, am); - _value [ i++ ] = a; - } - catch ( ReflectiveOperationException re ) - { - throw new RuntimeException( - "Incorrect implementation of annotation processor", re); - } - } - } - } - - class SQLActionImpl - extends Repeatable - 
implements SQLAction, Snippet - { - SQLActionImpl(Element e, AnnotationMirror am) - { - super(e, am); - } - - public String[] install() { return _install; } - public String[] remove() { return _remove; } - public String[] provides() { return _provides; } - public String[] requires() { return _requires; } - - public String[] deployStrings() { return _install; } - public String[] undeployStrings() { return _remove; } - - public String[] _install; - public String[] _remove; - public String[] _provides; - public String[] _requires; - - public Set characterize() - { - recordExplicitTags(_provides, _requires); - return Set.of(this); - } - } - - class TriggerImpl - extends AbstractAnnotationImpl - implements Trigger, Snippet, Commentable - { - public String[] arguments() { return _arguments; } - public Constraint constraint() { return _constraint; } - public Event[] events() { return _events; } - public String fromSchema() { return _fromSchema; } - public String from() { return _from; } - public String name() { return _name; } - public String schema() { return _schema; } - public String table() { return _table; } - public Scope scope() { return _scope; } - public Called called() { return _called; } - public String when() { return _when; } - public String[] columns() { return _columns; } - public String tableOld() { return _tableOld; } - public String tableNew() { return _tableNew; } - - public String[] provides() { return new String[0]; } - public String[] requires() { return new String[0]; } - /* Trigger is a Snippet but doesn't directly participate in tsort */ - - public String[] _arguments; - public Constraint _constraint; - public Event[] _events; - public String _fromSchema; - public String _from; - public String _name; - public String _schema; - public String _table; - public Scope _scope; - public Called _called; - public String _when; - public String[] _columns; - public String _tableOld; - public String _tableNew; - - FunctionImpl func; - AnnotationMirror origin; 
- - boolean refOld; - boolean refNew; - boolean isConstraint = false; - - /* The only values of the Constraint enum are those applicable to - * constraint triggers. To determine whether this IS a constraint - * trigger or not, use the 'explicit' parameter to distinguish whether - * the 'constraint' attribute was or wasn't seen in the annotation. - */ - public void setConstraint( Object o, boolean explicit, Element e) - { - if ( explicit ) - { - isConstraint = true; - _constraint = Constraint.valueOf( - ((VariableElement)o).getSimpleName().toString()); - } - } - - TriggerImpl( FunctionImpl f, AnnotationMirror am) - { - func = f; - origin = am; - } - - public Set characterize() - { - if ( Scope.ROW.equals( _scope) ) - { - for ( Event e : _events ) - if ( Event.TRUNCATE.equals( e) ) - msg( Kind.ERROR, func.func, origin, - "TRUNCATE trigger cannot be FOR EACH ROW"); - } - else if ( Called.INSTEAD_OF.equals( _called) ) - msg( Kind.ERROR, func.func, origin, - "INSTEAD OF trigger cannot be FOR EACH STATEMENT"); - - if ( ! "".equals( _when) && Called.INSTEAD_OF.equals( _called) ) - msg( Kind.ERROR, func.func, origin, - "INSTEAD OF triggers do not support WHEN conditions"); - - if ( 0 < _columns.length ) - { - if ( Called.INSTEAD_OF.equals( _called) ) - msg( Kind.ERROR, func.func, origin, - "INSTEAD OF triggers do not support lists of columns"); - boolean seen = false; - for ( Event e : _events ) - if ( Event.UPDATE.equals( e) ) - seen = true; - if ( ! seen ) - msg( Kind.ERROR, func.func, origin, - "Column list is meaningless unless UPDATE is a trigger event"); - } - - refOld = ! "".equals( _tableOld); - refNew = ! "".equals( _tableNew); - - if ( refOld || refNew ) - { - if ( ! 
Called.AFTER.equals( _called) ) - msg( Kind.ERROR, func.func, origin, - "Only AFTER triggers can reference OLD TABLE or NEW TABLE"); - boolean badOld = refOld; - boolean badNew = refNew; - for ( Event e : _events ) - { - switch ( e ) - { - case INSERT: badNew = false; break; - case UPDATE: badOld = badNew = false; break; - case DELETE: badOld = false; break; - } - } - if ( badOld ) - msg( Kind.ERROR, func.func, origin, - "Trigger must be callable on UPDATE or DELETE to reference OLD TABLE"); - if ( badNew ) - msg( Kind.ERROR, func.func, origin, - "Trigger must be callable on UPDATE or INSERT to reference NEW TABLE"); - } - - if ( isConstraint ) - { - if ( ! Called.AFTER.equals( _called) ) - msg( Kind.ERROR, func.func, origin, - "A constraint trigger must be an AFTER trigger"); - if ( ! Scope.ROW.equals( _scope) ) - msg( Kind.ERROR, func.func, origin, - "A constraint trigger must be FOR EACH ROW"); - if ( "".equals( _from) && ! "".equals( _fromSchema) ) - msg( Kind.ERROR, func.func, origin, - "To use fromSchema, specify a table name with from"); - } - else - { - if ( ! "".equals( _from) ) - msg( Kind.ERROR, func.func, origin, - "Only a constraint trigger can use 'from'"); - if ( ! 
"".equals( _fromSchema) ) - msg( Kind.ERROR, func.func, origin, - "Only a constraint trigger can use 'fromSchema'"); - } - - if ( "".equals( _name) ) - _name = TriggerNamer.synthesizeName( this); - return Set.of(); - } - - public String[] deployStrings() - { - StringBuilder sb = new StringBuilder(); - sb.append("CREATE "); - if ( isConstraint ) - { - sb.append("CONSTRAINT "); - } - sb.append("TRIGGER ").append(name()).append("\n\t"); - switch ( called() ) - { - case BEFORE: sb.append( "BEFORE " ); break; - case AFTER: sb.append( "AFTER " ); break; - case INSTEAD_OF: sb.append( "INSTEAD OF "); break; - } - int s = _events.length; - for ( Event e : _events ) - { - sb.append( e.toString()); - if ( Event.UPDATE.equals( e) && 0 < _columns.length ) - { - sb.append( " OF "); - int cs = _columns.length; - for ( String c : _columns ) - { - sb.append( c); - if ( 0 < -- cs ) - sb.append( ", "); - } - } - if ( 0 < -- s ) - sb.append( " OR "); - } - sb.append( "\n\tON "); - sb.append(qnameFrom(table(), schema())); - if ( ! "".equals( from()) ) - { - sb.append("\n\tFROM "); - sb.append(qnameFrom(from(), fromSchema())); - } - if ( isConstraint ) { - sb.append("\n\t"); - switch ( _constraint ) - { - case NOT_DEFERRABLE: - sb.append("NOT DEFERRABLE"); - break; - case INITIALLY_IMMEDIATE: - sb.append("DEFERRABLE INITIALLY IMMEDIATE"); - break; - case INITIALLY_DEFERRED: - sb.append("DEFERRABLE INITIALLY DEFERRED"); - break; - } - } - if ( refOld || refNew ) - { - sb.append( "\n\tREFERENCING"); - if ( refOld ) - sb.append( " OLD TABLE AS ").append( _tableOld); - if ( refNew ) - sb.append( " NEW TABLE AS ").append( _tableNew); - } - sb.append( "\n\tFOR EACH "); - sb.append( scope().toString()); - if ( ! 
"".equals( _when) ) - sb.append( "\n\tWHEN ").append( _when); - sb.append( "\n\tEXECUTE PROCEDURE "); - func.appendNameAndParams( sb, true, false, false); - sb.setLength( sb.length() - 1); // drop closing ) - s = _arguments.length; - for ( String a : _arguments ) - { - sb.append( "\n\t").append( DDRWriter.eQuote( a)); - if ( 0 < -- s ) - sb.append( ','); - } - sb.append( ')'); - - String comm = comment(); - if ( null == comm ) - return new String[] { sb.toString() }; - - return new String[] { - sb.toString(), - "COMMENT ON TRIGGER " + name() + " ON " + - qnameFrom(table(), schema()) + - "\nIS " + - DDRWriter.eQuote( comm) - }; - } - - public String[] undeployStrings() - { - StringBuilder sb = new StringBuilder(); - sb.append( "DROP TRIGGER ").append( name()).append( "\n\tON "); - sb.append(qnameFrom(table(), schema())); - return new String[] { sb.toString() }; - } - } - - /** - * Enumeration of different method "shapes" and the treatment of - * {@code type=} and {@code out=} annotation elements they need. - *

    - * Each member has a {@code setComposite} method that will be invoked - * by {@code checkOutType} if the method is judged to have a composite - * return type according to the annotations present. - *

    - * There is one case (no {@code out} and a {@code type} other than - * {@code RECORD}) where {@code checkOutType} will resolve the - * ambiguity by assuming composite, and will have set - * {@code assumedComposite} accordingly. The {@code MAYBECOMPOSITE} - * shape checks that assumption against the presence of a countervailing - * {@code SQLType} annotation, the {@code ITERATOR} shape clears it and - * behaves as noncomposite as always, and the {@code PROVIDER} shape - * clears it because that shape is unambiguously composite. - */ - enum MethodShape - { - /** - * Method has the shape {@code boolean foo(..., ResultSet)], which - * could be an ordinary method with an incoming record parameter and - * boolean return, or a composite-returning method whose last - * a writable ResultSet supplied by PL/Java for the return value. - */ - MAYBECOMPOSITE((f,msgr) -> - { - boolean sqlTyped = null != - f.paramTypeAnnotations[f.paramTypeAnnotations.length - 1]; - if ( ! sqlTyped ) - f.complexViaInOut = true; - else if ( f.assumedComposite ) - f.assumedComposite = false; // SQLType cancels assumption - else - msgr.printMessage(Kind.ERROR, - "no @SQLType annotation may appear on " + - "the return-value ResultSet parameter", f.func); - }), - - /** - * Method has the shape {@code Iterator foo(...)} and represents - * a set-returning function with a non-composite return type. - *

    - * If the shape has been merely assumed composite, clear - * that flag and proceed as if it is not. Otherwise, issue an error - * that it can't be composite. - */ - ITERATOR((f,msgr) -> - { - if ( f.assumedComposite ) - f.assumedComposite = false; - else - msgr.printMessage(Kind.ERROR, - "the iterator style cannot return a row-typed result", - f.func); - }), - - /** - * Method has the shape {@code ResultSetProvider foo(...)} or - * {@code ResultSetHandle foo(...)} and represents - * a set-returning function with a non-composite return type. - *

    - * If the shape has been merely assumed composite, clear - * that flag; for this shape that assumption is not tentative. - */ - PROVIDER((f,msgr) -> f.assumedComposite = false), - - /** - * Method is something else (trigger, for example) for which no - * {@code type} or {@code out} is allowed. - *

    - * The {@code setComposite} method for this shape will never - * be called. - */ - OTHER(null); - - private final BiConsumer compositeSetter; - - MethodShape(BiConsumer setter) - { - compositeSetter = setter; - } - - void setComposite(FunctionImpl f, Messager msgr) - { - compositeSetter.accept(f, msgr); - } - } - - class FunctionImpl - extends AbstractAnnotationImpl - implements Function, Snippet, Commentable - { - public String type() { return _type; } - public String[] out() { return _out; } - public String name() { return _name; } - public String schema() { return _schema; } - public boolean variadic() { return _variadic; } - public OnNullInput onNullInput() { return _onNullInput; } - public Security security() { return _security; } - public Effects effects() { return _effects; } - public Trust trust() { return _trust; } - public Parallel parallel() { return _parallel; } - public boolean leakproof() { return _leakproof; } - public int cost() { return _cost; } - public int rows() { return _rows; } - public String[] settings() { return _settings; } - public String[] provides() { return _provides; } - public String[] requires() { return _requires; } - public Trigger[] triggers() { return _triggers; } - public String language() - { - return _languageIdent.toString(); - } - - ExecutableElement func; - - public String _type; - public String[] _out; - public String _name; - public String _schema; - public boolean _variadic; - public OnNullInput _onNullInput; - public Security _security; - public Effects _effects; - public Trust _trust; - public Parallel _parallel; - public Boolean _leakproof; - int _cost; - int _rows; - public String[] _settings; - public String[] _provides; - public String[] _requires; - Trigger[] _triggers; - - public Identifier.Simple _languageIdent; - - boolean complexViaInOut = false; - boolean setof = false; - TypeMirror setofComponent = null; - boolean trigger = false; - TypeMirror returnTypeMapKey = null; - SQLType[] paramTypeAnnotations; 
- - DBType returnType; - DBType[] parameterTypes; - List> outParameters; - boolean assumedComposite = false; - boolean forceResultRecord = false; - - boolean subsumed = false; - - FunctionImpl(ExecutableElement e) - { - func = e; - } - - public void setType( Object o, boolean explicit, Element e) - { - if ( explicit ) - _type = (String)o; - } - - public void setOut( Object o, boolean explicit, Element e) - { - if ( explicit ) - _out = avToArray( o, String.class); - } - - public void setTrust( Object o, boolean explicit, Element e) - { - if ( explicit ) - _trust = Trust.valueOf( - ((VariableElement)o).getSimpleName().toString()); - } - - public void setLanguage( Object o, boolean explicit, Element e) - { - if ( explicit ) - _languageIdent = Identifier.Simple.fromJava((String)o); - } - - public void setCost( Object o, boolean explicit, Element e) - { - _cost = ((Integer)o).intValue(); - if ( _cost < 0 && explicit ) - throw new IllegalArgumentException( "cost must be nonnegative"); - } - - public void setRows( Object o, boolean explicit, Element e) - { - _rows = ((Integer)o).intValue(); - if ( _rows < 0 && explicit ) - throw new IllegalArgumentException( "rows must be nonnegative"); - } - - public void setTriggers( Object o, boolean explicit, Element e) - { - AnnotationMirror[] ams = avToArray( o, AnnotationMirror.class); - _triggers = new Trigger [ ams.length ]; - int i = 0; - for ( AnnotationMirror am : ams ) - { - TriggerImpl ti = new TriggerImpl( this, am); - populateAnnotationImpl( ti, e, am); - _triggers [ i++ ] = ti; - } - } - - public Set characterize() - { - if ( "".equals( _name) ) - _name = func.getSimpleName().toString(); - - resolveLanguage(); - - Set mods = func.getModifiers(); - if ( ! 
mods.contains( Modifier.STATIC) ) - { - msg( Kind.ERROR, func, "A PL/Java function must be static"); - } - - TypeMirror ret = func.getReturnType(); - if ( ret.getKind().equals( TypeKind.ERROR) ) - { - msg( Kind.ERROR, func, - "Unable to resolve return type of function"); - return Set.of(); - } - - ExecutableType et = (ExecutableType)func.asType(); - List ptms = et.getParameterTypes(); - List typeArgs; - int arity = ptms.size(); - - /* - * Collect the parameter type annotations now, in case needed below - * in checkOutType(MAYBECOMPOSITE) to disambiguate. - */ - - collectParameterTypeAnnotations(); - - /* - * If a type= annotation is present, provisionally set returnType - * accordingly. Otherwise, leave it null, to be filled in by - * resolveParameterAndReturnTypes below. - */ - - if ( null != _type ) - returnType = DBType.fromSQLTypeAnnotation(_type); - - /* - * Take a first look according to the method's Java return type. - */ - if ( ret.getKind().equals( TypeKind.BOOLEAN) ) - { - if ( 0 < arity ) - { - TypeMirror tm = ptms.get( arity - 1); - if ( ! tm.getKind().equals( TypeKind.ERROR) - // unresolved things seem assignable to anything - && typu.isSameType( tm, TY_RESULTSET) ) - { - checkOutType(MethodShape.MAYBECOMPOSITE); - } - } - } - else if ( null != (typeArgs = specialization( ret, TY_ITERATOR)) ) - { - setof = true; - if ( 1 != typeArgs.size() ) - { - msg( Kind.ERROR, func, - "Need one type argument for Iterator return type"); - return Set.of(); - } - setofComponent = typeArgs.get( 0); - if ( null == setofComponent ) - { - msg( Kind.ERROR, func, - "Failed to find setof component type"); - return Set.of(); - } - checkOutType(MethodShape.ITERATOR); - } - else if ( typu.isAssignable( ret, TY_RESULTSETPROVIDER) - || typu.isAssignable( ret, TY_RESULTSETHANDLE) ) - { - setof = true; - checkOutType(MethodShape.PROVIDER); - } - else if ( ret.getKind().equals( TypeKind.VOID) && 1 == arity ) - { - TypeMirror tm = ptms.get( 0); - if ( ! 
tm.getKind().equals( TypeKind.ERROR) - // unresolved things seem assignable to anything - && typu.isSameType( tm, TY_TRIGGERDATA) ) - { - trigger = true; - checkOutType(MethodShape.OTHER); - } - } - - returnTypeMapKey = ret; - - if ( ! setof && -1 != rows() ) - msg( Kind.ERROR, func, - "ROWS specified on a function not returning SETOF"); - - if ( ! trigger && 0 != _triggers.length ) - msg( Kind.ERROR, func, - "a function with triggers needs void return and " + - "one TriggerData parameter"); - - /* - * Report any unmappable types now that could appear in - * deployStrings (return type or parameter types) ... so that the - * error messages won't be missing the source location, as they can - * with javac 7 throwing away symbol tables between rounds. - */ - resolveParameterAndReturnTypes(); - - if ( _variadic ) - { - int last = parameterTypes.length - 1; - if ( 0 > last || ! parameterTypes[last].isArray() ) - msg( Kind.ERROR, func, - "VARIADIC function must have a last, non-output " + - "parameter that is an array"); - } - - recordImplicitTags(); - - recordExplicitTags(_provides, _requires); - - for ( Trigger t : triggers() ) - ((TriggerImpl)t).characterize(); - return Set.of(this); - } - - void resolveLanguage() - { - if ( null != _trust && null != _languageIdent ) - msg( Kind.ERROR, func, "A PL/Java function may specify " + - "only one of trust, language"); - if ( null == _languageIdent ) - { - if ( null == _trust || Trust.SANDBOXED == _trust ) - _languageIdent = nameTrusted; - else - _languageIdent = nameUntrusted; - } - } - - /* - * Factored out of characterize() so it could be called if needed by - * BaseUDTFunctionImpl.characterize(), which does not need anything else - * from its super.characterize(). But for now it doesn't need this - * either; it knows what parameters the base UDT functions take, and it - * takes no heed of @SQLType annotations. Perhaps it should warn if such - * annotations are used, but that's for another day. 
- */ - void collectParameterTypeAnnotations() - { - List ves = func.getParameters(); - paramTypeAnnotations = new SQLType [ ves.size() ]; - int i = 0; - boolean anyOptional = false; - for ( VariableElement ve : ves ) - { - for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( ve) ) - { - if ( am.getAnnotationType().asElement().equals(AN_SQLTYPE) ) - { - SQLTypeImpl sti = new SQLTypeImpl(); - populateAnnotationImpl( sti, ve, am); - paramTypeAnnotations[i] = sti; - - if (null != sti._optional && null != sti._defaultValue) - msg(Kind.ERROR, ve, "Only one of optional= or " + - "defaultValue= may be given"); - - anyOptional |= sti.optional(); - } - } - ++ i; - } - - if ( anyOptional && OnNullInput.RETURNS_NULL.equals(_onNullInput) ) - msg(Kind.ERROR, func, "A PL/Java function with " + - "onNullInput=RETURNS_NULL may not have parameters with " + - "optional=true"); - } - - private static final int NOOUT = 0; - private static final int ONEOUT = 4; - private static final int MOREOUT = 8; - - private static final int NOTYPE = 0; - private static final int RECORDTYPE = 1; - private static final int OTHERTYPE = 2; - - /** - * Reads the tea leaves of the {@code type=} and {@code out=} - * annotation elements to decide whether the method has a composite - * or noncomposite return. - *

    - * This is complicated by the PostgreSQL behavior of treating a function - * declared with one {@code OUT} parameter, or as - * a one-element {@code TABLE} function, as not - * returning a row type. - *

    - * This method avoids rejecting the case of a one-element {@code out=} - * with an explicit {@code type=RECORD}, to provide a way to explicitly - * request composite behavior for that case, on the chance that some - * future PostgreSQL version may accept it, though as of this writing - * no current version does. - *

    - * If the {@code MAYBECOMPOSITE} shape is used with a single {@code out} - * parameter, it is likely a mistake (what are the odds the developer - * wanted a function with a row-typed input parameter and a named out - * parameter of boolean type?), and will be rejected unless the - * {@code ResultSet} final parameter has been given an {@code SQLType} - * annotation. - */ - void checkOutType(MethodShape shape) - { - int out = - null == _out ? NOOUT : 1 == _out.length ? ONEOUT : MOREOUT; - - /* - * The caller will have set returnType from _type if present, - * or left it null otherwise. We know RECORD is a composite type; - * we don't presume here to know whether any other type is or not. - */ - int type = - null == returnType ? NOTYPE : - DT_RECORD.equals(returnType) ? RECORDTYPE : OTHERTYPE; - - if ( MethodShape.OTHER == shape && 0 != (out | type) ) - { - msg( Kind.ERROR, func, - "no type= or out= element may be applied to this method"); - return; - } - - switch ( out | type ) - { - case NOOUT | OTHERTYPE: - assumedComposite = true; // annotations not definitive; assume - shape.setComposite(this, msgr); - return; - case NOOUT | RECORDTYPE: - case MOREOUT | NOTYPE: - shape.setComposite(this, msgr); - return; - case ONEOUT | RECORDTYPE: // in case PostgreSQL one day allows this - forceResultRecord = true; - shape.setComposite(this, msgr); - return; - case ONEOUT | NOTYPE: - /* - * No special action needed here except for the MAYBECOMPOSITE - * or PROVIDER shapes, to check for likely mistakes. - */ - if ( MethodShape.MAYBECOMPOSITE == shape - && null == - paramTypeAnnotations[paramTypeAnnotations.length - 1] ) - { - msg(Kind.ERROR, func, - "a function with one declared OUT parameter returns " + - "it normally, not through an extra ResultSet " + - "parameter. 
If the trailing ResultSet parameter is " + - "intended as an input, it can be marked with an " + - "@SQLType annotation"); - } - else if ( MethodShape.PROVIDER == shape ) - { - msg(Kind.ERROR, func, - "a set-returning function with one declared OUT " + - "parameter must return an Iterator, not a " + - "ResultSetProvider or ResultSetHandle"); - } - return; - case NOOUT | NOTYPE: - /* - * No special action; MAYBECOMPOSITE will treat as noncomposite, - * ITERATOR and PROVIDER will behave as they always do. - */ - return; - case ONEOUT | OTHERTYPE: - msg( Kind.ERROR, func, - "no type= allowed here (the out parameter " + - "declares its own type"); - return; - case MOREOUT | RECORDTYPE: - case MOREOUT | OTHERTYPE: - msg( Kind.ERROR, func, - "type= and out= may not be combined here"); - return; - default: - throw new AssertionError("unhandled case"); - } - } - - /** - * Return a stream of {@code ParameterInfo} 'records' for the function's - * parameters in order. - *

    - * If {@code paramTypeAnnotations} has not been set, every element in - * the stream will have null for {@code st}. - *

    - * If {@code parameterTypes} has not been set, every element in - * the stream will have null for {@code dt}. - */ - Stream parameterInfo() - { - if ( trigger ) - return Stream.empty(); - - ExecutableType et = (ExecutableType)func.asType(); - List tms = et.getParameterTypes(); - if ( complexViaInOut ) - tms = tms.subList( 0, tms.size() - 1); - - Iterator ves = - func.getParameters().iterator(); - - Supplier sts = - null == paramTypeAnnotations - ? () -> null - : Arrays.asList(paramTypeAnnotations).iterator()::next; - - Supplier dts = - null == parameterTypes - ? () -> null - : Arrays.asList(parameterTypes).iterator()::next; - - return tms.stream().map(tm -> - new ParameterInfo(tm, ves.next(), sts.get(), dts.get())); - } - - /** - * Create the {@code DBType}s to populate {@code returnType} and - * {@code parameterTypes}. - */ - void resolveParameterAndReturnTypes() - { - if ( null != returnType ) - /* it was already set from a type= attribute */; - else if ( null != setofComponent ) - returnType = tmpr.getSQLType( setofComponent, func); - else if ( setof ) - returnType = DT_RECORD; - else - returnType = tmpr.getSQLType( returnTypeMapKey, func); - - parameterTypes = parameterInfo() - .map(i -> tmpr.getSQLType(i.tm, i.ve, i.st, true, true)) - .toArray(DBType[]::new); - - if ( null != _out ) - { - outParameters = Arrays.stream(_out) - .map(DBType::fromNameAndType) - .collect(toList()); - if ( 1 < _out.length || forceResultRecord ) - returnType = DT_RECORD; - else - returnType = outParameters.get(0).getValue(); - } - } - - /** - * Record that this function provides itself, and requires its - * parameter and return types. - *

    - * Must be called before {@code recordExplicitTags}, which makes the - * provides and requires sets immutable. - */ - void recordImplicitTags() - { - Set provides = provideTags(); - Set requires = requireTags(); - - provides.add(new DependTag.Function( - qnameFrom(_name, _schema), parameterTypes)); - - DependTag t = returnType.dependTag(); - if ( null != t ) - requires.add(t); - - for ( DBType dbt : parameterTypes ) - { - t = dbt.dependTag(); - if ( null != t ) - requires.add(t); - } - - if ( null != outParameters ) - outParameters.stream() - .map(m -> m.getValue().dependTag()) - .filter(Objects::nonNull) - .forEach(requires::add); - } - - @Override - public void subsume() - { - subsumed = true; - } - - /** - * Append SQL syntax for the function's name (schema-qualified if - * appropriate) and parameters, either with any defaults indicated - * (for use in CREATE FUNCTION) or without (for use in DROP FUNCTION). - * - * @param sb StringBuilder in which to generate the SQL. - * @param names Whether to include the parameter names. - * @param outs Whether to include out parameters. - * @param dflts Whether to include the defaults, if any. - */ - void appendNameAndParams( - StringBuilder sb, boolean names, boolean outs, boolean dflts) - { - appendNameAndParams(sb, names, outs, dflts, - qnameFrom(name(), schema()), parameterInfo().collect(toList())); - } - - /** - * Internal version taking name and parameter stream as extra arguments - * so they can be overridden from {@link Transformed}. - */ - void appendNameAndParams( - StringBuilder sb, boolean names, boolean outs, boolean dflts, - Identifier.Qualified qname, - Iterable params) - { - sb.append(qname).append( '('); - appendParams( sb, names, outs, dflts, params); - // TriggerImpl relies on ) being the very last character - sb.append( ')'); - } - - /** - * Takes the parameter stream as an extra argument - * so it can be overridden from {@link Transformed}. 
- */ - void appendParams( - StringBuilder sb, boolean names, boolean outs, boolean dflts, - Iterable params) - { - int lengthOnEntry = sb.length(); - - Iterator iter = params.iterator(); - ParameterInfo i; - while ( iter.hasNext() ) - { - i = iter.next(); - - String name = i.name(); - - sb.append("\n\t"); - - if ( _variadic && ! iter.hasNext() ) - sb.append("VARIADIC "); - - if ( names ) - sb.append(name).append(' '); - - sb.append(i.dt.toString(dflts)); - - sb.append(','); - } - - if ( outs && null != outParameters ) - { - outParameters.forEach(e -> { - sb.append("\n\tOUT "); - if ( null != e.getKey() ) - sb.append(e.getKey()).append(' '); - sb.append(e.getValue().toString(false)).append(','); - }); - } - - if ( lengthOnEntry < sb.length() ) - sb.setLength(sb.length() - 1); // that last pesky comma - } - - String makeAS() - { - StringBuilder sb = new StringBuilder(); - if ( ! ( complexViaInOut || setof || trigger ) ) - sb.append( typu.erasure( func.getReturnType())).append( '='); - Element e = func.getEnclosingElement(); - if ( ! e.getKind().equals( ElementKind.CLASS) ) - msg( Kind.ERROR, func, - "Somehow this method got enclosed by something other " + - "than a class"); - sb.append( e.toString()).append( '.'); - sb.append( trigger ? func.getSimpleName() : func.toString()); - return sb.toString(); - } - - public String[] deployStrings() - { - return deployStrings( - qnameFrom(name(), schema()), parameterInfo().collect(toList()), - makeAS(), comment()); - } - - /** - * Internal version taking the function name, parameter stream, - * AS string, and comment (if any) as extra arguments so they can be - * overridden from {@link Transformed}. 
- */ - String[] deployStrings( - Identifier.Qualified qname, - Iterable params, String as, String comment) - { - ArrayList al = new ArrayList<>(); - StringBuilder sb = new StringBuilder(); - if ( assumedComposite ) - sb.append("/*\n * PL/Java generated this declaration assuming" + - "\n * a composite-returning function was intended." + - "\n * If a boolean function with a row-typed parameter" + - "\n * was intended, add any @SQLType annotation on the" + - "\n * ResultSet final parameter to make the intent clear." + - "\n */\n"); - if ( forceResultRecord ) - sb.append("/*\n * PL/Java generated this declaration for a" + - "\n * function with one OUT parameter that was annotated" + - "\n * to explicitly request treatment as a function that" + - "\n * returns RECORD. A given version of PostgreSQL might" + - "\n * not accept such a declaration. More at" + - "\n * https://www.postgresql.org/message-id/" + - "619BBE78.7040009%40anastigmatix.net" + - "\n */\n"); - sb.append( "CREATE OR REPLACE FUNCTION "); - appendNameAndParams( sb, true, true, true, qname, params); - sb.append( "\n\tRETURNS "); - if ( trigger ) - sb.append( DT_TRIGGER.toString()); - else - { - if ( setof ) - sb.append( "SETOF "); - sb.append( returnType); - } - sb.append( "\n\tLANGUAGE "); - sb.append( _languageIdent.toString()); - sb.append( ' ').append( effects()); - if ( leakproof() ) - sb.append( " LEAKPROOF"); - sb.append( '\n'); - if ( OnNullInput.RETURNS_NULL.equals( onNullInput()) ) - sb.append( "\tRETURNS NULL ON NULL INPUT\n"); - if ( Security.DEFINER.equals( security()) ) - sb.append( "\tSECURITY DEFINER\n"); - if ( ! 
Parallel.UNSAFE.equals( parallel()) ) - sb.append( "\tPARALLEL ").append( parallel()).append( '\n'); - if ( -1 != cost() ) - sb.append( "\tCOST ").append( cost()).append( '\n'); - if ( -1 != rows() ) - sb.append( "\tROWS ").append( rows()).append( '\n'); - for ( String s : settings() ) - sb.append( "\tSET ").append( s).append( '\n'); - sb.append( "\tAS ").append( DDRWriter.eQuote( as)); - al.add( sb.toString()); - - if ( null != comment ) - { - sb.setLength( 0); - sb.append( "COMMENT ON FUNCTION "); - appendNameAndParams( sb, true, false, false, qname, params); - sb.append( "\nIS "); - sb.append( DDRWriter.eQuote( comment)); - al.add( sb.toString()); - } - - for ( Trigger t : triggers() ) - for ( String s : ((TriggerImpl)t).deployStrings() ) - al.add( s); - return al.toArray( new String [ al.size() ]); - } - - public String[] undeployStrings() - { - return undeployStrings( - qnameFrom(name(), schema()), parameterInfo().collect(toList())); - } - - String[] undeployStrings( - Identifier.Qualified qname, - Iterable params) - { - if ( subsumed ) - return new String[0]; - - String[] rslt = new String [ 1 + triggers().length ]; - int i = rslt.length - 1; - for ( Trigger t : triggers() ) - for ( String s : ((TriggerImpl)t).undeployStrings() ) - rslt [ --i ] = s; - - StringBuilder sb = new StringBuilder(); - sb.append( "DROP FUNCTION "); - appendNameAndParams( sb, true, false, false, qname, params); - rslt [ rslt.length - 1 ] = sb.toString(); - return rslt; - } - - /** - * Test whether the type {@code tm} is, directly or indirectly, - * a specialization of generic type {@code dt}. - * @param tm a type to be checked - * @param dt known generic type to check for - * @return null if {@code tm} does not extend {@code dt}, otherwise the - * list of type arguments with which it specializes {@code dt} - */ - List specialization( - TypeMirror tm, DeclaredType dt) - { - if ( ! 
typu.isAssignable( typu.erasure( tm), dt) ) - return null; - - List pending = new LinkedList<>(); - pending.add( tm); - while ( ! pending.isEmpty() ) - { - tm = pending.remove( 0); - if ( typu.isSameType( typu.erasure( tm), dt) ) - return ((DeclaredType)tm).getTypeArguments(); - pending.addAll( typu.directSupertypes( tm)); - } - /* - * This is a can't-happen: tm is assignable to dt but has no - * supertype that's dt? Could throw an AssertionError, but returning - * an empty list will lead the caller to report an error, and that - * will give more information about the location in the source being - * compiled. - */ - return Collections.emptyList(); - } - - private Map m_variants= new HashMap<>(); - - /** - * Return an instance representing a transformation of this function, - * or null on second and subsequent requests for the same - * transformation (so the caller will not register the variant more - * than once). - */ - Transformed transformed( - Identifier.Qualified qname, - boolean commute, boolean negate) - { - Transformed prospect = new Transformed(qname, commute, negate); - DependTag.Function tag = - (DependTag.Function)prospect.provideTags().iterator().next(); - Transformed found = m_variants.putIfAbsent(tag, prospect); - if ( null == found ) - return prospect; - return null; - } - - class Transformed implements Snippet - { - final Identifier.Qualified m_qname; - final boolean m_commute; - final boolean m_negate; - final String m_comment; - - Transformed( - Identifier.Qualified qname, - boolean commute, boolean negate) - { - EnumSet how = - EnumSet.noneOf(OperatorPath.Transform.class); - if ( commute ) - how.add(OperatorPath.Transform.COMMUTATION); - if ( negate ) - how.add(OperatorPath.Transform.NEGATION); - assert ! 
how.isEmpty() : "no transformation to apply"; - m_qname = requireNonNull(qname); - m_commute = commute; - m_negate = negate; - m_comment = "Function automatically derived by " + how + - " from " + qnameFrom( - FunctionImpl.this.name(), FunctionImpl.this.schema()); - } - - List parameterInfo() - { - List params = - FunctionImpl.this.parameterInfo().collect(toList()); - if ( ! m_commute ) - return params; - assert 2 == params.size() : "commute with arity != 2"; - Collections.reverse(params); - return params; - } - - @Override - public Set characterize() - { - return Set.of(); - } - - @Override - public Identifier.Simple implementorName() - { - return FunctionImpl.this.implementorName(); - } - - @Override - public Set requireTags() - { - return FunctionImpl.this.requireTags(); - } - - @Override - public Set provideTags() - { - DBType[] sig = - parameterInfo().stream() - .map(p -> p.dt) - .toArray(DBType[]::new); - return Set.of(new DependTag.Function(m_qname, sig)); - } - - @Override - public String[] deployStrings() - { - String as = Stream.of( - m_commute ? "commute" : (String)null, - m_negate ? "negate" : (String)null) - .filter(Objects::nonNull) - .collect(joining(",", "[", "]")) - + FunctionImpl.this.makeAS(); - - return FunctionImpl.this.deployStrings( - m_qname, parameterInfo(), as, m_comment); - } - - @Override - public String[] undeployStrings() - { - return FunctionImpl.this.undeployStrings( - m_qname, parameterInfo()); - } - } - } - - static enum BaseUDTFunctionID - { - INPUT("in", null, "pg_catalog.cstring", "pg_catalog.oid", "integer"), - OUTPUT("out", "pg_catalog.cstring", (String[])null), - RECEIVE("recv", null, "pg_catalog.internal","pg_catalog.oid","integer"), - SEND("send", "pg_catalog.bytea", (String[])null); - BaseUDTFunctionID( String suffix, String ret, String... param) - { - this.suffix = suffix; - this.param = null == param ? 
null : - Arrays.stream(param) - .map(DBType::fromSQLTypeAnnotation) - .toArray(DBType[]::new); - this.ret = null == ret ? null : - new DBType.Named(Identifier.Qualified.nameFromJava(ret)); - } - private String suffix; - private DBType[] param; - private DBType ret; - String getSuffix() { return suffix; } - DBType[] getParam( BaseUDTImpl u) - { - if ( null != param ) - return param; - return new DBType[] { u.qname }; - } - DBType getRet( BaseUDTImpl u) - { - if ( null != ret ) - return ret; - return u.qname; - } - } - - class BaseUDTFunctionImpl extends FunctionImpl - { - BaseUDTFunctionImpl( - BaseUDTImpl ui, TypeElement te, BaseUDTFunctionID id) - { - super( null); - this.ui = ui; - this.te = te; - this.id = id; - - returnType = id.getRet( ui); - parameterTypes = id.getParam( ui); - - _type = returnType.toString(); - _name = Identifier.Simple.fromJava(ui.name()) - .concat("_", id.getSuffix()).toString(); - _schema = ui.schema(); - _variadic = false; - _cost = -1; - _rows = -1; - _onNullInput = OnNullInput.CALLED; - _security = Security.INVOKER; - _effects = Effects.VOLATILE; - _parallel = Parallel.UNSAFE; - _leakproof = false; - _settings = new String[0]; - _triggers = new Trigger[0]; - _provides = _settings; - _requires = _settings; - } - - BaseUDTImpl ui; - TypeElement te; - BaseUDTFunctionID id; - - @Override - public String[] deployStrings() - { - return deployStrings( - qnameFrom(name(), schema()), - null, // parameter iterable unused in appendParams below - "UDT[" + te + "] " + id.name(), - comment()); - } - - @Override - public String[] undeployStrings() - { - return undeployStrings( - qnameFrom(name(), schema()), - null); // parameter iterable unused in appendParams below - } - - @Override - void appendParams( - StringBuilder sb, boolean names, boolean outs, boolean dflts, - Iterable params) - { - sb.append( - Arrays.stream(id.getParam( ui)) - .map(Object::toString) - .collect(joining(", ")) - ); - } - - StringBuilder appendTypeOp( StringBuilder sb) - { - 
sb.append( id.name()).append( " = "); - return sb.append(qnameFrom(name(), schema())); - } - - @Override - public Set characterize() - { - resolveLanguage(); - recordImplicitTags(); - recordExplicitTags(_provides, _requires); - return Set.of(this); - } - - public void setType( Object o, boolean explicit, Element e) - { - if ( explicit ) - msg( Kind.ERROR, e, - "The type of a UDT function may not be changed"); - } - - public void setOut( Object o, boolean explicit, Element e) - { - if ( explicit ) - msg( Kind.ERROR, e, - "The type of a UDT function may not be changed"); - } - - public void setVariadic( Object o, boolean explicit, Element e) - { - if ( explicit ) - msg( Kind.ERROR, e, "A UDT function is never variadic"); - } - - public void setRows( Object o, boolean explicit, Element e) - { - if ( explicit ) - msg( Kind.ERROR, e, - "The rows attribute of a UDT function may not be set"); - } - - public void setProvides( Object o, boolean explicit, Element e) - { - if ( explicit ) - msg( Kind.ERROR, e, - "A UDT function does not have its own provides/requires"); - } - - public void setRequires( Object o, boolean explicit, Element e) - { - if ( explicit ) - msg( Kind.ERROR, e, - "A UDT function does not have its own provides/requires"); - } - - public void setTriggers( Object o, boolean explicit, Element e) - { - if ( explicit ) - msg( Kind.ERROR, e, - "A UDT function may not have associated triggers"); - } - - public void setImplementor( Object o, boolean explicit, Element e) - { - if ( explicit ) - msg( Kind.ERROR, e, - "A UDT function does not have its own implementor"); - } - - public String implementor() - { - return ui.implementor(); - } - - public String derivedComment( Element e) - { - String comm = super.derivedComment( e); - if ( null != comm ) - return comm; - return id.name() + " method for type " + ui.qname; - } - } - - abstract class AbstractUDTImpl - extends AbstractAnnotationImpl - implements Snippet, Commentable - { - public String name() { return 
_name; } - public String schema() { return _schema; } - public String[] provides() { return _provides; } - public String[] requires() { return _requires; } - - public String[] _provides; - public String[] _requires; - public String _name; - public String _schema; - - TypeElement tclass; - - DBType qname; - - AbstractUDTImpl(TypeElement e) - { - tclass = e; - - if ( ! typu.isAssignable( e.asType(), TY_SQLDATA) ) - { - msg( Kind.ERROR, e, "A PL/Java UDT must implement %s", - TY_SQLDATA); - } - - ExecutableElement niladicCtor = huntFor( - constructorsIn( tclass.getEnclosedElements()), null, false, - null); - - if ( null == niladicCtor ) - { - msg( Kind.ERROR, tclass, - "A PL/Java UDT must have a public no-arg constructor"); - } - } - - protected void setQname() - { - if ( "".equals( _name) ) - _name = tclass.getSimpleName().toString(); - - qname = new DBType.Named(qnameFrom(_name, _schema)); - - if ( ! tmpr.mappingsFrozen() ) - tmpr.addMap( tclass.asType(), qname); - } - - protected void addComment( ArrayList al) - { - String comm = comment(); - if ( null == comm ) - return; - al.add( "COMMENT ON TYPE " + qname + "\nIS " + - DDRWriter.eQuote( comm)); - } - } - - class MappedUDTImpl - extends AbstractUDTImpl - implements MappedUDT - { - public String[] structure() { return _structure; } - - String[] _structure; - - public void setStructure( Object o, boolean explicit, Element e) - { - if ( explicit ) - _structure = avToArray( o, String.class); - } - - MappedUDTImpl(TypeElement e) - { - super( e); - } - - public void registerMapping() - { - setQname(); - } - - public Set characterize() - { - if ( null != structure() ) - { - DependTag t = qname.dependTag(); - if ( null != t ) - provideTags().add(t); - } - recordExplicitTags(_provides, _requires); - return Set.of(this); - } - - public String[] deployStrings() - { - ArrayList al = new ArrayList<>(); - if ( null != structure() ) - { - StringBuilder sb = new StringBuilder(); - sb.append( "CREATE TYPE ").append( 
qname).append( " AS ("); - int i = structure().length; - for ( String s : structure() ) - sb.append( "\n\t").append( s).append( - ( 0 < -- i ) ? ',' : '\n'); - sb.append( ')'); - al.add( sb.toString()); - } - al.add( "SELECT sqlj.add_type_mapping(" + - DDRWriter.eQuote( qname.toString()) + ", " + - DDRWriter.eQuote( tclass.toString()) + ')'); - addComment( al); - return al.toArray( new String [ al.size() ]); - } - - public String[] undeployStrings() - { - ArrayList al = new ArrayList<>(); - al.add( "SELECT sqlj.drop_type_mapping(" + - DDRWriter.eQuote( qname.toString()) + ')'); - if ( null != structure() ) - al.add( "DROP TYPE " + qname); - return al.toArray( new String [ al.size() ]); - } - } - - class BaseUDTImpl - extends AbstractUDTImpl - implements BaseUDT - { - class Shell implements Snippet - { - @Override - public Identifier.Simple implementorName() - { - return BaseUDTImpl.this.implementorName(); - } - - @Override - public String[] deployStrings() - { - return new String[] { "CREATE TYPE " + qname }; - } - - @Override - public String[] undeployStrings() - { - return new String[0]; - } - - @Override - public Set provideTags() - { - return Set.of(); - } - - @Override - public Set requireTags() - { - return Set.of(); - } - - @Override - public Set characterize() - { - return Set.of(); - } - } - - public String typeModifierInput() { return _typeModifierInput; } - public String typeModifierOutput() { return _typeModifierOutput; } - public String analyze() { return _analyze; } - public int internalLength() { return _internalLength; } - public boolean passedByValue() { return _passedByValue; } - public Alignment alignment() { return _alignment; } - public Storage storage() { return _storage; } - public String like() { return _like; } - public char category() { return _category; } - public boolean preferred() { return _preferred; } - public String defaultValue() { return _defaultValue; } - public String element() { return _element; } - public char delimiter() { 
return _delimiter; } - public boolean collatable() { return _collatable; } - - BaseUDTFunctionImpl in, out, recv, send; - - public String _typeModifierInput; - public String _typeModifierOutput; - public String _analyze; - int _internalLength; - public Boolean _passedByValue; - Alignment _alignment; - Storage _storage; - public String _like; - char _category; - public Boolean _preferred; - String _defaultValue; - public String _element; - char _delimiter; - public Boolean _collatable; - - boolean lengthExplicit; - boolean alignmentExplicit; - boolean storageExplicit; - boolean categoryExplicit; - boolean delimiterExplicit; - - public void setInternalLength( Object o, boolean explicit, Element e) - { - _internalLength = (Integer)o; - lengthExplicit = explicit; - } - - public void setAlignment( Object o, boolean explicit, Element e) - { - _alignment = Alignment.valueOf( - ((VariableElement)o).getSimpleName().toString()); - alignmentExplicit = explicit; - } - - public void setStorage( Object o, boolean explicit, Element e) - { - _storage = Storage.valueOf( - ((VariableElement)o).getSimpleName().toString()); - storageExplicit = explicit; - } - - public void setDefaultValue( Object o, boolean explicit, Element e) - { - if ( explicit ) - _defaultValue = (String)o; // "" could be a real default value - } - - public void setCategory( Object o, boolean explicit, Element e) - { - _category = (Character)o; - categoryExplicit = explicit; - } - - public void setDelimiter( Object o, boolean explicit, Element e) - { - _delimiter = (Character)o; - delimiterExplicit = explicit; - } - - BaseUDTImpl(TypeElement e) - { - super( e); - } - - void registerFunctions() - { - setQname(); - - ExecutableElement instanceReadSQL = huntFor( - methodsIn( tclass.getEnclosedElements()), "readSQL", false, - TY_VOID, TY_SQLINPUT, TY_STRING); - - ExecutableElement instanceWriteSQL = huntFor( - methodsIn( tclass.getEnclosedElements()), "writeSQL", false, - TY_VOID, TY_SQLOUTPUT); - - ExecutableElement 
instanceToString = huntFor( - methodsIn( tclass.getEnclosedElements()), "toString", false, - TY_STRING); - - ExecutableElement staticParse = huntFor( - methodsIn( tclass.getEnclosedElements()), "parse", true, - tclass.asType(), TY_STRING, TY_STRING); - - if ( null == staticParse ) - { - msg( Kind.ERROR, tclass, - "A PL/Java UDT must have a public static " + - "parse(String,String) method that returns the UDT"); - } - else - { - in = new BaseUDTFunctionImpl( - this, tclass, BaseUDTFunctionID.INPUT); - putSnippet( staticParse, in); - } - - out = new BaseUDTFunctionImpl( - this, tclass, BaseUDTFunctionID.OUTPUT); - putSnippet( null != instanceToString ? instanceToString : out, out); - - recv = new BaseUDTFunctionImpl( - this, tclass, BaseUDTFunctionID.RECEIVE); - putSnippet( null != instanceReadSQL ? instanceReadSQL : recv, recv); - - send = new BaseUDTFunctionImpl( - this, tclass, BaseUDTFunctionID.SEND); - putSnippet( null != instanceWriteSQL ? instanceWriteSQL : send, - send); - } - - public Set characterize() - { - if ( "".equals( typeModifierInput()) - && ! 
"".equals( typeModifierOutput()) ) - msg( Kind.ERROR, tclass, - "UDT typeModifierOutput useless without typeModifierInput"); - - if ( 1 > internalLength() && -1 != internalLength() ) - msg( Kind.ERROR, tclass, - "UDT internalLength must be positive, or -1 for varying"); - - if ( passedByValue() && - ( 8 < internalLength() || -1 == internalLength() ) ) - msg( Kind.ERROR, tclass, - "Only a UDT of fixed length <= 8 can be passed by value"); - - if ( -1 == internalLength() && - -1 == alignment().compareTo( Alignment.INT4) ) - msg( Kind.ERROR, tclass, - "A variable-length UDT must have alignment at least INT4"); - - if ( -1 != internalLength() && Storage.PLAIN != storage() ) - msg( Kind.ERROR, tclass, - "Storage for a fixed-length UDT must be PLAIN"); - - // see PostgreSQL backend/commands/typecmds.c "must be simple ASCII" - if ( 32 > category() || category() > 126 ) - msg( Kind.ERROR, tclass, - "UDT category must be a printable ASCII character"); - - if ( categoryExplicit && Character.isUpperCase(category()) ) - if ( null == PredefinedCategory.valueOf(category()) ) - msg( Kind.WARNING, tclass, - "upper-case letters are reserved for PostgreSQL's " + - "predefined UDT categories, but '%c' is not recognized", - category()); - - recordImplicitTags(); - recordExplicitTags(_provides, _requires); - - return Set.of(this); - } - - void recordImplicitTags() - { - Set provides = provideTags(); - Set requires = requireTags(); - - provides.add(qname.dependTag()); - - for ( BaseUDTFunctionImpl f : List.of(in, out, recv, send) ) - requires.add(new DependTag.Function( - qnameFrom(f._name, f._schema), f.parameterTypes)); - - String s = typeModifierInput(); - if ( ! s.isEmpty() ) - requires.add(new DependTag.Function( - qnameFrom(s), SIG_TYPMODIN)); - - s = typeModifierOutput(); - if ( ! s.isEmpty() ) - requires.add(new DependTag.Function( - qnameFrom(s), SIG_TYPMODOUT)); - - s = analyze(); - if ( ! 
s.isEmpty() ) - requires.add(new DependTag.Function(qnameFrom(s), SIG_ANALYZE)); - } - - public String[] deployStrings() - { - ArrayList al = new ArrayList<>(); - - StringBuilder sb = new StringBuilder(); - sb.append( "CREATE TYPE ").append( qname).append( " (\n\t"); - in.appendTypeOp( sb).append( ",\n\t"); - out.appendTypeOp( sb).append( ",\n\t"); - recv.appendTypeOp( sb).append( ",\n\t"); - send.appendTypeOp( sb); - - if ( ! "".equals( typeModifierInput()) ) - sb.append( ",\n\tTYPMOD_IN = ").append( typeModifierInput()); - - if ( ! "".equals( typeModifierOutput()) ) - sb.append( ",\n\tTYPMOD_OUT = ").append( typeModifierOutput()); - - if ( ! "".equals( analyze()) ) - sb.append( ",\n\tANALYZE = ").append( analyze()); - - if ( lengthExplicit || "".equals( like()) ) - sb.append( ",\n\tINTERNALLENGTH = ").append( - -1 == internalLength() ? "VARIABLE" - : String.valueOf( internalLength())); - - if ( passedByValue() ) - sb.append( ",\n\tPASSEDBYVALUE"); - - if ( alignmentExplicit || "".equals( like()) ) - sb.append( ",\n\tALIGNMENT = ").append( alignment().name()); - - if ( storageExplicit || "".equals( like()) ) - sb.append( ",\n\tSTORAGE = ").append( storage().name()); - - if ( ! "".equals( like()) ) - sb.append( ",\n\tLIKE = ").append( like()); - - if ( categoryExplicit ) - sb.append( ",\n\tCATEGORY = ").append( - DDRWriter.eQuote( String.valueOf( category()))); - - if ( preferred() ) - sb.append( ",\n\tPREFERRED = true"); - - if ( null != defaultValue() ) - sb.append( ",\n\tDEFAULT = ").append( - DDRWriter.eQuote( defaultValue())); - - if ( ! 
"".equals( element()) ) - sb.append( ",\n\tELEMENT = ").append( element()); - - if ( delimiterExplicit ) - sb.append( ",\n\tDELIMITER = ").append( - DDRWriter.eQuote( String.valueOf( delimiter()))); - - if ( collatable() ) - sb.append( ",\n\tCOLLATABLE = true"); - - al.add( sb.append( "\n)").toString()); - addComment( al); - return al.toArray( new String [ al.size() ]); - } - - public String[] undeployStrings() - { - return new String[] - { - "DROP TYPE " + qname + " CASCADE" - }; - } - - @Override - public Vertex breakCycle(Vertex v, boolean deploy) - { - assert this == v.payload; - - /* - * Find the entries in my adjacency list that are implicated in the - * cycle (that is, that precede, perhaps transitively, me). - */ - Vertex[] vs = v.precedesTransitively(v); - - assert null != vs && 0 < vs.length : "breakCycle not in a cycle"; - - if ( vs.length < v.indegree ) - return null; // other non-cyclic edges not satisfied yet - - if ( deploy ) - { - Vertex breaker = new Vertex<>(new Shell()); - v.transferSuccessorsTo(breaker, vs); - return breaker; - } - - for ( Vertex subsumed : vs ) - subsumed.payload.subsume(); - - /* - * Set indegree now to zero, so that when the subsumed snippets are - * themselves emitted, they will not decrement it to zero and cause - * this to be scheduled again. 
- */ - v.indegree = 0; - - return v; // use this vertex itself in the undeploy case - } - } - - class CastImpl - extends Repeatable - implements Cast, Snippet, Commentable - { - CastImpl(Element e, AnnotationMirror am) - { - super(e, am); - } - - public String from() { return _from; } - public String to() { return _to; } - public Cast.Path path() { return _path; } - public Cast.Application application() { return _application; } - public String[] provides() { return _provides; } - public String[] requires() { return _requires; } - - public String _from; - public String _to; - public Cast.Path _path; - public Cast.Application _application; - public String[] _provides; - public String[] _requires; - - FunctionImpl func; - DBType fromType; - DBType toType; - - public void setPath( Object o, boolean explicit, Element e) - { - if ( explicit ) - _path = Path.valueOf( - ((VariableElement)o).getSimpleName().toString()); - } - - public Set characterize() - { - boolean ok = true; - - if ( ElementKind.METHOD.equals(m_targetElement.getKind()) ) - { - func = getSnippet(m_targetElement, FunctionImpl.class, - () -> (FunctionImpl)null); - if ( null == func ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "A method annotated with @Cast must also have @Function" - ); - ok = false; - } - } - - if ( null == func && "".equals(_from) ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Cast not annotating a method must specify from=" - ); - ok = false; - } - - if ( null == func && "".equals(_to) ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Cast not annotating a method must specify to=" - ); - ok = false; - } - - if ( null == func && null == _path ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Cast not annotating a method, and without path=, " + - "is not yet supported" - ); - ok = false; - } - - if ( ok ) - { - fromType = ("".equals(_from)) - ? func.parameterTypes[0] - : DBType.fromSQLTypeAnnotation(_from); - - toType = ("".equals(_to)) - ? 
func.returnType - : DBType.fromSQLTypeAnnotation(_to); - } - - if ( null != _path ) - { - if ( ok && Path.BINARY == _path && fromType.equals(toType) ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "A cast with from and to types the same can only " + - "apply a type modifier; path=BINARY will have " + - "no effect"); - ok = false; - } - } - else if ( null != func ) - { - int nparams = func.parameterTypes.length; - - if ( ok && 2 > nparams && fromType.equals(toType) ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "A cast with from and to types the same can only " + - "apply a type modifier, therefore must have at least " + - "two parameters"); - ok = false; - } - - if ( 1 > nparams || nparams > 3 ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "A cast function must have 1, 2, or 3 parameters"); - ok = false; - } - - if (1 < nparams && ! DT_INTEGER.equals(func.parameterTypes[1])) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "Parameter 2 of a cast function must have integer type" - ); - ok = false; - } - - if (3 == nparams && ! DT_BOOLEAN.equals(func.parameterTypes[2])) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "Parameter 3 of a cast function must have boolean type" - ); - ok = false; - } - } - - if ( ! 
ok ) - return Set.of(); - - recordImplicitTags(); - recordExplicitTags(_provides, _requires); - return Set.of(this); - } - - void recordImplicitTags() - { - Set requires = requireTags(); - - DependTag dt = fromType.dependTag(); - if ( null != dt ) - requires.add(dt); - - dt = toType.dependTag(); - if ( null != dt ) - requires.add(dt); - - if ( null == _path ) - { - dt = func.provideTags().stream() - .filter(DependTag.Function.class::isInstance) - .findAny().get(); - requires.add(dt); - } - } - - public String[] deployStrings() - { - List al = new ArrayList<>(); - - StringBuilder sb = new StringBuilder(); - - sb.append("CREATE CAST (") - .append(fromType).append(" AS ").append(toType).append(")\n\t"); - - if ( Path.BINARY == _path ) - sb.append("WITHOUT FUNCTION"); - else if ( Path.INOUT == _path ) - sb.append("WITH INOUT"); - else - { - sb.append("WITH FUNCTION "); - func.appendNameAndParams(sb, false, false, false); - } - - switch ( _application ) - { - case ASSIGNMENT: sb.append("\n\tAS ASSIGNMENT"); break; - case EXPLICIT: break; - case IMPLICIT: sb.append("\n\tAS IMPLICIT"); - } - - al.add(sb.toString()); - - if ( null != comment() ) - al.add( - "COMMENT ON CAST (" + - fromType + " AS " + toType + ") IS " + - DDRWriter.eQuote(comment())); - - return al.toArray( new String [ al.size() ]); - } - - public String[] undeployStrings() - { - return new String[] - { - "DROP CAST (" + fromType + " AS " + toType + ")" - }; - } - } - - /* - * Called by processRepeatable for each @Operator processed. - * This happens before characterize, but after populating, so the - * operator's name and commutator/negator/synthetic elements can be - * inspected. All operators annotating a given element e are processed - * consecutively, and followed by a call with the same e and null snip. 
- * - * This will accumulate the snippets onto two lists, for non-synthetic and - * synthetic ones and, on the final call, process the lists to find possible - * paths from non-synthetic to synthetic ones via commutation and/or - * negation. The possible paths will be recorded on each synthetic operator. - * They will have to be confirmed during characterize after things like - * operand types and arity have been resolved. - */ - void operatorPreSynthesize( Element e, OperatorImpl snip) - { - if ( ! ElementKind.METHOD.equals(e.getKind()) ) - { - if ( null != snip ) - putSnippet( snip, (Snippet)snip); - return; - } - - if ( null != snip ) - { - if ( snip.selfCommutator || snip.twinCommutator ) - snip.commutator = snip.qname; - - (snip.isSynthetic ? m_synthetic : m_nonSynthetic).add(snip); - return; - } - - /* - * Initially: - * processed: is empty - * ready: contains all non-synthetic snippets - * pending: contains all synthetic snippets - * Step: - * A snippet s is removed from ready and added to processed. - * If s.commutator or s.negator matches a synthetic snippet in pending, - * a corresponding path is recorded on that snippet. If it is - * the first path recorded on that snippet, the snippet is moved - * to ready. 
- */ - - List processed = - new ArrayList<>(m_nonSynthetic.size() + m_synthetic.size()); - Queue ready = new LinkedList<>(m_nonSynthetic); - LinkedList pending = new LinkedList<>(m_synthetic); - m_nonSynthetic.clear(); - m_synthetic.clear(); - - while ( null != (snip = ready.poll()) ) - { - processed.add(snip); - if ( null != snip.commutator ) - { - ListIterator it = pending.listIterator(); - while ( it.hasNext() ) - { - OperatorImpl other = it.next(); - if ( maybeAddPath(snip, other, - OperatorPath.Transform.COMMUTATION) ) - { - it.remove(); - ready.add(other); - } - } - } - if ( null != snip.negator ) - { - ListIterator it = pending.listIterator(); - while ( it.hasNext() ) - { - OperatorImpl other = it.next(); - if ( maybeAddPath(snip, other, - OperatorPath.Transform.NEGATION) ) - { - it.remove(); - ready.add(other); - } - } - } - } - - if ( ! pending.isEmpty() ) - msg(Kind.ERROR, e, "Cannot synthesize operator(s) (%s)", - pending.stream() - .map(o -> o.qname.toString()) - .collect(joining(" "))); - - for ( OperatorImpl s : processed ) - putSnippet( s, (Snippet)s); - } - - boolean maybeAddPath( - OperatorImpl from, OperatorImpl to, OperatorPath.Transform how) - { - if ( ! to.isSynthetic ) - return false; // don't add paths to a non-synthetic operator - - /* - * setSynthetic will have left synthetic null in the synthetic=TWIN - * case. That case imposes more constraints on what paths can be added: - * an acceptable path must involve commutation (and only commutation) - * from another operator that will have a function name (so, either - * a non-synthetic one, or a synthetic one given an actual name, other - * than TWIN). In the latter case, copy the name here (for the former, - * it will be copied from the function's name, in characterize()). - */ - boolean syntheticTwin = null == to.synthetic; - - switch ( how ) - { - case COMMUTATION: - if ( ! 
from.commutator.equals(to.qname) ) - return false; // this is not the operator you're looking for - if ( null != to.commutator && ! to.commutator.equals(from.qname) ) - return false; // you're not the one it's looking for - break; - case NEGATION: - if ( ! from.negator.equals(to.qname) ) - return false; // move along - if ( null != to.negator && ! to.negator.equals(from.qname) ) - return false; // move along - if ( syntheticTwin ) - return false; - break; - } - - if ( syntheticTwin ) - { - /* - * We will apply commutation to 'from' (the negation case - * would have been rejected above). Either 'from' is nonsynthetic - * and its function name will be copied in characterize(), or it is - * synthetic and must have a name or we reject it here. If not - * rejected, copy the name. - */ - if ( from.isSynthetic ) - { - if ( null == from.synthetic ) - return false; - to.synthetic = from.synthetic; - } - } - - if ( null == to.paths ) - to.paths = new ArrayList<>(); - - if ( ! from.isSynthetic ) - to.paths.add(new OperatorPath(from, from, null, EnumSet.of(how))); - else - { - for ( OperatorPath path : from.paths ) - { - to.paths.add(new OperatorPath( - path.base, from, path.fromBase, EnumSet.of(how))); - } - } - - return true; - } - - /** - * Why has {@code Set} or at least {@code EnumSet} not got this? 
- */ - static > EnumSet symmetricDifference( - EnumSet a, EnumSet b) - { - EnumSet result = a.clone(); - result.removeAll(b); - b = b.clone(); - b.removeAll(a); - result.addAll(b); - return result; - } - - List m_nonSynthetic = new ArrayList<>(); - List m_synthetic = new ArrayList<>(); - - static class OperatorPath - { - OperatorImpl base; - OperatorImpl proximate; - EnumSet fromBase; - EnumSet fromProximate; - - enum Transform { NEGATION, COMMUTATION } - - OperatorPath( - OperatorImpl base, OperatorImpl proximate, - EnumSet baseToProximate, - EnumSet proximateToNew) - { - this.base = base; - this.proximate = proximate; - fromProximate = proximateToNew.clone(); - - if ( base == proximate ) - fromBase = fromProximate; - else - fromBase = symmetricDifference(baseToProximate, proximateToNew); - } - - public String toString() - { - return - base.commentDropForm() + " " + fromBase + - (base == proximate - ? "" - : " (... " + proximate.commentDropForm() + - " " + fromProximate); - } - } - - class OperatorImpl - extends Repeatable - implements Operator, Snippet, Commentable - { - OperatorImpl(Element e, AnnotationMirror am) - { - super(e, am); - } - - public String[] name() { return qstrings(qname); } - public String left() { return operand(0); } - public String right() { return operand(1); } - public String[] function() { return qstrings(funcName); } - public String[] synthetic() { return qstrings(synthetic); } - public String[] commutator() { return qstrings(commutator); } - public String[] negator() { return qstrings(negator); } - public boolean hashes() { return _hashes; } - public boolean merges() { return _merges; } - public String[] restrict() { return qstrings(restrict); } - public String[] join() { return qstrings(join); } - public String[] provides() { return _provides; } - public String[] requires() { return _requires; } - - public String[] _provides; - public String[] _requires; - public boolean _hashes; - public boolean _merges; - - Identifier.Qualified 
qname; - DBType[] operands = { null, null }; - FunctionImpl func; - Identifier.Qualified funcName; - Identifier.Qualified commutator; - Identifier.Qualified negator; - Identifier.Qualified restrict; - Identifier.Qualified join; - Identifier.Qualified synthetic; - boolean isSynthetic; - boolean selfCommutator; - boolean twinCommutator; - List paths; - - private String operand(int i) - { - return null == operands[i] ? null : operands[i].toString(); - } - - public void setName( Object o, boolean explicit, Element e) - { - qname = operatorNameFrom(avToArray( o, String.class)); - } - - public void setLeft( Object o, boolean explicit, Element e) - { - if ( explicit ) - operands[0] = DBType.fromSQLTypeAnnotation((String)o); - } - - public void setRight( Object o, boolean explicit, Element e) - { - if ( explicit ) - operands[1] = DBType.fromSQLTypeAnnotation((String)o); - } - - public void setFunction( Object o, boolean explicit, Element e) - { - if ( explicit ) - funcName = qnameFrom(avToArray( o, String.class)); - } - - public void setSynthetic( Object o, boolean explicit, Element e) - { - if ( ! explicit ) - return; - - /* - * Use isSynthetic to indicate that synthetic= has been used at all. - * Set synthetic to the supplied qname only if it is a qname, and - * not the distinguished value TWIN. - * - * Most of the processing below only needs to look at isSynthetic. - * The TWIN case, recognized by isSynthetic && null == synthetic, - * will be handled late in the game by copying the base function's - * qname. - */ - - isSynthetic = true; - String[] ss = avToArray( o, String.class); - if ( 1 != ss.length || ! TWIN.equals(ss[0]) ) - synthetic = qnameFrom(ss); - } - - public void setCommutator( Object o, boolean explicit, Element e) - { - if ( ! 
explicit ) - return; - - String[] ss = avToArray( o, String.class); - if ( 1 == ss.length ) - { - if ( SELF.equals(ss[0]) ) - { - selfCommutator = true; - return; - } - if ( TWIN.equals(ss[0]) ) - { - twinCommutator = true; - return; - } - } - commutator = operatorNameFrom(ss); - } - - public void setNegator( Object o, boolean explicit, Element e) - { - if ( explicit ) - negator = operatorNameFrom(avToArray( o, String.class)); - } - - public void setRestrict( - Object o, boolean explicit, Element e) - { - if ( explicit ) - restrict = qnameFrom(avToArray( o, String.class)); - } - - public void setJoin( - Object o, boolean explicit, Element e) - { - if ( explicit ) - join = qnameFrom(avToArray( o, String.class)); - } - - public Set characterize() - { - boolean ok = true; - Snippet syntheticFunction = null; - - if ( ElementKind.METHOD.equals(m_targetElement.getKind()) ) - { - func = getSnippet(m_targetElement, FunctionImpl.class, - () -> (FunctionImpl)null); - } - - if ( isSynthetic ) - { - if ( null != funcName ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Operator may not specify both function= and " + - "synthetic=" - ); - ok = false; - } - funcName = synthetic; // can be null (the TWIN case) - } - - if ( null == func && null == funcName && ! isSynthetic ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Operator not annotating a method must specify function=" - ); - ok = false; - } - - if ( null == func ) - { - if ( null == operands[0] && null == operands[1] ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Operator not annotating a method must specify " + - "left= or right= or both" - ); - ok = false; - } - } - else - { - Identifier.Qualified fn = - qnameFrom(func.name(), func.schema()); - - if ( null == funcName ) - funcName = fn; - else if ( ! funcName.equals(fn) && ! 
isSynthetic ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Operator annotates a method but function= gives a " + - "different name" - ); - ok = false; - } - - long explicit = - Arrays.stream(operands).filter(Objects::nonNull).count(); - - if ( 0 != explicit && isSynthetic ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Operator with synthetic= must not specify " + - "operand types" - ); - ok = false; - } - - if ( 0 == explicit ) - { - int nparams = func.parameterTypes.length; - if ( 1 > nparams || nparams > 2 ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "method annotated with @Operator must take one " + - "or two parameters" - ); - ok = false; - } - if ( 1 == nparams ) - operands[1] = func.parameterTypes[0]; - else - System.arraycopy(func.parameterTypes,0, operands,0,2); - } - else if ( explicit != func.parameterTypes.length ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Operator annotates a method but specifies " + - "a different number of operands" - ); - ok = false; - } - else if ( 2 == explicit - && ! Arrays.equals(operands, func.parameterTypes) - || 1 == explicit - && ! Arrays.asList(operands) - .contains(func.parameterTypes[0]) ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Operator annotates a method but specifies " + - "different operand types" - ); - ok = false; - } - } - - /* - * At this point, ok ==> there is a non-null funcName ... UNLESS - * isSynthetic is true, synthetic=TWIN was given, and we are not - * annotating a method (that last condition is currently not - * supported, so we could in fact rely on having a funcName here, - * but that condition may be worth supporting in the future, so - * better to keep the exception in mind). - */ - - if ( ! 
ok ) - return Set.of(); - - long arity = - Arrays.stream(operands).filter(Objects::nonNull).count(); - - if ( 1 == arity && null == operands[1] ) - { - msg(Kind.WARNING, m_targetElement, m_origin, - "Right unary (postfix) operators are deprecated and will " + - "be removed in PostgreSQL version 14." - ); - } - - if ( null != commutator ) - { - if ( 2 != arity ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "unary @Operator cannot have a commutator" - ); - ok = false; - } - else if ( selfCommutator && ! operands[0].equals(operands[1]) ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Operator with different left and right operand " + - "types cannot have commutator=SELF" - ); - ok = false; - } - else if ( twinCommutator && operands[0].equals(operands[1]) ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Operator with matching left and right operand " + - "types cannot have commutator=TWIN" - ); - ok = false; - } - } - - boolean knownNotBoolean = - null != func && ! DT_BOOLEAN.equals(func.returnType); - - if ( null != negator ) - { - if ( knownNotBoolean ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "negator= only belongs on a boolean @Operator" - ); - ok = false; - } - else if ( negator.equals(qname) ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Operator can never be its own negator" - ); - ok = false; - } - } - - boolean knownNotBinaryBoolean = 2 != arity || knownNotBoolean; - boolean knownVolatile = - null != func && Function.Effects.VOLATILE == func.effects(); - boolean operandTypesDiffer = - 2 == arity && ! operands[0].equals(operands[1]); - boolean selfCommutates = - null != commutator && commutator.equals(qname); - - ok &= Stream.of( - _hashes ? "hashes" : null, - _merges ? 
"merges" : null) - .filter(Objects::nonNull) - .map(s -> - { - boolean inner_ok = true; - if ( knownNotBinaryBoolean ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "%s= only belongs on a boolean " + - "binary @Operator", s - ); - inner_ok = false; - } - if ( null == commutator ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "%s= requires that the @Operator " + - "have a commutator", s - ); - inner_ok = false; - } - else if ( ! (operandTypesDiffer || selfCommutates) ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "%s= requires the @Operator to be its own" + - "commutator as its operand types are the same", s - ); - inner_ok = false; - } - if ( knownVolatile ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "%s= requires an underlying function " + - "declared IMMUTABLE or STABLE", s - ); - inner_ok = false; - } - return inner_ok; - }) - .allMatch(t -> t); - - if ( null != restrict && knownNotBinaryBoolean ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "restrict= only belongs on a boolean binary @Operator" - ); - ok = false; - } - - if ( null != join && knownNotBinaryBoolean ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "join= only belongs on a boolean binary @Operator" - ); - ok = false; - } - - if ( ! ok ) - return Set.of(); - - if ( isSynthetic ) - { - if ( null == func ) - { - /* - * It could be possible to relax this requirement if there - * is a need, but this way is easier. - */ - msg(Kind.ERROR, m_targetElement, m_origin, - "Synthetic operator annotation must appear " + - "on the method to be used as the base"); - ok = false; - } - - if ( paths.isEmpty() ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "Synthetic operator %s has no derivation path " + - "involving negation or commutation from another " + - "operator", qnameUnwrapped()); - /* - * If no paths at all, return empty from here; no point in - * further checks. 
- */ - return Set.of(); - } - - /* - * Check for conditions where deriving by commutation wouldn't - * make sense. Any of these three conditions will trigger the - * test of available paths. The conditions are rechecked but the - * third one is changed, so either of the first two will always - * preclude commutation, but ! operandTypesDiffer only does if - * the synthetic function's name will be the same as the base's. - * (If the types were different, PostgreSQL overloading would - * allow the functions to share a name, but that's not possible - * if the types are the same.) In those cases, any commutation - * paths are filtered out; if no path remains, that's an error. - */ - if ( 2 != arity || selfCommutator || ! operandTypesDiffer ) - { - List filtered = - paths.stream() - .filter( - p -> ! p.fromBase.contains( - OperatorPath.Transform.COMMUTATION)) - .collect(toList()); - if ( 2 != arity || selfCommutator - || null == synthetic || - synthetic.equals(qnameFrom(func.name(), func.schema()))) - { - if ( filtered.isEmpty() ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "Synthetic operator %s cannot be another " + - "operator's commutator, but found only " + - "path(s) involving commutation: %s", - qnameUnwrapped(), paths.toString()); - ok = false; - } - else - paths = filtered; - } - } - - ok &= paths.stream().collect( - groupingBy(p -> p.base, - mapping(p -> p.fromBase, toSet()))) - .entrySet().stream() - .filter(e -> 1 < e.getValue().size()) - .map(e -> - { - msg(Kind.ERROR, m_targetElement, m_origin, - "Synthetic operator %s found paths with " + - "different transforms %s from same base %s", - qnameUnwrapped(), - e.getValue(), e.getKey().qnameUnwrapped()); - return false; - }) - .allMatch(t -> t); - - ok &= paths.stream().collect( - groupingBy(p -> p.proximate, - mapping(p -> p.fromProximate, toSet()))) - .entrySet().stream() - .filter(e -> 1 < e.getValue().size()) - .map(e -> - { - msg(Kind.ERROR, m_targetElement, m_origin, - "Synthetic operator %s found 
paths with " + - "different transforms %s from %s", - qnameUnwrapped(), - e.getValue(), e.getKey().qnameUnwrapped()); - return false; - }) - .allMatch(t -> t); - - Set> - commutatorCandidates = - paths.stream() - .filter( - p -> p.fromProximate.contains( - OperatorPath.Transform.COMMUTATION)) - .map(p -> p.proximate.qname) - .collect(toSet()); - if ( null == commutator && 0 < commutatorCandidates.size() ) - { - if ( 1 == commutatorCandidates.size() ) - commutator = commutatorCandidates.iterator().next(); - else - { - msg(Kind.ERROR, m_targetElement, m_origin, - "Synthetic operator %s has muliple commutator " + - "candidates %s", - qnameUnwrapped(), commutatorCandidates); - ok = false; - } - } - - Set> - negatorCandidates = - paths.stream() - .filter( - p -> p.fromProximate.contains( - OperatorPath.Transform.NEGATION)) - .map(p -> p.proximate.qname) - .collect(toSet()); - if ( null == negator && 0 < negatorCandidates.size() ) - { - if ( 1 == negatorCandidates.size() ) - negator = negatorCandidates.iterator().next(); - else - { - msg(Kind.ERROR, m_targetElement, m_origin, - "Synthetic operator %s has muliple negator " + - "candidates %s", - qnameUnwrapped(), negatorCandidates); - ok = false; - } - } - - /* - * Filter paths to only those based on an operator that is built - * over this method. (That's currently guaranteed by the way - * operatorPreSynthesize generates paths, but may as well check - * here to ensure sanity during future maintenance.) - * - * For synthetic=TWIN (represented here by null==synthetic), - * also filter out paths that don't involve commutation (without - * it, the synthetic function would collide with the base one). 
- */ - - boolean nonCommutedOK = null != synthetic; - - paths = paths.stream() - .filter( - p -> p.base.func == func - && (nonCommutedOK || p.fromBase.contains( - OperatorPath.Transform.COMMUTATION)) - ).collect(toList()); - - if ( 0 == paths.size() ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "Synthetic operator %s has no derivation path " + - "from an operator that is based on this method%s", - qnameUnwrapped(), - nonCommutedOK ? "" : " and involves commutation"); - ok = false; - } - - if ( ! ok ) - return Set.of(); - - /* - * Select a base. Could there be more than one? As the checks - * for transform inconsistencies above found none, we will - * assume any should be ok, and choose one semi-arbitrarily. - */ - - OperatorPath selected = - paths.stream() - .sorted( - Comparator.comparingInt( - p -> p.fromBase.size()) - .thenComparingInt( - p -> p.fromBase.stream() - .mapToInt(Enum::ordinal) - .max().getAsInt()) - .thenComparing(p -> p.base.qnameUnwrapped())) - .findFirst().get(); - - /* - * At last, the possibly null funcName (synthetic=TWIN case) - * can be fixed up. - */ - if ( null == synthetic ) - { - FunctionImpl f = selected.base.func; - funcName = synthetic = qnameFrom(f.name(), f.schema()); - } - - replaceCommentIfDerived("Operator " + qnameUnwrapped() - + " automatically derived by " - + selected.fromBase + " from " - + selected.base.qnameUnwrapped()); - - boolean commute = selected.fromBase - .contains(OperatorPath.Transform.COMMUTATION); - boolean negate = selected.fromBase - .contains(OperatorPath.Transform.NEGATION); - - if ( operandTypesDiffer && commute ) - { - DBType t = operands[0]; - operands[0] = operands[1]; - operands[1] = t; - } - - syntheticFunction = - func.transformed(synthetic, commute, negate); - } - - recordImplicitTags(); - recordExplicitTags(_provides, _requires); - return null == syntheticFunction - ? 
Set.of(this) : Set.of(syntheticFunction, this); - } - - void recordImplicitTags() - { - Set provides = provideTags(); - Set requires = requireTags(); - - provides.add(new DependTag.Operator(qname, operands)); - - /* - * Commutator and negator often involve cycles. PostgreSQL already - * has its own means of breaking them, so it is not necessary here - * even to declare dependencies based on them. - * - * There is also, for now, no point in declaring dependencies on - * selectivity estimators; they can't be written in Java, so they - * won't be products of this compilation. - * - * So, just require the operand types and the function. - */ - - Arrays.stream(operands) - .filter(Objects::nonNull) - .map(DBType::dependTag) - .filter(Objects::nonNull) - .forEach(requires::add); - - if ( null != func && null == synthetic ) - { - func.provideTags().stream() - .filter(DependTag.Function.class::isInstance) - .forEach(requires::add); - } - else - { - requires.add(new DependTag.Function(funcName, - Arrays.stream(operands) - .filter(Objects::nonNull) - .toArray(DBType[]::new))); - } - } - - /** - * Just to keep things interesting, a schema-qualified operator name is - * wrapped in OPERATOR(...) pretty much everywhere, except as the guest - * of honor in a CREATE OPERATOR or DROP OPERATOR, where the unwrapped - * form is needed. - */ - private String qnameUnwrapped() - { - String local = qname.local().toString(); - Identifier.Simple qualifier = qname.qualifier(); - return null == qualifier ? local : qualifier + "." + local; - } - - /** - * An operator is identified this way in a COMMENT or DROP. - */ - private String commentDropForm() - { - return qnameUnwrapped() + " (" + - (null == operands[0] ? "NONE" : operands[0]) + ", " + - (null == operands[1] ? 
"NONE" : operands[1]) + ")"; - } - - public String[] deployStrings() - { - List al = new ArrayList<>(); - - StringBuilder sb = new StringBuilder(); - - sb.append("CREATE OPERATOR ").append(qnameUnwrapped()); - sb.append(" (\n\tPROCEDURE = ").append(funcName); - - if ( null != operands[0] ) - sb.append(",\n\tLEFTARG = ").append(operands[0]); - - if ( null != operands[1] ) - sb.append(",\n\tRIGHTARG = ").append(operands[1]); - - if ( null != commutator ) - sb.append(",\n\tCOMMUTATOR = ").append(commutator); - - if ( null != negator ) - sb.append(",\n\tNEGATOR = ").append(negator); - - if ( null != restrict ) - sb.append(",\n\tRESTRICT = ").append(restrict); - - if ( null != join ) - sb.append(",\n\tJOIN = ").append(join); - - if ( _hashes ) - sb.append(",\n\tHASHES"); - - if ( _merges ) - sb.append(",\n\tMERGES"); - - sb.append(')'); - - al.add(sb.toString()); - if ( null != comment() ) - al.add( - "COMMENT ON OPERATOR " + commentDropForm() + " IS " + - DDRWriter.eQuote(comment())); - - return al.toArray( new String [ al.size() ]); - } - - public String[] undeployStrings() - { - return new String[] - { - "DROP OPERATOR " + commentDropForm() - }; - } - } - - class AggregateImpl - extends Repeatable - implements Aggregate, Snippet, Commentable - { - AggregateImpl(Element e, AnnotationMirror am) - { - super(e, am); - } - - public String[] name() { return qstrings(qname); } - public String[] arguments() { return argsOut(aggregateArgs); } - public String[] directArguments() { return argsOut(directArgs); } - public boolean hypothetical() { return _hypothetical; } - public boolean[] variadic() { return _variadic; } - public Plan[] plan() { return new Plan[]{_plan}; } - public Plan[] movingPlan() { return _movingPlan; } - public Function.Parallel parallel() { return _parallel; } - public String[] sortOperator() { return qstrings(sortop); } - public String[] provides() { return _provides; } - public String[] requires() { return _requires; } - - public boolean _hypothetical; - 
public boolean[] _variadic = {false, false}; - public Plan _plan; - public Plan[] _movingPlan; - public Function.Parallel _parallel; - public String[] _provides; - public String[] _requires; - - FunctionImpl func; - Identifier.Qualified qname; - List> aggregateArgs; - List> directArgs; - Identifier.Qualified sortop; - static final int DIRECT_ARGS = 0; // index into _variadic[] - static final int AGG_ARGS = 1; // likewise - boolean directVariadicExplicit; - - private List> - argsIn(String[] names) - { - return Arrays.stream(names) - .map(DBType::fromNameAndType) - .collect(toList()); - } - - private String[] - argsOut(List> names) - { - return names.stream() - .map(e -> e.getKey() + " " + e.getValue()) - .toArray(String[]::new); - } - - @Override - public String derivedComment( Element e) - { - /* - * When this annotation targets a TYPE, just as a - * place to hang it, there's no particular reason to believe a - * doc comment on the type is a good choice for this aggregate. - * When the annotation is on a method, the chances are better. - */ - if ( ElementKind.METHOD.equals(e.getKind()) ) - return super.derivedComment(e); - return null; - } - - public void setName( Object o, boolean explicit, Element e) - { - if ( explicit ) - qname = qnameFrom(avToArray( o, String.class)); - } - - public void setArguments( Object o, boolean explicit, Element e) - { - if ( explicit ) - aggregateArgs = argsIn( avToArray( o, String.class)); - } - - public void setDirectArguments( Object o, boolean explicit, Element e) - { - if ( explicit ) - directArgs = argsIn( avToArray( o, String.class)); - } - - public void setSortOperator( Object o, boolean explicit, Element e) - { - if ( explicit ) - sortop = operatorNameFrom(avToArray( o, String.class)); - } - - public void setVariadic( Object o, boolean explicit, Element e) - { - if ( ! 
explicit ) - return; - - Boolean[] a = avToArray( o, Boolean.class); - - if ( 1 > a.length || a.length > 2 ) - throw new IllegalArgumentException( - "supply only boolean or {boolean,boolean} for variadic"); - - if ( ! Arrays.asList(a).contains(true) ) - throw new IllegalArgumentException( - "supply variadic= only if aggregated arguments, direct " + - "arguments, or both, are variadic"); - - _variadic[AGG_ARGS] = a[a.length - 1]; - if ( 2 == a.length ) - { - directVariadicExplicit = true; - _variadic[DIRECT_ARGS] = a[0]; - } - } - - public void setPlan( Object o, boolean explicit, Element e) - { - _plan = new Plan(); // always a plan, even if members uninitialized - - if ( explicit ) - _plan = planFrom( _plan, o, e, "plan"); - } - - public void setMovingPlan( Object o, boolean explicit, Element e) - { - if ( ! explicit ) - return; - - _movingPlan = new Plan[1]; - _movingPlan [ 0 ] = planFrom( new Moving(), o, e, "movingPlan"); - } - - Plan planFrom( Plan p, Object o, Element e, String which) - { - AnnotationMirror[] ams = avToArray( o, AnnotationMirror.class); - - if ( 1 != ams.length ) - throw new IllegalArgumentException( - which + " must be given exactly one @Plan"); - - populateAnnotationImpl( p, e, ams[0]); - return p; - } - - public Set characterize() - { - boolean ok = true; - boolean orderedSet = null != directArgs; - boolean moving = null != _movingPlan; - boolean checkAccumulatorSig = false; - boolean checkFinisherSig = false; - boolean unary = false; - - if ( ElementKind.METHOD.equals(m_targetElement.getKind()) ) - { - func = getSnippet(m_targetElement, FunctionImpl.class, - () -> (FunctionImpl)null); - if ( null == func ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "A method annotated with @Aggregate must " + - "also have @Function" - ); - ok = false; - } - } - - if ( null != func ) - { - Identifier.Qualified funcName = - qnameFrom(func.name(), func.schema()); - boolean inferAccumulator = - null == _plan.accumulate || null == aggregateArgs; - 
boolean inferFinisher = - null == _plan.finish && ! inferAccumulator; - boolean stateTypeExplicit = false; - - if ( null == qname ) - { - - if ( inferFinisher && 1 == aggregateArgs.size() - && 1 == func.parameterTypes.length - && func.parameterTypes[0] == - aggregateArgs.get(0).getValue() ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "Default name %s for this aggregate would " + - "collide with finish function; use name= to " + - "specify a name", funcName - ); - ok = false; - } - else - qname = funcName; - } - - if ( 1 > func.parameterTypes.length ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "Function with no arguments cannot be @Aggregate " + - "accumulate or finish function" - ); - ok = false; - } - else if ( null == _plan.stateType ) - { - _plan.stateType = func.parameterTypes[0]; - if (null != _movingPlan - && null == _movingPlan[0].stateType) - _movingPlan[0].stateType = func.parameterTypes[0]; - } - else - stateTypeExplicit = true; - - if ( inferAccumulator || inferFinisher ) - { - if ( ok ) - { - if ( inferAccumulator ) - { - if ( null == aggregateArgs ) - { - aggregateArgs = - func.parameterInfo() - .skip(1) // skip the state argument - .map(pi -> - (Map.Entry) - new AbstractMap.SimpleImmutableEntry<>( - Identifier.Simple.fromJava( - pi.name() - ), - pi.dt - ) - ) - .collect(toList()); - } - else - checkAccumulatorSig = true; - _plan.accumulate = funcName; - if ( null != _movingPlan - && null == _movingPlan[0].accumulate ) - _movingPlan[0].accumulate = funcName; - } - else // inferFinisher - { - _plan.finish = funcName; - if ( null != _movingPlan - && null == _movingPlan[0].finish ) - _movingPlan[0].finish = funcName; - } - } - - if ( stateTypeExplicit - && ! 
_plan.stateType.equals(func.parameterTypes[0]) ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "First function argument does not match " + - "stateType specified with @Aggregate" - ); - ok = false; - } - } - else if ( funcName.equals(_plan.accumulate) ) - checkAccumulatorSig = true; - else if ( funcName.equals(_plan.finish) ) - checkFinisherSig = true; - else - { - msg(Kind.WARNING, m_targetElement, m_origin, - "@Aggregate annotation on a method not recognized " + - "as either the accumulate or the finish function " + - "for the aggregate"); - } - - // If the method is the accumulator and is RETURNS_NULL, ensure - // there is either an initialState or a first aggregate arg that - // matches the stateType. - if ( ok && ( inferAccumulator || checkAccumulatorSig ) ) - { - if ( Function.OnNullInput.RETURNS_NULL == func.onNullInput() - && ( 0 == aggregateArgs.size() - || ! _plan.stateType.equals( - aggregateArgs.get(0).getValue()) ) - && null == _plan._initialState ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Aggregate without initialState= must have " + - "either a first argument matching the stateType " + - "or an accumulate method with onNullInput=CALLED."); - ok = false; - } - } - } - - if ( null == qname ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Aggregate missing name="); - ok = false; - } - - if ( null == aggregateArgs ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Aggregate missing arguments="); - ok = false; - } - else - unary = 1 == aggregateArgs.size(); - - if ( null == _plan.stateType ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Aggregate missing stateType="); - ok = false; - } - - if ( null == _plan.accumulate ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Aggregate plan missing accumulate="); - ok = false; - } - - // Could check argument count against FUNC_MAX_ARGS, but that would - // hardcode an assumed value for PostgreSQL's FUNC_MAX_ARGS. 
- - // Check that, if a stateType is polymorphic, there are compatible - // polymorphic arg types? Not today. - - // If a plan has no initialState, then either the accumulate - // function must NOT be RETURNS NULL ON NULL INPUT, or the first - // aggregated argument type must be the same as the state type. - // The type check is easy, but the returnsNull check on the - // accumulate function would require looking up the function (and - // still we wouldn't know, if it's not seen in this compilation). - // For another day. - - // Allow hypothetical only for ordered-set aggregate. - if ( _hypothetical && ! orderedSet ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "hypothetical=true is only allowed for an ordered-set " + - "aggregate (one with directArguments specified, " + - "even if only {})"); - ok = false; - } - - // Allow two-element variadic= only for ordered-set aggregate. - if ( directVariadicExplicit && ! orderedSet ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "Two values for variadic= are only allowed for an " + - "ordered-set aggregate (one with directArguments " + - "specified, even if only {})"); - ok = false; - } - - // Require a movingPlan to have a remove function. - if ( moving && null == _movingPlan[0].remove ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "a movingPlan must include a remove function"); - ok = false; - } - - // Checks if the aggregated argument list is declared variadic. - // The last element must be an array type or "any"; an ordered-set - // aggregate allows only one argument and it must be "any". 
- if ( _variadic[AGG_ARGS] ) - { - if ( 1 > aggregateArgs.size() ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "To declare the aggregated argument list variadic, " + - "there must be at least one argument."); - ok = false; - } - else - { - DBType t = - aggregateArgs.get(aggregateArgs.size() - 1).getValue(); - boolean isAny = // allow omission of pg_catalog namespace - DT_ANY.equals(t) || "\"any\"".equals(t.toString()); - if ( orderedSet && (! isAny || 1 != aggregateArgs.size()) ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "If variadic, an ordered-set aggregate's " + - "aggregated argument list must be only one " + - "argument and of type \"any\"."); - ok = false; - } - else if ( ! isAny && ! t.isArray() ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "If variadic, the last aggregated argument must " + - "be an array type (or \"any\")."); - ok = false; - } - } - } - - // Checks specific to ordered-set aggregates. - if ( orderedSet ) - { - if ( 0 == aggregateArgs.size() ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "An ordered-set aggregate needs at least one " + - "aggregated argument"); - ok = false; - } - - // Checks specific to hypothetical-set aggregates. - // The aggregated argument types must match the trailing direct - // arguments, and the two variadic declarations must match. - if ( _hypothetical ) - { - if ( _variadic[DIRECT_ARGS] != _variadic[AGG_ARGS] ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "For a hypothetical-set aggregate, neither or " + - "both the direct and aggregated argument lists " + - "must be declared variadic."); - ok = false; - } - if ( directArgs.size() < aggregateArgs.size() - || - ! 
directArgs.subList( - directArgs.size() - aggregateArgs.size(), - directArgs.size()) - .equals(aggregateArgs) ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "The last direct arguments of a hypothetical-set " + - "aggregate must match the types of the " + - "aggregated arguments"); - ok = false; - } - } - } - - // It is allowed to omit a finisher function, but some things - // make no sense without one. - if ( orderedSet && null == _plan.finish && 0 < directArgs.size() ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "Direct arguments serve no purpose without a finisher"); - ok = false; - } - - if ( null == _plan.finish && _plan._polymorphic ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "The polymorphic flag is meaningless with no finisher"); - ok = false; - } - - // The same finisher checks for a movingPlan, if present. - if ( moving ) - { - if ( orderedSet - && null == _movingPlan[0].finish - && directArgs.size() > 0 ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "Direct arguments serve no purpose without a finisher"); - ok = false; - } - - if ( null == _movingPlan[0].finish - && _movingPlan[0]._polymorphic ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "The polymorphic flag is meaningless with no finisher"); - ok = false; - } - } - - // Checks involving sortOperator - if ( null != sortop ) - { - if ( orderedSet ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "The sortOperator optimization is not available for " + - "an ordered-set aggregate (one with directArguments)"); - ok = false; - } - - if ( ! 
unary || _variadic[AGG_ARGS] ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "The sortOperator optimization is only available for " + - "a one-argument (and non-variadic) aggregate"); - ok = false; - } - } - - // Checks involving serialize / deserialize - if ( null != _plan.serialize || null != _plan.deserialize ) - { - if ( null == _plan.combine ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "An aggregate plan without combine= may not have " + - "serialize= or deserialize="); - ok = false; - } - - if ( null == _plan.serialize || null == _plan.deserialize ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "An aggregate plan must have both " + - "serialize= and deserialize= or neither"); - ok = false; - } - - if ( ! DT_INTERNAL.equals(_plan.stateType) ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "Only an aggregate plan with stateType " + - "pg_catalog.internal may have serialize=/deserialize="); - ok = false; - } - } - - if ( ! ok ) - return Set.of(); - - Set requires = requireTags(); - - DBType[] accumulatorSig = - Stream.of( - Stream.of(_plan.stateType), - aggregateArgs.stream().map(Map.Entry::getValue)) - .flatMap(identity()).toArray(DBType[]::new); - - DBType[] combinerSig = { _plan.stateType, _plan.stateType }; - - DBType[] finisherSig = - Stream.of( - Stream.of(_plan.stateType), - orderedSet - ? directArgs.stream().map(Map.Entry::getValue) - : Stream.of(), - _plan._polymorphic - ? aggregateArgs.stream().map(Map.Entry::getValue) - : Stream.of() - ) - .flatMap(identity()) - .toArray(DBType[]::new); - - if ( checkAccumulatorSig - && ! Arrays.equals(accumulatorSig, func.parameterTypes) ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Aggregate annotation on a method that matches the name " + - "but not argument types expected for the aggregate's " + - "accumulate function"); - ok = false; - } - - if ( checkFinisherSig - && ! 
Arrays.equals(finisherSig, func.parameterTypes) ) - { - msg(Kind.ERROR, m_targetElement, m_origin, - "@Aggregate annotation on a method that matches the name " + - "but not argument types expected for the aggregate's " + - "finish function"); - ok = false; - } - - requires.add( - new DependTag.Function(_plan.accumulate, accumulatorSig)); - - if ( null != _plan.combine ) - { - DBType[] serialSig = { DT_INTERNAL }; - DBType[] deserialSig = { DT_BYTEA, DT_INTERNAL }; - - requires.add( - new DependTag.Function(_plan.combine, combinerSig)); - - if ( null != _plan.serialize ) - { - requires.add( - new DependTag.Function(_plan.serialize, serialSig)); - requires.add( - new DependTag.Function(_plan.deserialize, deserialSig)); - } - } - - if ( null != _plan.finish ) - requires.add( - new DependTag.Function(_plan.finish, finisherSig)); - - if ( moving ) - { - accumulatorSig[0] = _movingPlan[0].stateType; - Arrays.fill(combinerSig, _movingPlan[0].stateType); - finisherSig[0] = _movingPlan[0].stateType; - - requires.add(new DependTag.Function( - _movingPlan[0].accumulate, accumulatorSig)); - - requires.add(new DependTag.Function( - _movingPlan[0].remove, accumulatorSig)); - - if ( null != _movingPlan[0].combine ) - requires.add(new DependTag.Function( - _movingPlan[0].combine, combinerSig)); - - if ( null != _movingPlan[0].finish ) - requires.add(new DependTag.Function( - _movingPlan[0].finish, finisherSig)); - } - - if ( null != sortop ) - { - DBType arg = aggregateArgs.get(0).getValue(); - DBType[] opSig = { arg, arg }; - requires.add(new DependTag.Operator(sortop, opSig)); - } - - /* - * That establishes dependency on the various support functions, - * which should, transitively, depend on all of the types. But it is - * possible we do not have a whole-program view (perhaps some - * support functions are implemented in other languages, and there - * are @SQLActions setting them up?). Therefore also, redundantly as - * it may be, declare dependency on the types. 
- */ - - Stream.of( - aggregateArgs.stream().map(Map.Entry::getValue), - orderedSet - ? directArgs.stream().map(Map.Entry::getValue) - : Stream.of(), - Stream.of(_plan.stateType), - moving - ? Stream.of(_movingPlan[0].stateType) - : Stream.of() - ) - .flatMap(identity()) - .map(DBType::dependTag) - .filter(Objects::nonNull) - .forEach(requires::add); - - recordExplicitTags(_provides, _requires); - return Set.of(this); - } - - public String[] deployStrings() - { - List al = new ArrayList<>(); - - StringBuilder sb = new StringBuilder("CREATE AGGREGATE "); - appendNameAndArguments(sb); - sb.append(" ("); - - String[] planStrings = _plan.deployStrings(); - int n = planStrings.length; - for ( String s : planStrings ) - { - sb.append("\n\t").append(s); - if ( 0 < -- n ) - sb.append(','); - } - - if ( null != _movingPlan ) - { - planStrings = _movingPlan[0].deployStrings(); - for ( String s : planStrings ) - sb.append(",\n\tM").append(s); - } - - if ( null != sortop ) - sb.append(",\n\tSORTOP = ").append(sortop); - - if ( Function.Parallel.UNSAFE != _parallel ) - sb.append(",\n\tPARALLEL = ").append(_parallel); - - if ( _hypothetical ) - sb.append(",\n\tHYPOTHETICAL"); - - sb.append(')'); - - al.add(sb.toString()); - - if ( null != comment() ) - { - sb = new StringBuilder("COMMENT ON AGGREGATE "); - appendNameAndArguments(sb); - sb.append(" IS ").append(DDRWriter.eQuote(comment())); - al.add(sb.toString()); - } - - return al.toArray( new String [ al.size() ]); - } - - public String[] undeployStrings() - { - StringBuilder sb = new StringBuilder("DROP AGGREGATE "); - appendNameAndArguments(sb); - return new String[] { sb.toString() }; - } - - private void appendNameAndArguments(StringBuilder sb) - { - ListIterator> iter; - Map.Entry entry; - - sb.append(qname).append('('); - if ( null != directArgs ) - { - iter = directArgs.listIterator(); - while ( iter.hasNext() ) - { - entry = iter.next(); - sb.append("\n\t"); - if ( _variadic[DIRECT_ARGS] && ! 
iter.hasNext() ) - sb.append("VARIADIC "); - if ( null != entry.getKey() ) - sb.append(entry.getKey()).append(' '); - sb.append(entry.getValue()); - if ( iter.hasNext() ) - sb.append(','); - else - sb.append("\n\t"); - } - sb.append("ORDER BY"); - } - else if ( 0 == aggregateArgs.size() ) - sb.append('*'); - - iter = aggregateArgs.listIterator(); - while ( iter.hasNext() ) - { - entry = iter.next(); - sb.append("\n\t"); - if ( _variadic[AGG_ARGS] && ! iter.hasNext() ) - sb.append("VARIADIC "); - if ( null != entry.getKey() ) - sb.append(entry.getKey()).append(' '); - sb.append(entry.getValue()); - if ( iter.hasNext() ) - sb.append(','); - } - sb.append(')'); - } - - class Plan extends AbstractAnnotationImpl implements Aggregate.Plan - { - public String stateType() { return stateType.toString(); } - public int stateSize() { return _stateSize; } - public String initialState() { return _initialState; } - public String[] accumulate() { return qstrings(accumulate); } - public String[] combine() { return qstrings(combine); } - public String[] finish() { return qstrings(finish); } - public String[] remove() { return qstrings(remove); } - public String[] serialize() { return qstrings(serialize); } - public String[] deserialize() { return qstrings(deserialize); } - public boolean polymorphic() { return _polymorphic; } - public FinishEffect finishEffect() { return _finishEffect; } - - public int _stateSize; - public String _initialState; - public boolean _polymorphic; - public FinishEffect _finishEffect; - - DBType stateType; - Identifier.Qualified accumulate; - Identifier.Qualified combine; - Identifier.Qualified finish; - Identifier.Qualified remove; - Identifier.Qualified serialize; - Identifier.Qualified deserialize; - - public void setStateType(Object o, boolean explicit, Element e) - { - if ( explicit ) - stateType = DBType.fromSQLTypeAnnotation((String)o); - } - - public void setStateSize(Object o, boolean explicit, Element e) - { - _stateSize = (Integer)o; - if ( 
explicit && 0 >= _stateSize ) - throw new IllegalArgumentException( - "An explicit stateSize must be positive"); - } - - public void setInitialState(Object o, boolean explicit, Element e) - { - if ( explicit ) - _initialState = (String)o; - } - - public void setAccumulate(Object o, boolean explicit, Element e) - { - if ( explicit ) - accumulate = qnameFrom(avToArray( o, String.class)); - } - - public void setCombine(Object o, boolean explicit, Element e) - { - if ( explicit ) - combine = qnameFrom(avToArray( o, String.class)); - } - - public void setFinish(Object o, boolean explicit, Element e) - { - if ( explicit ) - finish = qnameFrom(avToArray( o, String.class)); - } - - public void setRemove(Object o, boolean explicit, Element e) - { - if ( explicit ) - throw new IllegalArgumentException( - "Only a movingPlan may have a remove function"); - } - - public void setSerialize(Object o, boolean explicit, Element e) - { - if ( explicit ) - serialize = qnameFrom(avToArray( o, String.class)); - } - - public void setDeserialize(Object o, boolean explicit, Element e) - { - if ( explicit ) - deserialize = qnameFrom(avToArray( o, String.class)); - } - - public void setFinishEffect( Object o, boolean explicit, Element e) - { - if ( explicit ) - _finishEffect = FinishEffect.valueOf( - ((VariableElement)o).getSimpleName().toString()); - } - - public Set characterize() - { - return Set.of(); - } - - /** - * Returns one string per plan element (not per SQL statement). - *

    - * This method has to be here anyway because the class extends - * {@code AbstractAnnotationImpl}, but it will never be processed as - * an actual SQL snippet. This will be called by the containing - * {@code AggregateImpl} and return the individual plan elements - * that it will build into its own deploy strings. - *

    - * When this class represents a moving plan, the caller will prefix - * each of these strings with {@code M}. - */ - public String[] deployStrings() - { - List al = new ArrayList<>(); - - al.add("STYPE = " + stateType); - - if ( 0 != _stateSize ) - al.add("SSPACE = " + _stateSize); - - if ( null != _initialState ) - al.add("INITCOND = " + DDRWriter.eQuote(_initialState)); - - al.add("SFUNC = " + accumulate); - - if ( null != remove ) - al.add("INVFUNC = " + remove); - - if ( null != finish ) - al.add("FINALFUNC = " + finish); - - if ( _polymorphic ) - al.add("FINALFUNC_EXTRA"); - - if ( null != _finishEffect ) - al.add("FINALFUNC_MODIFY = " + _finishEffect); - - if ( null != combine ) - al.add("COMBINEFUNC = " + combine); - - if ( null != serialize ) - al.add("SERIALFUNC = " + serialize); - - if ( null != deserialize ) - al.add("DESERIALFUNC = " + deserialize); - - return al.toArray( new String [ al.size() ]); - } - - public String[] undeployStrings() - { - return null; - } - } - - class Moving extends Plan - { - public void setRemove(Object o, boolean explicit, Element e) - { - if ( explicit ) - remove = qnameFrom(avToArray( o, String.class)); - } - - public void setSerialize(Object o, boolean explicit, Element e) - { - if ( explicit ) - throw new IllegalArgumentException( - "Only a (non-moving) plan may have a " + - "serialize function"); - } - - public void setDeserialize(Object o, boolean explicit, Element e) - { - if ( explicit ) - throw new IllegalArgumentException( - "Only a (non-moving) plan may have a " + - "deserialize function"); - } - } - } - - /** - * Provides the default mappings from Java types to SQL types. 
- */ - class TypeMapper - { - ArrayList> protoMappings; - ArrayList> finalMappings; - - TypeMapper() - { - protoMappings = new ArrayList<>(); - - // Primitives (these need not, indeed cannot, be schema-qualified) - // - this.addMap(boolean.class, DT_BOOLEAN); - this.addMap(Boolean.class, DT_BOOLEAN); - this.addMap(byte.class, "smallint"); - this.addMap(Byte.class, "smallint"); - this.addMap(char.class, "smallint"); - this.addMap(Character.class, "smallint"); - this.addMap(double.class, "double precision"); - this.addMap(Double.class, "double precision"); - this.addMap(float.class, "real"); - this.addMap(Float.class, "real"); - this.addMap(int.class, DT_INTEGER); - this.addMap(Integer.class, DT_INTEGER); - this.addMap(long.class, "bigint"); - this.addMap(Long.class, "bigint"); - this.addMap(short.class, "smallint"); - this.addMap(Short.class, "smallint"); - - // Known common mappings - // - this.addMap(Number.class, "pg_catalog", "numeric"); - this.addMap(String.class, "pg_catalog", "varchar"); - this.addMap(java.util.Date.class, "pg_catalog", "timestamp"); - this.addMap(Timestamp.class, "pg_catalog", "timestamp"); - this.addMap(Time.class, "pg_catalog", "time"); - this.addMap(java.sql.Date.class, "pg_catalog", "date"); - this.addMap(java.sql.SQLXML.class, "pg_catalog", "xml"); - this.addMap(BigInteger.class, "pg_catalog", "numeric"); - this.addMap(BigDecimal.class, "pg_catalog", "numeric"); - this.addMap(ResultSet.class, DT_RECORD); - this.addMap(Object.class, DT_ANY); - - this.addMap(byte[].class, DT_BYTEA); - - this.addMap(LocalDate.class, "pg_catalog", "date"); - this.addMap(LocalTime.class, "pg_catalog", "time"); - this.addMap(OffsetTime.class, "pg_catalog", "timetz"); - this.addMap(LocalDateTime.class, "pg_catalog", "timestamp"); - this.addMap(OffsetDateTime.class, "pg_catalog", "timestamptz"); - } - - private boolean mappingsFrozen() - { - return null != finalMappings; - } - - /* - * What worked in Java 6 was to keep a list of Class -> sqltype - * mappings, 
and get TypeMirrors from the Classes at the time of trying - * to identify types (in the final, after-all-sources-processed round). - * Starting in Java 7, you get different TypeMirror instances in - * different rounds for the same types, so you can't match something - * seen in round 1 to something looked up in the final round. (However, - * you can match things seen in round 1 to things looked up prior to - * the first round, when init() is called and constructs the processor.) - * - * So, this method needs to be called at the end of round 1 (or at the - * end of every round, it just won't do anything but once), and at that - * point it will compute the list order and freeze a list of TypeMirrors - * to avoid looking up the Classes later and getting different - * mirrors. - * - * This should work as long as all the sources containg PL/Java - * annotations will be found in round 1. That would only not be the case - * if some other annotation processor is in use that could generate new - * sources with pljava annotations in them, requiring additional rounds. - * In the present state of things, that simply won't work. Java bug - * http://bugs.java.com/bugdatabase/view_bug.do?bug_id=8038455 might - * cover this, and promises a fix in Java 9, but who knows? - */ - private void workAroundJava7Breakage() - { - if ( mappingsFrozen() ) - return; // after the first round, it's too late! - - // Need to check more specific types before those they are - // assignable to by widening reference conversions, so a - // topological sort is in order. 
- // - List>> vs = new ArrayList<>( - protoMappings.size()); - - for ( Map.Entry me : protoMappings ) - vs.add( new Vertex<>( me)); - - for ( int i = vs.size(); i --> 1; ) - { - Vertex> vi = vs.get( i); - TypeMirror ci = vi.payload.getKey(); - for ( int j = i; j --> 0; ) - { - Vertex> vj = vs.get( j); - TypeMirror cj = vj.payload.getKey(); - boolean oij = typu.isAssignable( ci, cj); - boolean oji = typu.isAssignable( cj, ci); - if ( oji == oij ) - continue; // no precedence constraint between these two - if ( oij ) - vi.precede( vj); - else - vj.precede( vi); - } - } - - Queue>> q; - if ( reproducible ) - { - q = new PriorityQueue<>( 11, new TypeTiebreaker()); - } - else - { - q = new LinkedList<>(); - } - - for ( Vertex> v : vs ) - if ( 0 == v.indegree ) - q.add( v); - - protoMappings.clear(); - finalMappings = protoMappings; - protoMappings = null; - - while ( ! q.isEmpty() ) - { - Vertex> v = q.remove(); - v.use( q); - finalMappings.add( v.payload); - } - } - - private TypeMirror typeMirrorFromClass( Class k) - { - if ( k.isArray() ) - { - TypeMirror ctm = typeMirrorFromClass( k.getComponentType()); - return typu.getArrayType( ctm); - } - - if ( k.isPrimitive() ) - { - TypeKind tk = TypeKind.valueOf( k.getName().toUpperCase()); - return typu.getPrimitiveType( tk); - } - - String cname = k.getCanonicalName(); - if ( null == cname ) - { - msg( Kind.WARNING, - "Cannot register type mapping for class %s" + - "that lacks a canonical name", k.getName()); - return null; - } - - return declaredTypeForClass(k); - } - - /** - * Add a custom mapping from a Java class to an SQL type identified - * by SQL-standard reserved syntax. - * - * @param k Class representing the Java type - * @param v String representing the SQL (language-reserved) type - * to be used - */ - void addMap(Class k, String v) - { - addMap( typeMirrorFromClass( k), new DBType.Reserved(v)); - } - - /** - * Add a custom mapping from a Java class to an SQL type identified - * by an SQL qualified identifier. 
- * - * @param k Class representing the Java type - * @param schema String representing the qualifier of the type name - * (may be null) - * @param local String representing the SQL (language-reserved) type - * to be used - */ - void addMap(Class k, String schema, String local) - { - addMap( typeMirrorFromClass( k), - new DBType.Named(qnameFrom(local, schema))); - } - - /** - * Add a custom mapping from a Java class to an SQL type - * already in the form of a {@code DBType}. - * - * @param k Class representing the Java type - * @param DBType representing the SQL type to be used - */ - void addMap(Class k, DBType type) - { - addMap( typeMirrorFromClass( k), type); - } - - /** - * Add a custom mapping from a Java class to an SQL type, if a class - * with the given name exists. - * - * @param k Canonical class name representing the Java type - * @param v String representing the SQL type to be used - */ - void addMapIfExists(String k, String v) - { - TypeElement te = elmu.getTypeElement( k); - if ( null != te ) - addMap( te.asType(), new DBType.Reserved(v)); - } - - /** - * Add a custom mapping from a Java class (represented as a TypeMirror) - * to an SQL type. - * - * @param tm TypeMirror representing the Java type - * @param v String representing the SQL type to be used - */ - void addMap(TypeMirror tm, DBType v) - { - if ( mappingsFrozen() ) - { - msg( Kind.ERROR, - "addMap(%s, %s)\n" + - "called after workAroundJava7Breakage", tm.toString(), v); - return; - } - protoMappings.add( new AbstractMap.SimpleImmutableEntry<>( tm, v)); - } - - /** - * Return the SQL type for the Java type represented by a TypeMirror, - * from an explicit annotation if present, otherwise by applying the - * default mappings. No default-value information is included in the - * string returned. It is assumed that a function return is being typed - * rather than a function parameter. - * - * @param tm Represents the type whose corresponding SQL type is wanted. 
- * @param e Annotated element (chiefly for use as a location hint in - * diagnostic messages). - */ - DBType getSQLType(TypeMirror tm, Element e) - { - return getSQLType( tm, e, null, false, false); - } - - - /** - * Return the SQL type for the Java type represented by a TypeMirror, - * from an explicit annotation if present, otherwise by applying the - * default mappings. - * - * @param tm Represents the type whose corresponding SQL type is wanted. - * @param e Annotated element (chiefly for use as a location hint in - * diagnostic messages). - * @param st {@code SQLType} annotation, or null if none, explicitly - * given for the element. - * @param contravariant Indicates that the element whose type is wanted - * is a function parameter and should be given the widest type that can - * be assigned to it. If false, find the narrowest type that a function - * return can be assigned to. - * @param withDefault Indicates whether any specified default value - * information should also be included in the "type" string returned. - */ - DBType getSQLType(TypeMirror tm, Element e, SQLType st, - boolean contravariant, boolean withDefault) - { - boolean array = false; - boolean row = false; - DBType rslt = null; - - String[] defaults = null; - boolean optional = false; - - if ( null != st ) - { - String s = st.value(); - if ( null != s ) - rslt = DBType.fromSQLTypeAnnotation(s); - defaults = st.defaultValue(); - optional = st.optional(); - } - - if ( tm.getKind().equals( TypeKind.ARRAY) ) - { - ArrayType at = ((ArrayType)tm); - if ( ! at.getComponentType().getKind().equals( TypeKind.BYTE) ) - { - array = true; - tm = at.getComponentType(); - // only for bytea[] should this ever still be an array - } - } - - if ( ! 
array && typu.isSameType( tm, TY_RESULTSET) ) - row = true; - - if ( null != rslt ) - return typeWithDefault( - e, rslt, array, row, defaults, optional, withDefault); - - if ( tm.getKind().equals( TypeKind.VOID) ) - return DT_VOID; // return type only; no defaults apply - - if ( tm.getKind().equals( TypeKind.ERROR) ) - { - msg ( Kind.ERROR, e, - "Cannot determine mapping to SQL type for unresolved type"); - rslt = new DBType.Reserved(tm.toString()); - } - else - { - ArrayList> ms = finalMappings; - if ( contravariant ) - ms = reversed(ms); - for ( Map.Entry me : ms ) - { - TypeMirror ktm = me.getKey(); - if ( ktm instanceof PrimitiveType ) - { - if ( typu.isSameType( tm, ktm) ) - { - rslt = me.getValue(); - break; - } - } - else - { - boolean accept; - if ( contravariant ) - accept = typu.isAssignable( ktm, tm); - else - accept = typu.isAssignable( tm, ktm); - if ( accept ) - { - // don't compute a type of Object/"any" for - // a function return (just admit defeat instead) - if ( contravariant - || ! typu.isSameType( ktm, TY_OBJECT) ) - rslt = me.getValue(); - break; - } - } - } - } - - if ( null == rslt ) - { - msg( Kind.ERROR, e, - "No known mapping to an SQL type"); - rslt = new DBType.Reserved(tm.toString()); - } - - if ( array ) - rslt = rslt.asArray("[]"); - - return typeWithDefault( - e, rslt, array, row, defaults, optional, withDefault); - } - - /** - * Given the matching SQL type already determined, return it with or - * without default-value information appended, as the caller desires. - * To ensure that the generated descriptor will be in proper form, the - * default values are emitted as properly-escaped string literals and - * then cast to the appropriate type. This approach will not work for - * defaults given as arbitrary SQL expressions, but covers the typical - * cases of simple literals and even anything that can be computed as - * a Java String constant expression (e.g. ""+Math.PI). 
- * - * @param e Annotated element (chiefly for use as a location hint in - * diagnostic messages). - * @param rslt The bare SQL type string already determined - * @param array Whether the Java type was determined to be an array - * @param row Whether the Java type was ResultSet, indicating an SQL - * record or row type. - * @param defaults Array (null if not present) of default value strings - * @param withDefault Whether to append the default information to the - * type. - */ - DBType typeWithDefault( - Element e, DBType rslt, boolean array, boolean row, - String[] defaults, boolean optional, boolean withDefault) - { - if ( ! withDefault || null == defaults && ! optional ) - return rslt; - - if ( optional ) - return rslt.withDefault("DEFAULT NULL"); - - int n = defaults.length; - if ( row ) - { - assert ! array; - if ( n > 0 && rslt.toString().equalsIgnoreCase("record") ) - msg( Kind.ERROR, e, - "Only supported default for unknown RECORD type is {}"); - } - else if ( n != 1 ) - array = true; - else if ( ! array ) - array = rslt.isArray(); - - StringBuilder sb = new StringBuilder(); - sb.append( " DEFAULT "); - sb.append( row ? "ROW(" : "CAST("); - if ( array ) - sb.append( "ARRAY["); - if ( n > 1 ) - sb.append( "\n\t"); - for ( String s : defaults ) - { - sb.append( DDRWriter.eQuote( s)); - if ( 0 < -- n ) - sb.append( ",\n\t"); - } - if ( array ) - sb.append( ']'); - if ( ! row ) - sb.append( " AS ").append( rslt); - sb.append( ')'); - return rslt.withDefault(sb.toString()); - } - } - - /** - * Work around bizarre javac behavior that silently supplies an Error - * class in place of an attribute value for glaringly obvious source errors, - * instead of reporting them. 
- * @param av AnnotationValue to extract the value from - * @return The result of getValue unless {@code av} is an error placeholder - */ - static Object getValue( AnnotationValue av) - { - if ( "com.sun.tools.javac.code.Attribute.Error".equals( - av.getClass().getCanonicalName()) ) - throw new AnnotationValueException(); - return av.getValue(); - } - - /** - * Return a reversed copy of an ArrayList. - */ - static > T reversed(T orig) - { - @SuppressWarnings("unchecked") - T list = (T)orig.clone(); - Collections.reverse(list); - return list; - } - - /** - * Return an {@code Identifier.Qualified} from discrete Java strings - * representing the local name and schema, with a zero-length schema string - * producing a qualified name with null qualifier. - */ - Identifier.Qualified qnameFrom( - String name, String schema) - { - Identifier.Simple qualifier = - "".equals(schema) ? null : Identifier.Simple.fromJava(schema, msgr); - Identifier.Simple local = Identifier.Simple.fromJava(name, msgr); - return local.withQualifier(qualifier); - } - - /** - * Return an {@code Identifier.Qualified} from a single Java string - * representing the local name and possibly a schema. - */ - Identifier.Qualified qnameFrom(String name) - { - return Identifier.Qualified.nameFromJava(name, msgr); - } - - /** - * Return an {@code Identifier.Qualified} from an array of Java strings - * representing schema and local name separately if of length two, or as by - * {@link #qnameFrom(String)} if of length one; invalid if of any other - * length. - *

    - * The first of two elements may be explicitly {@code ""} to produce a - * qualified name with null qualifier. - */ - Identifier.Qualified qnameFrom(String[] names) - { - switch ( names.length ) - { - case 2: return qnameFrom(names[1], names[0]); - case 1: return qnameFrom(names[0]); - default: - throw new IllegalArgumentException( - "Only a one- or two-element String array is accepted"); - } - } - - /** - * Like {@link #qnameFrom(String[])} but for an operator name. - */ - Identifier.Qualified operatorNameFrom(String[] names) - { - switch ( names.length ) - { - case 2: - Identifier.Simple qualifier = null; - if ( ! names[0].isEmpty() ) - qualifier = Identifier.Simple.fromJava(names[0], msgr); - return Identifier.Operator.from(names[1], msgr) - .withQualifier(qualifier); - case 1: - return Identifier.Qualified.operatorFromJava(names[0], msgr); - default: - throw new IllegalArgumentException( - "Only a one- or two-element String array is accepted"); - } - } - - String[] qstrings(Identifier.Qualified qname) - { - if ( null == qname ) - return null; - Identifier.Simple q = qname.qualifier(); - String local = qname.local().toString(); - return new String[] { null == q ? null : q.toString(), local }; - } -} - -/** - * Exception thrown when an expected annotation value is a compiler-internal - * Error class instead, which happens in some javac versions when the annotation - * value wasn't resolved because of a source error the compiler really should - * have reported. - */ -class AnnotationValueException extends RuntimeException { } - -/** - * A code snippet. May contain zero, one, or more complete SQL commands for - * each of deploying and undeploying. The commands contained in one Snippet - * will always be emitted in a fixed order. A collection of Snippets will be - * output in an order constrained by their provides and requires methods. - */ -interface Snippet -{ - /** - * An {@code } that will be used to wrap each command - * from this Snippet as an {@code }. 
If null, the - * commands will be emitted as plain {@code }s. - */ - public Identifier.Simple implementorName(); - /** - * A {@code DependTag} to represent this snippet's dependence on whatever - * determines whether the implementor name is to be recognized. - *

    - * Represented for now as a {@code DependTag.Explicit} even though the - * dependency is implicitly created; an {@code SQLAction} snippet may have - * an explicit {@code provides=} that has to be matched. - */ - default DependTag implementorTag() - { - return new DependTag.Explicit(implementorName().pgFolded()); - } - /** - * Return an array of SQL commands (one complete command to a string) to - * be executed in order during deployment. - */ - public String[] deployStrings(); - /** - * Return an array of SQL commands (one complete command to a string) to - * be executed in order during undeployment. - */ - public String[] undeployStrings(); - /** - * Return an array of arbitrary labels considered "provided" by this - * Snippet. In generating the final order of the deployment descriptor file, - * this Snippet will come before any whose requires method returns any of - * the same labels. - */ - public Set provideTags(); - /** - * Return an array of arbitrary labels considered "required" by this - * Snippet. In generating the final order of the deployment descriptor file, - * this Snippet will come after those whose provides method returns any of - * the same labels. - */ - public Set requireTags(); - /** - * Method to be called after all annotations' - * element/value pairs have been filled in, to compute any additional - * information derived from those values before deployStrings() or - * undeployStrings() can be called. May also check for and report semantic - * errors that are not easily checked earlier while populating the - * element/value pairs. - * @return A set of snippets that are now prepared and should be added to - * the graph to be scheduled and emitted according to provides/requires. - * Typically Set.of(this) if all went well, or Set.of() in case of an error - * or when the snippet will be emitted by something else. In some cases a - * characterize method can return additional snippets that are ready to be - * scheduled. 
- */ - public Set characterize(); - - /** - * If it is possible to break an ordering cycle at this snippet, return a - * vertex wrapping a snippet (possibly this one, or another) that can be - * considered ready, otherwise return null. - *

    - * The default implementation returns null unconditionally. - * @param v Vertex that wraps this Snippet - * @param deploy true when generating an ordering for the deploy strings - * @return a Vertex wrapping a Snippet that can be considered ready - */ - default Vertex breakCycle(Vertex v, boolean deploy) - { - return null; - } - - /** - * Called when undeploy ordering breaks a cycle by using - * {@code DROP ... CASCADE} or equivalent on another object, with effects - * that would duplicate or interfere with this snippet's undeploy actions. - *

    - * A snippet for which this can matter should note that this method has been - * called, and later generate its undeploy strings with any necessary - * adjustments. - *

    - * The default implementation does nothing. - */ - default void subsume() - { - } -} - -interface Commentable -{ - public String comment(); - public void setComment( Object o, boolean explicit, Element e); - public String derivedComment( Element e); -} - -/** - * Vertex in a DAG, as used to put things in workable topological order - */ -class Vertex

    -{ - P payload; - int indegree; - List> adj; - - /** - * Construct a new vertex with the supplied payload, indegree zero, and an - * empty out-adjacency list. - * @param payload Object to be associated with this vertex. - */ - Vertex( P payload) - { - this.payload = payload; - indegree = 0; - adj = new ArrayList<>(); - } - - /** - * Record that this vertex must precede the specified vertex. - * @param v a Vertex that this Vertex must precede. - */ - void precede( Vertex

    v) - { - ++ v.indegree; - adj.add( v); - } - - /** - * Record that this vertex has been 'used'. Decrement the indegree of any - * in its adjacency list, and add to the supplied queue any of those whose - * indegree becomes zero. - * @param q A queue of vertices that are ready (have indegree zero). - */ - void use( Collection> q) - { - for ( Vertex

    v : adj ) - if ( 0 == -- v.indegree ) - q.add( v); - } - - /** - * Record that this vertex has been 'used'. Decrement the indegree of any - * in its adjacency list; any of those whose indegree becomes zero should be - * both added to the ready queue {@code q} and removed from the collection - * {@code vs}. - * @param q A queue of vertices that are ready (have indegree zero). - * @param vs A collection of vertices not yet ready. - */ - void use( Collection> q, Collection> vs) - { - for ( Vertex

    v : adj ) - if ( 0 == -- v.indegree ) - { - vs.remove( v); - q.add( v); - } - } - - /** - * Whether a vertex is known to transitively precede, or not so precede, a - * target vertex, or cannot yet be so classified. - */ - enum MemoState { YES, NO, PENDING } - - /** - * Return the memoized state of this vertex or, if none, enqueue the vertex - * for further exploration, memoize its state as {@code PENDING}, and return - * that. - */ - MemoState classifyOrEnqueue( - Queue> queue, IdentityHashMap,MemoState> memos) - { - MemoState state = memos.putIfAbsent(this, MemoState.PENDING); - if ( null == state ) - { - queue.add(this); - return MemoState.PENDING; - } - return state; - } - - /** - * Execute one step of {@code precedesTransitively} determination. - *

    - * On entry, this vertex has been removed from the queue. Its immediate - * adjacency successors will be evaluated. - *

    - * If any immediate successor is a {@code YES}, this vertex - * is a {@code YES}. - *

    - * If any immediate successor is {@code PENDING}, this vertex remains - * {@code PENDING} and is replaced on the queue, to be encountered again - * after all currently pending vertices. - *

    - * Otherwise, this vertex is a {@code NO}. - */ - MemoState stepOfPrecedes( - Queue> queue, IdentityHashMap,MemoState> memos) - { - boolean anyPendingSuccessors = false; - for ( Vertex

    v : adj ) - { - switch ( v.classifyOrEnqueue(queue, memos) ) - { - case YES: - memos.replace(this, MemoState.YES); - return MemoState.YES; - case PENDING: - anyPendingSuccessors = true; - break; - case NO: - break; - } - } - - if ( anyPendingSuccessors ) - { - queue.add(this); - return MemoState.PENDING; - } - - memos.replace(this, MemoState.NO); - return MemoState.NO; - } - - /** - * Determine whether this vertex (transitively) precedes other, - * returning, if so, that subset of its immediate adjacency successors - * through which other is reachable. - * @param other vertex to which reachability is to be tested - * @return array of immediate adjacencies through which other is reachable, - * or null if it is not - */ - Vertex

    [] precedesTransitively(Vertex

    other) - { - Queue> queue = new LinkedList<>(); - IdentityHashMap,MemoState> memos = new IdentityHashMap<>(); - boolean anyYeses = false; - - /* - * Initially: the 'other' vertex itself is known to be a YES. - * Nothing is yet known to be a NO. - */ - memos.put(requireNonNull(other), MemoState.YES); - - /* - * classifyOrEnqueue my immediate successors. Any that is not 'other' - * itself will be enqueued in PENDING status. - */ - for ( Vertex

    v : adj ) - if ( MemoState.YES == v.classifyOrEnqueue(queue, memos) ) - anyYeses = true; - - /* - * After running stepOfPrecedes on every enqueued vertex until the queue - * is empty, every vertex seen will be in memos as a YES or a NO. - */ - while ( ! queue.isEmpty() ) - if ( MemoState.YES == queue.remove().stepOfPrecedes(queue, memos) ) - anyYeses = true; - - if ( ! anyYeses ) - return null; - - @SuppressWarnings("unchecked") // can't quite say Vertex

    []::new - Vertex

    [] result = adj.stream() - .filter(v -> MemoState.YES == memos.get(v)) - .toArray(Vertex[]::new); - - return result; - } - - /** - * Remove successors from the adjacency list of this vertex, and - * add them to the adjacency list of other. - *

    - * No successor's indegree is changed. - */ - void transferSuccessorsTo(Vertex

    other, Vertex

    [] successors) - { - for ( Vertex

    v : successors ) - { - boolean removed = adj.remove(v); - assert removed : "transferSuccessorsTo passed a non-successor"; - other.adj.add(v); - } - } -} - -/** - * A pair of Vertex instances for the same payload, for use when two directions - * of topological ordering must be computed. - */ -class VertexPair

    -{ - Vertex

    fwd; - Vertex

    rev; - - VertexPair( P payload) - { - fwd = new Vertex<>( payload); - rev = new Vertex<>( payload); - } - - P payload() - { - return rev.payload; - } -} - -/** - * Proxy a snippet that 'provides' an implementor tag and has no - * undeployStrings, returning its deployStrings in their place. - */ -class ImpProvider implements Snippet -{ - Snippet s; - - ImpProvider( Snippet s) { this.s = s; } - - @Override public Identifier.Simple implementorName() - { - return s.implementorName(); - } - @Override public String[] deployStrings() { return s.deployStrings(); } - @Override public String[] undeployStrings() { return s.deployStrings(); } - @Override public Set provideTags() { return s.provideTags(); } - @Override public Set requireTags() { return s.requireTags(); } - @Override public Set characterize() { return s.characterize(); } -} - -/** - * Resolve ties in {@code Snippet} ordering in an arbitrary but deterministic - * way, for use when {@code ddr.reproducible} is set. - */ -class SnippetTiebreaker implements Comparator> -{ - @Override - public int compare( Vertex o1, Vertex o2) - { - Snippet s1 = o1.payload; - Snippet s2 = o2.payload; - int diff; - Identifier.Simple s1imp = s1.implementorName(); - Identifier.Simple s2imp = s2.implementorName(); - if ( null != s1imp && null != s2imp ) - { - diff = s1imp.pgFolded().compareTo( s2imp.pgFolded()); - if ( 0 != diff ) - return diff; - } - else - return null == s1imp ? -1 : 1; - String[] ds1 = s1.deployStrings(); - String[] ds2 = s2.deployStrings(); - diff = ds1.length - ds2.length; - if ( 0 != diff ) - return diff; - for ( int i = 0 ; i < ds1.length ; ++ i ) - { - diff = ds1[i].compareTo( ds2[i]); - if ( 0 != diff ) - return diff; - } - assert s1 == s2 : "Two distinct Snippets compare equal by tiebreaker"; - return 0; - } -} - -/** - * Resolve ties in type-mapping resolution in an arbitrary but deterministic - * way, for use when {@code ddr.reproducible} is set. 
- */ -class TypeTiebreaker -implements Comparator>> -{ - @Override - public int compare( - Vertex> o1, - Vertex> o2) - { - Map.Entry m1 = o1.payload; - Map.Entry m2 = o2.payload; - int diff = - m1.getValue().toString().compareTo( m2.getValue().toString()); - if ( 0 != diff ) - return diff; - diff = m1.getKey().toString().compareTo( m2.getKey().toString()); - if ( 0 != diff ) - return diff; - assert - m1 == m2 : "Two distinct type mappings compare equal by tiebreaker"; - return 0; - } -} - -/** - * Abstraction of a database type, which is usually specified by an - * {@code Identifier.Qualified}, but sometimes by reserved SQL syntax. - */ -abstract class DBType -{ - DBType withModifier(String modifier) - { - return new Modified(this, modifier); - } - - DBType asArray(String notated) - { - return new Array(this, notated); - } - - DBType withDefault(String suffix) - { - return new Defaulting(this, suffix); - } - - String toString(boolean withDefault) - { - return toString(); - } - - abstract DependTag dependTag(); - - /** - * Return the original underlying (leaf) type, either a {@code Named} or - * a {@code Reserved}. - *

    - * Override in non-leaf classes (except {@code Array}). - */ - DBType leaf() - { - return this; - } - - boolean isArray() - { - return false; - } - - @Override - public final boolean equals(Object o) - { - return equals(o, null); - } - - /** - * True if the underlying (leaf) types compare equal (overridden for - * {@code Array}). - *

    - * The assumption is that equality checking will be done for function - * signature equivalence, for which defaults and typmods don't matter - * (but arrayness does). - */ - public final boolean equals(Object o, Messager msgr) - { - if ( this == o ) - return true; - if ( ! (o instanceof DBType) ) - return false; - DBType dt1 = this.leaf(); - DBType dt2 = ((DBType)o).leaf(); - if ( dt1.getClass() != dt2.getClass() ) - return false; - if ( dt1 instanceof Array ) - { - dt1 = ((Array)dt1).m_component.leaf(); - dt2 = ((Array)dt2).m_component.leaf(); - if ( dt1.getClass() != dt2.getClass() ) - return false; - } - if ( dt1 instanceof Named ) - return ((Named)dt1).m_ident.equals(((Named)dt2).m_ident, msgr); - return pgFold(((Reserved)dt1).m_reservedName) - .equals(pgFold(((Reserved)dt2).m_reservedName)); - } - - /** - * Pattern to match type names that are special in SQL, if they appear as - * regular (unquoted) identifiers and without a schema qualification. - *

    - * This list does not include {@code DOUBLE} or {@code NATIONAL}, as the - * reserved SQL form for each includes a following keyword - * ({@code PRECISION} or {@code CHARACTER}/{@code CHAR}, respectively). - * There is a catch-all test in {@code fromSQLTypeAnnotation} that will fall - * back to 'reserved' treatment if the name is followed by anything that - * isn't a parenthesized type modifier, so the fallback will naturally catch - * these two cases. - */ - static final Pattern s_reservedTypeFirstWords = compile( - "(?i:" + - "INT|INTEGER|SMALLINT|BIGINT|REAL|FLOAT|DECIMAL|DEC|NUMERIC|" + - "BOOLEAN|BIT|CHARACTER|CHAR|VARCHAR|TIMESTAMP|TIME|INTERVAL" + - ")" - ); - - /** - * Parse a string, representing an optional parameter/column name followed - * by a type, into an {@code Identifier.Simple}, possibly null, and a - * {@code DBType}. - *

    - * Whitespace (or, strictly, separator; comments would be accepted) must - * separate the name from the type, if the name is not quoted. To omit a - * name and supply only the type, the string must begin with whitespace - * (ahem, separator). - */ - static Map.Entry fromNameAndType(String nandt) - { - Identifier.Simple name = null; - Matcher m = ISO_AND_PG_IDENTIFIER_CAPTURING.matcher(nandt); - if ( m.lookingAt() ) - { - nandt = nandt.substring(m.end()); - name = identifierFrom(m); - } - return - new AbstractMap.SimpleImmutableEntry<>( - name, fromSQLTypeAnnotation(nandt)); - } - - /** - * Make a {@code DBType} from whatever might appear in an {@code SQLType} - * annotation. - *

    - * The possibilities are numerous, as that text used to be dumped rather - * blindly into the descriptor and thus could be whatever PostgreSQL would - * make sense of. The result could be a {@code DBType.Named} if the start of - * the text parses as a (possibly schema-qualified) identifier, or a - * {@code DBType.Reserved} if it doesn't (or it parses as a non-schema- - * qualified regular identifier and matches one of SQL's grammatically - * reserved type names). It could be either of those wrapped in a - * {@code DBType.Modified} if a type modifier was parsed out. It could be - * any of those wrapped in a {@code DBType.Array} if the text ended with any - * of the recognized forms of array dimension notation. The one thing it - * can't be (as a result from this method) is a {@code DBType.Defaulting}; - * that wrapping can be applied to the result later, to carry a default - * value that has been specified at a particular site of use. - *

    - * The parsing strategy is a bit heuristic. An attempt is made to parse a - * (possibly schema-qualified) identifier at the start of the string. - * An attempt is made to find a match for array-dimension notation that runs - * to the end of the string. Whatever lies between gets to be a typmod if it - * looks enough like one, or gets rolled with the front of the string into a - * {@code DBType.Reserved}, which is not otherwise scrutinized; the - * {@code Reserved} case is still more or less a catch-all that will be - * dumped blindly into the descriptor in the hope that PostgreSQL will make - * sense of it. - *

    - * This strategy is used because compared to what can appear in a typmod - * (which could require arbitrary constant expression parsing), the array - * grammar depends on much less. - */ - static DBType fromSQLTypeAnnotation(String value) - { - Identifier.Qualified qname = null; - - Matcher m = SEPARATOR.matcher(value); - separator(m, false); - int postSeparator = m.regionStart(); - - if ( m.usePattern(ISO_AND_PG_IDENTIFIER_CAPTURING).lookingAt() ) - { - Identifier.Simple id1 = identifierFrom(m); - m.region(m.end(), m.regionEnd()); - - separator(m, false); - if ( value.startsWith(".", m.regionStart()) ) - { - m.region(m.regionStart() + 1, m.regionEnd()); - separator(m, false); - if ( m.usePattern(ISO_AND_PG_IDENTIFIER_CAPTURING).lookingAt() ) - { - Identifier.Simple id2 = identifierFrom(m); - qname = id2.withQualifier(id1); - m.region(m.end(), m.regionEnd()); - separator(m, false); - } - } - else - qname = id1.withQualifier(null); - } - - /* - * At this point, qname may have a local name and qualifier, or it may - * have a local name and null qualifier (if a single identifier was - * successfully matched but not followed by a dot). It is also possible - * for qname to be null, either because the start of the string didn't - * look like an identifier at all, or because it did, but was followed - * by a dot, and what followed the dot could not be parsed as another - * identifier. Probably both of those cases are erroneous, but they can - * also be handled by simply treating the content as Reserved and hoping - * PostgreSQL can make sense of it. - * - * Search from here to the end of the string for possible array notation - * that can be stripped off the end, leaving just the middle (if any) to - * be dealt with. - */ - - String arrayNotation = arrayNotationIfPresent(m, value); - - /* - * If arrayNotation is not null, m's region end has been adjusted to - * exclude the array notation. 
- */ - - boolean reserved; - - if ( null == qname ) - reserved = true; - else if ( null != qname.qualifier() ) - reserved = false; - else - { - Identifier.Simple local = qname.local(); - if ( ! local.folds() ) - reserved = false; - else - { - Matcher m1 = - s_reservedTypeFirstWords.matcher(local.nonFolded()); - reserved = m1.matches(); - } - } - - /* - * If this is a reserved type, just wrap up everything from its start to - * the array notation (if any) as a Reserved; there is no need to try to - * tease out a typmod separately. (The reserved syntax can be quite - * unlike the generic typename(typmod) pattern; there could be what - * looks like a (typmod) between TIME and WITH TIME ZONE, or the moral - * equivalent of a typmod could look like HOUR TO MINUTE, and so on.) - * - * If we think this is a non-reserved type, and there is anything left - * in the matching region (preceding the array notation, if any), then - * it had better be a typmod in the generic form starting with a (. We - * will capture whatever is there and call it a typmod as long as it - * does start that way. (More elaborate checking, such as balancing the - * parens, would require ability to parse an expr_list.) This can allow - * malformed syntax to be uncaught until deployment time when PostgreSQL - * sees it, but that's unchanged from when the entire SQLType string was - * passed along verbatim. The 'threat' model here is just that the - * legitimate developer may get an error later when earlier would be - * more helpful, not a malicious adversary bent on injection. - * - * On the other hand, if what's left doesn't start with a ( then we - * somehow don't know what we're looking at, so fall back and treat it - * as reserved. This will naturally catch the two-token reserved names - * DOUBLE PRECISION, NATIONAL CHARACTER or NATIONAL CHAR, which were - * therefore left out of the s_reservedTypeFirstWords pattern. - */ - - if ( ! reserved && m.regionStart() < m.regionEnd() ) - if ( ! 
value.startsWith("(", m.regionStart()) ) - reserved = true; - - DBType result; - - if ( reserved ) - result = new DBType.Reserved( - value.substring(postSeparator, m.regionEnd())); - else - { - result = new DBType.Named(qname); - if ( m.regionStart() < m.regionEnd() ) - result = result.withModifier( - value.substring(m.regionStart(), m.regionEnd())); - } - - if ( null != arrayNotation ) - result = result.asArray(arrayNotation); - - return result; - } - - private static final Pattern s_arrayDimStart = compile(String.format( - "(?i:(? - * If a non-null string is returned, the matcher's region-end has been - * adjusted to exclude it. - *

    - * The matcher's associated pattern may have been changed, and the region - * transiently changed, but on return the region will either be the same as - * on entry (if no array notation was found), or have only the region end - * adjusted to exclude the notation. - *

    - * The returned string can include a {@code separator} that followed the - * array notation. - */ - private static String arrayNotationIfPresent(Matcher m, String s) - { - int originalRegionStart = m.regionStart(); - int notationStart; - int dims; - boolean atMostOneDimAllowed; // true after ARRAY keyword - -restart:for ( ;; ) - { - notationStart = -1; - dims = 0; - atMostOneDimAllowed = false; - - m.usePattern(s_arrayDimStart); - if ( ! m.find() ) - break restart; // notationStart is -1 indicating not found - - notationStart = m.start(); - if ( ! "[".equals(m.group()) ) // saw ARRAY - { - atMostOneDimAllowed = true; - m.region(m.end(), m.regionEnd()); - separator(m, false); - if ( ! s.startsWith("[", m.regionStart()) ) - { - if ( m.regionStart() == m.regionEnd() ) - { - dims = 1; // ARRAY separator $ --ok (means 1 dim) - break restart; - } - /* - * ARRAY separator something-other-than-[ - * This is not the match we're looking for. The regionStart - * already points here, so restart the loop to look for - * another potential array notation start beyond this point. - */ - continue restart; - } - m.region(m.regionStart() + 1, m.regionEnd()); - } - - /* - * Invariant: have seen [ and regionStart still points to it. - * Accept optional digits, then ] - * Repeat if followed by a [ - */ - for ( ;; ) - { - m.region(m.regionStart() + 1, m.regionEnd()); - separator(m, false); - - if ( m.usePattern(s_digits).lookingAt() ) - { - m.region(m.end(), m.regionEnd()); - separator(m, false); - } - - if ( ! s.startsWith("]", m.regionStart()) ) - continue restart; - - ++ dims; // have seen a complete [ (\d+)? ] - m.region(m.regionStart() + 1, m.regionEnd()); - separator(m, false); - if ( s.startsWith("[", m.regionStart()) ) - continue; - if ( m.regionStart() == m.regionEnd() ) - if ( ! 
atMostOneDimAllowed || 1 == dims ) - break restart; - continue restart; // not at end, not at [ --start over - } - } - - if ( -1 == notationStart ) - { - m.region(originalRegionStart, m.regionEnd()); - return null; - } - - m.region(originalRegionStart, notationStart); - return s.substring(notationStart); - } - - static final class Reserved extends DBType - { - private final String m_reservedName; - - Reserved(String name) - { - m_reservedName = name; - } - - @Override - public String toString() - { - return m_reservedName; - } - - @Override - DependTag dependTag() - { - return null; - } - - @Override - public int hashCode() - { - return pgFold(m_reservedName).hashCode(); - } - } - - static final class Named extends DBType - { - private final Identifier.Qualified m_ident; - - Named(Identifier.Qualified ident) - { - m_ident = ident; - } - - @Override - public String toString() - { - return m_ident.toString(); - } - - @Override - DependTag dependTag() - { - return new DependTag.Type(m_ident); - } - - @Override - public int hashCode() - { - return m_ident.hashCode(); - } - } - - static final class Modified extends DBType - { - private final DBType m_raw; - private final String m_modifier; - - Modified(DBType raw, String modifier) - { - m_raw = raw; - m_modifier = modifier; - } - - @Override - public String toString() - { - return m_raw.toString() + m_modifier; - } - - @Override - DBType withModifier(String modifier) - { - throw new UnsupportedOperationException( - "withModifier on a Modified"); - } - - @Override - DependTag dependTag() - { - return m_raw.dependTag(); - } - - @Override - public int hashCode() - { - return m_raw.hashCode(); - } - - @Override - DBType leaf() - { - return m_raw.leaf(); - } - } - - static final class Array extends DBType - { - private final DBType m_component; - private final int m_dims; - private final String m_notated; - - Array(DBType component, String notated) - { - assert component instanceof Named - || component instanceof Reserved - 
|| component instanceof Modified; - int dims = 0; - for ( int pos = 0; -1 != (pos = notated.indexOf('[', pos)); ++ pos ) - ++ dims; - m_dims = 0 == dims ? 1 : dims; // "ARRAY" with no [ has dimension 1 - m_notated = notated; - m_component = requireNonNull(component); - } - - @Override - Array asArray(String notated) - { - /* Implementable in principle, but may never be needed */ - throw new UnsupportedOperationException("asArray on an Array"); - } - - @Override - public String toString() - { - return m_component.toString() + m_notated; - } - - @Override - DependTag dependTag() - { - return m_component.dependTag(); - } - - @Override - boolean isArray() - { - return true; - } - - @Override - public int hashCode() - { - return m_component.hashCode(); - } - } - - static final class Defaulting extends DBType - { - private final DBType m_raw; - private final String m_suffix; - - Defaulting(DBType raw, String suffix) - { - assert ! (raw instanceof Defaulting); - m_raw = requireNonNull(raw); - m_suffix = suffix; - } - - @Override - Modified withModifier(String notated) - { - throw new UnsupportedOperationException( - "withModifier on a Defaulting"); - } - - @Override - Array asArray(String notated) - { - throw new UnsupportedOperationException("asArray on a Defaulting"); - } - - @Override - Array withDefault(String suffix) - { - /* Implementable in principle, but may never be needed */ - throw new UnsupportedOperationException( - "withDefault on a Defaulting"); - } - - @Override - public String toString() - { - return m_raw.toString() + " " + m_suffix; - } - - @Override - String toString(boolean withDefault) - { - return withDefault ? 
toString() : m_raw.toString(); - } - - @Override - DependTag dependTag() - { - return m_raw.dependTag(); - } - - @Override - boolean isArray() - { - return m_raw.isArray(); - } - - @Override - public int hashCode() - { - return m_raw.hashCode(); - } - - @Override - DBType leaf() - { - return m_raw.leaf(); - } - } -} - -/** - * Abstraction of a dependency tag, encompassing {@code Explicit} ones declared - * in annotations and distinguished by {@code String}s, and others added - * implicitly such as {@code Type}s known by {@code Identifier.Qualified}. - */ -abstract class DependTag -{ - protected final T m_value; - - protected DependTag(T value) - { - m_value = value; - } - - @Override - public int hashCode() - { - return hash(getClass(), m_value); - } - - @Override - public final boolean equals(Object o) - { - return equals(o, null); - } - - public boolean equals(Object o, Messager msgr) - { - if ( this == o ) - return true; - if ( null == o ) - return false; - return - getClass() == o.getClass() - && m_value.equals(((DependTag)o).m_value); - } - - @Override - public String toString() - { - return '(' + getClass().getSimpleName() + ')' + m_value.toString(); - } - - static final class Explicit extends DependTag - { - Explicit(String value) - { - super(requireNonNull(value)); - } - } - - static abstract class Named extends DependTag - { - Named(T value) - { - super(value); - } - - @Override - public boolean equals(Object o, Messager msgr) - { - if ( this == o ) - return true; - if ( null == o ) - return false; - return - getClass() == o.getClass() - && m_value.equals(((DependTag)o).m_value, msgr); - } - } - - static final class Type - extends Named> - { - Type(Identifier.Qualified value) - { - super(requireNonNull(value)); - } - } - - static final class Function - extends Named> - { - private DBType[] m_signature; - - Function( - Identifier.Qualified value, DBType[] signature) - { - super(requireNonNull(value)); - m_signature = signature.clone(); - } - - @Override - 
public boolean equals(Object o, Messager msgr) - { - if ( ! super.equals(o, msgr) ) - return false; - Function f = (Function)o; - if ( m_signature.length != f.m_signature.length ) - return false; - for ( int i = 0; i < m_signature.length; ++ i ) - { - if ( null == m_signature[i] || null == f.m_signature[i] ) - { - if ( m_signature[i] != f.m_signature[i] ) - return false; - continue; - } - if ( ! m_signature[i].equals(f.m_signature[i], msgr) ) - return false; - } - return true; - } - - @Override - public String toString() - { - return super.toString() + Arrays.toString(m_signature); - } - } - - static final class Operator - extends Named> - { - private DBType[] m_signature; - - Operator( - Identifier.Qualified value, DBType[] signature) - { - super(requireNonNull(value)); - assert 2 == signature.length : "invalid Operator signature length"; - m_signature = signature.clone(); - } - - @Override - public boolean equals(Object o, Messager msgr) - { - if ( ! super.equals(o, msgr) ) - return false; - Operator op = (Operator)o; - if ( m_signature.length != op.m_signature.length ) - return false; - for ( int i = 0; i < m_signature.length; ++ i ) - { - if ( null == m_signature[i] || null == op.m_signature[i] ) - { - if ( m_signature[i] != op.m_signature[i] ) - return false; - continue; - } - if ( ! m_signature[i].equals(op.m_signature[i], msgr) ) - return false; - } - return true; - } - - @Override - public String toString() - { - return super.toString() + Arrays.toString(m_signature); - } - } -} - -/** - * Tiny 'record' used in factoring duplicative operations on function parameter - * lists into operations on streams of these. - */ -class ParameterInfo -{ - final TypeMirror tm; - final VariableElement ve; - final SQLType st; - final DBType dt; - - String name() - { - String name = null == st ? 
null : st.name(); - if ( null == name ) - name = ve.getSimpleName().toString(); - return name; - } - - ParameterInfo(TypeMirror m, VariableElement e, SQLType t, DBType d) - { - tm = m; - ve = e; - st = t; - dt = d; - } -} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessorImpl.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessorImpl.java new file mode 100644 index 000000000..d67ebd202 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessorImpl.java @@ -0,0 +1,5977 @@ +/* + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.io.IOException; + +import java.lang.annotation.Annotation; + +import java.lang.reflect.Array; +import java.lang.reflect.Field; +import java.lang.reflect.InvocationTargetException; + +import java.math.BigDecimal; +import java.math.BigInteger; + +import java.sql.ResultSet; +import java.sql.SQLData; +import java.sql.SQLInput; +import java.sql.SQLOutput; +import java.sql.Time; +import java.sql.Timestamp; + +import java.text.BreakIterator; + +import java.time.LocalDate; +import java.time.LocalTime; +import java.time.OffsetTime; +import java.time.LocalDateTime; +import java.time.OffsetDateTime; + +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import static java.util.Collections.unmodifiableSet; +import java.util.Comparator; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import 
java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.ListIterator; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import static java.util.Objects.requireNonNull; +import java.util.PriorityQueue; +import java.util.Queue; +import java.util.Set; + +import java.util.function.BiConsumer; +import java.util.function.Supplier; +import static java.util.function.UnaryOperator.identity; + +import java.util.stream.Stream; +import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.joining; +import static java.util.stream.Collectors.mapping; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toSet; + +import javax.annotation.processing.Filer; +import javax.annotation.processing.Messager; +import javax.annotation.processing.ProcessingEnvironment; +import javax.annotation.processing.RoundEnvironment; + +import javax.lang.model.SourceVersion; + +import javax.lang.model.element.AnnotationMirror; +import javax.lang.model.element.AnnotationValue; +import javax.lang.model.element.Element; +import javax.lang.model.element.ElementKind; +import javax.lang.model.element.ExecutableElement; +import javax.lang.model.element.Modifier; +import javax.lang.model.element.ModuleElement; +import javax.lang.model.element.NestingKind; +import javax.lang.model.element.TypeElement; +import javax.lang.model.element.VariableElement; + +import javax.lang.model.type.ArrayType; +import javax.lang.model.type.DeclaredType; +import javax.lang.model.type.ExecutableType; +import javax.lang.model.type.NoType; +import javax.lang.model.type.PrimitiveType; +import javax.lang.model.type.TypeKind; +import javax.lang.model.type.TypeMirror; + +import javax.lang.model.util.Elements; +import javax.lang.model.util.Types; + +import static javax.lang.model.util.ElementFilter.constructorsIn; +import static 
javax.lang.model.util.ElementFilter.methodsIn; + +import static javax.tools.Diagnostic.Kind; + +import org.postgresql.pljava.ResultSetHandle; +import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.TriggerData; + +import org.postgresql.pljava.annotation.Aggregate; +import org.postgresql.pljava.annotation.Cast; +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.Operator; +import org.postgresql.pljava.annotation.SQLAction; +import org.postgresql.pljava.annotation.SQLActions; +import org.postgresql.pljava.annotation.SQLType; +import org.postgresql.pljava.annotation.Trigger; +import org.postgresql.pljava.annotation.BaseUDT; +import org.postgresql.pljava.annotation.MappedUDT; + +import org.postgresql.pljava.model.CatalogObject; + +import org.postgresql.pljava.sqlgen.Lexicals; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; + +/** + * Where the work happens. + */ +class DDRProcessorImpl +{ + // Things supplied by the calling framework in ProcessingEnvironment, + // used enough that it makes sense to break them out here with + // short names that all nested classes below will inherit. + // + final Elements elmu; + final Filer filr; + final Locale loca; + final Messager msgr; + final Map opts; + final SourceVersion srcv; + final Types typu; + + // Similarly, the TypeMapper should be easily available to code below. 
+ // + final TypeMapper tmpr; + final SnippetTiebreaker snippetTiebreaker; + + // Options obtained from the invocation + // + final Identifier.Simple nameTrusted; + final Identifier.Simple nameUntrusted; + final String output; + final Identifier.Simple defaultImplementor; + final boolean reproducible; + + // Certain known types that need to be recognized in the processed code + // + final DeclaredType TY_ITERATOR; + final DeclaredType TY_OBJECT; + final DeclaredType TY_RESULTSET; + final DeclaredType TY_RESULTSETPROVIDER; + final DeclaredType TY_RESULTSETHANDLE; + final DeclaredType TY_SQLDATA; + final DeclaredType TY_SQLINPUT; + final DeclaredType TY_SQLOUTPUT; + final DeclaredType TY_STRING; + final DeclaredType TY_TRIGGERDATA; + final NoType TY_VOID; + + // Our own annotations + // + final TypeElement AN_FUNCTION; + final TypeElement AN_SQLTYPE; + final TypeElement AN_TRIGGER; + final TypeElement AN_BASEUDT; + final TypeElement AN_MAPPEDUDT; + final TypeElement AN_SQLACTION; + final TypeElement AN_SQLACTIONS; + final TypeElement AN_CAST; + final TypeElement AN_CASTS; + final TypeElement AN_AGGREGATE; + final TypeElement AN_AGGREGATES; + final TypeElement AN_OPERATOR; + final TypeElement AN_OPERATORS; + + // Certain familiar DBTypes (capitalized as this file historically has) + // + final DBType DT_BOOLEAN = new DBType.Reserved("boolean"); + final DBType DT_INTEGER = new DBType.Reserved("integer"); + final DBType DT_RECORD = new DBType.Named( + Identifier.Qualified.nameFromJava("pg_catalog.RECORD")); + final DBType DT_TRIGGER = new DBType.Named( + Identifier.Qualified.nameFromJava("pg_catalog.trigger")); + final DBType DT_VOID = new DBType.Named( + Identifier.Qualified.nameFromJava("pg_catalog.void")); + final DBType DT_ANY = new DBType.Named( + Identifier.Qualified.nameFromJava("pg_catalog.\"any\"")); + final DBType DT_BYTEA = new DBType.Named( + Identifier.Qualified.nameFromJava("pg_catalog.bytea")); + final DBType DT_INTERNAL = new DBType.Named( + 
Identifier.Qualified.nameFromJava("pg_catalog.internal")); + + // Function signatures for certain known functions + // + final DBType[] SIG_TYPMODIN = + { DBType.fromSQLTypeAnnotation("pg_catalog.cstring[]") }; + final DBType[] SIG_TYPMODOUT = { DT_INTEGER }; + final DBType[] SIG_ANALYZE = { DT_INTERNAL }; + + DDRProcessorImpl( ProcessingEnvironment processingEnv) + { + elmu = processingEnv.getElementUtils(); + filr = processingEnv.getFiler(); + loca = processingEnv.getLocale(); + msgr = processingEnv.getMessager(); + opts = processingEnv.getOptions(); + srcv = processingEnv.getSourceVersion(); + typu = processingEnv.getTypeUtils(); + + tmpr = new TypeMapper(); + + String optv; + + optv = opts.get( "ddr.name.trusted"); + if ( null != optv ) + nameTrusted = Identifier.Simple.fromJava(optv); + else + nameTrusted = Identifier.Simple.fromJava("java"); + + optv = opts.get( "ddr.name.untrusted"); + if ( null != optv ) + nameUntrusted = Identifier.Simple.fromJava(optv); + else + nameUntrusted = Identifier.Simple.fromJava("javaU"); + + optv = opts.get( "ddr.implementor"); + if ( null != optv ) + defaultImplementor = "-".equals( optv) ? null : + Identifier.Simple.fromJava(optv); + else + defaultImplementor = Identifier.Simple.fromJava("PostgreSQL"); + + optv = opts.get( "ddr.output"); + if ( null != optv ) + output = optv; + else + output = "pljava.ddr"; + + optv = opts.get( "ddr.reproducible"); + if ( null != optv ) + reproducible = Boolean.parseBoolean( optv); + else + reproducible = true; + + snippetTiebreaker = reproducible ? 
new SnippetTiebreaker() : null; + + TY_ITERATOR = declaredTypeForClass(java.util.Iterator.class); + TY_OBJECT = declaredTypeForClass(Object.class); + TY_RESULTSET = declaredTypeForClass(java.sql.ResultSet.class); + TY_RESULTSETPROVIDER = declaredTypeForClass(ResultSetProvider.class); + TY_RESULTSETHANDLE = declaredTypeForClass(ResultSetHandle.class); + TY_SQLDATA = declaredTypeForClass(SQLData.class); + TY_SQLINPUT = declaredTypeForClass(SQLInput.class); + TY_SQLOUTPUT = declaredTypeForClass(SQLOutput.class); + TY_STRING = declaredTypeForClass(String.class); + TY_TRIGGERDATA = declaredTypeForClass(TriggerData.class); + TY_VOID = typu.getNoType(TypeKind.VOID); + + AN_FUNCTION = elmu.getTypeElement( Function.class.getName()); + AN_SQLTYPE = elmu.getTypeElement( SQLType.class.getName()); + AN_TRIGGER = elmu.getTypeElement( Trigger.class.getName()); + AN_BASEUDT = elmu.getTypeElement( BaseUDT.class.getName()); + AN_MAPPEDUDT = elmu.getTypeElement( MappedUDT.class.getName()); + + // Repeatable annotations and their containers. + // + AN_SQLACTION = elmu.getTypeElement( SQLAction.class.getName()); + AN_SQLACTIONS = elmu.getTypeElement( SQLActions.class.getName()); + AN_CAST = elmu.getTypeElement( Cast.class.getName()); + AN_CASTS = elmu.getTypeElement( + Cast.Container.class.getCanonicalName()); + AN_AGGREGATE = elmu.getTypeElement( Aggregate.class.getName()); + AN_AGGREGATES = elmu.getTypeElement( + Aggregate.Container.class.getCanonicalName()); + AN_OPERATOR = elmu.getTypeElement( Operator.class.getName()); + AN_OPERATORS = elmu.getTypeElement( + Operator.Container.class.getCanonicalName()); + } + + void msg( Kind kind, String fmt, Object... args) + { + msgr.printMessage( kind, String.format( fmt, args)); + } + + void msg( Kind kind, Element e, String fmt, Object... args) + { + msgr.printMessage( kind, String.format( fmt, args), e); + } + + void msg( Kind kind, Element e, AnnotationMirror a, + String fmt, Object... 
args) + { + msgr.printMessage( kind, String.format( fmt, args), e, a); + } + + void msg( Kind kind, Element e, AnnotationMirror a, AnnotationValue v, + String fmt, Object... args) + { + msgr.printMessage( kind, String.format( fmt, args), e, a, v); + } + + /** + * Map a {@code Class} to a {@code TypeElement} and from there to a + * {@code DeclaredType}. + *

    + * This needs to work around some weird breakage in javac 10 and 11 when + * given a {@code --release} option naming an earlier release, as described + * in commit c763cee. The version of of {@code getTypeElement} with a module + * parameter is needed then, because the other version will go bonkers and + * think it found the class in every module that transitively requires + * its actual module and then return null because the result wasn't + * unique. That got fixed in Java 12, but because 11 is the LTS release and + * there won't be another for a while yet, it is better to work around the + * issue here. + *

    + * If not supporting Java 10 or 11, this could be simplified to + * {@code typu.getDeclaredType(elmu.getTypeElement(className))}. + */ + private DeclaredType declaredTypeForClass(Class clazz) + { + String className = clazz.getName(); + String moduleName = clazz.getModule().getName(); + + TypeElement e; + + if ( null == moduleName ) + e = elmu.getTypeElement(className); + else + { + ModuleElement m = elmu.getModuleElement(moduleName); + if ( null == m ) + e = elmu.getTypeElement(className); + else + e = elmu.getTypeElement(m, className); + } + + requireNonNull(e, + () -> "unexpected failure to resolve TypeElement " + className); + + DeclaredType t = typu.getDeclaredType(e); + + requireNonNull(t, + () -> "unexpected failure to resolve DeclaredType " + e); + + return t; + } + + /** + * Key usable in a mapping from (Object, Snippet-subtype) to Snippet. + * Because there's no telling in which order a Map implementation will + * compare two keys, the class matches if either one is assignable to + * the other. That's ok as long as the Snippet-subtype is never Snippet + * itself, no Object ever has two Snippets hung on it where one extends + * the other, and getSnippet is always called for the widest of any of + * the types it may retrieve. + */ + static final class SnippetsKey + { + final Object o; + final Class c; + SnippetsKey(Object o, Class c) + { + assert Snippet.class != c : "Snippet key must be a subtype"; + this.o = o; + this.c = c; + } + public boolean equals(Object oth) + { + if ( ! (oth instanceof SnippetsKey) ) + return false; + SnippetsKey osk = (SnippetsKey)oth; + return o.equals( osk.o) + && ( c.isAssignableFrom( osk.c) || osk.c.isAssignableFrom( c) ); + } + public int hashCode() + { + return o.hashCode(); // must not depend on c (subtypes will match) + } + } + + /** + * Collection of code snippets being accumulated (possibly over more than + * one round), keyed by the object for which each snippet has been + * generated. 
+ */ + /* + * This is a LinkedHashMap so that the order of handling annotation types + * in process() below will be preserved in calling their characterize() + * methods at end-of-round, and so, for example, characterize() on a Cast + * can use values set by characterize() on an associated Function. + */ + Map snippets = new LinkedHashMap<>(); + + S getSnippet(Object o, Class c, Supplier ctor) + { + return + c.cast(snippets + .computeIfAbsent(new SnippetsKey( o, c), k -> ctor.get())); + } + + void putSnippet( Object o, Snippet s) + { + snippets.put( new SnippetsKey( o, s.getClass()), s); + } + + /** + * Queue on which snippets are entered in preparation for topological + * ordering. Has to be an instance field because populating the queue + * (which involves invoking the snippets' characterize methods) cannot + * be left to generateDescriptor, which runs in the final round. This is + * (AFAICT) another workaround for javac 7's behavior of throwing away + * symbol tables between rounds; when characterize was invoked in + * generateDescriptor, any errors reported were being shown with no source + * location info, because it had been thrown away. + */ + List> snippetVPairs = new ArrayList<>(); + + /** + * Map from each arbitrary provides/requires label to the snippet + * that 'provides' it (snippets, in some cases). Has to be out here as an + * instance field for the same reason {@code snippetVPairs} does. + *

    + * Originally limited each tag to have only one provider; that is still + * enforced for implicitly-generated tags, but relaxed for explicit ones + * supplied in annotations, hence the list. + */ + Map>> provider = new HashMap<>(); + + /** + * Find the elements in each round that carry any of the annotations of + * interest and generate code snippets accordingly. On the last round, with + * all processing complete, generate the deployment descriptor file. + */ + boolean process( Set tes, RoundEnvironment re) + { + boolean functionPresent = false; + boolean sqlActionPresent = false; + boolean baseUDTPresent = false; + boolean mappedUDTPresent = false; + boolean castPresent = false; + boolean aggregatePresent = false; + boolean operatorPresent = false; + + boolean willClaim = true; + + for ( TypeElement te : tes ) + { + if ( AN_FUNCTION.equals( te) ) + functionPresent = true; + else if ( AN_BASEUDT.equals( te) ) + baseUDTPresent = true; + else if ( AN_MAPPEDUDT.equals( te) ) + mappedUDTPresent = true; + else if ( AN_SQLTYPE.equals( te) ) + ; // these are handled within FunctionImpl + else if ( AN_SQLACTION.equals( te) || AN_SQLACTIONS.equals( te) ) + sqlActionPresent = true; + else if ( AN_CAST.equals( te) || AN_CASTS.equals( te) ) + castPresent = true; + else if ( AN_AGGREGATE.equals( te) || AN_AGGREGATES.equals( te) ) + aggregatePresent = true; + else if ( AN_OPERATOR.equals( te) || AN_OPERATORS.equals( te) ) + operatorPresent = true; + else + { + msg( Kind.WARNING, te, + "PL/Java annotation processor version may be older than " + + "this annotation:\n%s", te.toString()); + willClaim = false; + } + } + + if ( baseUDTPresent ) + for ( Element e : re.getElementsAnnotatedWith( AN_BASEUDT) ) + processUDT( e, UDTKind.BASE); + + if ( mappedUDTPresent ) + for ( Element e : re.getElementsAnnotatedWith( AN_MAPPEDUDT) ) + processUDT( e, UDTKind.MAPPED); + + if ( functionPresent ) + for ( Element e : re.getElementsAnnotatedWith( AN_FUNCTION) ) + processFunction( e); + + 
if ( sqlActionPresent ) + for ( Element e + : re.getElementsAnnotatedWithAny( AN_SQLACTION, AN_SQLACTIONS) ) + processRepeatable( + e, AN_SQLACTION, AN_SQLACTIONS, SQLActionImpl.class, null); + + if ( castPresent ) + for ( Element e + : re.getElementsAnnotatedWithAny( AN_CAST, AN_CASTS) ) + processRepeatable( + e, AN_CAST, AN_CASTS, CastImpl.class, null); + + if ( operatorPresent ) + for ( Element e + : re.getElementsAnnotatedWithAny( AN_OPERATOR, AN_OPERATORS) ) + processRepeatable( + e, AN_OPERATOR, AN_OPERATORS, OperatorImpl.class, + this::operatorPreSynthesize); + + if ( aggregatePresent ) + for ( Element e + : re.getElementsAnnotatedWithAny( AN_AGGREGATE, AN_AGGREGATES) ) + processRepeatable( + e, AN_AGGREGATE, AN_AGGREGATES, AggregateImpl.class, null); + + tmpr.workAroundJava7Breakage(); // perhaps to be fixed in Java 9? nope. + + if ( ! re.processingOver() ) + defensiveEarlyCharacterize(); + else if ( ! re.errorRaised() ) + generateDescriptor(); + + return willClaim; + } + + /** + * Iterate over collected snippets, characterize them, and enter them + * (if no error) in the data structures for topological ordering. Was + * originally the first part of {@code generateDescriptor}, but that is + * run in the final round, which is too late for javac 7 anyway, which + * throws symbol tables away between rounds. Any errors reported from + * characterize were being shown without source locations, because the + * information was gone. This may now be run more than once, so the + * {@code snippets} map is cleared before returning. + */ + void defensiveEarlyCharacterize() + { + for ( Snippet snip : snippets.values() ) + { + Set ready = snip.characterize(); + for ( Snippet readySnip : ready ) + { + VertexPair v = new VertexPair<>( readySnip); + snippetVPairs.add( v); + for ( DependTag t : readySnip.provideTags() ) + { + List> ps = + provider.computeIfAbsent(t, k -> new ArrayList<>()); + /* + * Explicit tags are allowed more than one provider. 
+ */ + if ( t instanceof DependTag.Explicit || ps.isEmpty() ) + ps.add(v); + else + msg(Kind.ERROR, "tag %s has more than one provider", t); + } + } + } + snippets.clear(); + } + + /** + * Arrange the collected snippets into a workable sequence (nothing with + * requires="X" can come before whatever has provides="X"), then create + * a deployment descriptor file in proper form. + */ + void generateDescriptor() + { + boolean errorRaised = false; + Set fwdConsumers = new HashSet<>(); + Set revConsumers = new HashSet<>(); + + for ( VertexPair v : snippetVPairs ) + { + List> ps; + + /* + * First handle the implicit requires(implementor()). This is unlike + * the typical provides/requires relationship, in that it does not + * reverse when generating the 'remove' actions. Conditions that + * determined what got installed must also be evaluated early and + * determine what gets removed. + */ + Identifier.Simple impName = v.payload().implementorName(); + DependTag imp = v.payload().implementorTag(); + if ( null != imp ) + { + ps = provider.get( imp); + if ( null != ps ) + { + fwdConsumers.add( imp); + revConsumers.add( imp); + + ps.forEach(p -> + { + p.fwd.precede( v.fwd); + p.rev.precede( v.rev); + + /* + * A snippet providing an implementor tag probably has + * no undeployStrings, because its deployStrings should + * be used on both occasions; if so, replace it with a + * proxy that returns deployStrings for undeployStrings. + */ + if ( 0 == p.rev.payload.undeployStrings().length ) + p.rev.payload = new ImpProvider( p.rev.payload); + }); + } + else if ( ! defaultImplementor.equals( impName, msgr) ) + { + /* + * Don't insist that every implementor tag have a provider + * somewhere in the code. Perhaps the environment will + * provide it at load time. 
If this is not the default + * implementor, bump the relying vertices' indegree anyway + * so the snippet won't be emitted until the cycle-breaker + * code (see below) sets it free after any others that + * can be handled first. + */ + ++ v.fwd.indegree; + ++ v.rev.indegree; + } + } + for ( DependTag s : v.payload().requireTags() ) + { + ps = provider.get( s); + if ( null != ps ) + { + fwdConsumers.add( s); + revConsumers.add( s); + ps.forEach(p -> + { + p.fwd.precede( v.fwd); + v.rev.precede( p.rev); // these relationships do reverse + }); + } + else if ( s instanceof DependTag.Explicit ) + { + msg( Kind.ERROR, + "tag \"%s\" is required but nowhere provided", s); + errorRaised = true; + } + } + } + + if ( errorRaised ) + return; + + Queue> fwdBlocked = new LinkedList<>(); + Queue> revBlocked = new LinkedList<>(); + + Queue> fwdReady; + Queue> revReady; + if ( reproducible ) + { + fwdReady = new PriorityQueue<>( 11, snippetTiebreaker); + revReady = new PriorityQueue<>( 11, snippetTiebreaker); + } + else + { + fwdReady = new LinkedList<>(); + revReady = new LinkedList<>(); + } + + for ( VertexPair vp : snippetVPairs ) + { + Vertex v = vp.fwd; + if ( 0 == v.indegree ) + fwdReady.add( v); + else + fwdBlocked.add( v); + v = vp.rev; + if ( 0 == v.indegree ) + revReady.add( v); + else + revBlocked.add( v); + } + + Snippet[] fwdSnips = order( fwdReady, fwdBlocked, fwdConsumers, true); + Snippet[] revSnips = order( revReady, revBlocked, revConsumers, false); + + if ( null == fwdSnips || null == revSnips ) + return; // error already reported + + try + { + DDRWriter.emit( fwdSnips, revSnips, this); + } + catch ( IOException ioe ) + { + msg( Kind.ERROR, "while writing %s: %s", output, ioe.getMessage()); + } + } + + /** + * Given a Snippet DAG, either the forward or reverse one, return the + * snippets in a workable order. + * @return Array of snippets in order, or null if no suitable order could + * be found. 
+ */ + Snippet[] order( + Queue> ready, Queue> blocked, + Set consumer, boolean deploying) + { + ArrayList snips = new ArrayList<>(ready.size()+blocked.size()); + Vertex cycleBreaker = null; + +queuerunning: + for ( ; ; ) + { + while ( ! ready.isEmpty() ) + { + Vertex v = ready.remove(); + snips.add(v.payload); + v.use(ready, blocked); + for ( DependTag p : v.payload.provideTags() ) + consumer.remove(p); + } + if ( blocked.isEmpty() ) + break; // all done + + /* + * There are snippets remaining to output but they all have + * indegree > 0, normally a 'cycle' error. But some may have + * breakCycle methods that can help. Add any vertices they return + * onto the ready queue (all at once, so that for reproducible + * builds, the ready queue's ordering constraints will take effect). + */ + boolean cycleBroken = false; + for ( Iterator> it = blocked.iterator(); + it.hasNext(); ) + { + Vertex v = it.next(); + cycleBreaker = v.payload.breakCycle(v, deploying); + if ( null == cycleBreaker ) + continue; + /* + * If v supplied another vertex to go on the ready queue, leave + * v on the blocked queue; it should become ready in due course. + * If v nominated itself as cycle breaker, remove from blocked. + */ + if ( cycleBreaker == v ) + it.remove(); + ready.add(cycleBreaker); + cycleBroken = true; + } + if ( cycleBroken ) + continue; + + /* + * A cycle was detected and no snippet's breakCycle method broke it, + * but there may yet be a way. Somewhere there may be a vertex + * with indegree exactly 1 and an implicit requirement of its + * own implementor tag, with no snippet on record to provide it. + * That's allowed (maybe the installing/removing environment will + * be "providing" that tag anyway), so set one such snippet free + * and see how much farther we get. 
+ */ + for ( Iterator> it = blocked.iterator(); + it.hasNext(); ) + { + Vertex v = it.next(); + if ( 1 < v.indegree ) + continue; + Identifier.Simple impName = v.payload.implementorName(); + if ( null == impName + || defaultImplementor.equals( impName, msgr) ) + continue; + if ( provider.containsKey( v.payload.implementorTag()) ) + continue; + if ( reproducible ) + { + if (null == cycleBreaker || + 0 < snippetTiebreaker.compare(cycleBreaker, v)) + cycleBreaker = v; + } + else + { + -- v.indegree; + it.remove(); + ready.add( v); + continue queuerunning; + } + } + if ( null != cycleBreaker ) + { + blocked.remove( cycleBreaker); + -- cycleBreaker.indegree; + ready.add( cycleBreaker); + cycleBreaker = null; + continue; + } + /* + * Got here? It's a real cycle ... nothing to be done. + */ + for ( DependTag s : consumer ) + msg( Kind.ERROR, "requirement in a cycle: %s", s); + return null; + } + return snips.toArray(new Snippet[snips.size()]); + } + + void putRepeatableSnippet(Element e, T snip) + { + if ( null != snip ) + putSnippet( snip, (Snippet)snip); + } + + /** + * Process an element carrying a repeatable annotation, the container + * of that repeatable annotation, or both. + *

    + * Snippets corresponding to repeatable annotations might not be entered in the + * {@code snippets} map keyed by the target element, as that might not be + * unique. Each populated snippet is passed to putter along with + * the element it annotates, and putter determines what to do with + * it. If putter is null, the default enters the snippet with a key + * made from its class and itself, as typical repeatable snippets are are + * not expected to be looked up, only processed when all of the map entries + * are enumerated. + *

    + * After all snippets of the desired class have been processed for a given + * element, a final call to putter is made passing the element and + * null for the snippet. + */ + void processRepeatable( + Element e, TypeElement annot, TypeElement container, Class clazz, + BiConsumer putter) + { + if ( null == putter ) + putter = this::putRepeatableSnippet; + + for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) + { + Element asElement = am.getAnnotationType().asElement(); + if ( asElement.equals( annot) ) + { + T snip; + try + { + snip = clazz.getDeclaredConstructor( DDRProcessorImpl.class, + Element.class, AnnotationMirror.class) + .newInstance( DDRProcessorImpl.this, e, am); + } + catch ( ReflectiveOperationException re ) + { + throw new RuntimeException( + "Incorrect implementation of annotation processor", re); + } + populateAnnotationImpl( snip, e, am); + putter.accept( e, snip); + } + else if ( asElement.equals( container) ) + { + Container c = new Container<>(clazz); + populateAnnotationImpl( c, e, am); + for ( T snip : c.value() ) + putter.accept( e, snip); + } + } + + putter.accept( e, null); + } + + static enum UDTKind { BASE, MAPPED } + + /** + * Process a single element annotated with @BaseUDT or @MappedUDT, as + * indicated by the UDTKind k. + */ + void processUDT( Element e, UDTKind k) + { + /* + * The allowed target type for the UDT annotations is TYPE, which can + * be a class, interface (including annotation type) or enum, of which + * only CLASS is valid here. If it is anything else, just return, as + * that can only mean a source error prevented the compiler making sense + * of it, and the compiler will have its own messages about that. + */ + switch ( e.getKind() ) + { + case CLASS: + break; + case ANNOTATION_TYPE: + case ENUM: + case INTERFACE: + msg( Kind.ERROR, e, "A PL/Java UDT must be a class"); + default: + return; + } + Set mods = e.getModifiers(); + if ( ! 
mods.contains( Modifier.PUBLIC) ) + { + msg( Kind.ERROR, e, "A PL/Java UDT must be public"); + } + if ( mods.contains( Modifier.ABSTRACT) ) + { + msg( Kind.ERROR, e, "A PL/Java UDT must not be abstract"); + } + if ( ! ((TypeElement)e).getNestingKind().equals( + NestingKind.TOP_LEVEL) ) + { + if ( ! mods.contains( Modifier.STATIC) ) + { + msg( Kind.ERROR, e, + "When nested, a PL/Java UDT must be static (not inner)"); + } + for ( Element ee = e; null != ( ee = ee.getEnclosingElement() ); ) + { + if ( ! ee.getModifiers().contains( Modifier.PUBLIC) ) + msg( Kind.ERROR, ee, + "A PL/Java UDT must not have a non-public " + + "enclosing class"); + if ( ((TypeElement)ee).getNestingKind().equals( + NestingKind.TOP_LEVEL) ) + break; + } + } + + switch ( k ) + { + case BASE: + BaseUDTImpl bu = getSnippet( e, BaseUDTImpl.class, () -> + new BaseUDTImpl( (TypeElement)e)); + for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) + { + if ( am.getAnnotationType().asElement().equals( AN_BASEUDT) ) + populateAnnotationImpl( bu, e, am); + } + bu.registerFunctions(); + break; + + case MAPPED: + MappedUDTImpl mu = getSnippet( e, MappedUDTImpl.class, () -> + new MappedUDTImpl( (TypeElement)e)); + for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) + { + if ( am.getAnnotationType().asElement().equals( AN_MAPPEDUDT) ) + populateAnnotationImpl( mu, e, am); + } + mu.registerMapping(); + break; + } + } + + ExecutableElement huntFor(List ees, String name, + boolean isStatic, TypeMirror retType, TypeMirror... paramTypes) + { + ExecutableElement quarry = null; +hunt: for ( ExecutableElement ee : ees ) + { + if ( null != name && ! ee.getSimpleName().contentEquals( name) ) + continue; + if ( ee.isVarArgs() ) + continue; + if ( null != retType + && ! 
typu.isSameType( ee.getReturnType(), retType) ) + continue; + List pts = + ((ExecutableType)ee.asType()).getParameterTypes(); + if ( pts.size() != paramTypes.length ) + continue; + for ( int i = 0; i < paramTypes.length; ++i ) + if ( ! typu.isSameType( pts.get( i), paramTypes[i]) ) + continue hunt; + Set mods = ee.getModifiers(); + if ( ! mods.contains( Modifier.PUBLIC) ) + continue; + if ( isStatic && ! mods.contains( Modifier.STATIC) ) + continue; + if ( null == quarry ) + quarry = ee; + else + { + msg( Kind.ERROR, ee, + "Found more than one candidate " + + (null == name ? "constructor" : (name + " method"))); + } + } + return quarry; + } + + /** + * Process a single element annotated with @Function. After checking that + * it has the right modifiers to be called via PL/Java, analyze its type + * information and annotations and register an appropriate SQL code snippet. + */ + void processFunction( Element e) + { + /* + * METHOD is the only target type allowed for the Function annotation, + * so the only way for e to be anything else is if some source error has + * prevented the compiler making sense of it. In that case just return + * silently on the assumption that the compiler will have its own + * message about the true problem. + */ + if ( ! ElementKind.METHOD.equals( e.getKind()) ) + return; + + Set mods = e.getModifiers(); + if ( ! mods.contains( Modifier.PUBLIC) ) + { + msg( Kind.ERROR, e, "A PL/Java function must be public"); + } + + for ( Element ee = e; null != ( ee = ee.getEnclosingElement() ); ) + { + ElementKind ek = ee.getKind(); + switch ( ek ) + { + case CLASS: + case INTERFACE: + break; + default: + msg( Kind.ERROR, ee, + "A PL/Java function must not have an enclosing " + ek); + return; + } + + // It's a class or interface, represented by TypeElement + TypeElement te = (TypeElement)ee; + mods = ee.getModifiers(); + + if ( ! 
mods.contains( Modifier.PUBLIC) ) + msg( Kind.ERROR, ee, + "A PL/Java function must not have a non-public " + + "enclosing class"); + + if ( ! te.getNestingKind().isNested() ) + break; // no need to look above top-level class + } + + FunctionImpl f = getSnippet( e, FunctionImpl.class, () -> + new FunctionImpl( (ExecutableElement)e)); + for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) + { + if ( am.getAnnotationType().asElement().equals( AN_FUNCTION) ) + populateAnnotationImpl( f, e, am); + } + } + + /** + * Populate an array of specified type from an annotation value + * representing an array. + * + * AnnotationValue's getValue() method returns Object, where the + * object is known to be an instance of one of a small set of classes. + * Populating an array when that value represents one is a common + * operation, so it is factored out here. + */ + static T[] avToArray( Object o, Class k) + { + boolean isEnum = k.isEnum(); + + @SuppressWarnings({"unchecked"}) + List vs = (List)o; + + @SuppressWarnings({"unchecked"}) + T[] a = (T[])Array.newInstance( k, vs.size()); + + int i = 0; + for ( AnnotationValue av : vs ) + { + Object v = getValue( av); + if ( isEnum ) + { + @SuppressWarnings({"unchecked"}) + T t = (T)Enum.valueOf( k.asSubclass( Enum.class), + ((VariableElement)v).getSimpleName().toString()); + a[i++] = t; + } + else + a[i++] = k.cast( v); + } + return a; + } + + /** + * Abstract superclass for synthetic implementations of annotation + * interfaces; these can be populated with element-value pairs from + * an AnnotationMirror and then used in the natural way for access to + * the values. Each subclass of this should implement the intended + * annotation interface, and should also have a + * setFoo(Object,boolean,Element) method for each foo() method in the + * interface. Rather than longwindedly using the type system to enforce + * that the needed setter methods are all there, they will be looked + * up using reflection. 
+ */ + class AbstractAnnotationImpl implements Annotation + { + private Set m_provideTags = new HashSet<>(); + private Set m_requireTags = new HashSet<>(); + + @Override + public Class annotationType() + { + throw new UnsupportedOperationException(); + } + + /** + * Supply the required implementor() method for those subclasses + * that will implement {@link Snippet}. + */ + public String implementor() + { + return null == _implementor ? null : _implementor.pgFolded(); + } + + /** + * Supply the required implementor() method for those subclasses + * that will implement {@link Snippet}. + */ + public Identifier.Simple implementorName() + { + return _implementor; + } + + Identifier.Simple _implementor = defaultImplementor; + String _comment; + boolean commentDerived; + + public void setImplementor( Object o, boolean explicit, Element e) + { + if ( explicit ) + _implementor = "".equals( o) ? null : + Identifier.Simple.fromJava((String)o, msgr); + } + + @Override + public String toString() + { + return String.format( + "(%s)%s", getClass().getSimpleName(), _comment); + } + + public String comment() { return _comment; } + + public void setComment( Object o, boolean explicit, Element e) + { + if ( explicit ) + { + _comment = (String)o; + if ( "".equals( _comment) ) + _comment = null; + } + else + { + _comment = ((Commentable)this).derivedComment( e); + commentDerived = true; + } + } + + protected void replaceCommentIfDerived( String comment) + { + if ( ! 
commentDerived ) + return; + commentDerived = false; + _comment = comment; + } + + public String derivedComment( Element e) + { + String dc = elmu.getDocComment( e); + if ( null == dc ) + return null; + return firstSentence( dc); + } + + public String firstSentence( String s) + { + BreakIterator bi = BreakIterator.getSentenceInstance( loca); + bi.setText( s); + int start = bi.first(); + int end = bi.next(); + if ( BreakIterator.DONE == end ) + return null; + return s.substring( start, end).trim(); + } + + /** + * Called by a snippet's {@code characterize} method to install its + * explicit, annotation-supplied 'provides' / 'requires' strings, if + * any, into the {@code provideTags} and {@code requireTags} sets, then + * making those sets immutable. + */ + protected void recordExplicitTags(String[] provides, String[] requires) + { + if ( null != provides ) + for ( String s : provides ) + m_provideTags.add(new DependTag.Explicit(s)); + if ( null != requires ) + for ( String s : requires ) + m_requireTags.add(new DependTag.Explicit(s)); + m_provideTags = unmodifiableSet(m_provideTags); + m_requireTags = unmodifiableSet(m_requireTags); + } + + /** + * Return the set of 'provide' tags, mutable before + * {@code recordExplicitTags} has been called, immutable thereafter. + */ + public Set provideTags() + { + return m_provideTags; + } + + /** + * Return the set of 'require' tags, mutable before + * {@code recordExplicitTags} has been called, immutable thereafter. + */ + public Set requireTags() + { + return m_requireTags; + } + } + + class Repeatable extends AbstractAnnotationImpl + { + final Element m_targetElement; + final AnnotationMirror m_origin; + + Repeatable(Element e, AnnotationMirror am) + { + m_targetElement = e; + m_origin = am; + } + } + + /** + * Populate an AbstractAnnotationImpl-derived Annotation implementation + * from the element-value pairs in an AnnotationMirror. 
For each element + * foo in the annotation interface, the implementation is assumed to have + * a method setFoo(Object o, boolean explicit, element e) where o is the + * element's value as obtained from AnnotationValue.getValue(), explicit + * indicates whether the element was explicitly present in the annotation + * or filled in from a default value, and e is the element carrying the + * annotation (chiefly for use as a location hint in diagnostic messages). + * + * Some of the annotation implementations below will leave certain elements + * null if they were not given explicit values, in order to have a clear + * indication that they were defaulted, even though that is not the way + * normal annotation objects behave. + * + * If a setFoo(Object o, boolean explicit, element e) method is not found + * but there is an accessible field _foo it will be set directly, but only + * if the value was explicitly present in the annotation or the field value + * is null. By this convention, an implementation can declare a field + * initially null and let its default value be filled in from what the + * annotation declares, or initially some non-null value distinct from + * possible annotation values, and be able to tell whether it was explicitly + * set. Note that a field of primitive type will never be seen as null. + */ + void populateAnnotationImpl( + AbstractAnnotationImpl inst, Element e, AnnotationMirror am) + { + Map explicit = + am.getElementValues(); + Map defaulted = + elmu.getElementValuesWithDefaults( am); + + // Astonishingly, even though JLS3 9.7 clearly says "annotations must + // contain an element-value pair for every element of the corresponding + // annotation type, except for those elements with default values, or a + // compile-time error occurs" - in Sun 1.6.0_39 javac never flags + // the promised error, and instead allows us to NPE on something that + // ought to be guaranteed to be there! 
>:[ + // + // If you want something done right, you have to do it yourself.... + // + + Element anne = am.getAnnotationType().asElement(); + List keys = methodsIn( anne.getEnclosedElements()); + for ( ExecutableElement k : keys ) + if ( ! defaulted.containsKey( k) ) + msg( Kind.ERROR, e, am, + "annotation missing required element \"%s\"", + k.getSimpleName()); + + for ( + Map.Entry me + : defaulted.entrySet() + ) + { + ExecutableElement k = me.getKey(); + AnnotationValue av = me.getValue(); + boolean isExplicit = explicit.containsKey( k); + String name = k.getSimpleName().toString(); + Class kl = inst.getClass(); + try + { + Object v = getValue( av); + kl.getMethod( // let setter for foo() be setFoo() + "set"+name.substring( 0, 1).toUpperCase() + + name.substring( 1), + Object.class, boolean.class, Element.class) + .invoke(inst, v, isExplicit, e); + } + catch (AnnotationValueException ave) + { + msg( Kind.ERROR, e, am, + "unresolved value for annotation member \"%s\"" + + " (check for missing/misspelled import, etc.)", + name); + } + catch (NoSuchMethodException nsme) + { + Object v = getValue( av); + try + { + Field f = kl.getField( "_"+name); + Class fkl = f.getType(); + if ( ! 
isExplicit && null != f.get( inst) ) + continue; + if ( fkl.isArray() ) + { + try { + f.set( inst, avToArray( v, fkl.getComponentType())); + } + catch (AnnotationValueException ave) + { + msg( Kind.ERROR, e, am, + "unresolved value for an element of annotation" + + " member \"%s\" (check for missing/misspelled" + + " import, etc.)", + name); + } + } + else if ( fkl.isEnum() ) + { + @SuppressWarnings("unchecked") + Object t = Enum.valueOf( fkl.asSubclass( Enum.class), + ((VariableElement)v).getSimpleName().toString()); + f.set( inst, t); + } + else + f.set( inst, v); + nsme = null; + } + catch (NoSuchFieldException | IllegalAccessException ex) { } + if ( null != nsme ) + throw new RuntimeException( + "Incomplete implementation in annotation processor", + nsme); + } + catch (IllegalAccessException iae) + { + throw new RuntimeException( + "Incorrect implementation of annotation processor", iae); + } + catch (InvocationTargetException ite) + { + String msg = ite.getCause().getMessage(); + msg( Kind.ERROR, e, am, av, "%s", msg); + } + } + } + + // It could be nice to have another annotation-driven tool that could just + // generate these implementations of some annotation types.... 
+ + class SQLTypeImpl extends AbstractAnnotationImpl implements SQLType + { + public String value() { return _value; } + public String[] defaultValue() { return _defaultValue; } + public boolean optional() { return Boolean.TRUE.equals(_optional); } + public String name() { return _name; } + + String _value; + String[] _defaultValue; + String _name; + Boolean _optional; // boxed so it can be null if not explicit + + public void setValue( Object o, boolean explicit, Element e) + { + if ( explicit ) + _value = (String)o; + } + + public void setDefaultValue( Object o, boolean explicit, Element e) + { + if ( explicit ) + _defaultValue = avToArray( o, String.class); + } + + public void setOptional( Object o, boolean explicit, Element e) + { + if ( explicit ) + _optional = (Boolean)o; + } + + public void setName( Object o, boolean explicit, Element e) + { + if ( ! explicit ) + return; + + _name = (String)o; + if ( _name.startsWith( "\"") + && ! Lexicals.ISO_DELIMITED_IDENTIFIER.matcher( _name).matches() + ) + msg( Kind.WARNING, e, "malformed parameter name: %s", _name); + } + } + + class Container + extends AbstractAnnotationImpl + { + public T[] value() { return _value; } + + T[] _value; + final Class _clazz; + + Container(Class clazz) + { + _clazz = clazz; + } + + public void setValue( Object o, boolean explicit, Element e) + { + AnnotationMirror[] ams = avToArray( o, AnnotationMirror.class); + + @SuppressWarnings("unchecked") + T[] t = (T[])Array.newInstance( _clazz, ams.length); + _value = t; + + int i = 0; + for ( AnnotationMirror am : ams ) + { + try + { + T a = _clazz.getDeclaredConstructor(DDRProcessorImpl.class, + Element.class, AnnotationMirror.class) + .newInstance(DDRProcessorImpl.this, e, am); + populateAnnotationImpl( a, e, am); + _value [ i++ ] = a; + } + catch ( ReflectiveOperationException re ) + { + throw new RuntimeException( + "Incorrect implementation of annotation processor", re); + } + } + } + } + + class SQLActionImpl + extends Repeatable + 
implements SQLAction, Snippet + { + SQLActionImpl(Element e, AnnotationMirror am) + { + super(e, am); + } + + public String[] install() { return _install; } + public String[] remove() { return _remove; } + public String[] provides() { return _provides; } + public String[] requires() { return _requires; } + + public String[] deployStrings() { return _install; } + public String[] undeployStrings() { return _remove; } + + public String[] _install; + public String[] _remove; + public String[] _provides; + public String[] _requires; + + public Set characterize() + { + recordExplicitTags(_provides, _requires); + return Set.of(this); + } + } + + class TriggerImpl + extends AbstractAnnotationImpl + implements Trigger, Snippet, Commentable + { + public String[] arguments() { return _arguments; } + public Constraint constraint() { return _constraint; } + public Event[] events() { return _events; } + public String fromSchema() { return _fromSchema; } + public String from() { return _from; } + public String name() { return _name; } + public String schema() { return _schema; } + public String table() { return _table; } + public Scope scope() { return _scope; } + public Called called() { return _called; } + public String when() { return _when; } + public String[] columns() { return _columns; } + public String tableOld() { return _tableOld; } + public String tableNew() { return _tableNew; } + + public String[] provides() { return new String[0]; } + public String[] requires() { return new String[0]; } + /* Trigger is a Snippet but doesn't directly participate in tsort */ + + public String[] _arguments; + public Constraint _constraint; + public Event[] _events; + public String _fromSchema; + public String _from; + public String _name; + public String _schema; + public String _table; + public Scope _scope; + public Called _called; + public String _when; + public String[] _columns; + public String _tableOld; + public String _tableNew; + + FunctionImpl func; + AnnotationMirror origin; 
+ + boolean refOld; + boolean refNew; + boolean isConstraint = false; + + /* The only values of the Constraint enum are those applicable to + * constraint triggers. To determine whether this IS a constraint + * trigger or not, use the 'explicit' parameter to distinguish whether + * the 'constraint' attribute was or wasn't seen in the annotation. + */ + public void setConstraint( Object o, boolean explicit, Element e) + { + if ( explicit ) + { + isConstraint = true; + _constraint = Constraint.valueOf( + ((VariableElement)o).getSimpleName().toString()); + } + } + + TriggerImpl( FunctionImpl f, AnnotationMirror am) + { + func = f; + origin = am; + } + + public Set characterize() + { + if ( Scope.ROW.equals( _scope) ) + { + for ( Event e : _events ) + if ( Event.TRUNCATE.equals( e) ) + msg( Kind.ERROR, func.func, origin, + "TRUNCATE trigger cannot be FOR EACH ROW"); + } + else if ( Called.INSTEAD_OF.equals( _called) ) + msg( Kind.ERROR, func.func, origin, + "INSTEAD OF trigger cannot be FOR EACH STATEMENT"); + + if ( ! "".equals( _when) && Called.INSTEAD_OF.equals( _called) ) + msg( Kind.ERROR, func.func, origin, + "INSTEAD OF triggers do not support WHEN conditions"); + + if ( 0 < _columns.length ) + { + if ( Called.INSTEAD_OF.equals( _called) ) + msg( Kind.ERROR, func.func, origin, + "INSTEAD OF triggers do not support lists of columns"); + boolean seen = false; + for ( Event e : _events ) + if ( Event.UPDATE.equals( e) ) + seen = true; + if ( ! seen ) + msg( Kind.ERROR, func.func, origin, + "Column list is meaningless unless UPDATE is a trigger event"); + } + + refOld = ! "".equals( _tableOld); + refNew = ! "".equals( _tableNew); + + if ( refOld || refNew ) + { + if ( ! 
Called.AFTER.equals( _called) ) + msg( Kind.ERROR, func.func, origin, + "Only AFTER triggers can reference OLD TABLE or NEW TABLE"); + boolean badOld = refOld; + boolean badNew = refNew; + for ( Event e : _events ) + { + switch ( e ) + { + case INSERT: badNew = false; break; + case UPDATE: badOld = badNew = false; break; + case DELETE: badOld = false; break; + } + } + if ( badOld ) + msg( Kind.ERROR, func.func, origin, + "Trigger must be callable on UPDATE or DELETE to reference OLD TABLE"); + if ( badNew ) + msg( Kind.ERROR, func.func, origin, + "Trigger must be callable on UPDATE or INSERT to reference NEW TABLE"); + } + + if ( isConstraint ) + { + if ( ! Called.AFTER.equals( _called) ) + msg( Kind.ERROR, func.func, origin, + "A constraint trigger must be an AFTER trigger"); + if ( ! Scope.ROW.equals( _scope) ) + msg( Kind.ERROR, func.func, origin, + "A constraint trigger must be FOR EACH ROW"); + if ( "".equals( _from) && ! "".equals( _fromSchema) ) + msg( Kind.ERROR, func.func, origin, + "To use fromSchema, specify a table name with from"); + } + else + { + if ( ! "".equals( _from) ) + msg( Kind.ERROR, func.func, origin, + "Only a constraint trigger can use 'from'"); + if ( ! 
"".equals( _fromSchema) ) + msg( Kind.ERROR, func.func, origin, + "Only a constraint trigger can use 'fromSchema'"); + } + + if ( "".equals( _name) ) + _name = TriggerNamer.synthesizeName( this); + return Set.of(); + } + + public String[] deployStrings() + { + StringBuilder sb = new StringBuilder(); + sb.append("CREATE "); + if ( isConstraint ) + { + sb.append("CONSTRAINT "); + } + sb.append("TRIGGER ").append(name()).append("\n\t"); + switch ( called() ) + { + case BEFORE: sb.append( "BEFORE " ); break; + case AFTER: sb.append( "AFTER " ); break; + case INSTEAD_OF: sb.append( "INSTEAD OF "); break; + } + int s = _events.length; + for ( Event e : _events ) + { + sb.append( e.toString()); + if ( Event.UPDATE.equals( e) && 0 < _columns.length ) + { + sb.append( " OF "); + int cs = _columns.length; + for ( String c : _columns ) + { + sb.append( c); + if ( 0 < -- cs ) + sb.append( ", "); + } + } + if ( 0 < -- s ) + sb.append( " OR "); + } + sb.append( "\n\tON "); + sb.append(qnameFrom(table(), schema())); + if ( ! "".equals( from()) ) + { + sb.append("\n\tFROM "); + sb.append(qnameFrom(from(), fromSchema())); + } + if ( isConstraint ) { + sb.append("\n\t"); + switch ( _constraint ) + { + case NOT_DEFERRABLE: + sb.append("NOT DEFERRABLE"); + break; + case INITIALLY_IMMEDIATE: + sb.append("DEFERRABLE INITIALLY IMMEDIATE"); + break; + case INITIALLY_DEFERRED: + sb.append("DEFERRABLE INITIALLY DEFERRED"); + break; + } + } + if ( refOld || refNew ) + { + sb.append( "\n\tREFERENCING"); + if ( refOld ) + sb.append( " OLD TABLE AS ").append( _tableOld); + if ( refNew ) + sb.append( " NEW TABLE AS ").append( _tableNew); + } + sb.append( "\n\tFOR EACH "); + sb.append( scope().toString()); + if ( ! 
"".equals( _when) ) + sb.append( "\n\tWHEN ").append( _when); + sb.append( "\n\tEXECUTE PROCEDURE "); + func.appendNameAndParams( sb, true, false, false); + sb.setLength( sb.length() - 1); // drop closing ) + s = _arguments.length; + for ( String a : _arguments ) + { + sb.append( "\n\t").append( DDRWriter.eQuote( a)); + if ( 0 < -- s ) + sb.append( ','); + } + sb.append( ')'); + + String comm = comment(); + if ( null == comm ) + return new String[] { sb.toString() }; + + return new String[] { + sb.toString(), + "COMMENT ON TRIGGER " + name() + " ON " + + qnameFrom(table(), schema()) + + "\nIS " + + DDRWriter.eQuote( comm) + }; + } + + public String[] undeployStrings() + { + StringBuilder sb = new StringBuilder(); + sb.append( "DROP TRIGGER ").append( name()).append( "\n\tON "); + sb.append(qnameFrom(table(), schema())); + return new String[] { sb.toString() }; + } + } + + /** + * Enumeration of different method "shapes" and the treatment of + * {@code type=} and {@code out=} annotation elements they need. + *

    + * Each member has a {@code setComposite} method that will be invoked + * by {@code checkOutType} if the method is judged to have a composite + * return type according to the annotations present. + *

    + * There is one case (no {@code out} and a {@code type} other than + * {@code RECORD}) where {@code checkOutType} will resolve the + * ambiguity by assuming composite, and will have set + * {@code assumedComposite} accordingly. The {@code MAYBECOMPOSITE} + * shape checks that assumption against the presence of a countervailing + * {@code SQLType} annotation, the {@code ITERATOR} shape clears it and + * behaves as noncomposite as always, and the {@code PROVIDER} shape + * clears it because that shape is unambiguously composite. + */ + enum MethodShape + { + /** + * Method has the shape {@code boolean foo(..., ResultSet)}, which + * could be an ordinary method with an incoming record parameter and + * boolean return, or a composite-returning method whose last + * a writable ResultSet supplied by PL/Java for the return value. + */ + MAYBECOMPOSITE((f,msgr) -> + { + boolean sqlTyped = null != + f.paramTypeAnnotations[f.paramTypeAnnotations.length - 1]; + if ( ! sqlTyped ) + f.complexViaInOut = true; + else if ( f.assumedComposite ) + f.assumedComposite = false; // SQLType cancels assumption + else + msgr.printMessage(Kind.ERROR, + "no @SQLType annotation may appear on " + + "the return-value ResultSet parameter", f.func); + }), + + /** + * Method has the shape {@code Iterator foo(...)} and represents + * a set-returning function with a non-composite return type. + *

    + * If the shape has been merely assumed composite, clear + * that flag and proceed as if it is not. Otherwise, issue an error + * that it can't be composite. + */ + ITERATOR((f,msgr) -> + { + if ( f.assumedComposite ) + f.assumedComposite = false; + else + msgr.printMessage(Kind.ERROR, + "the iterator style cannot return a row-typed result", + f.func); + }), + + /** + * Method has the shape {@code ResultSetProvider foo(...)} or + * {@code ResultSetHandle foo(...)} and represents + * a set-returning function with a non-composite return type. + *

    + * If the shape has been merely assumed composite, clear + * that flag; for this shape that assumption is not tentative. + */ + PROVIDER((f,msgr) -> f.assumedComposite = false), + + /** + * Method is something else (trigger, for example) for which no + * {@code type} or {@code out} is allowed. + *

    + * The {@code setComposite} method for this shape will never + * be called. + */ + OTHER(null); + + private final BiConsumer compositeSetter; + + MethodShape(BiConsumer setter) + { + compositeSetter = setter; + } + + void setComposite(FunctionImpl f, Messager msgr) + { + compositeSetter.accept(f, msgr); + } + } + + class FunctionImpl + extends AbstractAnnotationImpl + implements Function, Snippet, Commentable + { + public String type() { return _type; } + public String[] out() { return _out; } + public String name() { return _name; } + public String schema() { return _schema; } + public boolean variadic() { return _variadic; } + public OnNullInput onNullInput() { return _onNullInput; } + public Security security() { return _security; } + public Effects effects() { return _effects; } + public Trust trust() { return _trust; } + public Parallel parallel() { return _parallel; } + public boolean leakproof() { return _leakproof; } + public int cost() { return _cost; } + public int rows() { return _rows; } + public String[] settings() { return _settings; } + public String[] provides() { return _provides; } + public String[] requires() { return _requires; } + public Trigger[] triggers() { return _triggers; } + public String language() + { + return _languageIdent.toString(); + } + + ExecutableElement func; + + public String _type; + public String[] _out; + public String _name; + public String _schema; + public boolean _variadic; + public OnNullInput _onNullInput; + public Security _security; + public Effects _effects; + public Trust _trust; + public Parallel _parallel; + public Boolean _leakproof; + int _cost; + int _rows; + public String[] _settings; + public String[] _provides; + public String[] _requires; + Trigger[] _triggers; + + public Identifier.Simple _languageIdent; + + boolean complexViaInOut = false; + boolean setof = false; + TypeMirror setofComponent = null; + boolean trigger = false; + TypeMirror returnTypeMapKey = null; + SQLType[] paramTypeAnnotations; 
+ + DBType returnType; + DBType[] parameterTypes; + List> outParameters; + boolean assumedComposite = false; + boolean forceResultRecord = false; + + boolean subsumed = false; + + FunctionImpl(ExecutableElement e) + { + func = e; + } + + public void setType( Object o, boolean explicit, Element e) + { + if ( explicit ) + _type = (String)o; + } + + public void setOut( Object o, boolean explicit, Element e) + { + if ( explicit ) + _out = avToArray( o, String.class); + } + + public void setTrust( Object o, boolean explicit, Element e) + { + if ( explicit ) + _trust = Trust.valueOf( + ((VariableElement)o).getSimpleName().toString()); + } + + public void setLanguage( Object o, boolean explicit, Element e) + { + if ( explicit ) + _languageIdent = Identifier.Simple.fromJava((String)o); + } + + public void setCost( Object o, boolean explicit, Element e) + { + _cost = ((Integer)o).intValue(); + if ( _cost < 0 && explicit ) + throw new IllegalArgumentException( "cost must be nonnegative"); + } + + public void setRows( Object o, boolean explicit, Element e) + { + _rows = ((Integer)o).intValue(); + if ( _rows < 0 && explicit ) + throw new IllegalArgumentException( "rows must be nonnegative"); + } + + public void setTriggers( Object o, boolean explicit, Element e) + { + AnnotationMirror[] ams = avToArray( o, AnnotationMirror.class); + _triggers = new Trigger [ ams.length ]; + int i = 0; + for ( AnnotationMirror am : ams ) + { + TriggerImpl ti = new TriggerImpl( this, am); + populateAnnotationImpl( ti, e, am); + _triggers [ i++ ] = ti; + } + } + + public Set characterize() + { + if ( "".equals( _name) ) + _name = func.getSimpleName().toString(); + + resolveLanguage(); + + Set mods = func.getModifiers(); + if ( ! 
mods.contains( Modifier.STATIC) ) + { + msg( Kind.ERROR, func, "A PL/Java function must be static"); + } + + TypeMirror ret = func.getReturnType(); + if ( ret.getKind().equals( TypeKind.ERROR) ) + { + msg( Kind.ERROR, func, + "Unable to resolve return type of function"); + return Set.of(); + } + + ExecutableType et = (ExecutableType)func.asType(); + List ptms = et.getParameterTypes(); + List typeArgs; + int arity = ptms.size(); + + /* + * Collect the parameter type annotations now, in case needed below + * in checkOutType(MAYBECOMPOSITE) to disambiguate. + */ + + collectParameterTypeAnnotations(); + + /* + * If a type= annotation is present, provisionally set returnType + * accordingly. Otherwise, leave it null, to be filled in by + * resolveParameterAndReturnTypes below. + */ + + if ( null != _type ) + returnType = DBType.fromSQLTypeAnnotation(_type); + + /* + * Take a first look according to the method's Java return type. + */ + if ( ret.getKind().equals( TypeKind.BOOLEAN) ) + { + if ( 0 < arity ) + { + TypeMirror tm = ptms.get( arity - 1); + if ( ! tm.getKind().equals( TypeKind.ERROR) + // unresolved things seem assignable to anything + && typu.isSameType( tm, TY_RESULTSET) ) + { + checkOutType(MethodShape.MAYBECOMPOSITE); + } + } + } + else if ( null != (typeArgs = specialization( ret, TY_ITERATOR)) ) + { + setof = true; + if ( 1 != typeArgs.size() ) + { + msg( Kind.ERROR, func, + "Need one type argument for Iterator return type"); + return Set.of(); + } + setofComponent = typeArgs.get( 0); + if ( null == setofComponent ) + { + msg( Kind.ERROR, func, + "Failed to find setof component type"); + return Set.of(); + } + checkOutType(MethodShape.ITERATOR); + } + else if ( typu.isAssignable( ret, TY_RESULTSETPROVIDER) + || typu.isAssignable( ret, TY_RESULTSETHANDLE) ) + { + setof = true; + checkOutType(MethodShape.PROVIDER); + } + else if ( ret.getKind().equals( TypeKind.VOID) && 1 == arity ) + { + TypeMirror tm = ptms.get( 0); + if ( ! 
tm.getKind().equals( TypeKind.ERROR) + // unresolved things seem assignable to anything + && typu.isSameType( tm, TY_TRIGGERDATA) ) + { + trigger = true; + checkOutType(MethodShape.OTHER); + } + } + + returnTypeMapKey = ret; + + if ( ! setof && -1 != rows() ) + msg( Kind.ERROR, func, + "ROWS specified on a function not returning SETOF"); + + if ( ! trigger && 0 != _triggers.length ) + msg( Kind.ERROR, func, + "a function with triggers needs void return and " + + "one TriggerData parameter"); + + /* + * Report any unmappable types now that could appear in + * deployStrings (return type or parameter types) ... so that the + * error messages won't be missing the source location, as they can + * with javac 7 throwing away symbol tables between rounds. + */ + resolveParameterAndReturnTypes(); + + if ( _variadic ) + { + int last = parameterTypes.length - 1; + if ( 0 > last || ! parameterTypes[last].isArray() ) + msg( Kind.ERROR, func, + "VARIADIC function must have a last, non-output " + + "parameter that is an array"); + } + + recordImplicitTags(); + + recordExplicitTags(_provides, _requires); + + for ( Trigger t : triggers() ) + ((TriggerImpl)t).characterize(); + return Set.of(this); + } + + void resolveLanguage() + { + if ( null != _trust && null != _languageIdent ) + msg( Kind.ERROR, func, "A PL/Java function may specify " + + "only one of trust, language"); + if ( null == _languageIdent ) + { + if ( null == _trust || Trust.SANDBOXED == _trust ) + _languageIdent = nameTrusted; + else + _languageIdent = nameUntrusted; + } + } + + /* + * Factored out of characterize() so it could be called if needed by + * BaseUDTFunctionImpl.characterize(), which does not need anything else + * from its super.characterize(). But for now it doesn't need this + * either; it knows what parameters the base UDT functions take, and it + * takes no heed of @SQLType annotations. Perhaps it should warn if such + * annotations are used, but that's for another day. 
+ */ + void collectParameterTypeAnnotations() + { + List ves = func.getParameters(); + paramTypeAnnotations = new SQLType [ ves.size() ]; + int i = 0; + boolean anyOptional = false; + for ( VariableElement ve : ves ) + { + for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( ve) ) + { + if ( am.getAnnotationType().asElement().equals(AN_SQLTYPE) ) + { + SQLTypeImpl sti = new SQLTypeImpl(); + populateAnnotationImpl( sti, ve, am); + paramTypeAnnotations[i] = sti; + + if (null != sti._optional && null != sti._defaultValue) + msg(Kind.ERROR, ve, "Only one of optional= or " + + "defaultValue= may be given"); + + anyOptional |= sti.optional(); + } + } + ++ i; + } + + if ( anyOptional && OnNullInput.RETURNS_NULL.equals(_onNullInput) ) + msg(Kind.ERROR, func, "A PL/Java function with " + + "onNullInput=RETURNS_NULL may not have parameters with " + + "optional=true"); + } + + private static final int NOOUT = 0; + private static final int ONEOUT = 4; + private static final int MOREOUT = 8; + + private static final int NOTYPE = 0; + private static final int RECORDTYPE = 1; + private static final int OTHERTYPE = 2; + + /** + * Reads the tea leaves of the {@code type=} and {@code out=} + * annotation elements to decide whether the method has a composite + * or noncomposite return. + *

    + * This is complicated by the PostgreSQL behavior of treating a function + * declared with one {@code OUT} parameter, or as + * a one-element {@code TABLE} function, as not + * returning a row type. + *

    + * This method avoids rejecting the case of a one-element {@code out=} + * with an explicit {@code type=RECORD}, to provide a way to explicitly + * request composite behavior for that case, on the chance that some + * future PostgreSQL version may accept it, though as of this writing + * no current version does. + *

    + * If the {@code MAYBECOMPOSITE} shape is used with a single {@code out} + * parameter, it is likely a mistake (what are the odds the developer + * wanted a function with a row-typed input parameter and a named out + * parameter of boolean type?), and will be rejected unless the + * {@code ResultSet} final parameter has been given an {@code SQLType} + * annotation. + */ + void checkOutType(MethodShape shape) + { + int out = + null == _out ? NOOUT : 1 == _out.length ? ONEOUT : MOREOUT; + + /* + * The caller will have set returnType from _type if present, + * or left it null otherwise. We know RECORD is a composite type; + * we don't presume here to know whether any other type is or not. + */ + int type = + null == returnType ? NOTYPE : + DT_RECORD.equals(returnType) ? RECORDTYPE : OTHERTYPE; + + if ( MethodShape.OTHER == shape && 0 != (out | type) ) + { + msg( Kind.ERROR, func, + "no type= or out= element may be applied to this method"); + return; + } + + switch ( out | type ) + { + case NOOUT | OTHERTYPE: + assumedComposite = true; // annotations not definitive; assume + shape.setComposite(this, msgr); + return; + case NOOUT | RECORDTYPE: + case MOREOUT | NOTYPE: + shape.setComposite(this, msgr); + return; + case ONEOUT | RECORDTYPE: // in case PostgreSQL one day allows this + forceResultRecord = true; + shape.setComposite(this, msgr); + return; + case ONEOUT | NOTYPE: + /* + * No special action needed here except for the MAYBECOMPOSITE + * or PROVIDER shapes, to check for likely mistakes. + */ + if ( MethodShape.MAYBECOMPOSITE == shape + && null == + paramTypeAnnotations[paramTypeAnnotations.length - 1] ) + { + msg(Kind.ERROR, func, + "a function with one declared OUT parameter returns " + + "it normally, not through an extra ResultSet " + + "parameter. 
If the trailing ResultSet parameter is " + + "intended as an input, it can be marked with an " + + "@SQLType annotation"); + } + else if ( MethodShape.PROVIDER == shape ) + { + msg(Kind.ERROR, func, + "a set-returning function with one declared OUT " + + "parameter must return an Iterator, not a " + + "ResultSetProvider or ResultSetHandle"); + } + return; + case NOOUT | NOTYPE: + /* + * No special action; MAYBECOMPOSITE will treat as noncomposite, + * ITERATOR and PROVIDER will behave as they always do. + */ + return; + case ONEOUT | OTHERTYPE: + msg( Kind.ERROR, func, + "no type= allowed here (the out parameter " + + "declares its own type)"); + return; + case MOREOUT | RECORDTYPE: + case MOREOUT | OTHERTYPE: + msg( Kind.ERROR, func, + "type= and out= may not be combined here"); + return; + default: + throw new AssertionError("unhandled case"); + } + } + + /** + * Return a stream of {@code ParameterInfo} 'records' for the function's + * parameters in order. + *

    + * If {@code paramTypeAnnotations} has not been set, every element in + * the stream will have null for {@code st}. + *

    + * If {@code parameterTypes} has not been set, every element in + * the stream will have null for {@code dt}. + */ + Stream parameterInfo() + { + if ( trigger ) + return Stream.empty(); + + ExecutableType et = (ExecutableType)func.asType(); + List tms = et.getParameterTypes(); + if ( complexViaInOut ) + tms = tms.subList( 0, tms.size() - 1); + + Iterator ves = + func.getParameters().iterator(); + + Supplier sts = + null == paramTypeAnnotations + ? () -> null + : Arrays.asList(paramTypeAnnotations).iterator()::next; + + Supplier dts = + null == parameterTypes + ? () -> null + : Arrays.asList(parameterTypes).iterator()::next; + + return tms.stream().map(tm -> + new ParameterInfo(tm, ves.next(), sts.get(), dts.get())); + } + + /** + * Create the {@code DBType}s to populate {@code returnType} and + * {@code parameterTypes}. + */ + void resolveParameterAndReturnTypes() + { + if ( null != returnType ) + /* it was already set from a type= attribute */; + else if ( null != setofComponent ) + returnType = tmpr.getSQLType( setofComponent, func); + else if ( setof ) + returnType = DT_RECORD; + else + returnType = tmpr.getSQLType( returnTypeMapKey, func); + + parameterTypes = parameterInfo() + .map(i -> tmpr.getSQLType(i.tm, i.ve, i.st, true, true)) + .toArray(DBType[]::new); + + if ( null != _out ) + { + outParameters = Arrays.stream(_out) + .map(DBType::fromNameAndType) + .collect(toList()); + if ( 1 < _out.length || forceResultRecord ) + returnType = DT_RECORD; + else + returnType = outParameters.get(0).getValue(); + } + } + + /** + * Record that this function provides itself, and requires its + * parameter and return types. + *

    + * Must be called before {@code recordExplicitTags}, which makes the + * provides and requires sets immutable. + */ + void recordImplicitTags() + { + Set provides = provideTags(); + Set requires = requireTags(); + + provides.add(new DependTag.Function( + qnameFrom(_name, _schema), parameterTypes)); + + DependTag t = returnType.dependTag(); + if ( null != t ) + requires.add(t); + + for ( DBType dbt : parameterTypes ) + { + t = dbt.dependTag(); + if ( null != t ) + requires.add(t); + } + + if ( null != outParameters ) + outParameters.stream() + .map(m -> m.getValue().dependTag()) + .filter(Objects::nonNull) + .forEach(requires::add); + } + + @Override + public void subsume() + { + subsumed = true; + } + + /** + * Append SQL syntax for the function's name (schema-qualified if + * appropriate) and parameters, either with any defaults indicated + * (for use in CREATE FUNCTION) or without (for use in DROP FUNCTION). + * + * @param sb StringBuilder in which to generate the SQL. + * @param names Whether to include the parameter names. + * @param outs Whether to include out parameters. + * @param dflts Whether to include the defaults, if any. + */ + void appendNameAndParams( + StringBuilder sb, boolean names, boolean outs, boolean dflts) + { + appendNameAndParams(sb, names, outs, dflts, + qnameFrom(name(), schema()), parameterInfo().collect(toList())); + } + + /** + * Internal version taking name and parameter stream as extra arguments + * so they can be overridden from {@link Transformed}. + */ + void appendNameAndParams( + StringBuilder sb, boolean names, boolean outs, boolean dflts, + Identifier.Qualified qname, + Iterable params) + { + sb.append(qname).append( '('); + appendParams( sb, names, outs, dflts, params); + // TriggerImpl relies on ) being the very last character + sb.append( ')'); + } + + /** + * Takes the parameter stream as an extra argument + * so it can be overridden from {@link Transformed}. 
+ */ + void appendParams( + StringBuilder sb, boolean names, boolean outs, boolean dflts, + Iterable params) + { + int lengthOnEntry = sb.length(); + + Iterator iter = params.iterator(); + ParameterInfo i; + while ( iter.hasNext() ) + { + i = iter.next(); + + String name = i.name(); + + sb.append("\n\t"); + + if ( _variadic && ! iter.hasNext() ) + sb.append("VARIADIC "); + + if ( names ) + sb.append(name).append(' '); + + sb.append(i.dt.toString(dflts)); + + sb.append(','); + } + + if ( outs && null != outParameters ) + { + outParameters.forEach(e -> { + sb.append("\n\tOUT "); + if ( null != e.getKey() ) + sb.append(e.getKey()).append(' '); + sb.append(e.getValue().toString(false)).append(','); + }); + } + + if ( lengthOnEntry < sb.length() ) + sb.setLength(sb.length() - 1); // that last pesky comma + } + + String makeAS() + { + StringBuilder sb = new StringBuilder(); + if ( ! ( complexViaInOut || setof || trigger ) ) + sb.append( typu.erasure( func.getReturnType())).append( '='); + Element e = func.getEnclosingElement(); + // e was earlier checked and ensured to be a class or interface + sb.append( elmu.getBinaryName((TypeElement)e)).append( '.'); + sb.append( trigger ? func.getSimpleName() : func.toString()); + return sb.toString(); + } + + public String[] deployStrings() + { + return deployStrings( + qnameFrom(name(), schema()), parameterInfo().collect(toList()), + makeAS(), comment()); + } + + /** + * Internal version taking the function name, parameter stream, + * AS string, and comment (if any) as extra arguments so they can be + * overridden from {@link Transformed}. + */ + String[] deployStrings( + Identifier.Qualified qname, + Iterable params, String as, String comment) + { + ArrayList al = new ArrayList<>(); + StringBuilder sb = new StringBuilder(); + if ( assumedComposite ) + sb.append("/*\n * PL/Java generated this declaration assuming" + + "\n * a composite-returning function was intended." 
+ + "\n * If a boolean function with a row-typed parameter" + + "\n * was intended, add any @SQLType annotation on the" + + "\n * ResultSet final parameter to make the intent clear." + + "\n */\n"); + if ( forceResultRecord ) + sb.append("/*\n * PL/Java generated this declaration for a" + + "\n * function with one OUT parameter that was annotated" + + "\n * to explicitly request treatment as a function that" + + "\n * returns RECORD. A given version of PostgreSQL might" + + "\n * not accept such a declaration. More at" + + "\n * https://www.postgresql.org/message-id/" + + "619BBE78.7040009%40anastigmatix.net" + + "\n */\n"); + sb.append( "CREATE OR REPLACE FUNCTION "); + appendNameAndParams( sb, true, true, true, qname, params); + sb.append( "\n\tRETURNS "); + if ( trigger ) + sb.append( DT_TRIGGER.toString()); + else + { + if ( setof ) + sb.append( "SETOF "); + sb.append( returnType); + } + sb.append( "\n\tLANGUAGE "); + sb.append( _languageIdent.toString()); + sb.append( ' ').append( effects()); + if ( leakproof() ) + sb.append( " LEAKPROOF"); + sb.append( '\n'); + if ( OnNullInput.RETURNS_NULL.equals( onNullInput()) ) + sb.append( "\tRETURNS NULL ON NULL INPUT\n"); + if ( Security.DEFINER.equals( security()) ) + sb.append( "\tSECURITY DEFINER\n"); + if ( ! 
Parallel.UNSAFE.equals( parallel()) ) + sb.append( "\tPARALLEL ").append( parallel()).append( '\n'); + if ( -1 != cost() ) + sb.append( "\tCOST ").append( cost()).append( '\n'); + if ( -1 != rows() ) + sb.append( "\tROWS ").append( rows()).append( '\n'); + for ( String s : settings() ) + sb.append( "\tSET ").append( s).append( '\n'); + sb.append( "\tAS ").append( DDRWriter.eQuote( as)); + al.add( sb.toString()); + + if ( null != comment ) + { + sb.setLength( 0); + sb.append( "COMMENT ON FUNCTION "); + appendNameAndParams( sb, true, false, false, qname, params); + sb.append( "\nIS "); + sb.append( DDRWriter.eQuote( comment)); + al.add( sb.toString()); + } + + for ( Trigger t : triggers() ) + for ( String s : ((TriggerImpl)t).deployStrings() ) + al.add( s); + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + return undeployStrings( + qnameFrom(name(), schema()), parameterInfo().collect(toList())); + } + + String[] undeployStrings( + Identifier.Qualified qname, + Iterable params) + { + if ( subsumed ) + return new String[0]; + + String[] rslt = new String [ 1 + triggers().length ]; + int i = rslt.length - 1; + for ( Trigger t : triggers() ) + for ( String s : ((TriggerImpl)t).undeployStrings() ) + rslt [ --i ] = s; + + StringBuilder sb = new StringBuilder(); + sb.append( "DROP FUNCTION "); + appendNameAndParams( sb, true, false, false, qname, params); + rslt [ rslt.length - 1 ] = sb.toString(); + return rslt; + } + + /** + * Test whether the type {@code tm} is, directly or indirectly, + * a specialization of generic type {@code dt}. + * @param tm a type to be checked + * @param dt known generic type to check for + * @return null if {@code tm} does not extend {@code dt}, otherwise the + * list of type arguments with which it specializes {@code dt} + */ + List specialization( + TypeMirror tm, DeclaredType dt) + { + if ( ! 
typu.isAssignable( typu.erasure( tm), dt) ) + return null; + + List pending = new LinkedList<>(); + pending.add( tm); + while ( ! pending.isEmpty() ) + { + tm = pending.remove( 0); + if ( typu.isSameType( typu.erasure( tm), dt) ) + return ((DeclaredType)tm).getTypeArguments(); + pending.addAll( typu.directSupertypes( tm)); + } + /* + * This is a can't-happen: tm is assignable to dt but has no + * supertype that's dt? Could throw an AssertionError, but returning + * an empty list will lead the caller to report an error, and that + * will give more information about the location in the source being + * compiled. + */ + return Collections.emptyList(); + } + + private Map m_variants= new HashMap<>(); + + /** + * Return an instance representing a transformation of this function, + * or null on second and subsequent requests for the same + * transformation (so the caller will not register the variant more + * than once). + */ + Transformed transformed( + Identifier.Qualified qname, + boolean commute, boolean negate) + { + Transformed prospect = new Transformed(qname, commute, negate); + DependTag.Function tag = + (DependTag.Function)prospect.provideTags().iterator().next(); + Transformed found = m_variants.putIfAbsent(tag, prospect); + if ( null == found ) + return prospect; + return null; + } + + class Transformed implements Snippet + { + final Identifier.Qualified m_qname; + final boolean m_commute; + final boolean m_negate; + final String m_comment; + + Transformed( + Identifier.Qualified qname, + boolean commute, boolean negate) + { + EnumSet how = + EnumSet.noneOf(OperatorPath.Transform.class); + if ( commute ) + how.add(OperatorPath.Transform.COMMUTATION); + if ( negate ) + how.add(OperatorPath.Transform.NEGATION); + assert ! 
how.isEmpty() : "no transformation to apply"; + m_qname = requireNonNull(qname); + m_commute = commute; + m_negate = negate; + m_comment = "Function automatically derived by " + how + + " from " + qnameFrom( + FunctionImpl.this.name(), FunctionImpl.this.schema()); + } + + List parameterInfo() + { + List params = + FunctionImpl.this.parameterInfo().collect(toList()); + if ( ! m_commute ) + return params; + assert 2 == params.size() : "commute with arity != 2"; + Collections.reverse(params); + return params; + } + + @Override + public Set characterize() + { + return Set.of(); + } + + @Override + public Identifier.Simple implementorName() + { + return FunctionImpl.this.implementorName(); + } + + @Override + public Set requireTags() + { + return FunctionImpl.this.requireTags(); + } + + @Override + public Set provideTags() + { + DBType[] sig = + parameterInfo().stream() + .map(p -> p.dt) + .toArray(DBType[]::new); + return Set.of(new DependTag.Function(m_qname, sig)); + } + + @Override + public String[] deployStrings() + { + String as = Stream.of( + m_commute ? "commute" : (String)null, + m_negate ? "negate" : (String)null) + .filter(Objects::nonNull) + .collect(joining(",", "[", "]")) + + FunctionImpl.this.makeAS(); + + return FunctionImpl.this.deployStrings( + m_qname, parameterInfo(), as, m_comment); + } + + @Override + public String[] undeployStrings() + { + return FunctionImpl.this.undeployStrings( + m_qname, parameterInfo()); + } + } + } + + static enum BaseUDTFunctionID + { + INPUT("in", null, "pg_catalog.cstring", "pg_catalog.oid", "integer"), + OUTPUT("out", "pg_catalog.cstring", (String[])null), + RECEIVE("recv", null, "pg_catalog.internal","pg_catalog.oid","integer"), + SEND("send", "pg_catalog.bytea", (String[])null); + BaseUDTFunctionID( String suffix, String ret, String... param) + { + this.suffix = suffix; + this.param = null == param ? 
null : + Arrays.stream(param) + .map(DBType::fromSQLTypeAnnotation) + .toArray(DBType[]::new); + this.ret = null == ret ? null : + new DBType.Named(Identifier.Qualified.nameFromJava(ret)); + } + private String suffix; + private DBType[] param; + private DBType ret; + String getSuffix() { return suffix; } + DBType[] getParam( BaseUDTImpl u) + { + if ( null != param ) + return param; + return new DBType[] { u.qname }; + } + DBType getRet( BaseUDTImpl u) + { + if ( null != ret ) + return ret; + return u.qname; + } + } + + class BaseUDTFunctionImpl extends FunctionImpl + { + BaseUDTFunctionImpl( + BaseUDTImpl ui, TypeElement te, BaseUDTFunctionID id) + { + super( null); + this.ui = ui; + this.te = te; + this.id = id; + + returnType = id.getRet( ui); + parameterTypes = id.getParam( ui); + + _type = returnType.toString(); + _name = Identifier.Simple.fromJava(ui.name()) + .concat("_", id.getSuffix()).toString(); + _schema = ui.schema(); + _variadic = false; + _cost = -1; + _rows = -1; + _onNullInput = OnNullInput.CALLED; + _security = Security.INVOKER; + _effects = Effects.VOLATILE; + _parallel = Parallel.UNSAFE; + _leakproof = false; + _settings = new String[0]; + _triggers = new Trigger[0]; + _provides = _settings; + _requires = _settings; + } + + BaseUDTImpl ui; + TypeElement te; + BaseUDTFunctionID id; + + @Override + public String[] deployStrings() + { + return deployStrings( + qnameFrom(name(), schema()), + null, // parameter iterable unused in appendParams below + "UDT[" + elmu.getBinaryName(te) + "] " + id.name(), + comment()); + } + + @Override + public String[] undeployStrings() + { + return undeployStrings( + qnameFrom(name(), schema()), + null); // parameter iterable unused in appendParams below + } + + @Override + void appendParams( + StringBuilder sb, boolean names, boolean outs, boolean dflts, + Iterable params) + { + sb.append( + Arrays.stream(id.getParam( ui)) + .map(Object::toString) + .collect(joining(", ")) + ); + } + + StringBuilder appendTypeOp( 
StringBuilder sb) + { + sb.append( id.name()).append( " = "); + return sb.append(qnameFrom(name(), schema())); + } + + @Override + public Set characterize() + { + resolveLanguage(); + recordImplicitTags(); + recordExplicitTags(_provides, _requires); + return Set.of(this); + } + + public void setType( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, + "The type of a UDT function may not be changed"); + } + + public void setOut( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, + "The type of a UDT function may not be changed"); + } + + public void setVariadic( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, "A UDT function is never variadic"); + } + + public void setRows( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, + "The rows attribute of a UDT function may not be set"); + } + + public void setProvides( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, + "A UDT function does not have its own provides/requires"); + } + + public void setRequires( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, + "A UDT function does not have its own provides/requires"); + } + + public void setTriggers( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, + "A UDT function may not have associated triggers"); + } + + public void setImplementor( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, + "A UDT function does not have its own implementor"); + } + + public String implementor() + { + return ui.implementor(); + } + + public String derivedComment( Element e) + { + String comm = super.derivedComment( e); + if ( null != comm ) + return comm; + return id.name() + " method for type " + ui.qname; + } + } + + abstract class AbstractUDTImpl + extends AbstractAnnotationImpl + implements Snippet, Commentable + { + public 
String name() { return _name; } + public String schema() { return _schema; } + public String[] provides() { return _provides; } + public String[] requires() { return _requires; } + + public String[] _provides; + public String[] _requires; + public String _name; + public String _schema; + + TypeElement tclass; + + DBType qname; + + AbstractUDTImpl(TypeElement e) + { + tclass = e; + + if ( ! typu.isAssignable( e.asType(), TY_SQLDATA) ) + { + msg( Kind.ERROR, e, "A PL/Java UDT must implement %s", + TY_SQLDATA); + } + + ExecutableElement niladicCtor = huntFor( + constructorsIn( tclass.getEnclosedElements()), null, false, + null); + + if ( null == niladicCtor ) + { + msg( Kind.ERROR, tclass, + "A PL/Java UDT must have a public no-arg constructor"); + } + } + + protected void setQname() + { + if ( "".equals( _name) ) + _name = tclass.getSimpleName().toString(); + + qname = new DBType.Named(qnameFrom(_name, _schema)); + + if ( ! tmpr.mappingsFrozen() ) + tmpr.addMap( tclass.asType(), qname); + } + + protected void addComment( ArrayList al) + { + String comm = comment(); + if ( null == comm ) + return; + al.add( "COMMENT ON TYPE " + qname + "\nIS " + + DDRWriter.eQuote( comm)); + } + } + + class MappedUDTImpl + extends AbstractUDTImpl + implements MappedUDT + { + public String[] structure() { return _structure; } + + String[] _structure; + + public void setStructure( Object o, boolean explicit, Element e) + { + if ( explicit ) + _structure = avToArray( o, String.class); + } + + MappedUDTImpl(TypeElement e) + { + super( e); + } + + public void registerMapping() + { + setQname(); + } + + public Set characterize() + { + if ( null != structure() ) + { + DependTag t = qname.dependTag(); + if ( null != t ) + provideTags().add(t); + } + recordExplicitTags(_provides, _requires); + return Set.of(this); + } + + public String[] deployStrings() + { + ArrayList al = new ArrayList<>(); + if ( null != structure() ) + { + StringBuilder sb = new StringBuilder(); + sb.append( "CREATE TYPE 
").append( qname).append( " AS ("); + int i = structure().length; + for ( String s : structure() ) + sb.append( "\n\t").append( s).append( + ( 0 < -- i ) ? ',' : '\n'); + sb.append( ')'); + al.add( sb.toString()); + } + al.add( "SELECT sqlj.add_type_mapping(" + + DDRWriter.eQuote( qname.toString()) + ", " + + DDRWriter.eQuote( elmu.getBinaryName(tclass)) + ')'); + addComment( al); + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + ArrayList al = new ArrayList<>(); + al.add( "SELECT sqlj.drop_type_mapping(" + + DDRWriter.eQuote( qname.toString()) + ')'); + if ( null != structure() ) + al.add( "DROP TYPE " + qname); + return al.toArray( new String [ al.size() ]); + } + } + + class BaseUDTImpl + extends AbstractUDTImpl + implements BaseUDT + { + class Shell implements Snippet + { + @Override + public Identifier.Simple implementorName() + { + return BaseUDTImpl.this.implementorName(); + } + + @Override + public String[] deployStrings() + { + return new String[] { "CREATE TYPE " + qname }; + } + + @Override + public String[] undeployStrings() + { + return new String[0]; + } + + @Override + public Set provideTags() + { + return Set.of(); + } + + @Override + public Set requireTags() + { + return Set.of(); + } + + @Override + public Set characterize() + { + return Set.of(); + } + } + + public String typeModifierInput() { return _typeModifierInput; } + public String typeModifierOutput() { return _typeModifierOutput; } + public String analyze() { return _analyze; } + public int internalLength() { return _internalLength; } + public boolean passedByValue() { return _passedByValue; } + public Alignment alignment() { return _alignment; } + public Storage storage() { return _storage; } + public String like() { return _like; } + public char category() { return _category; } + public boolean preferred() { return _preferred; } + public String defaultValue() { return _defaultValue; } + public String element() { return _element; } + public 
char delimiter() { return _delimiter; } + public boolean collatable() { return _collatable; } + + BaseUDTFunctionImpl in, out, recv, send; + + public String _typeModifierInput; + public String _typeModifierOutput; + public String _analyze; + int _internalLength; + public Boolean _passedByValue; + Alignment _alignment; + Storage _storage; + public String _like; + char _category; + public Boolean _preferred; + String _defaultValue; + public String _element; + char _delimiter; + public Boolean _collatable; + + boolean lengthExplicit; + boolean alignmentExplicit; + boolean storageExplicit; + boolean categoryExplicit; + boolean delimiterExplicit; + + public void setInternalLength( Object o, boolean explicit, Element e) + { + _internalLength = (Integer)o; + lengthExplicit = explicit; + } + + public void setAlignment( Object o, boolean explicit, Element e) + { + _alignment = Alignment.valueOf( + ((VariableElement)o).getSimpleName().toString()); + alignmentExplicit = explicit; + } + + public void setStorage( Object o, boolean explicit, Element e) + { + _storage = Storage.valueOf( + ((VariableElement)o).getSimpleName().toString()); + storageExplicit = explicit; + } + + public void setDefaultValue( Object o, boolean explicit, Element e) + { + if ( explicit ) + _defaultValue = (String)o; // "" could be a real default value + } + + public void setCategory( Object o, boolean explicit, Element e) + { + _category = (Character)o; + categoryExplicit = explicit; + } + + public void setDelimiter( Object o, boolean explicit, Element e) + { + _delimiter = (Character)o; + delimiterExplicit = explicit; + } + + BaseUDTImpl(TypeElement e) + { + super( e); + } + + void registerFunctions() + { + setQname(); + + ExecutableElement instanceReadSQL = huntFor( + methodsIn( tclass.getEnclosedElements()), "readSQL", false, + TY_VOID, TY_SQLINPUT, TY_STRING); + + ExecutableElement instanceWriteSQL = huntFor( + methodsIn( tclass.getEnclosedElements()), "writeSQL", false, + TY_VOID, TY_SQLOUTPUT); + + 
ExecutableElement instanceToString = huntFor( + methodsIn( tclass.getEnclosedElements()), "toString", false, + TY_STRING); + + ExecutableElement staticParse = huntFor( + methodsIn( tclass.getEnclosedElements()), "parse", true, + tclass.asType(), TY_STRING, TY_STRING); + + if ( null == staticParse ) + { + msg( Kind.ERROR, tclass, + "A PL/Java UDT must have a public static " + + "parse(String,String) method that returns the UDT"); + } + else + { + in = new BaseUDTFunctionImpl( + this, tclass, BaseUDTFunctionID.INPUT); + putSnippet( staticParse, in); + } + + out = new BaseUDTFunctionImpl( + this, tclass, BaseUDTFunctionID.OUTPUT); + putSnippet( null != instanceToString ? instanceToString : out, out); + + recv = new BaseUDTFunctionImpl( + this, tclass, BaseUDTFunctionID.RECEIVE); + putSnippet( null != instanceReadSQL ? instanceReadSQL : recv, recv); + + send = new BaseUDTFunctionImpl( + this, tclass, BaseUDTFunctionID.SEND); + putSnippet( null != instanceWriteSQL ? instanceWriteSQL : send, + send); + } + + public Set characterize() + { + if ( "".equals( typeModifierInput()) + && ! 
"".equals( typeModifierOutput()) ) + msg( Kind.ERROR, tclass, + "UDT typeModifierOutput useless without typeModifierInput"); + + if ( 1 > internalLength() && -1 != internalLength() ) + msg( Kind.ERROR, tclass, + "UDT internalLength must be positive, or -1 for varying"); + + if ( passedByValue() && + ( 8 < internalLength() || -1 == internalLength() ) ) + msg( Kind.ERROR, tclass, + "Only a UDT of fixed length <= 8 can be passed by value"); + + if ( -1 == internalLength() && + -1 == alignment().compareTo( Alignment.INT4) ) + msg( Kind.ERROR, tclass, + "A variable-length UDT must have alignment at least INT4"); + + if ( -1 != internalLength() && Storage.PLAIN != storage() ) + msg( Kind.ERROR, tclass, + "Storage for a fixed-length UDT must be PLAIN"); + + // see PostgreSQL backend/commands/typecmds.c "must be simple ASCII" + if ( 32 > category() || category() > 126 ) + msg( Kind.ERROR, tclass, + "UDT category must be a printable ASCII character"); + + if ( categoryExplicit && Character.isUpperCase(category()) ) + if ( null == PredefinedCategory.valueOf(category()) ) + msg( Kind.WARNING, tclass, + "upper-case letters are reserved for PostgreSQL's " + + "predefined UDT categories, but '%c' is not recognized", + category()); + + recordImplicitTags(); + recordExplicitTags(_provides, _requires); + + return Set.of(this); + } + + void recordImplicitTags() + { + Set provides = provideTags(); + Set requires = requireTags(); + + provides.add(qname.dependTag()); + + for ( BaseUDTFunctionImpl f : List.of(in, out, recv, send) ) + requires.add(new DependTag.Function( + qnameFrom(f._name, f._schema), f.parameterTypes)); + + String s = typeModifierInput(); + if ( ! s.isEmpty() ) + requires.add(new DependTag.Function( + qnameFrom(s), SIG_TYPMODIN)); + + s = typeModifierOutput(); + if ( ! s.isEmpty() ) + requires.add(new DependTag.Function( + qnameFrom(s), SIG_TYPMODOUT)); + + s = analyze(); + if ( ! 
s.isEmpty() ) + requires.add(new DependTag.Function(qnameFrom(s), SIG_ANALYZE)); + } + + public String[] deployStrings() + { + ArrayList al = new ArrayList<>(); + + StringBuilder sb = new StringBuilder(); + sb.append( "CREATE TYPE ").append( qname).append( " (\n\t"); + in.appendTypeOp( sb).append( ",\n\t"); + out.appendTypeOp( sb).append( ",\n\t"); + recv.appendTypeOp( sb).append( ",\n\t"); + send.appendTypeOp( sb); + + if ( ! "".equals( typeModifierInput()) ) + sb.append( ",\n\tTYPMOD_IN = ").append( typeModifierInput()); + + if ( ! "".equals( typeModifierOutput()) ) + sb.append( ",\n\tTYPMOD_OUT = ").append( typeModifierOutput()); + + if ( ! "".equals( analyze()) ) + sb.append( ",\n\tANALYZE = ").append( analyze()); + + if ( lengthExplicit || "".equals( like()) ) + sb.append( ",\n\tINTERNALLENGTH = ").append( + -1 == internalLength() ? "VARIABLE" + : String.valueOf( internalLength())); + + if ( passedByValue() ) + sb.append( ",\n\tPASSEDBYVALUE"); + + if ( alignmentExplicit || "".equals( like()) ) + sb.append( ",\n\tALIGNMENT = ").append( alignment().name()); + + if ( storageExplicit || "".equals( like()) ) + sb.append( ",\n\tSTORAGE = ").append( storage().name()); + + if ( ! "".equals( like()) ) + sb.append( ",\n\tLIKE = ").append( like()); + + if ( categoryExplicit ) + sb.append( ",\n\tCATEGORY = ").append( + DDRWriter.eQuote( String.valueOf( category()))); + + if ( preferred() ) + sb.append( ",\n\tPREFERRED = true"); + + if ( null != defaultValue() ) + sb.append( ",\n\tDEFAULT = ").append( + DDRWriter.eQuote( defaultValue())); + + if ( ! 
"".equals( element()) ) + sb.append( ",\n\tELEMENT = ").append( element()); + + if ( delimiterExplicit ) + sb.append( ",\n\tDELIMITER = ").append( + DDRWriter.eQuote( String.valueOf( delimiter()))); + + if ( collatable() ) + sb.append( ",\n\tCOLLATABLE = true"); + + al.add( sb.append( "\n)").toString()); + addComment( al); + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + return new String[] + { + "DROP TYPE " + qname + " CASCADE" + }; + } + + @Override + public Vertex breakCycle(Vertex v, boolean deploy) + { + assert this == v.payload; + + /* + * Find the entries in my adjacency list that are implicated in the + * cycle (that is, that precede, perhaps transitively, me). + */ + Vertex[] vs = v.precedesTransitively(v); + + assert null != vs && 0 < vs.length : "breakCycle not in a cycle"; + + if ( vs.length < v.indegree ) + return null; // other non-cyclic edges not satisfied yet + + if ( deploy ) + { + Vertex breaker = new Vertex<>(new Shell()); + v.transferSuccessorsTo(breaker, vs); + return breaker; + } + + for ( Vertex subsumed : vs ) + subsumed.payload.subsume(); + + /* + * Set indegree now to zero, so that when the subsumed snippets are + * themselves emitted, they will not decrement it to zero and cause + * this to be scheduled again. 
+ */ + v.indegree = 0; + + return v; // use this vertex itself in the undeploy case + } + } + + class CastImpl + extends Repeatable + implements Cast, Snippet, Commentable + { + CastImpl(Element e, AnnotationMirror am) + { + super(e, am); + } + + public String from() { return _from; } + public String to() { return _to; } + public Cast.Path path() { return _path; } + public Cast.Application application() { return _application; } + public String[] provides() { return _provides; } + public String[] requires() { return _requires; } + + public String _from; + public String _to; + public Cast.Path _path; + public Cast.Application _application; + public String[] _provides; + public String[] _requires; + + FunctionImpl func; + DBType fromType; + DBType toType; + + public void setPath( Object o, boolean explicit, Element e) + { + if ( explicit ) + _path = Path.valueOf( + ((VariableElement)o).getSimpleName().toString()); + } + + public Set characterize() + { + boolean ok = true; + + if ( ElementKind.METHOD.equals(m_targetElement.getKind()) ) + { + func = getSnippet(m_targetElement, FunctionImpl.class, + () -> (FunctionImpl)null); + if ( null == func ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "A method annotated with @Cast must also have @Function" + ); + ok = false; + } + } + + if ( null == func && "".equals(_from) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Cast not annotating a method must specify from=" + ); + ok = false; + } + + if ( null == func && "".equals(_to) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Cast not annotating a method must specify to=" + ); + ok = false; + } + + if ( null == func && null == _path ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Cast not annotating a method, and without path=, " + + "is not yet supported" + ); + ok = false; + } + + if ( ok ) + { + fromType = ("".equals(_from)) + ? func.parameterTypes[0] + : DBType.fromSQLTypeAnnotation(_from); + + toType = ("".equals(_to)) + ? 
func.returnType + : DBType.fromSQLTypeAnnotation(_to); + } + + if ( null != _path ) + { + if ( ok && Path.BINARY == _path && fromType.equals(toType) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "A cast with from and to types the same can only " + + "apply a type modifier; path=BINARY will have " + + "no effect"); + ok = false; + } + } + else if ( null != func ) + { + int nparams = func.parameterTypes.length; + + if ( ok && 2 > nparams && fromType.equals(toType) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "A cast with from and to types the same can only " + + "apply a type modifier, therefore must have at least " + + "two parameters"); + ok = false; + } + + if ( 1 > nparams || nparams > 3 ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "A cast function must have 1, 2, or 3 parameters"); + ok = false; + } + + if (1 < nparams && ! DT_INTEGER.equals(func.parameterTypes[1])) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Parameter 2 of a cast function must have integer type" + ); + ok = false; + } + + if (3 == nparams && ! DT_BOOLEAN.equals(func.parameterTypes[2])) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Parameter 3 of a cast function must have boolean type" + ); + ok = false; + } + } + + if ( ! 
ok ) + return Set.of(); + + recordImplicitTags(); + recordExplicitTags(_provides, _requires); + return Set.of(this); + } + + void recordImplicitTags() + { + Set requires = requireTags(); + + DependTag dt = fromType.dependTag(); + if ( null != dt ) + requires.add(dt); + + dt = toType.dependTag(); + if ( null != dt ) + requires.add(dt); + + if ( null == _path ) + { + dt = func.provideTags().stream() + .filter(DependTag.Function.class::isInstance) + .findAny().get(); + requires.add(dt); + } + } + + public String[] deployStrings() + { + List al = new ArrayList<>(); + + StringBuilder sb = new StringBuilder(); + + sb.append("CREATE CAST (") + .append(fromType).append(" AS ").append(toType).append(")\n\t"); + + if ( Path.BINARY == _path ) + sb.append("WITHOUT FUNCTION"); + else if ( Path.INOUT == _path ) + sb.append("WITH INOUT"); + else + { + sb.append("WITH FUNCTION "); + func.appendNameAndParams(sb, false, false, false); + } + + switch ( _application ) + { + case ASSIGNMENT: sb.append("\n\tAS ASSIGNMENT"); break; + case EXPLICIT: break; + case IMPLICIT: sb.append("\n\tAS IMPLICIT"); + } + + al.add(sb.toString()); + + if ( null != comment() ) + al.add( + "COMMENT ON CAST (" + + fromType + " AS " + toType + ") IS " + + DDRWriter.eQuote(comment())); + + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + return new String[] + { + "DROP CAST (" + fromType + " AS " + toType + ")" + }; + } + } + + /* + * Called by processRepeatable for each @Operator processed. + * This happens before characterize, but after populating, so the + * operator's name and commutator/negator/synthetic elements can be + * inspected. All operators annotating a given element e are processed + * consecutively, and followed by a call with the same e and null snip. 
+ * + * This will accumulate the snippets onto two lists, for non-synthetic and + * synthetic ones and, on the final call, process the lists to find possible + * paths from non-synthetic to synthetic ones via commutation and/or + * negation. The possible paths will be recorded on each synthetic operator. + * They will have to be confirmed during characterize after things like + * operand types and arity have been resolved. + */ + void operatorPreSynthesize( Element e, OperatorImpl snip) + { + if ( ! ElementKind.METHOD.equals(e.getKind()) ) + { + if ( null != snip ) + putSnippet( snip, (Snippet)snip); + return; + } + + if ( null != snip ) + { + if ( snip.selfCommutator || snip.twinCommutator ) + snip.commutator = snip.qname; + + (snip.isSynthetic ? m_synthetic : m_nonSynthetic).add(snip); + return; + } + + /* + * Initially: + * processed: is empty + * ready: contains all non-synthetic snippets + * pending: contains all synthetic snippets + * Step: + * A snippet s is removed from ready and added to processed. + * If s.commutator or s.negator matches a synthetic snippet in pending, + * a corresponding path is recorded on that snippet. If it is + * the first path recorded on that snippet, the snippet is moved + * to ready. 
+ */ + + List processed = + new ArrayList<>(m_nonSynthetic.size() + m_synthetic.size()); + Queue ready = new LinkedList<>(m_nonSynthetic); + LinkedList pending = new LinkedList<>(m_synthetic); + m_nonSynthetic.clear(); + m_synthetic.clear(); + + while ( null != (snip = ready.poll()) ) + { + processed.add(snip); + if ( null != snip.commutator ) + { + ListIterator it = pending.listIterator(); + while ( it.hasNext() ) + { + OperatorImpl other = it.next(); + if ( maybeAddPath(snip, other, + OperatorPath.Transform.COMMUTATION) ) + { + it.remove(); + ready.add(other); + } + } + } + if ( null != snip.negator ) + { + ListIterator it = pending.listIterator(); + while ( it.hasNext() ) + { + OperatorImpl other = it.next(); + if ( maybeAddPath(snip, other, + OperatorPath.Transform.NEGATION) ) + { + it.remove(); + ready.add(other); + } + } + } + } + + if ( ! pending.isEmpty() ) + msg(Kind.ERROR, e, "Cannot synthesize operator(s) (%s)", + pending.stream() + .map(o -> o.qname.toString()) + .collect(joining(" "))); + + for ( OperatorImpl s : processed ) + putSnippet( s, (Snippet)s); + } + + boolean maybeAddPath( + OperatorImpl from, OperatorImpl to, OperatorPath.Transform how) + { + if ( ! to.isSynthetic ) + return false; // don't add paths to a non-synthetic operator + + /* + * setSynthetic will have left synthetic null in the synthetic=TWIN + * case. That case imposes more constraints on what paths can be added: + * an acceptable path must involve commutation (and only commutation) + * from another operator that will have a function name (so, either + * a non-synthetic one, or a synthetic one given an actual name, other + * than TWIN). In the latter case, copy the name here (for the former, + * it will be copied from the function's name, in characterize()). + */ + boolean syntheticTwin = null == to.synthetic; + + switch ( how ) + { + case COMMUTATION: + if ( ! 
from.commutator.equals(to.qname) ) + return false; // this is not the operator you're looking for + if ( null != to.commutator && ! to.commutator.equals(from.qname) ) + return false; // you're not the one it's looking for + break; + case NEGATION: + if ( ! from.negator.equals(to.qname) ) + return false; // move along + if ( null != to.negator && ! to.negator.equals(from.qname) ) + return false; // move along + if ( syntheticTwin ) + return false; + break; + } + + if ( syntheticTwin ) + { + /* + * We will apply commutation to 'from' (the negation case + * would have been rejected above). Either 'from' is nonsynthetic + * and its function name will be copied in characterize(), or it is + * synthetic and must have a name or we reject it here. If not + * rejected, copy the name. + */ + if ( from.isSynthetic ) + { + if ( null == from.synthetic ) + return false; + to.synthetic = from.synthetic; + } + } + + if ( null == to.paths ) + to.paths = new ArrayList<>(); + + if ( ! from.isSynthetic ) + to.paths.add(new OperatorPath(from, from, null, EnumSet.of(how))); + else + { + for ( OperatorPath path : from.paths ) + { + to.paths.add(new OperatorPath( + path.base, from, path.fromBase, EnumSet.of(how))); + } + } + + return true; + } + + /** + * Why has {@code Set} or at least {@code EnumSet} not got this? 
+ */ + static > EnumSet symmetricDifference( + EnumSet a, EnumSet b) + { + EnumSet result = a.clone(); + result.removeAll(b); + b = b.clone(); + b.removeAll(a); + result.addAll(b); + return result; + } + + List m_nonSynthetic = new ArrayList<>(); + List m_synthetic = new ArrayList<>(); + + static class OperatorPath + { + OperatorImpl base; + OperatorImpl proximate; + EnumSet fromBase; + EnumSet fromProximate; + + enum Transform { NEGATION, COMMUTATION } + + OperatorPath( + OperatorImpl base, OperatorImpl proximate, + EnumSet baseToProximate, + EnumSet proximateToNew) + { + this.base = base; + this.proximate = proximate; + fromProximate = proximateToNew.clone(); + + if ( base == proximate ) + fromBase = fromProximate; + else + fromBase = symmetricDifference(baseToProximate, proximateToNew); + } + + public String toString() + { + return + base.commentDropForm() + " " + fromBase + + (base == proximate + ? "" + : " (... " + proximate.commentDropForm() + + " " + fromProximate); + } + } + + class OperatorImpl + extends Repeatable + implements Operator, Snippet, Commentable + { + OperatorImpl(Element e, AnnotationMirror am) + { + super(e, am); + } + + public String[] name() { return qstrings(qname); } + public String left() { return operand(0); } + public String right() { return operand(1); } + public String[] function() { return qstrings(funcName); } + public String[] synthetic() { return qstrings(synthetic); } + public String[] commutator() { return qstrings(commutator); } + public String[] negator() { return qstrings(negator); } + public boolean hashes() { return _hashes; } + public boolean merges() { return _merges; } + public String[] restrict() { return qstrings(restrict); } + public String[] join() { return qstrings(join); } + public String[] provides() { return _provides; } + public String[] requires() { return _requires; } + + public String[] _provides; + public String[] _requires; + public boolean _hashes; + public boolean _merges; + + Identifier.Qualified 
qname; + DBType[] operands = { null, null }; + FunctionImpl func; + Identifier.Qualified funcName; + Identifier.Qualified commutator; + Identifier.Qualified negator; + Identifier.Qualified restrict; + Identifier.Qualified join; + Identifier.Qualified synthetic; + boolean isSynthetic; + boolean selfCommutator; + boolean twinCommutator; + List paths; + + private String operand(int i) + { + return null == operands[i] ? null : operands[i].toString(); + } + + public void setName( Object o, boolean explicit, Element e) + { + qname = operatorNameFrom(avToArray( o, String.class)); + } + + public void setLeft( Object o, boolean explicit, Element e) + { + if ( explicit ) + operands[0] = DBType.fromSQLTypeAnnotation((String)o); + } + + public void setRight( Object o, boolean explicit, Element e) + { + if ( explicit ) + operands[1] = DBType.fromSQLTypeAnnotation((String)o); + } + + public void setFunction( Object o, boolean explicit, Element e) + { + if ( explicit ) + funcName = qnameFrom(avToArray( o, String.class)); + } + + public void setSynthetic( Object o, boolean explicit, Element e) + { + if ( ! explicit ) + return; + + /* + * Use isSynthetic to indicate that synthetic= has been used at all. + * Set synthetic to the supplied qname only if it is a qname, and + * not the distinguished value TWIN. + * + * Most of the processing below only needs to look at isSynthetic. + * The TWIN case, recognized by isSynthetic && null == synthetic, + * will be handled late in the game by copying the base function's + * qname. + */ + + isSynthetic = true; + String[] ss = avToArray( o, String.class); + if ( 1 != ss.length || ! TWIN.equals(ss[0]) ) + synthetic = qnameFrom(ss); + } + + public void setCommutator( Object o, boolean explicit, Element e) + { + if ( ! 
explicit ) + return; + + String[] ss = avToArray( o, String.class); + if ( 1 == ss.length ) + { + if ( SELF.equals(ss[0]) ) + { + selfCommutator = true; + return; + } + if ( TWIN.equals(ss[0]) ) + { + twinCommutator = true; + return; + } + } + commutator = operatorNameFrom(ss); + } + + public void setNegator( Object o, boolean explicit, Element e) + { + if ( explicit ) + negator = operatorNameFrom(avToArray( o, String.class)); + } + + public void setRestrict( + Object o, boolean explicit, Element e) + { + if ( explicit ) + restrict = qnameFrom(avToArray( o, String.class)); + } + + public void setJoin( + Object o, boolean explicit, Element e) + { + if ( explicit ) + join = qnameFrom(avToArray( o, String.class)); + } + + public Set characterize() + { + boolean ok = true; + Snippet syntheticFunction = null; + + if ( ElementKind.METHOD.equals(m_targetElement.getKind()) ) + { + func = getSnippet(m_targetElement, FunctionImpl.class, + () -> (FunctionImpl)null); + } + + if ( isSynthetic ) + { + if ( null != funcName ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator may not specify both function= and " + + "synthetic=" + ); + ok = false; + } + funcName = synthetic; // can be null (the TWIN case) + } + + if ( null == func && null == funcName && ! isSynthetic ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator not annotating a method must specify function=" + ); + ok = false; + } + + if ( null == func ) + { + if ( null == operands[0] && null == operands[1] ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator not annotating a method must specify " + + "left= or right= or both" + ); + ok = false; + } + } + else + { + Identifier.Qualified fn = + qnameFrom(func.name(), func.schema()); + + if ( null == funcName ) + funcName = fn; + else if ( ! funcName.equals(fn) && ! 
isSynthetic ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator annotates a method but function= gives a " + + "different name" + ); + ok = false; + } + + long explicit = + Arrays.stream(operands).filter(Objects::nonNull).count(); + + if ( 0 != explicit && isSynthetic ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator with synthetic= must not specify " + + "operand types" + ); + ok = false; + } + + if ( 0 == explicit ) + { + int nparams = func.parameterTypes.length; + if ( 1 > nparams || nparams > 2 ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "method annotated with @Operator must take one " + + "or two parameters" + ); + ok = false; + } + if ( 1 == nparams ) + operands[1] = func.parameterTypes[0]; + else + System.arraycopy(func.parameterTypes,0, operands,0,2); + } + else if ( explicit != func.parameterTypes.length ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator annotates a method but specifies " + + "a different number of operands" + ); + ok = false; + } + else if ( 2 == explicit + && ! Arrays.equals(operands, func.parameterTypes) + || 1 == explicit + && ! Arrays.asList(operands) + .contains(func.parameterTypes[0]) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator annotates a method but specifies " + + "different operand types" + ); + ok = false; + } + } + + /* + * At this point, ok ==> there is a non-null funcName ... UNLESS + * isSynthetic is true, synthetic=TWIN was given, and we are not + * annotating a method (that last condition is currently not + * supported, so we could in fact rely on having a funcName here, + * but that condition may be worth supporting in the future, so + * better to keep the exception in mind). + */ + + if ( ! 
ok ) + return Set.of(); + + long arity = + Arrays.stream(operands).filter(Objects::nonNull).count(); + + if ( 1 == arity && null == operands[1] ) + { + msg(Kind.WARNING, m_targetElement, m_origin, + "Right unary (postfix) operators are deprecated and will " + + "be removed in PostgreSQL version 14." + ); + } + + if ( null != commutator ) + { + if ( 2 != arity ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "unary @Operator cannot have a commutator" + ); + ok = false; + } + else if ( selfCommutator && ! operands[0].equals(operands[1]) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator with different left and right operand " + + "types cannot have commutator=SELF" + ); + ok = false; + } + else if ( twinCommutator && operands[0].equals(operands[1]) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator with matching left and right operand " + + "types cannot have commutator=TWIN" + ); + ok = false; + } + } + + boolean knownNotBoolean = + null != func && ! DT_BOOLEAN.equals(func.returnType); + + if ( null != negator ) + { + if ( knownNotBoolean ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "negator= only belongs on a boolean @Operator" + ); + ok = false; + } + else if ( negator.equals(qname) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator can never be its own negator" + ); + ok = false; + } + } + + boolean knownNotBinaryBoolean = 2 != arity || knownNotBoolean; + boolean knownVolatile = + null != func && Function.Effects.VOLATILE == func.effects(); + boolean operandTypesDiffer = + 2 == arity && ! operands[0].equals(operands[1]); + boolean selfCommutates = + null != commutator && commutator.equals(qname); + + ok &= Stream.of( + _hashes ? "hashes" : null, + _merges ? 
"merges" : null) + .filter(Objects::nonNull) + .map(s -> + { + boolean inner_ok = true; + if ( knownNotBinaryBoolean ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "%s= only belongs on a boolean " + + "binary @Operator", s + ); + inner_ok = false; + } + if ( null == commutator ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "%s= requires that the @Operator " + + "have a commutator", s + ); + inner_ok = false; + } + else if ( ! (operandTypesDiffer || selfCommutates) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "%s= requires the @Operator to be its own " + + "commutator as its operand types are the same", s + ); + inner_ok = false; + } + if ( knownVolatile ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "%s= requires an underlying function " + + "declared IMMUTABLE or STABLE", s + ); + inner_ok = false; + } + return inner_ok; + }) + .allMatch(t -> t); + + if ( null != restrict && knownNotBinaryBoolean ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "restrict= only belongs on a boolean binary @Operator" + ); + ok = false; + } + + if ( null != join && knownNotBinaryBoolean ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "join= only belongs on a boolean binary @Operator" + ); + ok = false; + } + + if ( ! ok ) + return Set.of(); + + if ( isSynthetic ) + { + if ( null == func ) + { + /* + * It could be possible to relax this requirement if there + * is a need, but this way is easier. + */ + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator annotation must appear " + + "on the method to be used as the base"); + ok = false; + } + + if ( paths.isEmpty() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s has no derivation path " + + "involving negation or commutation from another " + + "operator", qnameUnwrapped()); + /* + * If no paths at all, return empty from here; no point in + * further checks. 
+ */ + return Set.of(); + } + + /* + * Check for conditions where deriving by commutation wouldn't + * make sense. Any of these three conditions will trigger the + * test of available paths. The conditions are rechecked but the + * third one is changed, so either of the first two will always + * preclude commutation, but ! operandTypesDiffer only does if + * the synthetic function's name will be the same as the base's. + * (If the types were different, PostgreSQL overloading would + * allow the functions to share a name, but that's not possible + * if the types are the same.) In those cases, any commutation + * paths are filtered out; if no path remains, that's an error. + */ + if ( 2 != arity || selfCommutator || ! operandTypesDiffer ) + { + List filtered = + paths.stream() + .filter( + p -> ! p.fromBase.contains( + OperatorPath.Transform.COMMUTATION)) + .collect(toList()); + if ( 2 != arity || selfCommutator + || null == synthetic || + synthetic.equals(qnameFrom(func.name(), func.schema()))) + { + if ( filtered.isEmpty() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s cannot be another " + + "operator's commutator, but found only " + + "path(s) involving commutation: %s", + qnameUnwrapped(), paths.toString()); + ok = false; + } + else + paths = filtered; + } + } + + ok &= paths.stream().collect( + groupingBy(p -> p.base, + mapping(p -> p.fromBase, toSet()))) + .entrySet().stream() + .filter(e -> 1 < e.getValue().size()) + .map(e -> + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s found paths with " + + "different transforms %s from same base %s", + qnameUnwrapped(), + e.getValue(), e.getKey().qnameUnwrapped()); + return false; + }) + .allMatch(t -> t); + + ok &= paths.stream().collect( + groupingBy(p -> p.proximate, + mapping(p -> p.fromProximate, toSet()))) + .entrySet().stream() + .filter(e -> 1 < e.getValue().size()) + .map(e -> + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s found 
paths with " + + "different transforms %s from %s", + qnameUnwrapped(), + e.getValue(), e.getKey().qnameUnwrapped()); + return false; + }) + .allMatch(t -> t); + + Set> + commutatorCandidates = + paths.stream() + .filter( + p -> p.fromProximate.contains( + OperatorPath.Transform.COMMUTATION)) + .map(p -> p.proximate.qname) + .collect(toSet()); + if ( null == commutator && 0 < commutatorCandidates.size() ) + { + if ( 1 == commutatorCandidates.size() ) + commutator = commutatorCandidates.iterator().next(); + else + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s has multiple commutator " + + "candidates %s", + qnameUnwrapped(), commutatorCandidates); + ok = false; + } + } + + Set> + negatorCandidates = + paths.stream() + .filter( + p -> p.fromProximate.contains( + OperatorPath.Transform.NEGATION)) + .map(p -> p.proximate.qname) + .collect(toSet()); + if ( null == negator && 0 < negatorCandidates.size() ) + { + if ( 1 == negatorCandidates.size() ) + negator = negatorCandidates.iterator().next(); + else + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s has multiple negator " + + "candidates %s", + qnameUnwrapped(), negatorCandidates); + ok = false; + } + } + + /* + * Filter paths to only those based on an operator that is built + * over this method. (That's currently guaranteed by the way + * operatorPreSynthesize generates paths, but may as well check + * here to ensure sanity during future maintenance.) + * + * For synthetic=TWIN (represented here by null==synthetic), + * also filter out paths that don't involve commutation (without + * it, the synthetic function would collide with the base one). 
+ */ + + boolean nonCommutedOK = null != synthetic; + + paths = paths.stream() + .filter( + p -> p.base.func == func + && (nonCommutedOK || p.fromBase.contains( + OperatorPath.Transform.COMMUTATION)) + ).collect(toList()); + + if ( 0 == paths.size() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s has no derivation path " + + "from an operator that is based on this method%s", + qnameUnwrapped(), + nonCommutedOK ? "" : " and involves commutation"); + ok = false; + } + + if ( ! ok ) + return Set.of(); + + /* + * Select a base. Could there be more than one? As the checks + * for transform inconsistencies above found none, we will + * assume any should be ok, and choose one semi-arbitrarily. + */ + + OperatorPath selected = + paths.stream() + .sorted( + Comparator.comparingInt( + p -> p.fromBase.size()) + .thenComparingInt( + p -> p.fromBase.stream() + .mapToInt(Enum::ordinal) + .max().getAsInt()) + .thenComparing(p -> p.base.qnameUnwrapped())) + .findFirst().get(); + + /* + * At last, the possibly null funcName (synthetic=TWIN case) + * can be fixed up. + */ + if ( null == synthetic ) + { + FunctionImpl f = selected.base.func; + funcName = synthetic = qnameFrom(f.name(), f.schema()); + } + + replaceCommentIfDerived("Operator " + qnameUnwrapped() + + " automatically derived by " + + selected.fromBase + " from " + + selected.base.qnameUnwrapped()); + + boolean commute = selected.fromBase + .contains(OperatorPath.Transform.COMMUTATION); + boolean negate = selected.fromBase + .contains(OperatorPath.Transform.NEGATION); + + if ( operandTypesDiffer && commute ) + { + DBType t = operands[0]; + operands[0] = operands[1]; + operands[1] = t; + } + + syntheticFunction = + func.transformed(synthetic, commute, negate); + } + + recordImplicitTags(); + recordExplicitTags(_provides, _requires); + return null == syntheticFunction + ? 
Set.of(this) : Set.of(syntheticFunction, this); + } + + void recordImplicitTags() + { + Set provides = provideTags(); + Set requires = requireTags(); + + provides.add(new DependTag.Operator(qname, operands)); + + /* + * Commutator and negator often involve cycles. PostgreSQL already + * has its own means of breaking them, so it is not necessary here + * even to declare dependencies based on them. + * + * There is also, for now, no point in declaring dependencies on + * selectivity estimators; they can't be written in Java, so they + * won't be products of this compilation. + * + * So, just require the operand types and the function. + */ + + Arrays.stream(operands) + .filter(Objects::nonNull) + .map(DBType::dependTag) + .filter(Objects::nonNull) + .forEach(requires::add); + + if ( null != func && null == synthetic ) + { + func.provideTags().stream() + .filter(DependTag.Function.class::isInstance) + .forEach(requires::add); + } + else + { + requires.add(new DependTag.Function(funcName, + Arrays.stream(operands) + .filter(Objects::nonNull) + .toArray(DBType[]::new))); + } + } + + /** + * Just to keep things interesting, a schema-qualified operator name is + * wrapped in OPERATOR(...) pretty much everywhere, except as the guest + * of honor in a CREATE OPERATOR or DROP OPERATOR, where the unwrapped + * form is needed. + */ + private String qnameUnwrapped() + { + String local = qname.local().toString(); + Identifier.Simple qualifier = qname.qualifier(); + return null == qualifier ? local : qualifier + "." + local; + } + + /** + * An operator is identified this way in a COMMENT or DROP. + */ + private String commentDropForm() + { + return qnameUnwrapped() + " (" + + (null == operands[0] ? "NONE" : operands[0]) + ", " + + (null == operands[1] ? 
"NONE" : operands[1]) + ")"; + } + + public String[] deployStrings() + { + List al = new ArrayList<>(); + + StringBuilder sb = new StringBuilder(); + + sb.append("CREATE OPERATOR ").append(qnameUnwrapped()); + sb.append(" (\n\tPROCEDURE = ").append(funcName); + + if ( null != operands[0] ) + sb.append(",\n\tLEFTARG = ").append(operands[0]); + + if ( null != operands[1] ) + sb.append(",\n\tRIGHTARG = ").append(operands[1]); + + if ( null != commutator ) + sb.append(",\n\tCOMMUTATOR = ").append(commutator); + + if ( null != negator ) + sb.append(",\n\tNEGATOR = ").append(negator); + + if ( null != restrict ) + sb.append(",\n\tRESTRICT = ").append(restrict); + + if ( null != join ) + sb.append(",\n\tJOIN = ").append(join); + + if ( _hashes ) + sb.append(",\n\tHASHES"); + + if ( _merges ) + sb.append(",\n\tMERGES"); + + sb.append(')'); + + al.add(sb.toString()); + if ( null != comment() ) + al.add( + "COMMENT ON OPERATOR " + commentDropForm() + " IS " + + DDRWriter.eQuote(comment())); + + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + return new String[] + { + "DROP OPERATOR " + commentDropForm() + }; + } + } + + class AggregateImpl + extends Repeatable + implements Aggregate, Snippet, Commentable + { + AggregateImpl(Element e, AnnotationMirror am) + { + super(e, am); + } + + public String[] name() { return qstrings(qname); } + public String[] arguments() { return argsOut(aggregateArgs); } + public String[] directArguments() { return argsOut(directArgs); } + public boolean hypothetical() { return _hypothetical; } + public boolean[] variadic() { return _variadic; } + public Plan[] plan() { return new Plan[]{_plan}; } + public Plan[] movingPlan() { return _movingPlan; } + public Function.Parallel parallel() { return _parallel; } + public String[] sortOperator() { return qstrings(sortop); } + public String[] provides() { return _provides; } + public String[] requires() { return _requires; } + + public boolean _hypothetical; + 
public boolean[] _variadic = {false, false}; + public Plan _plan; + public Plan[] _movingPlan; + public Function.Parallel _parallel; + public String[] _provides; + public String[] _requires; + + FunctionImpl func; + Identifier.Qualified qname; + List> aggregateArgs; + List> directArgs; + Identifier.Qualified sortop; + static final int DIRECT_ARGS = 0; // index into _variadic[] + static final int AGG_ARGS = 1; // likewise + boolean directVariadicExplicit; + + private List> + argsIn(String[] names) + { + return Arrays.stream(names) + .map(DBType::fromNameAndType) + .collect(toList()); + } + + private String[] + argsOut(List> names) + { + return names.stream() + .map(e -> e.getKey() + " " + e.getValue()) + .toArray(String[]::new); + } + + @Override + public String derivedComment( Element e) + { + /* + * When this annotation targets a TYPE, just as a + * place to hang it, there's no particular reason to believe a + * doc comment on the type is a good choice for this aggregate. + * When the annotation is on a method, the chances are better. + */ + if ( ElementKind.METHOD.equals(e.getKind()) ) + return super.derivedComment(e); + return null; + } + + public void setName( Object o, boolean explicit, Element e) + { + if ( explicit ) + qname = qnameFrom(avToArray( o, String.class)); + } + + public void setArguments( Object o, boolean explicit, Element e) + { + if ( explicit ) + aggregateArgs = argsIn( avToArray( o, String.class)); + } + + public void setDirectArguments( Object o, boolean explicit, Element e) + { + if ( explicit ) + directArgs = argsIn( avToArray( o, String.class)); + } + + public void setSortOperator( Object o, boolean explicit, Element e) + { + if ( explicit ) + sortop = operatorNameFrom(avToArray( o, String.class)); + } + + public void setVariadic( Object o, boolean explicit, Element e) + { + if ( ! 
explicit ) + return; + + Boolean[] a = avToArray( o, Boolean.class); + + if ( 1 > a.length || a.length > 2 ) + throw new IllegalArgumentException( + "supply only boolean or {boolean,boolean} for variadic"); + + if ( ! Arrays.asList(a).contains(true) ) + throw new IllegalArgumentException( + "supply variadic= only if aggregated arguments, direct " + + "arguments, or both, are variadic"); + + _variadic[AGG_ARGS] = a[a.length - 1]; + if ( 2 == a.length ) + { + directVariadicExplicit = true; + _variadic[DIRECT_ARGS] = a[0]; + } + } + + public void setPlan( Object o, boolean explicit, Element e) + { + _plan = new Plan(); // always a plan, even if members uninitialized + + if ( explicit ) + _plan = planFrom( _plan, o, e, "plan"); + } + + public void setMovingPlan( Object o, boolean explicit, Element e) + { + if ( ! explicit ) + return; + + _movingPlan = new Plan[1]; + _movingPlan [ 0 ] = planFrom( new Moving(), o, e, "movingPlan"); + } + + Plan planFrom( Plan p, Object o, Element e, String which) + { + AnnotationMirror[] ams = avToArray( o, AnnotationMirror.class); + + if ( 1 != ams.length ) + throw new IllegalArgumentException( + which + " must be given exactly one @Plan"); + + populateAnnotationImpl( p, e, ams[0]); + return p; + } + + public Set characterize() + { + boolean ok = true; + boolean orderedSet = null != directArgs; + boolean moving = null != _movingPlan; + boolean checkAccumulatorSig = false; + boolean checkFinisherSig = false; + boolean unary = false; + + if ( ElementKind.METHOD.equals(m_targetElement.getKind()) ) + { + func = getSnippet(m_targetElement, FunctionImpl.class, + () -> (FunctionImpl)null); + if ( null == func ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "A method annotated with @Aggregate must " + + "also have @Function" + ); + ok = false; + } + } + + if ( null != func ) + { + Identifier.Qualified funcName = + qnameFrom(func.name(), func.schema()); + boolean inferAccumulator = + null == _plan.accumulate || null == aggregateArgs; + 
boolean inferFinisher = + null == _plan.finish && ! inferAccumulator; + boolean stateTypeExplicit = false; + + if ( null == qname ) + { + + if ( inferFinisher && 1 == aggregateArgs.size() + && 1 == func.parameterTypes.length + && func.parameterTypes[0] == + aggregateArgs.get(0).getValue() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Default name %s for this aggregate would " + + "collide with finish function; use name= to " + + "specify a name", funcName + ); + ok = false; + } + else + qname = funcName; + } + + if ( 1 > func.parameterTypes.length ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Function with no arguments cannot be @Aggregate " + + "accumulate or finish function" + ); + ok = false; + } + else if ( null == _plan.stateType ) + { + _plan.stateType = func.parameterTypes[0]; + if (null != _movingPlan + && null == _movingPlan[0].stateType) + _movingPlan[0].stateType = func.parameterTypes[0]; + } + else + stateTypeExplicit = true; + + if ( inferAccumulator || inferFinisher ) + { + if ( ok ) + { + if ( inferAccumulator ) + { + if ( null == aggregateArgs ) + { + aggregateArgs = + func.parameterInfo() + .skip(1) // skip the state argument + .map(pi -> + (Map.Entry) + new AbstractMap.SimpleImmutableEntry<>( + Identifier.Simple.fromJava( + pi.name() + ), + pi.dt + ) + ) + .collect(toList()); + } + else + checkAccumulatorSig = true; + _plan.accumulate = funcName; + if ( null != _movingPlan + && null == _movingPlan[0].accumulate ) + _movingPlan[0].accumulate = funcName; + } + else // inferFinisher + { + _plan.finish = funcName; + if ( null != _movingPlan + && null == _movingPlan[0].finish ) + _movingPlan[0].finish = funcName; + } + } + + if ( stateTypeExplicit + && ! 
_plan.stateType.equals(func.parameterTypes[0]) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "First function argument does not match " + + "stateType specified with @Aggregate" + ); + ok = false; + } + } + else if ( funcName.equals(_plan.accumulate) ) + checkAccumulatorSig = true; + else if ( funcName.equals(_plan.finish) ) + checkFinisherSig = true; + else + { + msg(Kind.WARNING, m_targetElement, m_origin, + "@Aggregate annotation on a method not recognized " + + "as either the accumulate or the finish function " + + "for the aggregate"); + } + + // If the method is the accumulator and is RETURNS_NULL, ensure + // there is either an initialState or a first aggregate arg that + // matches the stateType. + if ( ok && ( inferAccumulator || checkAccumulatorSig ) ) + { + if ( Function.OnNullInput.RETURNS_NULL == func.onNullInput() + && ( 0 == aggregateArgs.size() + || ! _plan.stateType.equals( + aggregateArgs.get(0).getValue()) ) + && null == _plan._initialState ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate without initialState= must have " + + "either a first argument matching the stateType " + + "or an accumulate method with onNullInput=CALLED."); + ok = false; + } + } + } + + if ( null == qname ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate missing name="); + ok = false; + } + + if ( null == aggregateArgs ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate missing arguments="); + ok = false; + } + else + unary = 1 == aggregateArgs.size(); + + if ( null == _plan.stateType ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate missing stateType="); + ok = false; + } + + if ( null == _plan.accumulate ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate plan missing accumulate="); + ok = false; + } + + // Could check argument count against FUNC_MAX_ARGS, but that would + // hardcode an assumed value for PostgreSQL's FUNC_MAX_ARGS. 
+ + // Check that, if a stateType is polymorphic, there are compatible + // polymorphic arg types? Not today. + + // If a plan has no initialState, then either the accumulate + // function must NOT be RETURNS NULL ON NULL INPUT, or the first + // aggregated argument type must be the same as the state type. + // The type check is easy, but the returnsNull check on the + // accumulate function would require looking up the function (and + // still we wouldn't know, if it's not seen in this compilation). + // For another day. + + // Allow hypothetical only for ordered-set aggregate. + if ( _hypothetical && ! orderedSet ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "hypothetical=true is only allowed for an ordered-set " + + "aggregate (one with directArguments specified, " + + "even if only {})"); + ok = false; + } + + // Allow two-element variadic= only for ordered-set aggregate. + if ( directVariadicExplicit && ! orderedSet ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Two values for variadic= are only allowed for an " + + "ordered-set aggregate (one with directArguments " + + "specified, even if only {})"); + ok = false; + } + + // Require a movingPlan to have a remove function. + if ( moving && null == _movingPlan[0].remove ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "a movingPlan must include a remove function"); + ok = false; + } + + // Checks if the aggregated argument list is declared variadic. + // The last element must be an array type or "any"; an ordered-set + // aggregate allows only one argument and it must be "any". 
+ if ( _variadic[AGG_ARGS] ) + { + if ( 1 > aggregateArgs.size() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "To declare the aggregated argument list variadic, " + + "there must be at least one argument."); + ok = false; + } + else + { + DBType t = + aggregateArgs.get(aggregateArgs.size() - 1).getValue(); + boolean isAny = // allow omission of pg_catalog namespace + DT_ANY.equals(t) || "\"any\"".equals(t.toString()); + if ( orderedSet && (! isAny || 1 != aggregateArgs.size()) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "If variadic, an ordered-set aggregate's " + + "aggregated argument list must be only one " + + "argument and of type \"any\"."); + ok = false; + } + else if ( ! isAny && ! t.isArray() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "If variadic, the last aggregated argument must " + + "be an array type (or \"any\")."); + ok = false; + } + } + } + + // Checks specific to ordered-set aggregates. + if ( orderedSet ) + { + if ( 0 == aggregateArgs.size() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "An ordered-set aggregate needs at least one " + + "aggregated argument"); + ok = false; + } + + // Checks specific to hypothetical-set aggregates. + // The aggregated argument types must match the trailing direct + // arguments, and the two variadic declarations must match. + if ( _hypothetical ) + { + if ( _variadic[DIRECT_ARGS] != _variadic[AGG_ARGS] ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "For a hypothetical-set aggregate, neither or " + + "both the direct and aggregated argument lists " + + "must be declared variadic."); + ok = false; + } + if ( directArgs.size() < aggregateArgs.size() + || + ! 
directArgs.subList( + directArgs.size() - aggregateArgs.size(), + directArgs.size()) + .equals(aggregateArgs) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "The last direct arguments of a hypothetical-set " + + "aggregate must match the types of the " + + "aggregated arguments"); + ok = false; + } + } + } + + // It is allowed to omit a finisher function, but some things + // make no sense without one. + if ( orderedSet && null == _plan.finish && 0 < directArgs.size() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Direct arguments serve no purpose without a finisher"); + ok = false; + } + + if ( null == _plan.finish && _plan._polymorphic ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "The polymorphic flag is meaningless with no finisher"); + ok = false; + } + + // The same finisher checks for a movingPlan, if present. + if ( moving ) + { + if ( orderedSet + && null == _movingPlan[0].finish + && directArgs.size() > 0 ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Direct arguments serve no purpose without a finisher"); + ok = false; + } + + if ( null == _movingPlan[0].finish + && _movingPlan[0]._polymorphic ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "The polymorphic flag is meaningless with no finisher"); + ok = false; + } + } + + // Checks involving sortOperator + if ( null != sortop ) + { + if ( orderedSet ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "The sortOperator optimization is not available for " + + "an ordered-set aggregate (one with directArguments)"); + ok = false; + } + + if ( ! 
unary || _variadic[AGG_ARGS] ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "The sortOperator optimization is only available for " + + "a one-argument (and non-variadic) aggregate"); + ok = false; + } + } + + // Checks involving serialize / deserialize + if ( null != _plan.serialize || null != _plan.deserialize ) + { + if ( null == _plan.combine ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "An aggregate plan without combine= may not have " + + "serialize= or deserialize="); + ok = false; + } + + if ( null == _plan.serialize || null == _plan.deserialize ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "An aggregate plan must have both " + + "serialize= and deserialize= or neither"); + ok = false; + } + + if ( ! DT_INTERNAL.equals(_plan.stateType) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Only an aggregate plan with stateType " + + "pg_catalog.internal may have serialize=/deserialize="); + ok = false; + } + } + + if ( ! ok ) + return Set.of(); + + Set requires = requireTags(); + + DBType[] accumulatorSig = + Stream.of( + Stream.of(_plan.stateType), + aggregateArgs.stream().map(Map.Entry::getValue)) + .flatMap(identity()).toArray(DBType[]::new); + + DBType[] combinerSig = { _plan.stateType, _plan.stateType }; + + DBType[] finisherSig = + Stream.of( + Stream.of(_plan.stateType), + orderedSet + ? directArgs.stream().map(Map.Entry::getValue) + : Stream.of(), + _plan._polymorphic + ? aggregateArgs.stream().map(Map.Entry::getValue) + : Stream.of() + ) + .flatMap(identity()) + .toArray(DBType[]::new); + + if ( checkAccumulatorSig + && ! Arrays.equals(accumulatorSig, func.parameterTypes) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate annotation on a method that matches the name " + + "but not argument types expected for the aggregate's " + + "accumulate function"); + ok = false; + } + + if ( checkFinisherSig + && ! 
Arrays.equals(finisherSig, func.parameterTypes) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate annotation on a method that matches the name " + + "but not argument types expected for the aggregate's " + + "finish function"); + ok = false; + } + + requires.add( + new DependTag.Function(_plan.accumulate, accumulatorSig)); + + if ( null != _plan.combine ) + { + DBType[] serialSig = { DT_INTERNAL }; + DBType[] deserialSig = { DT_BYTEA, DT_INTERNAL }; + + requires.add( + new DependTag.Function(_plan.combine, combinerSig)); + + if ( null != _plan.serialize ) + { + requires.add( + new DependTag.Function(_plan.serialize, serialSig)); + requires.add( + new DependTag.Function(_plan.deserialize, deserialSig)); + } + } + + if ( null != _plan.finish ) + requires.add( + new DependTag.Function(_plan.finish, finisherSig)); + + if ( moving ) + { + accumulatorSig[0] = _movingPlan[0].stateType; + Arrays.fill(combinerSig, _movingPlan[0].stateType); + finisherSig[0] = _movingPlan[0].stateType; + + requires.add(new DependTag.Function( + _movingPlan[0].accumulate, accumulatorSig)); + + requires.add(new DependTag.Function( + _movingPlan[0].remove, accumulatorSig)); + + if ( null != _movingPlan[0].combine ) + requires.add(new DependTag.Function( + _movingPlan[0].combine, combinerSig)); + + if ( null != _movingPlan[0].finish ) + requires.add(new DependTag.Function( + _movingPlan[0].finish, finisherSig)); + } + + if ( null != sortop ) + { + DBType arg = aggregateArgs.get(0).getValue(); + DBType[] opSig = { arg, arg }; + requires.add(new DependTag.Operator(sortop, opSig)); + } + + /* + * That establishes dependency on the various support functions, + * which should, transitively, depend on all of the types. But it is + * possible we do not have a whole-program view (perhaps some + * support functions are implemented in other languages, and there + * are @SQLActions setting them up?). Therefore also, redundantly as + * it may be, declare dependency on the types. 
+ */ + + Stream.of( + aggregateArgs.stream().map(Map.Entry::getValue), + orderedSet + ? directArgs.stream().map(Map.Entry::getValue) + : Stream.of(), + Stream.of(_plan.stateType), + moving + ? Stream.of(_movingPlan[0].stateType) + : Stream.of() + ) + .flatMap(identity()) + .map(DBType::dependTag) + .filter(Objects::nonNull) + .forEach(requires::add); + + recordExplicitTags(_provides, _requires); + return Set.of(this); + } + + public String[] deployStrings() + { + List al = new ArrayList<>(); + + StringBuilder sb = new StringBuilder("CREATE AGGREGATE "); + appendNameAndArguments(sb); + sb.append(" ("); + + String[] planStrings = _plan.deployStrings(); + int n = planStrings.length; + for ( String s : planStrings ) + { + sb.append("\n\t").append(s); + if ( 0 < -- n ) + sb.append(','); + } + + if ( null != _movingPlan ) + { + planStrings = _movingPlan[0].deployStrings(); + for ( String s : planStrings ) + sb.append(",\n\tM").append(s); + } + + if ( null != sortop ) + sb.append(",\n\tSORTOP = ").append(sortop); + + if ( Function.Parallel.UNSAFE != _parallel ) + sb.append(",\n\tPARALLEL = ").append(_parallel); + + if ( _hypothetical ) + sb.append(",\n\tHYPOTHETICAL"); + + sb.append(')'); + + al.add(sb.toString()); + + if ( null != comment() ) + { + sb = new StringBuilder("COMMENT ON AGGREGATE "); + appendNameAndArguments(sb); + sb.append(" IS ").append(DDRWriter.eQuote(comment())); + al.add(sb.toString()); + } + + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + StringBuilder sb = new StringBuilder("DROP AGGREGATE "); + appendNameAndArguments(sb); + return new String[] { sb.toString() }; + } + + private void appendNameAndArguments(StringBuilder sb) + { + ListIterator> iter; + Map.Entry entry; + + sb.append(qname).append('('); + if ( null != directArgs ) + { + iter = directArgs.listIterator(); + while ( iter.hasNext() ) + { + entry = iter.next(); + sb.append("\n\t"); + if ( _variadic[DIRECT_ARGS] && ! 
iter.hasNext() ) + sb.append("VARIADIC "); + if ( null != entry.getKey() ) + sb.append(entry.getKey()).append(' '); + sb.append(entry.getValue()); + if ( iter.hasNext() ) + sb.append(','); + else + sb.append("\n\t"); + } + sb.append("ORDER BY"); + } + else if ( 0 == aggregateArgs.size() ) + sb.append('*'); + + iter = aggregateArgs.listIterator(); + while ( iter.hasNext() ) + { + entry = iter.next(); + sb.append("\n\t"); + if ( _variadic[AGG_ARGS] && ! iter.hasNext() ) + sb.append("VARIADIC "); + if ( null != entry.getKey() ) + sb.append(entry.getKey()).append(' '); + sb.append(entry.getValue()); + if ( iter.hasNext() ) + sb.append(','); + } + sb.append(')'); + } + + class Plan extends AbstractAnnotationImpl implements Aggregate.Plan + { + public String stateType() { return stateType.toString(); } + public int stateSize() { return _stateSize; } + public String initialState() { return _initialState; } + public String[] accumulate() { return qstrings(accumulate); } + public String[] combine() { return qstrings(combine); } + public String[] finish() { return qstrings(finish); } + public String[] remove() { return qstrings(remove); } + public String[] serialize() { return qstrings(serialize); } + public String[] deserialize() { return qstrings(deserialize); } + public boolean polymorphic() { return _polymorphic; } + public FinishEffect finishEffect() { return _finishEffect; } + + public int _stateSize; + public String _initialState; + public boolean _polymorphic; + public FinishEffect _finishEffect; + + DBType stateType; + Identifier.Qualified accumulate; + Identifier.Qualified combine; + Identifier.Qualified finish; + Identifier.Qualified remove; + Identifier.Qualified serialize; + Identifier.Qualified deserialize; + + public void setStateType(Object o, boolean explicit, Element e) + { + if ( explicit ) + stateType = DBType.fromSQLTypeAnnotation((String)o); + } + + public void setStateSize(Object o, boolean explicit, Element e) + { + _stateSize = (Integer)o; + if ( 
explicit && 0 >= _stateSize ) + throw new IllegalArgumentException( + "An explicit stateSize must be positive"); + } + + public void setInitialState(Object o, boolean explicit, Element e) + { + if ( explicit ) + _initialState = (String)o; + } + + public void setAccumulate(Object o, boolean explicit, Element e) + { + if ( explicit ) + accumulate = qnameFrom(avToArray( o, String.class)); + } + + public void setCombine(Object o, boolean explicit, Element e) + { + if ( explicit ) + combine = qnameFrom(avToArray( o, String.class)); + } + + public void setFinish(Object o, boolean explicit, Element e) + { + if ( explicit ) + finish = qnameFrom(avToArray( o, String.class)); + } + + public void setRemove(Object o, boolean explicit, Element e) + { + if ( explicit ) + throw new IllegalArgumentException( + "Only a movingPlan may have a remove function"); + } + + public void setSerialize(Object o, boolean explicit, Element e) + { + if ( explicit ) + serialize = qnameFrom(avToArray( o, String.class)); + } + + public void setDeserialize(Object o, boolean explicit, Element e) + { + if ( explicit ) + deserialize = qnameFrom(avToArray( o, String.class)); + } + + public void setFinishEffect( Object o, boolean explicit, Element e) + { + if ( explicit ) + _finishEffect = FinishEffect.valueOf( + ((VariableElement)o).getSimpleName().toString()); + } + + public Set characterize() + { + return Set.of(); + } + + /** + * Returns one string per plan element (not per SQL statement). + *

    + * This method has to be here anyway because the class extends + * {@code AbstractAnnotationImpl}, but it will never be processed as + * an actual SQL snippet. This will be called by the containing + * {@code AggregateImpl} and return the individual plan elements + * that it will build into its own deploy strings. + *

    + * When this class represents a moving plan, the caller will prefix + * each of these strings with {@code M}. + */ + public String[] deployStrings() + { + List al = new ArrayList<>(); + + al.add("STYPE = " + stateType); + + if ( 0 != _stateSize ) + al.add("SSPACE = " + _stateSize); + + if ( null != _initialState ) + al.add("INITCOND = " + DDRWriter.eQuote(_initialState)); + + al.add("SFUNC = " + accumulate); + + if ( null != remove ) + al.add("INVFUNC = " + remove); + + if ( null != finish ) + al.add("FINALFUNC = " + finish); + + if ( _polymorphic ) + al.add("FINALFUNC_EXTRA"); + + if ( null != _finishEffect ) + al.add("FINALFUNC_MODIFY = " + _finishEffect); + + if ( null != combine ) + al.add("COMBINEFUNC = " + combine); + + if ( null != serialize ) + al.add("SERIALFUNC = " + serialize); + + if ( null != deserialize ) + al.add("DESERIALFUNC = " + deserialize); + + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + return null; + } + } + + class Moving extends Plan + { + public void setRemove(Object o, boolean explicit, Element e) + { + if ( explicit ) + remove = qnameFrom(avToArray( o, String.class)); + } + + public void setSerialize(Object o, boolean explicit, Element e) + { + if ( explicit ) + throw new IllegalArgumentException( + "Only a (non-moving) plan may have a " + + "serialize function"); + } + + public void setDeserialize(Object o, boolean explicit, Element e) + { + if ( explicit ) + throw new IllegalArgumentException( + "Only a (non-moving) plan may have a " + + "deserialize function"); + } + } + } + + /** + * Provides the default mappings from Java types to SQL types. 
+ */ + class TypeMapper + { + ArrayList> protoMappings; + ArrayList> finalMappings; + + TypeMapper() + { + protoMappings = new ArrayList<>(); + + // Primitives (these need not, indeed cannot, be schema-qualified) + // + this.addMap(boolean.class, DT_BOOLEAN); + this.addMap(Boolean.class, DT_BOOLEAN); + this.addMap(byte.class, "smallint"); + this.addMap(Byte.class, "smallint"); + this.addMap(char.class, "smallint"); + this.addMap(Character.class, "smallint"); + this.addMap(double.class, "double precision"); + this.addMap(Double.class, "double precision"); + this.addMap(float.class, "real"); + this.addMap(Float.class, "real"); + this.addMap(int.class, DT_INTEGER); + this.addMap(Integer.class, DT_INTEGER); + this.addMap(long.class, "bigint"); + this.addMap(Long.class, "bigint"); + this.addMap(short.class, "smallint"); + this.addMap(Short.class, "smallint"); + + // Known common mappings + // + this.addMap(Number.class, "pg_catalog", "numeric"); + this.addMap(String.class, "pg_catalog", "varchar"); + this.addMap(java.util.Date.class, "pg_catalog", "timestamp"); + this.addMap(Timestamp.class, "pg_catalog", "timestamp"); + this.addMap(Time.class, "pg_catalog", "time"); + this.addMap(java.sql.Date.class, "pg_catalog", "date"); + this.addMap(java.sql.SQLXML.class, "pg_catalog", "xml"); + this.addMap(BigInteger.class, "pg_catalog", "numeric"); + this.addMap(BigDecimal.class, "pg_catalog", "numeric"); + this.addMap(ResultSet.class, DT_RECORD); + this.addMap(Object.class, DT_ANY); + + this.addMap(byte[].class, DT_BYTEA); + + this.addMap(LocalDate.class, "pg_catalog", "date"); + this.addMap(LocalTime.class, "pg_catalog", "time"); + this.addMap(OffsetTime.class, "pg_catalog", "timetz"); + this.addMap(LocalDateTime.class, "pg_catalog", "timestamp"); + this.addMap(OffsetDateTime.class, "pg_catalog", "timestamptz"); + + this.addMap(CatalogObject.class, "pg_catalog", "oid"); + } + + private boolean mappingsFrozen() + { + return null != finalMappings; + } + + /* + * What worked in 
Java 6 was to keep a list of Class -> sqltype + mappings, and get TypeMirrors from the Classes at the time of trying + to identify types (in the final, after-all-sources-processed round). + Starting in Java 7, you get different TypeMirror instances in + different rounds for the same types, so you can't match something + seen in round 1 to something looked up in the final round. (However, + you can match things seen in round 1 to things looked up prior to + the first round, when init() is called and constructs the processor.) + * + * So, this method needs to be called at the end of round 1 (or at the + end of every round, it just won't do anything but once), and at that + point it will compute the list order and freeze a list of TypeMirrors + to avoid looking up the Classes later and getting different + mirrors. + * + * This should work as long as all the sources containing PL/Java + annotations will be found in round 1. That would only not be the case + if some other annotation processor is in use that could generate new + sources with pljava annotations in them, requiring additional rounds. + In the present state of things, that simply won't work. Java bug + http://bugs.java.com/bugdatabase/view_bug.do?bug_id=8038455 might + cover this, and promises a fix in Java 9, but who knows? + */ + private void workAroundJava7Breakage() + { + if ( mappingsFrozen() ) + return; // after the first round, it's too late! + + // Need to check more specific types before those they are + // assignable to by widening reference conversions, so a + // topological sort is in order. 
+ // + List>> vs = new ArrayList<>( + protoMappings.size()); + + for ( Map.Entry me : protoMappings ) + vs.add( new Vertex<>( me)); + + for ( int i = vs.size(); i --> 1; ) + { + Vertex> vi = vs.get( i); + TypeMirror ci = vi.payload.getKey(); + for ( int j = i; j --> 0; ) + { + Vertex> vj = vs.get( j); + TypeMirror cj = vj.payload.getKey(); + boolean oij = typu.isAssignable( ci, cj); + boolean oji = typu.isAssignable( cj, ci); + if ( oji == oij ) + continue; // no precedence constraint between these two + if ( oij ) + vi.precede( vj); + else + vj.precede( vi); + } + } + + Queue>> q; + if ( reproducible ) + { + q = new PriorityQueue<>( 11, new TypeTiebreaker()); + } + else + { + q = new LinkedList<>(); + } + + for ( Vertex> v : vs ) + if ( 0 == v.indegree ) + q.add( v); + + protoMappings.clear(); + finalMappings = protoMappings; + protoMappings = null; + + while ( ! q.isEmpty() ) + { + Vertex> v = q.remove(); + v.use( q); + finalMappings.add( v.payload); + } + } + + private TypeMirror typeMirrorFromClass( Class k) + { + if ( k.isArray() ) + { + TypeMirror ctm = typeMirrorFromClass( k.getComponentType()); + return typu.getArrayType( ctm); + } + + if ( k.isPrimitive() ) + { + TypeKind tk = TypeKind.valueOf( k.getName().toUpperCase()); + return typu.getPrimitiveType( tk); + } + + String cname = k.getCanonicalName(); + if ( null == cname ) + { + msg( Kind.WARNING, + "Cannot register type mapping for class %s" + + "that lacks a canonical name", k.getName()); + return null; + } + + return declaredTypeForClass(k); + } + + /** + * Add a custom mapping from a Java class to an SQL type identified + * by SQL-standard reserved syntax. + * + * @param k Class representing the Java type + * @param v String representing the SQL (language-reserved) type + * to be used + */ + void addMap(Class k, String v) + { + addMap( typeMirrorFromClass( k), new DBType.Reserved(v)); + } + + /** + * Add a custom mapping from a Java class to an SQL type identified + * by an SQL qualified identifier. 
+ * + * @param k Class representing the Java type + * @param schema String representing the qualifier of the type name + * (may be null) + * @param local String representing the SQL (language-reserved) type + * to be used + */ + void addMap(Class k, String schema, String local) + { + addMap( typeMirrorFromClass( k), + new DBType.Named(qnameFrom(local, schema))); + } + + /** + * Add a custom mapping from a Java class to an SQL type + * already in the form of a {@code DBType}. + * + * @param k Class representing the Java type + * @param type DBType representing the SQL type to be used + */ + void addMap(Class k, DBType type) + { + addMap( typeMirrorFromClass( k), type); + } + + /** + * Add a custom mapping from a Java class to an SQL type, if a class + * with the given name exists. + * + * @param k Canonical class name representing the Java type + * @param v String representing the SQL type to be used + */ + void addMapIfExists(String k, String v) + { + TypeElement te = elmu.getTypeElement( k); + if ( null != te ) + addMap( te.asType(), new DBType.Reserved(v)); + } + + /** + * Add a custom mapping from a Java class (represented as a TypeMirror) + * to an SQL type. + * + * @param tm TypeMirror representing the Java type + * @param v String representing the SQL type to be used + */ + void addMap(TypeMirror tm, DBType v) + { + if ( mappingsFrozen() ) + { + msg( Kind.ERROR, + "addMap(%s, %s)\n" + + "called after workAroundJava7Breakage", tm.toString(), v); + return; + } + protoMappings.add( new AbstractMap.SimpleImmutableEntry<>( tm, v)); + } + + /** + * Return the SQL type for the Java type represented by a TypeMirror, + * from an explicit annotation if present, otherwise by applying the + * default mappings. No default-value information is included in the + * string returned. It is assumed that a function return is being typed + * rather than a function parameter. + * + * @param tm Represents the type whose corresponding SQL type is wanted. 
+ * @param e Annotated element (chiefly for use as a location hint in + * diagnostic messages). + */ + DBType getSQLType(TypeMirror tm, Element e) + { + return getSQLType( tm, e, null, false, false); + } + + + /** + * Return the SQL type for the Java type represented by a TypeMirror, + * from an explicit annotation if present, otherwise by applying the + * default mappings. + * + * @param tm Represents the type whose corresponding SQL type is wanted. + * @param e Annotated element (chiefly for use as a location hint in + * diagnostic messages). + * @param st {@code SQLType} annotation, or null if none, explicitly + * given for the element. + * @param contravariant Indicates that the element whose type is wanted + * is a function parameter and should be given the widest type that can + * be assigned to it. If false, find the narrowest type that a function + * return can be assigned to. + * @param withDefault Indicates whether any specified default value + * information should also be included in the "type" string returned. + */ + DBType getSQLType(TypeMirror tm, Element e, SQLType st, + boolean contravariant, boolean withDefault) + { + boolean array = false; + boolean row = false; + DBType rslt = null; + + String[] defaults = null; + boolean optional = false; + + if ( null != st ) + { + String s = st.value(); + if ( null != s ) + rslt = DBType.fromSQLTypeAnnotation(s); + defaults = st.defaultValue(); + optional = st.optional(); + } + + if ( tm.getKind().equals( TypeKind.ARRAY) ) + { + ArrayType at = ((ArrayType)tm); + if ( ! at.getComponentType().getKind().equals( TypeKind.BYTE) ) + { + array = true; + tm = at.getComponentType(); + // only for bytea[] should this ever still be an array + } + } + + if ( ! 
array && typu.isSameType( tm, TY_RESULTSET) ) + row = true; + + if ( null != rslt ) + return typeWithDefault( + e, rslt, array, row, defaults, optional, withDefault); + + if ( tm.getKind().equals( TypeKind.VOID) ) + return DT_VOID; // return type only; no defaults apply + + if ( tm.getKind().equals( TypeKind.ERROR) ) + { + msg ( Kind.ERROR, e, + "Cannot determine mapping to SQL type for unresolved type"); + rslt = new DBType.Reserved(tm.toString()); + } + else + { + ArrayList> ms = finalMappings; + if ( contravariant ) + ms = reversed(ms); + for ( Map.Entry me : ms ) + { + TypeMirror ktm = me.getKey(); + if ( ktm instanceof PrimitiveType ) + { + if ( typu.isSameType( tm, ktm) ) + { + rslt = me.getValue(); + break; + } + } + else + { + boolean accept; + if ( contravariant ) + accept = typu.isAssignable( ktm, tm); + else + accept = typu.isAssignable( tm, ktm); + if ( accept ) + { + // don't compute a type of Object/"any" for + // a function return (just admit defeat instead) + if ( contravariant + || ! typu.isSameType( ktm, TY_OBJECT) ) + rslt = me.getValue(); + break; + } + } + } + } + + if ( null == rslt ) + { + msg( Kind.ERROR, e, + "No known mapping to an SQL type"); + rslt = new DBType.Reserved(tm.toString()); + } + + if ( array ) + rslt = rslt.asArray("[]"); + + return typeWithDefault( + e, rslt, array, row, defaults, optional, withDefault); + } + + /** + * Given the matching SQL type already determined, return it with or + * without default-value information appended, as the caller desires. + * To ensure that the generated descriptor will be in proper form, the + * default values are emitted as properly-escaped string literals and + * then cast to the appropriate type. This approach will not work for + * defaults given as arbitrary SQL expressions, but covers the typical + * cases of simple literals and even anything that can be computed as + * a Java String constant expression (e.g. ""+Math.PI). 
+ * + * @param e Annotated element (chiefly for use as a location hint in + * diagnostic messages). + * @param rslt The bare SQL type string already determined + * @param array Whether the Java type was determined to be an array + * @param row Whether the Java type was ResultSet, indicating an SQL + * record or row type. + * @param defaults Array (null if not present) of default value strings + * @param withDefault Whether to append the default information to the + * type. + */ + DBType typeWithDefault( + Element e, DBType rslt, boolean array, boolean row, + String[] defaults, boolean optional, boolean withDefault) + { + if ( ! withDefault || null == defaults && ! optional ) + return rslt; + + if ( optional ) + return rslt.withDefault("DEFAULT NULL"); + + int n = defaults.length; + if ( row ) + { + assert ! array; + if ( n > 0 && rslt.toString().equalsIgnoreCase("record") ) + msg( Kind.ERROR, e, + "Only supported default for unknown RECORD type is {}"); + } + else if ( n != 1 ) + array = true; + else if ( ! array ) + array = rslt.isArray(); + + StringBuilder sb = new StringBuilder(); + sb.append( " DEFAULT "); + sb.append( row ? "ROW(" : "CAST("); + if ( array ) + sb.append( "ARRAY["); + if ( n > 1 ) + sb.append( "\n\t"); + for ( String s : defaults ) + { + sb.append( DDRWriter.eQuote( s)); + if ( 0 < -- n ) + sb.append( ",\n\t"); + } + if ( array ) + sb.append( ']'); + if ( ! row ) + sb.append( " AS ").append( rslt); + sb.append( ')'); + return rslt.withDefault(sb.toString()); + } + } + + /** + * Work around bizarre javac behavior that silently supplies an Error + * class in place of an attribute value for glaringly obvious source errors, + * instead of reporting them. 
+ * @param av AnnotationValue to extract the value from + * @return The result of getValue unless {@code av} is an error placeholder + */ + static Object getValue( AnnotationValue av) + { + if ( "com.sun.tools.javac.code.Attribute.Error".equals( + av.getClass().getCanonicalName()) ) + throw new AnnotationValueException(); + return av.getValue(); + } + + /** + * Return a reversed copy of an ArrayList. + */ + static > T reversed(T orig) + { + @SuppressWarnings("unchecked") + T list = (T)orig.clone(); + Collections.reverse(list); + return list; + } + + /** + * Return an {@code Identifier.Qualified} from discrete Java strings + * representing the local name and schema, with a zero-length schema string + * producing a qualified name with null qualifier. + */ + Identifier.Qualified qnameFrom( + String name, String schema) + { + Identifier.Simple qualifier = + "".equals(schema) ? null : Identifier.Simple.fromJava(schema, msgr); + Identifier.Simple local = Identifier.Simple.fromJava(name, msgr); + return local.withQualifier(qualifier); + } + + /** + * Return an {@code Identifier.Qualified} from a single Java string + * representing the local name and possibly a schema. + */ + Identifier.Qualified qnameFrom(String name) + { + return Identifier.Qualified.nameFromJava(name, msgr); + } + + /** + * Return an {@code Identifier.Qualified} from an array of Java strings + * representing schema and local name separately if of length two, or as by + * {@link #qnameFrom(String)} if of length one; invalid if of any other + * length. + *

    + * The first of two elements may be explicitly {@code ""} to produce a + * qualified name with null qualifier. + */ + Identifier.Qualified qnameFrom(String[] names) + { + switch ( names.length ) + { + case 2: return qnameFrom(names[1], names[0]); + case 1: return qnameFrom(names[0]); + default: + throw new IllegalArgumentException( + "Only a one- or two-element String array is accepted"); + } + } + + /** + * Like {@link #qnameFrom(String[])} but for an operator name. + */ + Identifier.Qualified operatorNameFrom(String[] names) + { + switch ( names.length ) + { + case 2: + Identifier.Simple qualifier = null; + if ( ! names[0].isEmpty() ) + qualifier = Identifier.Simple.fromJava(names[0], msgr); + return Identifier.Operator.from(names[1], msgr) + .withQualifier(qualifier); + case 1: + return Identifier.Qualified.operatorFromJava(names[0], msgr); + default: + throw new IllegalArgumentException( + "Only a one- or two-element String array is accepted"); + } + } + + String[] qstrings(Identifier.Qualified qname) + { + if ( null == qname ) + return null; + Identifier.Simple q = qname.qualifier(); + String local = qname.local().toString(); + return new String[] { null == q ? null : q.toString(), local }; + } +} + +/** + * Exception thrown when an expected annotation value is a compiler-internal + * Error class instead, which happens in some javac versions when the annotation + * value wasn't resolved because of a source error the compiler really should + * have reported. + */ +class AnnotationValueException extends RuntimeException { } diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DependTag.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DependTag.java new file mode 100644 index 000000000..a9aeec059 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DependTag.java @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2020-2025 Tada AB and other contributors, as listed below. 
+ * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.util.Arrays; +import static java.util.Objects.hash; +import static java.util.Objects.requireNonNull; + +import javax.annotation.processing.Messager; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; + +/** + * Abstraction of a dependency tag, encompassing {@code Explicit} ones declared + * in annotations and distinguished by {@code String}s, and others added + * implicitly such as {@code Type}s known by {@code Identifier.Qualified}. + */ +abstract class DependTag +{ + protected final T m_value; + + protected DependTag(T value) + { + m_value = value; + } + + @Override + public int hashCode() + { + return hash(getClass(), m_value); + } + + @Override + public final boolean equals(Object o) + { + return equals(o, null); + } + + public boolean equals(Object o, Messager msgr) + { + if ( this == o ) + return true; + if ( null == o ) + return false; + return + getClass() == o.getClass() + && m_value.equals(((DependTag)o).m_value); + } + + @Override + public String toString() + { + return '(' + getClass().getSimpleName() + ')' + m_value.toString(); + } + + static final class Explicit extends DependTag + { + Explicit(String value) + { + super(requireNonNull(value)); + } + } + + static abstract class Named extends DependTag + { + Named(T value) + { + super(value); + } + + @Override + public boolean equals(Object o, Messager msgr) + { + if ( this == o ) + return true; + if ( null == o ) + return false; + return + getClass() == o.getClass() + && m_value.equals(((DependTag)o).m_value, msgr); + } + } + + static final class Type + extends Named> + { + Type(Identifier.Qualified value) + { + 
super(requireNonNull(value)); + } + } + + static final class Function + extends Named> + { + private DBType[] m_signature; + + Function( + Identifier.Qualified value, DBType[] signature) + { + super(requireNonNull(value)); + m_signature = signature.clone(); + } + + @Override + public boolean equals(Object o, Messager msgr) + { + if ( ! super.equals(o, msgr) ) + return false; + Function f = (Function)o; + if ( m_signature.length != f.m_signature.length ) + return false; + for ( int i = 0; i < m_signature.length; ++ i ) + { + if ( null == m_signature[i] || null == f.m_signature[i] ) + { + if ( m_signature[i] != f.m_signature[i] ) + return false; + continue; + } + if ( ! m_signature[i].equals(f.m_signature[i], msgr) ) + return false; + } + return true; + } + + @Override + public String toString() + { + return super.toString() + Arrays.toString(m_signature); + } + } + + static final class Operator + extends Named> + { + private DBType[] m_signature; + + Operator( + Identifier.Qualified value, DBType[] signature) + { + super(requireNonNull(value)); + assert 2 == signature.length : "invalid Operator signature length"; + m_signature = signature.clone(); + } + + @Override + public boolean equals(Object o, Messager msgr) + { + if ( ! super.equals(o, msgr) ) + return false; + Operator op = (Operator)o; + if ( m_signature.length != op.m_signature.length ) + return false; + for ( int i = 0; i < m_signature.length; ++ i ) + { + if ( null == m_signature[i] || null == op.m_signature[i] ) + { + if ( m_signature[i] != op.m_signature[i] ) + return false; + continue; + } + if ( ! 
m_signature[i].equals(op.m_signature[i], msgr) ) + return false; + } + return true; + } + + @Override + public String toString() + { + return super.toString() + Arrays.toString(m_signature); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/ImpProvider.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/ImpProvider.java new file mode 100644 index 000000000..ed51f4bfa --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/ImpProvider.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.util.Set; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; + +/** + * Proxy a snippet that 'provides' an implementor tag and has no + * undeployStrings, returning its deployStrings in their place. 
+ */ +class ImpProvider implements Snippet +{ + Snippet s; + + ImpProvider( Snippet s) { this.s = s; } + + @Override public Identifier.Simple implementorName() + { + return s.implementorName(); + } + @Override public String[] deployStrings() { return s.deployStrings(); } + @Override public String[] undeployStrings() { return s.deployStrings(); } + @Override public Set provideTags() { return s.provideTags(); } + @Override public Set requireTags() { return s.requireTags(); } + @Override public Set characterize() { return s.characterize(); } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/ParameterInfo.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/ParameterInfo.java new file mode 100644 index 000000000..a419c4133 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/ParameterInfo.java @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2020-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import javax.lang.model.element.VariableElement; + +import javax.lang.model.type.TypeMirror; + +import org.postgresql.pljava.annotation.SQLType; + +/** + * Tiny 'record' used in factoring duplicative operations on function parameter + * lists into operations on streams of these. + */ +class ParameterInfo +{ + final TypeMirror tm; + final VariableElement ve; + final SQLType st; + final DBType dt; + + String name() + { + String name = null == st ? 
null : st.name(); + if ( null == name ) + name = ve.getSimpleName().toString(); + return name; + } + + ParameterInfo(TypeMirror m, VariableElement e, SQLType t, DBType d) + { + tm = m; + ve = e; + st = t; + dt = d; + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Snippet.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Snippet.java new file mode 100644 index 000000000..bd84faf7a --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Snippet.java @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.util.Set; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; + +/** + * A code snippet. May contain zero, one, or more complete SQL commands for + * each of deploying and undeploying. The commands contained in one Snippet + * will always be emitted in a fixed order. A collection of Snippets will be + * output in an order constrained by their provides and requires methods. + */ +interface Snippet +{ + /** + * An {@code } that will be used to wrap each command + * from this Snippet as an {@code }. If null, the + * commands will be emitted as plain {@code }s. + */ + public Identifier.Simple implementorName(); + /** + * A {@code DependTag} to represent this snippet's dependence on whatever + * determines whether the implementor name is to be recognized. + *

    + * Represented for now as a {@code DependTag.Explicit} even though the + * dependency is implicitly created; an {@code SQLAction} snippet may have + * an explicit {@code provides=} that has to be matched. + */ + default DependTag implementorTag() + { + return new DependTag.Explicit(implementorName().nonFolded()); + } + /** + * Return an array of SQL commands (one complete command to a string) to + * be executed in order during deployment. + */ + public String[] deployStrings(); + /** + * Return an array of SQL commands (one complete command to a string) to + * be executed in order during undeployment. + */ + public String[] undeployStrings(); + /** + * Return an array of arbitrary labels considered "provided" by this + * Snippet. In generating the final order of the deployment descriptor file, + * this Snippet will come before any whose requires method returns any of + * the same labels. + */ + public Set provideTags(); + /** + * Return an array of arbitrary labels considered "required" by this + * Snippet. In generating the final order of the deployment descriptor file, + * this Snippet will come after those whose provides method returns any of + * the same labels. + */ + public Set requireTags(); + /** + * Method to be called after all annotations' + * element/value pairs have been filled in, to compute any additional + * information derived from those values before deployStrings() or + * undeployStrings() can be called. May also check for and report semantic + * errors that are not easily checked earlier while populating the + * element/value pairs. + * @return A set of snippets that are now prepared and should be added to + * the graph to be scheduled and emitted according to provides/requires. + * Typically Set.of(this) if all went well, or Set.of() in case of an error + * or when the snippet will be emitted by something else. In some cases a + * characterize method can return additional snippets that are ready to be + * scheduled. 
+ */ + public Set characterize(); + + /** + * If it is possible to break an ordering cycle at this snippet, return a + * vertex wrapping a snippet (possibly this one, or another) that can be + * considered ready, otherwise return null. + *

    + * The default implementation returns null unconditionally. + * @param v Vertex that wraps this Snippet + * @param deploy true when generating an ordering for the deploy strings + * @return a Vertex wrapping a Snippet that can be considered ready + */ + default Vertex breakCycle(Vertex v, boolean deploy) + { + return null; + } + + /** + * Called when undeploy ordering breaks a cycle by using + * {@code DROP ... CASCADE} or equivalent on another object, with effects + * that would duplicate or interfere with this snippet's undeploy actions. + *

    + * A snippet for which this can matter should note that this method has been + * called, and later generate its undeploy strings with any necessary + * adjustments. + *

    + * The default implementation does nothing. + */ + default void subsume() + { + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/SnippetTiebreaker.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/SnippetTiebreaker.java new file mode 100644 index 000000000..d21e59e84 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/SnippetTiebreaker.java @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.util.Arrays; +import java.util.Comparator; +import static java.util.Comparator.comparing; +import static java.util.Comparator.naturalOrder; +import static java.util.Comparator.nullsFirst; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Resolve ties in {@code Snippet} ordering in an arbitrary but deterministic + * way, for use when {@code ddr.reproducible} is set. 
+ */ +class SnippetTiebreaker implements Comparator> +{ + private static final Comparator> VCMP; + + static + { + Comparator scmp = + comparing(Snippet::implementorName, + nullsFirst(comparing(Simple::pgFolded, naturalOrder())) + ) + .thenComparing(Snippet::deployStrings, Arrays::compare) + .thenComparing(Snippet::undeployStrings, Arrays::compare); + + VCMP = comparing(v -> v.payload, scmp); + } + + @Override + public int compare(Vertex o1, Vertex o2) + { + return VCMP.compare(o1, o2); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/TypeTiebreaker.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/TypeTiebreaker.java new file mode 100644 index 000000000..04fa37f82 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/TypeTiebreaker.java @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.util.Comparator; +import static java.util.Comparator.comparing; +import java.util.Map; + +import javax.lang.model.type.TypeMirror; + +/** + * Resolve ties in type-mapping resolution in an arbitrary but deterministic + * way, for use when {@code ddr.reproducible} is set. 
+ */ +class TypeTiebreaker +implements Comparator>> +{ + private static final Comparator>> VCMP; + + static + { + Comparator> ecmp = + comparing( + (Map.Entry e) -> e.getValue().toString()) + .thenComparing(e -> e.getKey().toString()); + + VCMP = comparing(v -> v.payload, ecmp); + } + + @Override + public int compare( + Vertex> o1, + Vertex> o2) + { + return VCMP.compare(o1, o2); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Vertex.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Vertex.java new file mode 100644 index 000000000..3175e91c8 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Vertex.java @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.IdentityHashMap; +import java.util.LinkedList; +import java.util.List; +import static java.util.Objects.requireNonNull; +import java.util.Queue; + +/** + * Vertex in a DAG, as used to put things in workable topological order + */ +class Vertex

    +{ + P payload; + int indegree; + List> adj; + + /** + * Construct a new vertex with the supplied payload, indegree zero, and an + * empty out-adjacency list. + * @param payload Object to be associated with this vertex. + */ + Vertex( P payload) + { + this.payload = payload; + indegree = 0; + adj = new ArrayList<>(); + } + + /** + * Record that this vertex must precede the specified vertex. + * @param v a Vertex that this Vertex must precede. + */ + void precede( Vertex

    v) + { + ++ v.indegree; + adj.add( v); + } + + /** + * Record that this vertex has been 'used'. Decrement the indegree of any + * in its adjacency list, and add to the supplied queue any of those whose + * indegree becomes zero. + * @param q A queue of vertices that are ready (have indegree zero). + */ + void use( Collection> q) + { + for ( Vertex

    v : adj ) + if ( 0 == -- v.indegree ) + q.add( v); + } + + /** + * Record that this vertex has been 'used'. Decrement the indegree of any + * in its adjacency list; any of those whose indegree becomes zero should be + * both added to the ready queue {@code q} and removed from the collection + * {@code vs}. + * @param q A queue of vertices that are ready (have indegree zero). + * @param vs A collection of vertices not yet ready. + */ + void use( Collection> q, Collection> vs) + { + for ( Vertex

    v : adj ) + if ( 0 == -- v.indegree ) + { + vs.remove( v); + q.add( v); + } + } + + /** + * Whether a vertex is known to transitively precede, or not so precede, a + * target vertex, or cannot yet be so classified. + */ + enum MemoState { YES, NO, PENDING } + + /** + * Return the memoized state of this vertex or, if none, enqueue the vertex + * for further exploration, memoize its state as {@code PENDING}, and return + * that. + */ + MemoState classifyOrEnqueue( + Queue> queue, IdentityHashMap,MemoState> memos) + { + MemoState state = memos.putIfAbsent(this, MemoState.PENDING); + if ( null == state ) + { + queue.add(this); + return MemoState.PENDING; + } + return state; + } + + /** + * Execute one step of {@code precedesTransitively} determination. + *

    + * On entry, this vertex has been removed from the queue. Its immediate + * adjacency successors will be evaluated. + *

    + * If any immediate successor is a {@code YES}, this vertex + * is a {@code YES}. + *

    + * If any immediate successor is {@code PENDING}, this vertex remains + * {@code PENDING} and is replaced on the queue, to be encountered again + * after all currently pending vertices. + *

    + * Otherwise, this vertex is a {@code NO}. + */ + MemoState stepOfPrecedes( + Queue> queue, IdentityHashMap,MemoState> memos) + { + boolean anyPendingSuccessors = false; + for ( Vertex

    v : adj ) + { + switch ( v.classifyOrEnqueue(queue, memos) ) + { + case YES: + memos.replace(this, MemoState.YES); + return MemoState.YES; + case PENDING: + anyPendingSuccessors = true; + break; + case NO: + break; + } + } + + if ( anyPendingSuccessors ) + { + queue.add(this); + return MemoState.PENDING; + } + + memos.replace(this, MemoState.NO); + return MemoState.NO; + } + + /** + * Determine whether this vertex (transitively) precedes other, + * returning, if so, that subset of its immediate adjacency successors + * through which other is reachable. + * @param other vertex to which reachability is to be tested + * @return array of immediate adjacencies through which other is reachable, + * or null if it is not + */ + Vertex

    [] precedesTransitively(Vertex

    other) + { + Queue> queue = new LinkedList<>(); + IdentityHashMap,MemoState> memos = new IdentityHashMap<>(); + boolean anyYeses = false; + + /* + * Initially: the 'other' vertex itself is known to be a YES. + * Nothing is yet known to be a NO. + */ + memos.put(requireNonNull(other), MemoState.YES); + + /* + * classifyOrEnqueue my immediate successors. Any that is not 'other' + * itself will be enqueued in PENDING status. + */ + for ( Vertex

    v : adj ) + if ( MemoState.YES == v.classifyOrEnqueue(queue, memos) ) + anyYeses = true; + + /* + * After running stepOfPrecedes on every enqueued vertex until the queue + * is empty, every vertex seen will be in memos as a YES or a NO. + */ + while ( ! queue.isEmpty() ) + if ( MemoState.YES == queue.remove().stepOfPrecedes(queue, memos) ) + anyYeses = true; + + if ( ! anyYeses ) + return null; + + @SuppressWarnings("unchecked") // can't quite say Vertex

    []::new + Vertex

    [] result = adj.stream() + .filter(v -> MemoState.YES == memos.get(v)) + .toArray(Vertex[]::new); + + return result; + } + + /** + * Remove successors from the adjacency list of this vertex, and + * add them to the adjacency list of other. + *

    + * No successor's indegree is changed. + */ + void transferSuccessorsTo(Vertex

    other, Vertex

    [] successors) + { + for ( Vertex

    v : successors ) + { + boolean removed = adj.remove(v); + assert removed : "transferSuccessorsTo passed a non-successor"; + other.adj.add(v); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/VertexPair.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/VertexPair.java new file mode 100644 index 000000000..b381563cd --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/VertexPair.java @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +/** + * A pair of Vertex instances for the same payload, for use when two directions + * of topological ordering must be computed. + */ +class VertexPair

    +{ + Vertex

    fwd; + Vertex

    rev; + + VertexPair( P payload) + { + fwd = new Vertex<>( payload); + rev = new Vertex<>( payload); + } + + P payload() + { + return rev.payload; + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/AccessMethod.java b/pljava-api/src/main/java/org/postgresql/pljava/model/AccessMethod.java new file mode 100644 index 000000000..7620b6632 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/AccessMethod.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; +import org.postgresql.pljava.model.RegProcedure.Memo.Why; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a relation access method. + */ +public interface AccessMethod +extends + Addressed, Named +{ + RegClass.Known CLASSID = + formClassId(AccessMethodRelationId, AccessMethod.class); + + enum Type { TABLE, INDEX } + + interface AMHandler extends Why { } + + RegProcedure handler(); + + Type type(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Attribute.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Attribute.java new file mode 100644 index 000000000..3f9d6040a --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Attribute.java @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.util.Map; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.annotation.BaseUDT.Alignment; +import org.postgresql.pljava.annotation.BaseUDT.Storage; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * An attribute (column), either of a known relation, or of a transient record + * type. + *

    + * Instances of the transient kind may be retrieved from a + * {@link TupleDescriptor TupleDescriptor} and will compare unequal to other + * {@code Attribute} instances even with the same {@code classId}, + * {@code subId}, and {@code oid} (which will be {@code InvalidOid}); for such + * instances, {@link #containingTupleDescriptor() containingTupleDescriptor} + * will return the specific transient {@code TupleDescriptor} to which + * the attribute belongs. Such 'virtual' instances will appear to have + * the invalid {@code RegClass} as {@code relation()}, and all access granted + * to {@code public}. + */ +public interface Attribute +extends + Addressed, Component, Named, + AccessControlled +{ + /** + * CLASS rather than CLASSID because Attribute isn't an object class + * in its own right. + *

    + * This simply identifies the table in the catalog that holds attribute + * definitions. An Attribute is not regarded as an object of that 'class'; + * it is a subId of whatever other RegClass object it defines an attribute + * of. + */ + RegClass CLASS = formObjectId(RegClass.CLASSID, AttributeRelationId); + + enum Identity { INAPPLICABLE, GENERATED_ALWAYS, GENERATED_BY_DEFAULT } + + enum Generated { INAPPLICABLE, STORED } + + RegClass relation(); + RegType type(); + short length(); + int dimensions(); + boolean byValue(); + Alignment alignment(); + Storage storage(); + boolean notNull(); + boolean hasDefault(); + boolean hasMissing(); + Identity identity(); + Generated generated(); + boolean dropped(); + boolean local(); + int inheritanceCount(); + RegCollation collation(); + Map options(); + Map fdwoptions(); + // missingValue + + /** + * Returns the tuple descriptor to which this attribute belongs. + *

    + * For a 'cataloged' attribute corresponding to a known relation + * or row type, returns a {@code TupleDescriptor} for that. For a 'virtual' + * attribute obtained from some non-cataloged tuple descriptor, returns + * whatever {@code TupleDescriptor} it came from. + */ + TupleDescriptor containingTupleDescriptor(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/CatalogObject.java b/pljava-api/src/main/java/org/postgresql/pljava/model/CatalogObject.java new file mode 100644 index 000000000..bdcb65f00 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/CatalogObject.java @@ -0,0 +1,665 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.util.List; +import java.util.ServiceConfigurationError; +import java.util.ServiceLoader; + +import java.util.function.IntPredicate; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; + +/** + * Base interface representing some object in the PostgreSQL catalogs, + * identified by its {@link #oid() oid}. + *

    + * The {@link #oid() oid} by itself does not constitute an object address until + * combined with a {@code classId} identifying the catalog to which it belongs. + * This topmost interface, therefore, represents a catalog object when only + * the {@code oid} is known, and the {@code classId} is: unknown, or simply + * understood from context. An instance of this interface can be explicitly + * combined with a {@code classId}, using the {@link #of of(classId)} method, + * which will yield an instance of an interface that extends {@link Addressed} + * and is specific to catalog objects of that class. + *

    + * A {@code classId}, in turn, is simply an instance of + * {@link RegClass RegClass} (the catalog of relations, whose name "class" + * reflects PostgreSQL's object-relational origins). It identifies the specific + * relation in the PostgreSQL catalogs where objects with that {@code classId} + * can be looked up. + *

    + * Every user relation, of course, is also represented by a {@code RegClass} + * instance, but not one that can be used to form a catalog object address. + * For that matter, not every class in the PostgreSQL catalogs is modeled by + * a class in PL/Java. Therefore, not just any {@code RegClass} instance can be + * passed to {@link #of of(classId)} as a {@code classId}. Those that can be + * have the more-specific type {@code RegClass.Known}, which also identifies + * the Java model class T that will be returned. + */ +public interface CatalogObject +{ + /** + * The distinct integer value that {@link #oid oid()} will return when + * {@link #isValid isValid()} is false. + *

    + * PostgreSQL catalogs typically use this value (rather than a nullable + * column and a null value) in cases where an object may or may not be + * specified and has not been. + */ + int InvalidOid = 0; + + /** + * This catalog object's object ID; the integer value that identifies the + * object to PostgreSQL when the containing catalog is known. + */ + int oid(); + + /** + * Whether this catalog object has a valid {@code oid} + * (any value other than {@code InvalidOid}). + *

    + * This is not the same as whether any corresponding catalog object actually + * exists. This question can be answered directly from the value of + * {@code oid()}. The existence question (which can be asked sensibly only + * of an {@link Addressed Addressed} instance with its + * {@link Addressed#exists exists()} method} can be answered only through + * a lookup attempt for the {@code oid} in the corresponding catalog. + *

    + * There is not a unique singleton invalid catalog object instance. Rather, + * there can be distinct {@link Addressed Addressed} instances that have + * the invalid {@code oid} and distinct {@code classId}s, as well as one + * singleton {@code CatalogObject} that has the invalid {@code oid} and + * no valid {@code classId}. + *

    + * When applied to a {@link RegRole.Grantee RegRole.Grantee}, this method + * simply returns the negation of {@link RegRole.Grantee#isPublic isPublic}, + * which is the method that should be preferred for clarity in that case. + */ + boolean isValid(); + + /** + * Return a catalog object as an {@code Addressed} instance in a known + * class. + *

    + * For example, if a {@code CatalogObject o} is read from an {@code oid} + * column known to represent a namespace, {@code o.of(RegNamespace.CLASSID)} + * will return a {@code RegNamespace} instance. + *

    + * An instance whose class id is already the desired one will return itself. + * On an instance that lacks a valid class id, {@code of} can apply any + * desired class id (a different instance will be returned). The invalid + * instance of any class can be converted to the (distinct) invalid instance + * of any other class. On an instance that is valid and already has a valid + * class id, {@code of} will throw an exception if the desired class id + * differs. + * @param classId A known class id, often from the CLASSID field of a known + * CatalogObject subclass. + * @param Specific subtype of Addressed that represents catalog objects + * with the given class id. + * @return An instance with this instance's oid and the desired class id + * (this instance, if the class id matches). + */ + > T of(RegClass.Known classId); + + /** + * A catalog object that has both {@code oid} and {@code classId} specified, + * and can be looked up in the PostgreSQL catalogs (where it may, or may + * not, be found). + * @param Specific subtype of Addressed that represents catalog objects + * with the given class id. + */ + interface Addressed> extends CatalogObject + { + /** + * Returns the {@code classId} (which is an instance of + * {@link RegClass.Known RegClass.Known} of this addressed catalog + * object. + */ + RegClass.Known classId(); + + /** + * Whether a catalog object with this address in fact exists in + * the PostgreSQL catalogs. + *

    + * Unlike {@link #isValid isValid()}, which depends only on the value + * of {@code oid()}, this reflects the result of a catalog lookup. + */ + boolean exists(); + + /** + * Whether this catalog object is shared across all databases in the + * cluster. + *

    + * Contrast {@link RegClass#isShared() isShared()}, a method found only + * on {@code RegClass}, which indicates whether that {@code RegClass} + * instance represents a shared relation. Catalog objects formed with + * that {@code RegClass} instance as their {@code classId} will have + * {@code shared() == true}, though the {@code RegClass} instance itself + * will have {@code shared() == false} (because it models a row in + * {@code pg_class} itself, a catalog that isn't shared). + * @return classId().isShared() + */ + default boolean shared() + { + return classId().isShared(); + } + } + + /** + * Interface for an object that is regarded as a component of some, other, + * addressed catalog object, and is identified by that other object's + * {@code classId} and {@code oid} along with an integer {@code subId}. + *

    + * The chief (only?) example is an {@link Attribute Attribute}, which is + * identified by the {@code classId} and {@code oid} of its containing + * relation, plus a {@code subId}. + */ + interface Component + { + int subId(); + } + + /** + * Interface for any catalog object that has a name, which can be + * an {@link Identifier.Simple Identifier.Simple} or an + * {@link Identifier.Operator Identifier.Operator}. + */ + interface Named> + { + T name(); + } + + /** + * Interface for any catalog object that has a name and also a namespace + * or schema (an associated instance of {@link RegNamespace RegNamespace}). + */ + interface Namespaced> + extends Named + { + RegNamespace namespace(); + + default Identifier.Qualified qualifiedName() + { + return name().withQualifier(namespaceName()); + } + + default Identifier.Simple namespaceName() + { + return namespace().name(); + } + } + + /** + * Interface for any catalog object that has an owner (an associated + * instance of {@link RegRole RegRole}. + */ + interface Owned + { + RegRole owner(); + } + + /** + * Interface for any catalog object with an access control list + * (a list of some type of {@code Grant}). + * @param The subtype of {@link Grant Grant} that applies to catalog + * objects of this type. + */ + interface AccessControlled + { + /** + * Simple list of direct grants. + *

    + * For any T except {@code Grant.OnRole}, simply returns the list of + * grants directly found in this catalog object's ACL. When T is + * {@code Grant.OnRole}, this catalog object is a {@code RegRole}, and + * the result contains a {@code Grant.OnRole} for every role R that is + * directly a member of the role this catalog object represents; each + * such grant has {@code maySetRole()} by definition, and + * {@code mayExercisePrivileges()} if and only if R has {@code inherit}. + */ + List grants(); + + /** + * Computed list of (possibly transitive) grants to grantee. + *

    + * For any T except {@code Grant.OnRole}, a list of grants to + * grantee assembled from: direct grants in this object's ACL + * to {@code PUBLIC}, or to grantee, or to any role R for which + * {@code R.grants(grantee).mayExercisePrivileges()} is true. + *

    + * When T is {@code Grant.OnRole}, this catalog object is a + * {@code RegRole}, and the result contains a {@code Grant.OnRole} for + * which {@code maySetRole()} is true if a membership path from + * grantee to this role exists, and + * {@code mayExercisePrivileges()} is true if such a path exists using + * only roles with {@code inherit()} true. (The {@code inherit()} status + * of this object itself is not considered.) + */ + List grants(RegRole grantee); // transitive closure when on RegRole + // aclitem[] acl(); + // { Oid grantee; Oid grantor; AclMode bits; } see nodes/parsenodes.h + } + + /** + * Interface representing any single {@code Grant} (or ACL item), a grant + * of some set of possible privileges, to some role, granted by some role. + */ + interface Grant + { + /** + * Role to which the accompanying privileges are granted. + *

    + * There is no actual role named {@code public}, but there is + * a distinguished instance {@link RegRole.Grantee#PUBLIC PUBLIC} of + * {@link RegRole.Grantee RegRole.Grantee}. + */ + RegRole.Grantee to(); + + /** + * Role responsible for granting these privileges. + */ + RegRole by(); + + /** + * Subtype of {@code Grant} representing the privileges that may be + * granted on an attribute (or column). + */ + interface OnAttribute extends SELECT, INSERT, UPDATE, REFERENCES { } + + /** + * Subtype of {@code Grant} representing the privileges that may be + * granted on a class (or relation, table, view). + */ + interface OnClass + extends OnAttribute, DELETE, TRUNCATE, TRIGGER, MAINTAIN { } + + /** + * Subtype of {@code Grant} representing the privileges that may be + * granted on a database. + */ + interface OnDatabase extends CONNECT, CREATE, CREATE_TEMP { } + + /** + * Subtype of {@code Grant} representing the privileges that may be + * granted on a namespace (or schema). + */ + interface OnNamespace extends CREATE, USAGE { } + + /** + * Subtype of {@code Grant} representing the privileges that may be + * granted on a configuration setting. + */ + interface OnSetting extends SET, ALTER_SYSTEM { } + + /** + * Subtype of {@code Grant} representing the grants (of membership in, + * and/or privileges of, other roles) that may be made to a role. 
+ */ + interface OnRole extends Grant + { + boolean mayExercisePrivileges(); + boolean maySetRole(); + boolean mayAdmin(); + } + } + + /** + * @hidden + */ + interface INSERT extends Grant + { + boolean insertGranted(); + boolean insertGrantable(); + } + + /** + * @hidden + */ + interface SELECT extends Grant + { + boolean selectGranted(); + boolean selectGrantable(); + } + + /** + * @hidden + */ + interface UPDATE extends Grant + { + boolean updateGranted(); + boolean updateGrantable(); + } + + /** + * @hidden + */ + interface DELETE extends Grant + { + boolean deleteGranted(); + boolean deleteGrantable(); + } + + /** + * @hidden + */ + interface TRUNCATE extends Grant + { + boolean truncateGranted(); + boolean truncateGrantable(); + } + + /** + * @hidden + */ + interface REFERENCES extends Grant + { + boolean referencesGranted(); + boolean referencesGrantable(); + } + + /** + * @hidden + */ + interface TRIGGER extends Grant + { + boolean triggerGranted(); + boolean triggerGrantable(); + } + + /** + * @hidden + */ + interface EXECUTE extends Grant + { + boolean executeGranted(); + boolean executeGrantable(); + } + + /** + * @hidden + */ + interface USAGE extends Grant + { + boolean usageGranted(); + boolean usageGrantable(); + } + + /** + * @hidden + */ + interface CREATE extends Grant + { + boolean createGranted(); + boolean createGrantable(); + } + + /** + * @hidden + */ + interface CREATE_TEMP extends Grant + { + boolean create_tempGranted(); + boolean create_tempGrantable(); + } + + /** + * @hidden + */ + interface CONNECT extends Grant + { + boolean connectGranted(); + boolean connectGrantable(); + } + + /** + * @hidden + */ + interface SET extends Grant + { + boolean setGranted(); + boolean setGrantable(); + } + + /** + * @hidden + */ + interface ALTER_SYSTEM extends Grant + { + boolean alterSystemGranted(); + boolean alterSystemGrantable(); + } + + /** + * @hidden + */ + interface MAINTAIN extends Grant + { + boolean maintainGranted(); + boolean 
maintainGrantable(); + } + + /** + * @hidden + */ + abstract class Factory + { + static final Factory INSTANCE; + + static + { + INSTANCE = ServiceLoader + .load(Factory.class.getModule().getLayer(), Factory.class) + .findFirst().orElseThrow(() -> new ServiceConfigurationError( + "could not load PL/Java CatalogObject.Factory")); + } + + static > + RegClass.Known formClassId(int classId, Class clazz) + { + return INSTANCE.formClassIdImpl(classId, clazz); + } + + static > + T formObjectId(RegClass.Known classId, int objId) + { + return INSTANCE.formObjectIdImpl(classId, objId, v -> true); + } + + static > + T formObjectId( + RegClass.Known classId, int objId, IntPredicate versionTest) + { + return INSTANCE.formObjectIdImpl(classId, objId, versionTest); + } + + static Database currentDatabase(RegClass.Known classId) + { + return INSTANCE.currentDatabaseImpl(classId); + } + + static RegRole.Grantee publicGrantee() + { + return INSTANCE.publicGranteeImpl(); + } + + protected abstract > + RegClass.Known formClassIdImpl( + int classId, Class clazz); + + protected abstract > + T formObjectIdImpl( + RegClass.Known classId, int objId, IntPredicate versionTest); + + protected abstract Database + currentDatabaseImpl(RegClass.Known classId); + + protected abstract RegRole.Grantee publicGranteeImpl(); + + protected abstract CharsetEncoding serverEncoding(); + protected abstract CharsetEncoding clientEncoding(); + protected abstract CharsetEncoding encodingFromOrdinal(int ordinal); + protected abstract CharsetEncoding encodingFromName(String name); + protected abstract long fetchAll(); + + /* + * These magic numbers are hardcoded here inside the pljava-api project + * so they can be used in static initializers in API interfaces. 
The + * verification that they are the right magic numbers takes place in + * compilation of the pljava and pljava-so projects, where they are + * included from here, exported in JNI .h files, and compared using + * StaticAssertStmt to the corresponding values from PostgreSQL headers. + * + * Within groups here, numerical order is as good as any. When adding a + * constant here, add a corresponding CONFIRMCONST in ModelConstants.c. + */ + protected static final int TableSpaceRelationId = 1213; + protected static final int TypeRelationId = 1247; + protected static final int AttributeRelationId = 1249; + protected static final int ProcedureRelationId = 1255; + protected static final int RelationRelationId = 1259; + protected static final int AuthIdRelationId = 1260; + protected static final int DatabaseRelationId = 1262; + protected static final int ForeignServerRelationId = 1417; + protected static final int ForeignDataWrapperRelationId = 2328; + protected static final int AccessMethodRelationId = 2601; + protected static final int ConstraintRelationId = 2606; + protected static final int LanguageRelationId = 2612; + protected static final int NamespaceRelationId = 2615; + protected static final int OperatorRelationId = 2617; + protected static final int TriggerRelationId = 2620; + protected static final int ExtensionRelationId = 3079; + protected static final int CollationRelationId = 3456; + protected static final int TransformRelationId = 3576; + protected static final int TSDictionaryRelationId = 3600; + protected static final int TSConfigRelationId = 3602; + + /* + * PG types good to have around because of corresponding JDBC types. 
+ */ + protected static final int BOOLOID = 16; + protected static final int BYTEAOID = 17; + protected static final int CHAROID = 18; + protected static final int INT8OID = 20; + protected static final int INT2OID = 21; + protected static final int INT4OID = 23; + protected static final int XMLOID = 142; + protected static final int FLOAT4OID = 700; + protected static final int FLOAT8OID = 701; + protected static final int BPCHAROID = 1042; + protected static final int VARCHAROID = 1043; + protected static final int DATEOID = 1082; + protected static final int TIMEOID = 1083; + protected static final int TIMESTAMPOID = 1114; + protected static final int TIMESTAMPTZOID = 1184; + protected static final int TIMETZOID = 1266; + protected static final int BITOID = 1560; + protected static final int VARBITOID = 1562; + protected static final int NUMERICOID = 1700; + + /* + * PG types not mentioned in JDBC but bread-and-butter to PG devs. + */ + protected static final int TEXTOID = 25; + protected static final int UNKNOWNOID = 705; + protected static final int RECORDOID = 2249; + protected static final int CSTRINGOID = 2275; + protected static final int VOIDOID = 2278; + protected static final int TRIGGEROID = 2279; + + /* + * Of the several polymorphic types, API features this one because it + * can also be the resolved actual type of some system catalog columns. + */ + protected static final int ANYARRAYOID = 2277; + + /* + * PG types used in modeling PG types themselves. 
+ */ + protected static final int NAMEOID = 19; + protected static final int REGPROCOID = 24; + protected static final int OIDOID = 26; + protected static final int PG_NODE_TREEOID = 194; + protected static final int ACLITEMOID = 1033; + protected static final int REGPROCEDUREOID = 2202; + protected static final int REGOPEROID = 2203; + protected static final int REGOPERATOROID = 2204; + protected static final int REGCLASSOID = 2205; + protected static final int REGTYPEOID = 2206; + protected static final int REGCONFIGOID = 3734; + protected static final int REGDICTIONARYOID = 3769; + protected static final int REGNAMESPACEOID = 4089; + protected static final int REGROLEOID = 4096; + protected static final int REGCOLLATIONOID = 4191; + + /* + * The well-known, pinned procedural languages. + */ + protected static final int INTERNALlanguageId = 12; + protected static final int ClanguageId = 13; + protected static final int SQLlanguageId = 14; + + /* + * The well-known, pinned namespaces. + */ + protected static final int PG_CATALOG_NAMESPACE = 11; + protected static final int PG_TOAST_NAMESPACE = 99; + + /* + * The well-known, pinned collations. + */ + protected static final int DEFAULT_COLLATION_OID = 100; + protected static final int C_COLLATION_OID = 950; + + /* + * These magic numbers are assigned here to allow the various well-known + * PostgreSQL ResourceOwners to be retrieved without a proliferation of + * methods on the factory interface. These are arbitrary array indices, + * visible also to JNI code through the generated headers just as + * described above. The native initialization method may create, + * for example, an array of ByteBuffers that window the corresponding + * PostgreSQL globals, ordered according to these indices. The Java code + * implementing resourceOwner() can be ignorant of these specific values + * and simply use them to index the array. HOWEVER, it does know that + * the first one, index 0, refers to the current resource owner. 
+ */ + protected static final int RSO_Current = 0; // must be index 0 + protected static final int RSO_CurTransaction = 1; + protected static final int RSO_TopTransaction = 2; + protected static final int RSO_AuxProcess = 3; + + protected abstract ResourceOwner resourceOwner(int which); + + /* + * Same as above but for the well-known PostgreSQL MemoryContexts. + * Again, the implementing code knows index 0 is for the current one. + */ + protected static final int MCX_CurrentMemory = 0; // must be index 0 + protected static final int MCX_TopMemory = 1; + protected static final int MCX_Error = 2; + protected static final int MCX_Postmaster = 3; + protected static final int MCX_CacheMemory = 4; + protected static final int MCX_Message = 5; + protected static final int MCX_TopTransaction = 6; + protected static final int MCX_CurTransaction = 7; + protected static final int MCX_Portal = 8; + /* + * A long-lived, never-reset context created by PL/Java as a child of + * TopMemoryContext. + */ + protected static final int MCX_JavaMemory = 9; + + protected abstract MemoryContext memoryContext(int which); + + protected abstract MemoryContext upperMemoryContext(); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/CharsetEncoding.java b/pljava-api/src/main/java/org/postgresql/pljava/model/CharsetEncoding.java new file mode 100644 index 000000000..6e5543893 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/CharsetEncoding.java @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; + +import java.nio.charset.Charset; +import java.nio.charset.CharsetDecoder; +import java.nio.charset.CharsetEncoder; + +import java.nio.charset.CharacterCodingException; + +import java.sql.SQLException; + +import static org.postgresql.pljava.model.CatalogObject.Factory; + +import org.postgresql.pljava.adt.spi.Datum; + +/** + * Represents one of PostgreSQL's available character set encodings. + *

    + * Not all of the encodings that PostgreSQL supports for communication with + * the client are also supported for use in the backend and in storage. + * The {@link #usableOnServer usableOnServer} method identifies which ones + * are suitable as server encodings. + *

    + * The encoding that is in use for the current database cannot change during + * a session, and is found in the final {@link #SERVER_ENCODING SERVER_ENCODING} + * field. + *

    + * The encoding currently in use by the connected client may change during + * a session, and is returned by the {@link #clientEncoding clientEncoding} + * method. + *

    + * The {@link #charset charset} method returns the corresponding Java + * {@link Charset Charset} if that can be identified, and several convenience + * methods are provided to decode or encode values accordingly. + */ +public interface CharsetEncoding +{ + CharsetEncoding SERVER_ENCODING = Factory.INSTANCE.serverEncoding(); + + /** + * A distinguished {@code CharsetEncoding} representing uses such as + * {@code -1} in the {@code collencoding} column of {@code pg_collation}, + * indicating the collation is usable with any encoding. + *

    + * This is its only instance. + */ + Any ANY = new Any(); + + /** + * Returns the encoding currently selected by the connected client. + */ + static CharsetEncoding clientEncoding() + { + return Factory.INSTANCE.clientEncoding(); + } + + /** + * Returns the {@code CharsetEncoding} for the given PostgreSQL encoding + * number (as used in the {@code encoding} columns of some system catalogs). + * @throws IllegalArgumentException if the argument is not the ordinal of + * some known encoding + */ + static CharsetEncoding fromOrdinal(int ordinal) + { + return Factory.INSTANCE.encodingFromOrdinal(ordinal); + } + + /** + * Returns the {@code CharsetEncoding} for the given PostgreSQL encoding + * name. + * @throws IllegalArgumentException if the argument is not the name of + * some known encoding + */ + static CharsetEncoding fromName(String name) + { + return Factory.INSTANCE.encodingFromName(name); + } + + /** + * Returns the PostgreSQL encoding number (as used in the {@code encoding} + * columns of some system catalogs) for this encoding. + */ + int ordinal(); + + /** + * Returns the PostgreSQL name for this encoding. + *

    + * The PostgreSQL encoding names have a long history and may not match + * cleanly with more standardized names in modern libraries. + */ + String name(); + + /** + * Returns the name identifying this encoding in ICU (international + * components for Unicode), or null if its implementation in PostgreSQL + * does not define one. + *

    + * When present, the ICU name can be a better choice for matching encodings + * in other libraries. + */ + String icuName(); + + /** + * Indicates whether this encoding is usable as a server encoding. + */ + boolean usableOnServer(); + + /** + * Returns the corresponding Java {@link Charset Charset}, or null if none + * can be identified. + */ + Charset charset(); + + /** + * Returns a {@link CharsetDecoder CharsetDecoder}, configured to report + * all decoding errors (rather than silently substituting data), if + * {@link #charset charset()} would return a non-null value. + */ + default CharsetDecoder newDecoder() + { + return charset().newDecoder(); + } + + /** + * Returns a {@link CharsetEncoder CharsetEncoder}, configured to report + * all encoding errors (rather than silently substituting data), if + * {@link #charset charset()} would return a non-null value. + */ + default CharsetEncoder newEncoder() + { + return charset().newEncoder(); + } + + /** + * Decode bytes to characters, with exceptions reported. + *

    + * Unlike the corresponding convenience method on {@link Charset Charset}, + * this method will throw exceptions rather than silently substituting + * characters. This is a database system; it doesn't go changing your data + * without telling you. + *

    + * Other behaviors can be obtained by calling {@link #newDecoder newDecoder} + * and configuring it as desired. + */ + default CharBuffer decode(ByteBuffer bb) throws CharacterCodingException + { + return newDecoder().decode(bb); + } + + /** + * Encode characters to bytes, with exceptions reported. + *

    + * Unlike the corresponding convenience method on {@link Charset Charset}, + * this method will throw exceptions rather than silently substituting + * characters. This is a database system; it doesn't go changing your data + * without telling you. + *

    + * Other behaviors can be obtained by calling {@link #newEncoder newEncoder} + * and configuring it as desired. + */ + default ByteBuffer encode(CharBuffer cb) throws CharacterCodingException + { + return newEncoder().encode(cb); + } + + /** + * Encode characters to bytes, with exceptions reported. + *

    + * Unlike the corresponding convenience method on {@link Charset Charset}, + * this method will throw exceptions rather than silently substituting + * characters. This is a database system; it doesn't go changing your data + * without telling you. + *

    + * Other behaviors can be obtained by calling {@link #newEncoder newEncoder} + * and configuring it as desired. + */ + default ByteBuffer encode(String s) throws CharacterCodingException + { + return encode(CharBuffer.wrap(s)); + } + + /** + * Decode bytes to characters, with exceptions reported. + *

    + * The input {@link Datum Datum} is pinned around the decoding operation. + */ + default CharBuffer decode(Datum.Input in, boolean close) + throws SQLException, IOException + { + in.pin(); + try + { + return decode(in.buffer()); + } + finally + { + in.unpin(); + if ( close ) + in.close(); + } + } + + /** + * Return an {@link InputStreamReader InputStreamReader} that reports + * exceptions. + *

    + * Other behaviors can be obtained by calling {@link #newDecoder newDecoder} + * and configuring it as desired before constructing an + * {@code InputStreamReader}. + */ + default InputStreamReader reader(InputStream in) + { + return new InputStreamReader(in, newDecoder()); + } + + /** + * Return an {@link OutputStreamWriter OutputStreamWriter} that reports + * exceptions. + *

    + * Other behaviors can be obtained by calling {@link #newEncoder newEncoder} + * and configuring it as desired before constructing an + * {@code OutputStreamWriter}. + */ + default OutputStreamWriter writer(OutputStream out) + { + return new OutputStreamWriter(out, newEncoder()); + } + + /** + * A distinguished {@code CharsetEncoding} representing uses such as + * {@code -1} in the {@code collencoding} column of {@code pg_collation}, + * indicating the collation is usable with any encoding. + *

    + * This returns -1 from {@code ordinal()} and {@code null} or {@code false} + * from the other non-default methods according to their types. The only + * instance of this class is {@code CharsetEncoding.ANY}. + */ + class Any implements CharsetEncoding + { + private Any() + { + } + + @Override + public int ordinal() + { + return -1; + } + + @Override + public String name() + { + return null; + } + + @Override + public String icuName() + { + return null; + } + + @Override + public boolean usableOnServer() + { + return false; + } + + @Override + public Charset charset() + { + return null; + } + + @Override + public String toString() + { + return "CharsetEncoding.ANY"; + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Constraint.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Constraint.java new file mode 100644 index 000000000..20705e1d9 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Constraint.java @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +import org.postgresql.pljava.TargetList.Projection; + +import java.sql.SQLXML; +import java.util.List; + +/** + * Model of the PostgreSQL {@code pg_constraint} system catalog. 
+ */ +public interface Constraint +extends Addressed, Namespaced +{ + RegClass.Known CLASSID = + formClassId(ConstraintRelationId, Constraint.class); + + enum Type + { + CHECK, FOREIGN_KEY, NOT_NULL, PRIMARY_KEY, UNIQUE, CONSTRAINT_TRIGGER, + EXCLUSION + } + + enum ReferentialAction { NONE, RESTRICT, CASCADE, SET_NULL, SET_DEFAULT } + + enum MatchType { FULL, PARTIAL, SIMPLE } + + Type type(); + boolean deferrable(); + boolean deferred(); + boolean validated(); + RegClass onTable(); + RegType onDomain(); + RegClass index(); + Constraint parent(); + RegClass referencedTable(); + /** + * The action specified for update of a referenced column; null if not + * a foreign key constraint. + */ + ReferentialAction updateAction(); + /** + * The action specified for delete of a referenced row; null if not + * a foreign key constraint. + */ + ReferentialAction deleteAction(); + /** + * How foreign-key columns are to be matched; null if not + * a foreign key constraint. + */ + MatchType matchType(); + boolean isLocal(); + short inheritCount(); + boolean noInherit(); + Projection key(); + Projection fkey(); + List pfEqOp(); + List ppEqOp(); + List ffEqOp(); + /** + * Which columns are to be set null in a referential action; all referencing + * columns if this value is null. + *

    + * Returns null always on PostgreSQL versions earlier than 15. + */ + Projection fdelSetColumns(); + List exclOp(); + SQLXML bin(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Database.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Database.java new file mode 100644 index 000000000..8e11e4d42 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Database.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a database defined within the PostgreSQL cluster. + */ +public interface Database +extends + Addressed, Named, Owned, + AccessControlled +{ + RegClass.Known CLASSID = + formClassId(DatabaseRelationId, Database.class); + + Database CURRENT = currentDatabase(CLASSID); + + CharsetEncoding encoding(); + + /** + * A string identifying the collation rules for use in this database (when + * not overridden for a specific column or expression). + *

    + * At least through PostgreSQL 14, this is always the identifier of an + * operating system ("libc") collation, even in builds with ICU available. + */ + String collate(); + + /** + * A string identifying the collation rules for use in this database (when + * not overridden for a specific column or expression). + *

    + * At least through PostgreSQL 14, this is always the identifier of an + * operating system ("libc") collation, even in builds with ICU available. + */ + String ctype(); + + boolean template(); + boolean allowConnection(); + int connectionLimit(); + // oid lastsysoid + // xid frozenxid + // xid minmxid + Tablespace tablespace(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Extension.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Extension.java new file mode 100644 index 000000000..0bf782037 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Extension.java @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.util.List; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a PostgreSQL extension that has been installed for the current + * database. + */ +public interface Extension +extends Addressed, Named, Owned +{ + RegClass.Known CLASSID = + formClassId(ExtensionRelationId, Extension.class); + + /** + * Namespace in which most (or all, for a relocatable extension) of the + * namespace-qualified objects belonging to the extension are installed. + *

    + * Not a namespace qualifying the extension's name; extensions are not + * namespace-qualified. + */ + RegNamespace namespace(); + boolean relocatable(); + String version(); + List config(); + List condition(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/ForeignDataWrapper.java b/pljava-api/src/main/java/org/postgresql/pljava/model/ForeignDataWrapper.java new file mode 100644 index 000000000..0d5c984e2 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/ForeignDataWrapper.java @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.util.Map; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; +import org.postgresql.pljava.model.RegProcedure.Memo.Why; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a foreign data wrapper that can provide the implementation for + * one or more {@link ForeignServer} declarations. 
+ */ +public interface ForeignDataWrapper +extends + Addressed, Named, + Owned, AccessControlled +{ + RegClass.Known CLASSID = + formClassId(ForeignDataWrapperRelationId, ForeignDataWrapper.class); + + interface FDWHandler extends Why { } + interface FDWValidator extends Why { } + + RegProcedure handler(); + + RegProcedure validator(); + + Map options(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/ForeignServer.java b/pljava-api/src/main/java/org/postgresql/pljava/model/ForeignServer.java new file mode 100644 index 000000000..4bf8d45ef --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/ForeignServer.java @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.util.Map; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a foreign server, with which foreign tables can be declared. 
+ */ +public interface ForeignServer +extends + Addressed, Named, + Owned, AccessControlled +{ + RegClass.Known CLASSID = + formClassId(ForeignServerRelationId, ForeignServer.class); + + ForeignDataWrapper fdw(); + + String type(); + + String version(); + + Map options(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/MemoryContext.java b/pljava-api/src/main/java/org/postgresql/pljava/model/MemoryContext.java new file mode 100644 index 000000000..917012e4c --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/MemoryContext.java @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.Lifespan; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +/** + * A PostgreSQL {@code MemoryContext}, which is usable as a PL/Java + * {@link Lifespan Lifespan} to scope the lifetimes of PL/Java objects + * (as when they depend on native memory allocated in the underlying context). + *

    + * The {@code MemoryContext} API in PostgreSQL is described here. + *

    + * Static getters for the globally known contexts are spelled and capitalized + * as they are in PostgreSQL. + */ +public interface MemoryContext extends Lifespan +{ + /** + * The top level of the context tree, of which every other context is + * a descendant. + *

    + * Used as described here. + */ + MemoryContext TopMemoryContext = + INSTANCE.memoryContext(MCX_TopMemory); + + /** + * The "current" memory context, which supplies all allocations made by + * PostgreSQL {@code palloc} and related functions that do not explicitly + * specify a context. + *

    + * Used as described here. + */ + static MemoryContext CurrentMemoryContext() + { + return INSTANCE.memoryContext(MCX_CurrentMemory); + } + + /** + * Getter method equivalent to the final + * {@link #TopMemoryContext TopMemoryContext} field, for consistency with + * the other static getters. + */ + static MemoryContext TopMemoryContext() + { + return TopMemoryContext; + } + + /** + * Holds everything that lives until end of the top-level transaction. + *

    + * Can be appropriate when a specification, for example JDBC, provides that + * an object should remain valid for the life of the transaction. + *

    + * Uses are described here. + */ + static MemoryContext TopTransactionContext() + { + return INSTANCE.memoryContext(MCX_TopTransaction); + } + + /** + * The same as {@link #TopTransactionContext() TopTransactionContext} when + * in a top-level transaction, but different in subtransactions (such as + * those associated with PL/Java savepoints). + *

    + * Used as described here. + */ + static MemoryContext CurTransactionContext() + { + return INSTANCE.memoryContext(MCX_CurTransaction); + } + + /** + * Context of the currently active execution portal. + *

    + * Used as described here. + */ + static MemoryContext PortalContext() + { + return INSTANCE.memoryContext(MCX_Portal); + } + + /** + * A permanent context switched into for error recovery processing. + *

    + * Used as described here. + */ + static MemoryContext ErrorContext() + { + return INSTANCE.memoryContext(MCX_Error); + } + + /** + * A long-lived, never-reset context created by PL/Java as a child of + * {@code TopMemoryContext}. + *

    + * Perhaps useful for PL/Java-related allocations that will be long-lived, + * or managed only from the Java side, as a way of accounting for them + * separately, as opposed to just putting them in {@code TopMemoryContext}. + * It hasn't been used consistently even in the historical PL/Java + * code base, and should perhaps be a candidate for deprecation (or for + * a thorough code review to establish firmer guidelines for its use). + */ + static MemoryContext JavaMemoryContext() + { + return INSTANCE.memoryContext(MCX_JavaMemory); + } + + /** + * The "upper executor" memory context (that is, the context on entry, prior + * to any use of SPI) associated with the current (innermost) PL/Java + * function invocation. + *

    + * This is "precisely the right context for a value returned" from a + * function that uses SPI, as described + * here. + */ + static MemoryContext UpperMemoryContext() + { + return INSTANCE.upperMemoryContext(); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Portal.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Portal.java new file mode 100644 index 000000000..42babf51f --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Portal.java @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.SQLException; + +import java.util.List; + +/** + * Models a PostgreSQL {@code Portal}, an object representing the ongoing + * execution of a query and capable of returning a {@link TupleDescriptor} for + * the result, and fetching tuples of the result, either all at once, or in + * smaller batches. + */ +public interface Portal extends AutoCloseable +{ + /** + * The direction modes that can be used with {@link #fetch fetch} + * and {@link #move move}. + */ + enum Direction { FORWARD, BACKWARD, ABSOLUTE, RELATIVE } + + /** + * A distinguished value for the count argument to + * {@link #fetch fetch} or {@link #move move}. + */ + long ALL = CatalogObject.Factory.INSTANCE.fetchAll(); + + @Override + void close(); // AutoCloseable without checked exceptions + + /** + * Returns the {@link TupleDescriptor} describing any tuples that may be + * fetched from this {@code Portal}. 
+ */ + TupleDescriptor tupleDescriptor() throws SQLException; + + /** + * Fetches count more tuples (or {@link #ALL ALL} of them) in the + * specified direction. + * @return a notional List of the fetched tuples. Iterating through the list + * may return the same TupleTableSlot repeatedly, with each tuple in turn + * stored in the slot. + * @see "PostgreSQL documentation for SPI_scroll_cursor_fetch" + */ + List fetch(Direction dir, long count) + throws SQLException; + + /** + * Moves the {@code Portal}'s current position count rows (or + * {@link #ALL ALL} possible) in the specified direction. + * @return the number of rows by which the position actually moved + * @see "PostgreSQL documentation for SPI_scroll_cursor_move" + */ + long move(Direction dir, long count) + throws SQLException; +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/ProceduralLanguage.java b/pljava-api/src/main/java/org/postgresql/pljava/model/ProceduralLanguage.java new file mode 100644 index 000000000..9c3a29978 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/ProceduralLanguage.java @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.model.RegProcedure.Call; +import org.postgresql.pljava.model.RegProcedure.Lookup; +import org.postgresql.pljava.model.RegProcedure.Memo.How; +import org.postgresql.pljava.model.RegProcedure.Memo.Why; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; // javadoc +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +import org.postgresql.pljava.PLJavaBasedLanguage; // javadoc +import org.postgresql.pljava.PLPrincipal; + +import org.postgresql.pljava.annotation.Function.Trust; + +import java.util.BitSet; +import java.util.List; + +/** + * Model of a PostgreSQL procedural language, including (for non-built-in + * languages, like PL/Java) the handler functions used in its implementation. + */ +public interface ProceduralLanguage +extends + Addressed, Named, Owned, AccessControlled +{ + RegClass.Known CLASSID = + formClassId(LanguageRelationId, ProceduralLanguage.class); + + /** + * The well-known language "internal", for routines implemented within + * PostgreSQL itself. + */ + ProceduralLanguage INTERNAL = formObjectId(CLASSID, INTERNALlanguageId); + + /** + * The well-known language "c", for extension routines implemented using + * PostgreSQL's C language conventions. + */ + ProceduralLanguage C = formObjectId(CLASSID, ClanguageId); + + /** + * The well-known language "sql", for routines in that PostgreSQL + * built-in language. 
+ */ + ProceduralLanguage SQL = formObjectId(CLASSID, SQLlanguageId); + + interface Handler extends Why { } + interface InlineHandler extends Why { } + interface Validator extends Why { } + + /** + * A {@link How How} memo attached to a {@link RegProcedure} that represents + * a PL/Java-based routine, retaining additional information useful to + * a PL/Java-based language implementation. + *

    + * A valid memo of this type will be maintained by PL/Java's dispatcher + * on {@code RegProcedure} instances that represent PL/Java-based routines. + * When passing such a {@code RegProcedure} to a language-handler method, + * the dispatcher also passes the memo. + */ + interface PLJavaBased extends How + { + /** + * A {@link TupleDescriptor} describing the expected parameters, based + * only on the routine declaration. + *

    + * The {@code TupleDescriptor} returned here depends only on the static + * catalog information for the {@link RegProcedure} carrying this memo. + * A language handler can use it to generate template code that can be + * cached with the target {@code RegProcedure}, independently of any one + * call site. + *

    + * {@link Identifier.None} may be encountered among the member names; + * parameters do not have to be named. + *

    + * Some reported types may have + * {@link RegType#needsResolution needsResolution} true, and require + * resolution to specific types using the expression context at + * a given call site. + *

    + * For a routine declared variadic, if the declared type of the variadic + * parameter is the wildcard {@code "any"} type, + * {@link Call#arguments arguments()}{@code .size()} at a call site can + * differ from {@code inputsTemplate().size()}, the variadic arguments + * delivered in "spread" form as distinct (and individually typed) + * arguments. Variadic arguments of any other declared type are always + * delivered in "collected" form as a PostgreSQL array of that type. + * A variadic {@code "any"} routine can also receive its arguments + * collected, when it has been called that way; therefore, there is an + * ambiguity when such a routine is called with a single array argument + * in the variadic position. A language handler must call + * {@link Lookup#inputsAreSpread Lookup.inputsAreSpread()} to determine + * the caller's intent in that case. + * @see #unresolvedInputs() + */ + TupleDescriptor inputsTemplate(); + + /** + * A {@code BitSet} indicating (by zero-based index into + * {@link #inputsTemplate inputsTemplate}) which of the input + * parameter types need resolution against actual supplied argument + * types at a call site. + *

    + * If the set is empty, such per-call-site resolution can be skipped. + * @return a cloned {@code BitSet} + */ + BitSet unresolvedInputs(); + + /** + * A {@link TupleDescriptor} describing the expected result, based + * only on the routine declaration. + *

    + * The {@code TupleDescriptor} returned here depends only on the static + * catalog information for the {@link RegProcedure} carrying this memo. + * A language handler can use it to generate template code that can be + * cached with the target {@code RegProcedure}, independently of any one + * call site. + *

    + * For a function whose return type (in SQL) is not composite (or + * a function with only one output parameter, which PostgreSQL treats + * the same way), this method returns a synthetic ephemeral descriptor + * with one attribute of the declared return type. This convention + * allows {@link TupleTableSlot} to be the uniform API for the data type + * conversions to and from PostgreSQL, regardless of how a routine + * is declared. + *

    + * This method returns null in two cases: if the target returns + * {@link RegType#VOID VOID} and no descriptor is needed, or if the + * target is a function whose call sites must supply a column definition + * list, so there is no template descriptor that can be cached with + * the routine proper. A descriptor can only be obtained later from + * {@link RegProcedure.Lookup#outputsDescriptor outputsDescriptor()} + * when a call site is at hand. + *

    + * Some reported types may have + * {@link RegType#needsResolution needsResolution} true, and require + * resolution to specific types using the expression context at + * a given call site. + *

    + * {@link Identifier.None} will be the name of the single attribute in + * the synthetic descriptor wrapping a scalar. Because PL/Java's + * function dispatcher will undo the wrapping to return a scalar + * to PostgreSQL, the name matters not. + * @see #unresolvedOutputs() + * @return a {@code TupleDescriptor}, null if the target returns + * {@code VOID}, or is a function and can only be called with + * a column definition list supplied at the call site. + */ + TupleDescriptor outputsTemplate(); + + /** + * A {@code BitSet} indicating (by zero-based index into + * {@link #outputsTemplate outputsTemplate}) which + * result types need resolution against actual supplied argument types + * at each call site. + *

    + * If the set is empty, such per-call-site resolution can be skipped. + * @return a cloned {@code BitSet}. In the two circumstances where + *{@link #outputsTemplate outputsTemplate} returns null, this method + * returns either null or an empty {@code BitSet}. It is null for the + * unspecified-record-returning case, where a column definition list + * must be consulted at each call site; it is an empty set for the + * {@code VOID}-returning case where no further resolution is needed + * (just as an empty {@code BitSet} here would normally indicate). + */ + BitSet unresolvedOutputs(); + + /** + * A list of {@link Transform} instances (null if none) indicating + * transforms to be applied on data types supplied to or supplied by + * this routine. + *

    + * When this method returns a non-null result, each {@code Transform} + * in the list has already been checked by the language implementation's + * {@link PLJavaBasedLanguage.UsingTransforms#essentialTransformChecks + * essentialTransformChecks} method. Any exceptions those checks might + * throw should have been thrown when the dispatcher invoked this method + * before dispatching to the language handler, so a language handler + * using this method need not normally expect to handle them. + */ + List transforms(); + } + + default Trust trust() + { + return principal().trust(); + } + + PLPrincipal principal(); + RegProcedure handler(); + RegProcedure inlineHandler(); + RegProcedure validator(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegClass.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegClass.java new file mode 100644 index 000000000..f5681b000 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegClass.java @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.SQLXML; + +import java.util.Map; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of PostgreSQL relations/"classes"/tables. + *

    + * Instances of {@code RegClass} also serve as the "class ID" values for + * objects within the catalog (including for {@code RegClass} objects, which + * are no different from others in being defined by rows that appear in a + * catalog table; there is a row in {@code pg_class} for {@code pg_class}). + */ +public interface RegClass +extends + Addressed, Namespaced, Owned, + AccessControlled +{ + Known CLASSID = formClassId(RelationRelationId, RegClass.class); + + /** + * A more-specifically-typed subinterface of {@code RegClass}, used in the + * {@code CLASSID} static fields of interfaces in this package. + * @param identifies the specific CatalogObject.Addressed subinterface + * to result when this is applied as the {@code classId} to a bare + * {@code CatalogObject}. + */ + interface Known> extends RegClass + { + /** + * Returns the invalid CatalogObject with this class ID. + */ + default T invalid() + { + return formObjectId(this, InvalidOid); + } + } + + enum Persistence { PERMANENT, UNLOGGED, TEMPORARY } + + enum Kind + { + TABLE, INDEX, SEQUENCE, TOAST, VIEW, MATERIALIZED_VIEW, COMPOSITE_TYPE, + FOREIGN_TABLE, PARTITIONED_TABLE, PARTITIONED_INDEX + } + + enum ReplicaIdentity { DEFAULT, NOTHING, ALL, INDEX } + + /** + * The PostgreSQL type that is associated with this relation as its + * "row type". + *

    + * This is the type that will be found in a + * {@link TupleDescriptor TupleDescriptor} for this relation. + */ + RegType type(); + + /** + * Only for a relation that was created with {@code CREATE TABLE ... OF} + * type, this will be that type; the invalid {@code RegType} + * otherwise. + *

    + * Even though the tuple structure will match, this is not the same type + * returned by {@link #type() type()}; that will still be a type distinctly + * associated with this relation. + */ + RegType ofType(); + + AccessMethod accessMethod(); + + /* Of limited interest ... used in forming pathname of relation on disk, + * but in very fiddly ways and dependent on the access method. + * + int filenode(); + */ + + Tablespace tablespace(); + + /* Of limited interest ... estimates used by planner + * + int pages(); + float tuples(); + int allVisible(); + */ + + RegClass toastRelation(); + boolean hasIndex(); + + /** + * Whether this relation is shared across all databases in the cluster. + *

    + * Contrast {@link shared()}, which indicates, for any catalog object, + * whether that object is shared across the cluster. For any + * {@code RegClass} instance, {@code shared()} will be false (the + * {@code pg_class} catalog is not shared), but if the instance represents + * a shared class, {@code isShared()} will be true (and {@code shared()} + * will be true for any catalog object formed with that instance as its + * {@code classId}). + * @return whether the relation represented by this RegClass instance is + * shared across all databases in the cluster. + */ + boolean isShared(); + Persistence persistence(); + Kind kind(); + short nAttributes(); + short checks(); + boolean hasRules(); + boolean hasTriggers(); + boolean hasSubclass(); + boolean rowSecurity(); + boolean forceRowSecurity(); + boolean isPopulated(); + ReplicaIdentity replicaIdentity(); + boolean isPartition(); + // rewrite + // frozenxid + // minmxid + Map options(); + SQLXML partitionBound(); + + /** + * The {@link ForeignServer} if this is a foreign table, otherwise null. + */ + ForeignServer foreignServer(); + + /** + * Table options understood by the {@link #foreignServer foreign server} + * if this is a foreign table, otherwise null. + */ + Map foreignOptions(); + + TupleDescriptor.Interned tupleDescriptor(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegCollation.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegCollation.java new file mode 100644 index 000000000..b8658d84e --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegCollation.java @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a registered PostgreSQL collation, consisting of a provider and + * version, {@code collate} and {@code ctype} strings meaningful to that + * provider, and a {@code CharsetEncoding} (or {@code ANY} if the collation + * is usable with any encoding). + */ +public interface RegCollation +extends Addressed, Namespaced, Owned +{ + RegClass.Known CLASSID = + formClassId(CollationRelationId, RegCollation.class); + + RegCollation DEFAULT = formObjectId(CLASSID, DEFAULT_COLLATION_OID); + RegCollation C = formObjectId(CLASSID, C_COLLATION_OID); + + /* + * Static lc_messages/lc_monetary/lc_numeric/lc_time getters? They are not + * components of RegCollation, but simply GUCs. They don't have PGDLLIMPORT, + * so on Windows they'd have to be retrieved through the GUC machinery + * by name. At least they're strings anyway. + */ + + enum Provider { DEFAULT, LIBC, ICU } + + CharsetEncoding encoding(); + String collate(); + String ctype(); + + /** + * @since PG 10 + */ + Provider provider(); + + /** + * @since PG 10 + */ + String version(); + + /** + * @since PG 12 + */ + boolean deterministic(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegConfig.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegConfig.java new file mode 100644 index 000000000..ee923ff89 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegConfig.java @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. 
+ * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * A PostgreSQL text search configuration. + *

    + * This interface is included in the model per the (arguably arbitrary) goal of + * covering all the catalog classes for which a {@code Reg...} type is provided + * in PostgreSQL. However, completing its implementation (to include a + * {@code parser()} method) would require also defining an interface to + * represent a text search parser. + */ +public interface RegConfig +extends Addressed, Namespaced, Owned +{ + RegClass.Known CLASSID = + formClassId(TSConfigRelationId, RegConfig.class); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegDictionary.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegDictionary.java new file mode 100644 index 000000000..3e70d8a98 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegDictionary.java @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * A PostgreSQL text search dictionary. + *

    + * This interface is included in the model per the (arguably arbitrary) goal of + * covering all the catalog classes for which a {@code Reg...} type is provided + * in PostgreSQL. However, completing its implementation (to include a + * {@code template()} method) would require also defining an interface to + * represent a text search template. + */ +public interface RegDictionary +extends Addressed, Namespaced, Owned +{ + RegClass.Known CLASSID = + formClassId(TSDictionaryRelationId, RegDictionary.class); + + /* + * dictinitoption is a text column, but it clearly (see CREATE TEXT SEARCH + * DICTIONARY and examples in the catalog) has an option = value , ... + * structure. An appropriate return type for a method could be a map, + * and the implementation would have to match the quoting/escaping/parsing + * rules used by PG. + */ +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegNamespace.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegNamespace.java new file mode 100644 index 000000000..8dc28cc40 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegNamespace.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a namespace (named schema) entry in the PostgreSQL catalogs. 
+ */ +public interface RegNamespace +extends + Addressed, Named, Owned, + AccessControlled +{ + RegClass.Known CLASSID = + formClassId(NamespaceRelationId, RegNamespace.class); + + RegNamespace PG_CATALOG = formObjectId(CLASSID, PG_CATALOG_NAMESPACE); + RegNamespace PG_TOAST = formObjectId(CLASSID, PG_TOAST_NAMESPACE); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegOperator.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegOperator.java new file mode 100644 index 000000000..ef54acea8 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegOperator.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.model.RegProcedure.Memo.Why; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Operator; + +/** + * Model of a PostgreSQL operator as defined in the system catalogs, including + * its kind (infix or prefix), operand and result types, and a number of + * properties helpful in query planning. + */ +public interface RegOperator +extends Addressed, Namespaced, Owned +{ + RegClass.Known CLASSID = + formClassId(OperatorRelationId, RegOperator.class); + + enum Kind + { + /** + * An operator used between a left and a right operand. + */ + INFIX, + + /** + * An operator used to the left of a single right operand. + */ + PREFIX, + + /** + * An operator used to the right of a single left operand. 
+ * @deprecated Postfix operators are deprecated since PG 13 and + * unsupported since PG 14. + */ + @Deprecated(since="PG 13") + POSTFIX + } + + interface Evaluator extends Why { } + interface RestrictionSelectivity extends Why { } + interface JoinSelectivity extends Why { } + + Kind kind(); + boolean canMerge(); + boolean canHash(); + RegType leftOperand(); + RegType rightOperand(); + RegType result(); + RegOperator commutator(); + RegOperator negator(); + RegProcedure evaluator(); + RegProcedure restrictionEstimator(); + RegProcedure joinEstimator(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegProcedure.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegProcedure.java new file mode 100644 index 000000000..744b94db9 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegProcedure.java @@ -0,0 +1,450 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.SQLException; +import java.sql.SQLXML; + +import java.util.BitSet; +import java.util.List; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +import org.postgresql.pljava.PLJavaBasedLanguage.SRFTemplate; +import org.postgresql.pljava.TargetList.Projection; + +import org.postgresql.pljava.annotation.Function.Effects; +import org.postgresql.pljava.annotation.Function.OnNullInput; +import org.postgresql.pljava.annotation.Function.Parallel; +import org.postgresql.pljava.annotation.Function.Security; + +import 
org.postgresql.pljava.annotation.Trigger.Called; +import org.postgresql.pljava.annotation.Trigger.Event; +import org.postgresql.pljava.annotation.Trigger.Scope; + +/** + * Model of a PostgreSQL "routine" (which in late versions can include + * procedures and functions of various kinds) as defined in the system catalogs, + * including its parameter and result types and many other properties. + * @param distinguishes {@code RegProcedure} instances used for different + * known purposes, by specifying the type of a 'memo' that could be attached to + * the instance, perhaps with extra information helpful for the intended use. + * At present, such memo interfaces are nearly all empty, but still this + * parameter can serve a compile-time role to discourage mixing different + * procedures up. + */ +public interface RegProcedure> +extends + Addressed>, Namespaced, Owned, + AccessControlled +{ + RegClass.Known> CLASSID = + formClassId(ProcedureRelationId, (Class>)null); + + ProceduralLanguage language(); + + float cost(); + + float rows(); + + RegType variadicType(); + + /** + * A planner-support function that may transform call sites of + * this function. + *

    + * In PG 9.5 to 11, there was a similar, but less flexible, "transform" + * function that this method can return when running on those versions. + * @since PG 12 + */ + RegProcedure support(); + + /** + * The kind of procedure or function. + *

    + * Before PG 11, there were separate booleans to indicate an aggregate or + * window function, which this method can consult when running on earlier + * versions. + * @since PG 11 + */ + Kind kind(); + + Security security(); + + boolean leakproof(); + + OnNullInput onNullInput(); + + boolean returnsSet(); + + Effects effects(); + + Parallel parallel(); + + RegType returnType(); + + List argTypes(); + + List allArgTypes(); + + /** + * Modes corresponding 1-for-1 to the arguments in {@code allArgTypes}. + */ + List argModes(); + + /** + * Names corresponding 1-for-1 to the arguments in {@code allArgTypes}. + */ + List argNames(); + + /** + * A {@code pg_node_tree} representation of a list of n + * expression trees, corresponding to the last n input arguments + * (that is, the last n returned by {@code argTypes}). + */ + SQLXML argDefaults(); + + List transformTypes(); + + String src(); + + String bin(); + + /** + * A {@code pg_node_tree} representation of a pre-parsed SQL function body, + * used when it is given in SQL-standard notation rather than as a string + * literal, otherwise null. + * @since PG 14 + */ + SQLXML sqlBody(); + + /** + * This is surely a list of {@code guc=value} pairs and ought to have + * a more specific return type. + *

    + * XXX + */ + List config(); + + enum ArgMode { IN, OUT, INOUT, VARIADIC, TABLE }; + + enum Kind { FUNCTION, PROCEDURE, AGGREGATE, WINDOW }; + + /** + * Obtain memo attached to this {@code RegProcedure}, if any. + *

    + * A {@code RegProcedure} may have an implementation of {@link Memo Memo} + * attached, providing additional information on what sort of procedure + * it is and how to use it. Many catalog getters that return + * {@code RegProcedure} specialize the return type to indicate + * an expected subinterface of {@code Memo}. + *

    + * It may not be the case that a given {@code RegProcedure} has a valid + * {@code Memo} attached at all times. Documentation for a specific + * {@code Memo} subinterface should explain the circumstances when this + * method can be called to rely on a memo of that type. + */ + M memo(); + + /** + * Superinterface of two memo types a {@code RegProcedure} can carry, + * {@link Why Why} and {@link How How}. + *

    + * A {@code Why} memo pertains to the intended use of a + * {@code RegProcedure}, for example as a + * {@link RegType.TypeInput TypeInput} function or as a + * {@link PlannerSupport PlannerSupport} function. The {@code Why} memo, + * if present, can be retrieved by the {@link #memo() memo()} method, and + * the type parameter of {@code RegProcedure} reflects it, as a compile-time + * safeguard against mixing up {@code RegProcedure}s with different + * purposes. + *

    + * Orthogonally to {@code Why}, a {@code How} memo pertains to how the + * {@code RegProcedure} is implemented, such as + * {@link ProceduralLanguage.PLJavaBased PLJavaBased} if the + * {@code RegProcedure} is implemented in a language built atop PL/Java. + * The language of implementation is ideally independent of the intended + * use, so {@code RegProcedure} is not parameterized with a {@code How} + * type, and has no API method to retrieve an associated {@code How} memo. + * For PL/Java-based languages, PL/Java's dispatcher will pass the + * associated {@code How} memo to the language handler. + */ + interface Memo> + { + /** + * Superinterface of memos that pertain to the intended use of a + * {@link RegProcedure RegProcedure} (why it is used). + */ + interface Why> extends Memo { } + + /** + * Superinterface of memos that pertain to the internal implementation + * of a {@link RegProcedure RegProcedure} (how it is + * implemented). + */ + interface How> extends Memo { } + } + + interface PlannerSupport extends Memo.Why { } + + /** + * Counterpart to the PostgreSQL {@code FmgrInfo}. + */ + interface Lookup + { + /** + * The PostgreSQL function or procedure being called. + */ + RegProcedure target(); + + /* + * Most of the C members of FmgrInfo are just as easy here to look up + * on target. The API here will focus on exposing such higher-level + * queries as might be made in C with the functions in fmgr.h and + * funcapi.h. + */ + + /** + * A {@link TupleDescriptor} describing the incoming arguments, with any + * polymorphic types from the routine's declaration resolved to the + * actual types at this call site. + *

    + * If there are no polymorphic types among the routine's declared + * parameters, an unchanged {@code TupleDescriptor} cached with the + * routine may be returned. + *

    + * See {@link #inputsAreSpread inputsAreSpread} for one case where the + * {@code size()} of this {@code TupleDescriptor} can exceed the + * {@code size()} of a {@code TupleDescriptor} constructed from the + * routine's declaration. + *

    + * {@link RegType#ANYARRAY ANYARRAY}, normally seen only in templates + * as a polymorphic pseudotype, can appear in this result in rare cases, + * where an expression involves certain columns of statistics-related + * system catalogs. An argument with this resolved type represents an + * array, but one whose element type may differ from call to call. See + * {@link RegType#ANYARRAY ANYARRAY} for how such an array can be + * handled. + */ + TupleDescriptor inputsDescriptor() throws SQLException; + + /** + * A {@link TupleDescriptor} describing the expected result, with any + * polymorphic types from the routine's declaration resolved to the + * actual types at this call site. + *

    + * Returns null if the routine has a declared return type of + * {@link RegType#VOID VOID} and does not need to return anything. + *

    + * If there are no polymorphic types among the routine's declared + * outputs, an unchanged {@code TupleDescriptor} cached with the + * routine may be returned. + *

    + * When the routine is a function declared with a non-composite return + * type (or with a single {@code OUT} parameter, a case PostgreSQL + * treats the same way), this method returns a synthetic ephemeral + * {@code TupleDescriptor} with one unnamed attribute of that type. + *

    + * {@link RegType#ANYARRAY ANYARRAY}, normally seen only in templates + * as a polymorphic pseudotype, can appear in this result in rare cases, + * where an expression involves certain columns of statistics-related + * system catalogs. An argument with this resolved type represents an + * array, but one whose element type may differ from call to call. See + * {@link RegType#ANYARRAY ANYARRAY} for how such an array can be + * handled. + */ + TupleDescriptor outputsDescriptor() throws SQLException; + + /** + * Returns true if a routine with a variadic parameter declared with the + * wildcard {@code "any"} type is being called with its arguments in + * "spread" form at this call site. + *

    + * In "spread" form, {@link Call#arguments arguments()}{@code .size()} + * can exceed the routine's declared number of parameters, with + * the values and types of the variadic arguments to be found + * at successive positions of {@link Call#arguments}. In "collected" + * form, the position of the variadic parameter is passed a single + * PostgreSQL array of the variadic arguments' type. A call with zero + * arguments for the variadic parameter can only be made in + * "collected" form, with an empty array at the variadic parameter's + * declared position; therefore, no case arises where the passed + * arguments are fewer than the declared parameters. + *

    + * When the routine declaration has a variadic parameter of any type + * other than the wildcard {@code "any"}, collected form is always used. + * In the wildcard case, collected or spread form may be seen, at the + * caller's option. Therefore, there is an ambiguity when such a routine + * receives a single argument of array type at the variadic position, + * and this method must be used in that case to determine the caller's + * intent. + * @return always false, except for a routine declared + * {@code VARIADIC "any"} when its arguments are being passed + * in "spread" form. + */ + boolean inputsAreSpread(); + + /** + * For the arguments at (zero-based) positions in {@code arguments()} + * indicated by ofInterest, report (in the returned bit set) + * which of those are 'stable', that is, will keep their values across + * calls associated with the current {@code Lookup}. + */ + BitSet stableInputs(BitSet ofInterest); + } + + /** + * Counterpart to the PostgreSQL {@code FunctionCallInfoBaseData}. + *

    + * Presents arguments in the form of a {@code TupleTableSlot}. + */ + interface Call + { + Lookup lookup(); + TupleTableSlot arguments() throws SQLException; + TupleTableSlot result() throws SQLException; + void isNull(boolean nullness); + RegCollation collation(); + Context context(); + ResultInfo resultInfo(); + /* + * Using TupleTableSlot, this interface does not so much need to + * expose the get_call_result_type / get_fn_expr_argtype / + * get_fn_expr_variadic routines as to just go ahead and use them + * and present a coherent picture. + */ + + /** + * Common base of interfaces that can be returned by + * {@link Call#context() Call.context()}. + */ + interface Context + { + /** + * Supplied when the routine is being called as a trigger. + */ + interface TriggerData extends Context + { + /** + * When the trigger is being called (before, after, or instead) + * with respect to the triggering event. + */ + Called called(); + + /** + * The event that has fired this trigger. + */ + Event event(); + + /** + * The scope (per-row or per-statement) of this trigger. + */ + Scope scope(); + + /** + * The relation on which this trigger is declared. + */ + RegClass relation(); + + /** + * The row for which the trigger was fired. + *

    + * In a trigger fired for {@code INSERT} or {@code DELETE}, this + * is the row to return if not altering or skipping the + * operation. + */ + TupleTableSlot triggerTuple(); + + /** + * The proposed new version of the row, only in a trigger fired + * for {@code UPDATE}. + *

    + * In a trigger fired for {@code UPDATE}, this is the row + * to return if not altering or skipping the operation. + */ + TupleTableSlot newTuple(); + + /** + * Information from the trigger's declaration in the system + * catalogs. + */ + Trigger trigger(); + + /** + * For {@code UPDATE} triggers, which columns have been updated + * by the triggering command; null for other triggers. + */ + Projection updatedColumns(); + } + + interface EventTriggerData extends Context + { + } + + interface AggState extends Context + { + } + + interface WindowAggState extends Context + { + } + + interface WindowObject extends Context + { + } + + /** + * Supplied when the routine being called is a procedure + * rather than a function. + */ + interface CallContext extends Context + { + /** + * Indicates whether transaction control operations within + * the procedure are disallowed (true) or allowed (false). + */ + boolean atomic(); + } + + interface ErrorSaveContext extends Context + { + } + } + + /** + * Common base of interfaces that can be returned by + * {@link Call#resultInfo() Call.resultInfo()}. + */ + interface ResultInfo + { + /** + * Supplied when the routine is being called with the expectation + * of a set (not just a single value or row) return. + */ + interface ReturnSetInfo extends ResultInfo + { + /** + * List indicating the set-returning interfaces the caller + * is prepared to accept. + *

    + * Ordering can reflect the caller's preference, + * with a more-preferred interface earlier in the list. + */ + List> allowedModes(); + } + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegRole.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegRole.java new file mode 100644 index 000000000..acb8e55c3 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegRole.java @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.util.List; + +import org.postgresql.pljava.RolePrincipal; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Pseudo; + +/** + * Model of a PostgreSQL role. + *

    + * In addition to the methods returning the information in the {@code pg_authid} + * system catalog, there are methods to return four different flavors of + * {@link RolePrincipal RolePrincipal}, all representing this role. + *

    + * The {@code ...Principal()} methods should not be confused with environment + * accessors returning actual information about the execution context. Each of + * the methods simply returns an instance of the corresponding class that would + * be appropriate to find in the execution context if this role were, + * respectively, the authenticated, session, outer, or current role. + *

    + * {@link RolePrincipal.Current} implements the + * {@code UserPrincipal/GroupPrincipal} interfaces of + * {@code java.nio.file.attribute}, so + * {@link #currentPrincipal() currentPrincipal()} can also be used to obtain + * {@code Principal}s that will work in the Java NIO.2 filesystem API. + *

    + * The {@code ...Principal} methods only succeed when {@code name()} does, + * therefore not when {@code isValid} is false. The {@code RegRole.Grantee} + * representing {@code PUBLIC} is, for all other purposes, not a valid role, + * including for its {@code ...Principal} methods. + */ +public interface RegRole +extends Addressed, Named, AccessControlled +{ + RegClass.Known CLASSID = + formClassId(AuthIdRelationId, RegRole.class); + + /** + * A {@code RegRole.Grantee} representing {@code PUBLIC}; not a valid + * {@code RegRole} for other purposes. + */ + RegRole.Grantee PUBLIC = publicGrantee(); + + /** + * Subinterface of {@code RegRole} returned by methods of + * {@link CatalogObject.AccessControlled CatalogObject.AccessControlled} + * identifying the role to which a privilege has been granted. + *

    + * A {@code RegRole} appearing as a grantee can be {@link #PUBLIC PUBLIC}, + * unlike a {@code RegRole} in any other context, so the + * {@link #isPublic isPublic()} method appears only on this subinterface, + * as well as the {@link #nameAsGrantee nameAsGrantee} method, which will + * return the correct name even in that case (the ordinary {@code name} + * method will not). + */ + interface Grantee extends RegRole + { + /** + * In the case of a {@code RegRole} obtained as the {@code grantee} of a + * {@link Grant}, indicate whether it is a grant to "public". + */ + default boolean isPublic() + { + return ! isValid(); + } + + /** + * Like {@code name()}, but also returns the expected name for a + * {@code Grantee} representing {@code PUBLIC}. + */ + Simple nameAsGrantee(); + } + + /** + * Return a {@code RolePrincipal} that would represent this role as a + * session's authenticated identity (which was established at connection + * time and will not change for the life of a session). + */ + default RolePrincipal.Authenticated authenticatedPrincipal() + { + return new RolePrincipal.Authenticated(name()); + } + + /** + * Return a {@code RolePrincipal} that would represent this role as a + * session's "session" identity (which can be changed during a session + * by {@code SET SESSION AUTHORIZATION}). + */ + default RolePrincipal.Session sessionPrincipal() + { + return new RolePrincipal.Session(name()); + } + + /** + * Return a {@code RolePrincipal} that would represent this role as the one + * last established by {@code SET ROLE}, and outside of any + * {@code SECURITY DEFINER} function. + */ + default RolePrincipal.Outer outerPrincipal() + { + return new RolePrincipal.Outer(name()); + } + + /** + * Return a {@code RolePrincipal} that would represent this role as the + * effective one for normal privilege checks, usually the same as the + * session or outer, but changed during {@code SECURITY DEFINER} functions. + *

    + * This method can also be used to obtain a {@code Principal} that will work + * in the Java NIO.2 filesystem API. + */ + default RolePrincipal.Current currentPrincipal() + { + return new RolePrincipal.Current(name()); + } + + /** + * Roles of which this role is directly a member. + *

    + * For the other direction, see {@link #grants() grants()}. + */ + List memberOf(); + + boolean superuser(); + boolean inherit(); + boolean createRole(); + boolean createDB(); + boolean canLogIn(); + boolean replication(); + boolean bypassRLS(); + int connectionLimit(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/RegType.java b/pljava-api/src/main/java/org/postgresql/pljava/model/RegType.java new file mode 100644 index 000000000..7cf472e00 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/RegType.java @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.SQLType; +import java.sql.SQLXML; + +import org.postgresql.pljava.Adapter; // javadoc + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.model.RegProcedure.Memo.Why; + +import org.postgresql.pljava.annotation.BaseUDT.Alignment; +import org.postgresql.pljava.annotation.BaseUDT.PredefinedCategory; // javadoc +import org.postgresql.pljava.annotation.BaseUDT.Storage; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; // javadoc +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a PostgreSQL data type, as defined in the system catalogs. + *

    + * This class also has static final fields for a selection of commonly used + * {@code RegType}s, such as those that correspond to types mentioned in JDBC, + * and others that are just ubiquitous when working in PostgreSQL in general, + * or are used in this model package. + *

    + * An instance of {@code RegType} also implements the JDBC + * {@link SQLType SQLType} interface, with the intention that it could be used + * with a suitably-aware JDBC implementation to identify any type available + * in PostgreSQL. + *

    + * A type can have a 'modifier' (think {@code NUMERIC(4)} versus plain + * {@code NUMERIC}). In PostgreSQL's C code, a type oid and modifier have to + * be passed around in tandem. Here, you apply + * {@link #modifier(int) modifier(int)} to the unmodified {@code RegType} and + * obtain a distinct {@code RegType} instance incorporating the modifier. + */ +public interface RegType +extends + Addressed, Namespaced, Owned, AccessControlled, + SQLType +{ + RegClass.Known CLASSID = + formClassId(TypeRelationId, RegType.class); + + /* + * PG types good to have around because of corresponding JDBC types. + */ + RegType BOOL = formObjectId(CLASSID, BOOLOID); + RegType BYTEA = formObjectId(CLASSID, BYTEAOID); + /** + * The PostgreSQL type {@code "char"} (the quotes are needed to distinguish + * it from the different SQL type named {@code CHAR}), which is an eight-bit + * signed value with no associated character encoding (though it is often + * used in the catalogs with ASCII-letter values as an ersatz enum). + *

    + * It can be mapped to the JDBC type {@code TINYINT}, or Java {@code byte}. + */ + RegType CHAR = formObjectId(CLASSID, CHAROID); + RegType INT8 = formObjectId(CLASSID, INT8OID); + RegType INT2 = formObjectId(CLASSID, INT2OID); + RegType INT4 = formObjectId(CLASSID, INT4OID); + RegType XML = formObjectId(CLASSID, XMLOID); + RegType FLOAT4 = formObjectId(CLASSID, FLOAT4OID); + RegType FLOAT8 = formObjectId(CLASSID, FLOAT8OID); + /** + * "Blank-padded CHAR", the PostgreSQL type that corresponds to the SQL + * standard {@code CHAR} (spelled without quotes) type. + */ + RegType BPCHAR = formObjectId(CLASSID, BPCHAROID); + RegType VARCHAR = formObjectId(CLASSID, VARCHAROID); + RegType DATE = formObjectId(CLASSID, DATEOID); + RegType TIME = formObjectId(CLASSID, TIMEOID); + RegType TIMESTAMP = formObjectId(CLASSID, TIMESTAMPOID); + RegType TIMESTAMPTZ = formObjectId(CLASSID, TIMESTAMPTZOID); + RegType TIMETZ = formObjectId(CLASSID, TIMETZOID); + RegType BIT = formObjectId(CLASSID, BITOID); + RegType VARBIT = formObjectId(CLASSID, VARBITOID); + RegType NUMERIC = formObjectId(CLASSID, NUMERICOID); + + /* + * PG types not mentioned in JDBC but bread-and-butter to PG devs. + */ + RegType TEXT = formObjectId(CLASSID, TEXTOID); + RegType UNKNOWN = formObjectId(CLASSID, UNKNOWNOID); + RegType RECORD = formObjectId(CLASSID, RECORDOID); + RegType CSTRING = formObjectId(CLASSID, CSTRINGOID); + RegType VOID = formObjectId(CLASSID, VOIDOID); + RegType TRIGGER = formObjectId(CLASSID, TRIGGEROID); + + /* + * API treatment for one of the several polymorphic types, because this one + * can also be the actual resolved type of some system catalog columns. + */ + /** + * Normally a pseudotype used in declaring polymorphic functions, this + * can also be the actual resolved type of some statistics-related system + * catalog columns or expressions derived from them. + *

    + * When this type is encountered as the resolved type for an array, + * different instances of the array may have different element types. + * {@link Adapter.Array#elementType()} can be used to get an {@code Adapter} + * that reports the element type of any array, so that a suitable + * {@code Adapter} for that element type can be chosen and used to construct + * an array adapter for access to the array's elements. + */ + RegType ANYARRAY = formObjectId(CLASSID, ANYARRAYOID); + + /* + * PG types used in modeling PG types themselves. + */ + RegType NAME = formObjectId(CLASSID, NAMEOID); + RegType REGPROC = formObjectId(CLASSID, REGPROCOID); + RegType OID = formObjectId(CLASSID, OIDOID); + RegType PG_NODE_TREE = formObjectId(CLASSID, PG_NODE_TREEOID); + RegType ACLITEM = formObjectId(CLASSID, ACLITEMOID); + RegType REGPROCEDURE = formObjectId(CLASSID, REGPROCEDUREOID); + RegType REGOPER = formObjectId(CLASSID, REGOPEROID); + RegType REGOPERATOR = formObjectId(CLASSID, REGOPERATOROID); + RegType REGCLASS = formObjectId(CLASSID, REGCLASSOID); + RegType REGTYPE = formObjectId(CLASSID, REGTYPEOID); + RegType REGCONFIG = formObjectId(CLASSID, REGCONFIGOID); + RegType REGDICTIONARY = formObjectId(CLASSID, REGDICTIONARYOID); + RegType REGNAMESPACE = formObjectId(CLASSID, REGNAMESPACEOID); + RegType REGROLE = formObjectId(CLASSID, REGROLEOID); + RegType REGCOLLATION = formObjectId(CLASSID, REGCOLLATIONOID); + + enum Type { BASE, COMPOSITE, DOMAIN, ENUM, PSEUDO, RANGE, MULTIRANGE } + + interface TypeInput extends Why { } + interface TypeOutput extends Why { } + interface TypeReceive extends Why { } + interface TypeSend extends Why { } + interface TypeModifierInput extends Why { } + interface TypeModifierOutput extends Why { } + interface TypeAnalyze extends Why { } + interface TypeSubscript extends Why { } + + /** + * Interface additionally implemented by an instance that represents a type + * (such as the PostgreSQL polymorphic pseudotypes or the even wilder "any" + * 
type) needing resolution to an actual type used at a given call site. + */ + interface Unresolved extends RegType + { + /** + * Returns true, indicating resolution to an actual type is needed. + */ + @Override + default boolean needsResolution() + { + return true; + } + } + + /** + * Whether this instance represents a type (such as the PostgreSQL + * polymorphic pseudotypes or the even wilder "any" type) needing resolution + * to an actual type used at a given call site. + *

    + * This information does not come from the {@code pg_type} catalog, but + * simply reflects PostgreSQL-version-specific knowledge of which types + * require such treatment. + *

    + * This default implementation returns false. + * @see Unresolved#needsResolution + */ + default boolean needsResolution() + { + return false; + } + + short length(); + boolean byValue(); + Type type(); + /** + * A one-character code representing the type's 'category'. + *

    + * Custom categories are possible, so not every value here need correspond + * to a {@link PredefinedCategory PredefinedCategory}, but common ones will, + * and can be 'decoded' with {@link PredefinedCategory#valueOf(char)}. + */ + char category(); + boolean preferred(); + boolean defined(); + byte delimiter(); + RegClass relation(); + RegType element(); + RegType array(); + RegProcedure input(); + RegProcedure output(); + RegProcedure receive(); + RegProcedure send(); + RegProcedure modifierInput(); + RegProcedure modifierOutput(); + RegProcedure analyze(); + RegProcedure subscript(); + Alignment alignment(); + Storage storage(); + boolean notNull(); + RegType baseType(); + int dimensions(); + RegCollation collation(); + SQLXML defaultBin(); + String defaultText(); + RegType modifier(int typmod); + + /** + * Returns the {@code RegType} for this type with no modifier, if this + * instance has one. + *

    + * If not, simply returns {@code this}. + */ + RegType withoutModifier(); + + /** + * Returns the modifier if this instance has one, else -1. + */ + int modifier(); + + /** + * The corresponding {@link TupleDescriptor TupleDescriptor}, non-null only + * for composite types. + */ + TupleDescriptor.Interned tupleDescriptor(); + + /** + * The name of this type as a {@code String}, as the JDBC + * {@link SQLType SQLType} interface requires. + *

    + * The string produced here is as would be produced by + * {@link Identifier#deparse deparse(StandardCharsets.UTF_8)} applied to + * the result of {@link #qualifiedName qualifiedName()}. + * The returned string may include double-quote marks, which affect its case + * sensitivity and the characters permitted within it. If an application is + * not required to use this method for JDBC compatibility, it can avoid + * needing to fuss with those details by using {@code qualifiedName} + * instead. + */ + @Override + default String getName() + { + return qualifiedName().toString(); + } + + /** + * A string identifying the "vendor" for which the type name and number here + * are meaningful, as the JDBC {@link SQLType SQLType} interface requires. + *

    + * The JDBC API provides that the result "typically is the package name for + * this vendor", and this method returns {@code org.postgresql} as + * a constant string. + *

    + * Note, however, that every type that is defined in the current PostgreSQL + * database can be represented by an instance of this interface, whether + * built in to PostgreSQL, installed with an extension, or user-defined. + * Therefore, not every instance with this "vendor" string can be assumed + * to be a type known to all PostgreSQL databases. Moreover, even if + * the same extension-provided or user-defined type is present in different + * PostgreSQL databases, it need not be installed with the same + * {@link #qualifiedName qualifiedName} in each, and will almost certainly + * have different object IDs, so {@link #getName getName} and + * {@link #getVendorTypeNumber getVendorTypeNumber} may not in general + * identify the same type across unrelated PostgreSQL databases. + */ + @Override + default String getVendor() + { + return "org.postgresql"; + } + + /** + * A vendor-specific type number identifying this type, as the JDBC + * {@link SQLType SQLType} interface requires. + *

    + * This implementation returns the {@link #oid oid} of the type in + * the current database. However, except for the subset of types that are + * built in to PostgreSQL with oid values that are fixed, the result of this + * method should only be relied on to identify a type within the current + * database. + */ + @Override + default Integer getVendorTypeNumber() + { + return oid(); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/ResourceOwner.java b/pljava-api/src/main/java/org/postgresql/pljava/model/ResourceOwner.java new file mode 100644 index 000000000..919a05e00 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/ResourceOwner.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.Lifespan; + +import org.postgresql.pljava.model.CatalogObject.Factory; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +/** + * The representation of a PostgreSQL {@code ResourceOwner}, usable as + * a PL/Java {@link Lifespan Lifespan}. + *

    + * The {@code ResourceOwner} API in PostgreSQL is described here. + *

    + * PostgreSQL invokes callbacks in phases when a {@code ResourceOwner} + * is released, and all of its built-in consumers get notified before + * loadable modules (like PL/Java) for each phase in turn. The release + * behavior of this PL/Java instance is tied to the + * {@code RESOURCE_RELEASE_LOCKS} phase of the underlying PostgreSQL object, + * and therefore occurs after all of the built-in PostgreSQL lock-related + * releases, but before any of the built-in stuff released in the + * {@code RESOURCE_RELEASE_AFTER_LOCKS} phase. + */ +public interface ResourceOwner extends Lifespan +{ + static ResourceOwner CurrentResourceOwner() + { + return INSTANCE.resourceOwner(RSO_Current); + } + + static ResourceOwner CurTransactionResourceOwner() + { + return INSTANCE.resourceOwner(RSO_CurTransaction); + } + + static ResourceOwner TopTransactionResourceOwner() + { + return INSTANCE.resourceOwner(RSO_TopTransaction); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/SlotTester.java b/pljava-api/src/main/java/org/postgresql/pljava/model/SlotTester.java new file mode 100644 index 000000000..3e30f18a2 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/SlotTester.java @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.ResultSet; +import java.sql.SQLException; + +import java.util.List; + +import org.postgresql.pljava.Adapter; + +/** + * A temporary test jig during TupleTableSlot development, not intended to last. 
+ */ +public interface SlotTester +{ + /** + * Unwrap a {@link ResultSet} instance from the legacy JDBC layer as a + * {@link Portal} instance so results can be retrieved using new API. + * @param rs a ResultSet, which can only be an SPIResultSet obtained from + * the legacy JDBC implementation, not yet closed or used to fetch anything, + * and will be closed. + */ + Portal unwrapAsPortal(ResultSet rs) throws SQLException; + + /** + * Execute query, returning its complete result as a {@code List} + * of {@link TupleTableSlot}. + */ + List test(String query); + + /** + * Return one of the predefined {@link Adapter} instances, given knowledge + * of the class name and static final field name within that class inside + * PL/Java's implementation module. + *

    + * Example: + *

    +	 * adapterPlease(
    +	 *  "org.postgresql.pljava.pg.adt.Primitives", "FLOAT8_INSTANCE");
    +	 *
    + */ + Adapter adapterPlease(String clazz, String field) + throws ReflectiveOperationException; + + /** + * A temporary marker interface used on classes or interfaces whose + * static final fields should be visible to {@code adapterPlease}. + */ + interface Visible + { + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Tablespace.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Tablespace.java new file mode 100644 index 000000000..e2249e51f --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Tablespace.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.util.Map; + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a tablespace used in laying out on-disk storage. + */ +public interface Tablespace +extends + Addressed, Named, Owned, AccessControlled +{ + RegClass.Known CLASSID = + formClassId(TableSpaceRelationId, Tablespace.class); + + Map options(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Transform.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Transform.java new file mode 100644 index 000000000..68f45e036 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Transform.java @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.PLJavaBasedLanguage.UsingTransforms; // for javadoc +import org.postgresql.pljava.annotation.Function.Effects; // for javadoc + +import org.postgresql.pljava.model.CatalogObject.*; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.model.RegProcedure.Memo.Why; + +/** + * Model of the PostgreSQL {@code pg_transform} system catalog. + *

    + * A transform is a very open-ended PostgreSQL arrangement for controlling how + * values of a target PostgreSQL type may be converted to values of some + * appropriate data type available in a given procedural language, and back + * again. PostgreSQL does none of this work itself, but simply provides a way to + * declare + * a transform (associating a {@code fromSQL} and a {@code toSQL} function + * with a procedural language and a PostgreSQL type), and syntax in + * {@code CREATE FUNCTION} and {@code CREATE PROCEDURE} to indicate + * {@linkplain RegProcedure#transformTypes() which types} should have + * such transforms applied. + *

    + * Beyond verifying, at {@code CREATE FUNCTION} or {@code CREATE PROCEDURE} + * time, that any transforms mentioned in the declaration do exist, PostgreSQL + * does nothing to apply any transforms when the function or procedure + * is called. If a function's or procedure's declaration indicates any types + * for which transforms should be applied, the full responsibility for doing so + * (including all details of how it is done) falls to the function's or + * procedure's implementing procedural language. + *

    + * If a procedural language implementation does not contain logic to apply + * transforms when requested, it should reject any function or + * procedure with non-null {@link RegProcedure#transformTypes() transformTypes}, + * at validation time when possible. If it does not, PostgreSQL will allow + * functions and procedures in that language to declare transforms for types, + * and the declarations will have no effect. + *

    + * For a PL/Java-based language, such declarations will always be rejected + * if the language does not implement the {@link UsingTransforms} interface. + */ +public interface Transform extends Addressed +{ + RegClass.Known CLASSID = + formClassId(TransformRelationId, Transform.class); + + interface FromSQL extends Why { } + interface ToSQL extends Why { } + + /** + * The PostgreSQL data type to which this transform is intended to apply. + */ + RegType type(); + + /** + * The procedural language with which this transform can be used. + */ + ProceduralLanguage language(); + + /** + * Function that, at least conceptually, converts a value of + * {@linkplain #type() the intended PostgreSQL type} to a value of some + * appropriate type in the {@linkplain #language() target language}. + *

    + * A result with {@link RegProcedure#isValid() isValid()}{@code =false} + * indicates that the target language should use its default from-SQL + * conversion for this transform's type. The language's + * {@link UsingTransforms#essentialTransformChecks essentialTransformChecks} + * method, in that case, should verify that the language has a usable + * default from-SQL conversion for the type. + *

    + * Otherwise, PostgreSQL will already have ensured that this is a + * non-{@linkplain RegProcedure#returnsSet() set-returning}, + * non-{@linkplain Effects#VOLATILE volatile} + * {@linkplain RegProcedure.Kind#FUNCTION function} + * declared with a {@linkplain RegProcedure#returnType() return type} of + * {@code INTERNAL} and a single argument of type {@code INTERNAL}. + *

    + * There are no other assurances made by PostgreSQL, and there can be many + * functions with such a signature that are not transform functions at all. + * It will be up to the {@linkplain #language() target language} and, if it + * is a PL/Java-based language, its + * {@link UsingTransforms#essentialTransformChecks essentialTransformChecks} + * method, to verify (if there is any way to do so) that this function is + * one that the language implementation can use to convert the intended + * PostgreSQL type to a usable type in the target language. + *

    + * Because both the argument and the return type are declared + * {@code INTERNAL}, there is no way to be sure from the declaration alone + * that this is a transform function expecting the transform's PostgreSQL + * type. + *

    + * Whatever use is to be made of this function, including exactly what is + * passed as its {@code INTERNAL} argument and what it is expected to + * produce as its {@code INTERNAL} return type, is completely up to the + * {@linkplain #language() target language}. Therefore, each target language + * defines how to write transform functions that it can apply. A target + * language may impose requirements (such as what the function's + * {@linkplain RegProcedure#language() language of implementation} must be) + * to simplify the problem of determining whether the function is suitable, + * perhaps by inspection of the function's + * {@linkplain RegProcedure#src() source text} when its language of + * implementation is known. It is even up to the target language + * implementation whether this function is ever 'called' in the usual sense + * at all, as opposed to, say, having its source text interpreted in some + * other way. + */ + RegProcedure fromSQL(); + + /** + * Function that, at least conceptually, converts a value of + * some appropriate type in the {@linkplain #language() target language} + * to a value of {@linkplain #type() the intended PostgreSQL type}. + *

    + * A result with {@link RegProcedure#isValid() isValid()}{@code =false} + * indicates that the target language should use its default to-SQL + * conversion for this transform's type. The language's + * {@link UsingTransforms#essentialTransformChecks essentialTransformChecks} + * method, in that case, should verify that the language has a usable + * default to-SQL conversion for the type. + *

    + * Otherwise, PostgreSQL will already have ensured that this is a + * non-{@linkplain RegProcedure#returnsSet() set-returning}, + * non-{@linkplain Effects#VOLATILE volatile} + * {@linkplain RegProcedure.Kind#FUNCTION function} + * declared with a {@linkplain RegProcedure#returnType() return type} of + * {@linkplain #type() the intended PostgreSQL type} and a single argument + * of type {@code INTERNAL}. + *

    + * There are no other assurances made by PostgreSQL, and there could be + * functions with such a signature that are not transform functions at all. + * It will be up to the {@linkplain #language() target language} and, if it + * is a PL/Java-based language, its + * {@link UsingTransforms#essentialTransformChecks essentialTransformChecks} + * method, to verify (if there is any way to do so) that this function is + * one that the language implementation can use to convert the expected + * target-language type to the intended PostgreSQL type. + *

    + * The return type of this function will match the transform's PostgreSQL + * type, but as the argument type is declared {@code INTERNAL}, there is + * no way to be sure from the declaration alone that the argument this + * function expects is what the target language implementation will pass + * to a transform function. + *

    + * Whatever use is to be made of this function, including exactly what is + * passed as its {@code INTERNAL} argument, is completely up to the + * {@linkplain #language() target language}. Therefore, each target language + * defines how to write transform functions that it can apply. A target + * language may impose requirements (such as what the function's + * {@linkplain RegProcedure#language() language of implementation} must be) + * to simplify the problem of determining whether the function is suitable, + * perhaps by inspection of the function's + * {@linkplain RegProcedure#src() source text} when its language of + * implementation is known. It is even up to the target language + * implementation whether this function is ever 'called' in the usual sense + * at all, as opposed to, say, having its source text interpreted in some + * other way. + */ + RegProcedure toSQL(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/Trigger.java b/pljava-api/src/main/java/org/postgresql/pljava/model/Trigger.java new file mode 100644 index 000000000..99e432c39 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/Trigger.java @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.SQLXML; + +import java.util.List; +import java.util.Set; + +import org.postgresql.pljava.TargetList.Projection; + +import org.postgresql.pljava.annotation.Trigger.Called; +import org.postgresql.pljava.annotation.Trigger.Event; +import org.postgresql.pljava.annotation.Trigger.Scope; + +import org.postgresql.pljava.model.CatalogObject.*; +import org.postgresql.pljava.model.RegProcedure.Call.Context.TriggerData; + +import static org.postgresql.pljava.model.CatalogObject.Factory.*; + +import org.postgresql.pljava.model.RegProcedure.Memo.Why; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Model of a trigger entry in the PostgreSQL catalogs. + *

    + * This catalog object, at least at first, will have an unusual limitation: + * its accessor methods (other than those of {@link Addressed}) may only work + * when called by a trigger function or its language handler within the scope + * of the function's specialization and execution. Some may be unimplemented + * even then, as noted in the documentation of the methods themselves. + */ +public interface Trigger +extends + Addressed, Named +{ + RegClass.Known CLASSID = + formClassId(TriggerRelationId, Trigger.class); + + enum ReplicationRole { ON_ORIGIN, ALWAYS, ON_REPLICA, DISABLED }; + + interface ForTrigger extends Why { } + + /** + * Name of this trigger. + *

    + * The table on which the trigger is declared serves as a namespace, + * within which trigger names on the same table must be unique. + */ + @Override + Simple name(); + + /** + * The table on which this trigger is declared. + *

    + * May throw {@code UnsupportedOperationException}. Within a trigger + * function or its handler, {@link TriggerData} can supply the same + * information. + */ + RegClass relation(); + + /** + * Parent trigger this trigger is cloned from (applies to partitioned + * tables), null if not a clone. + *

    + * May throw {@code UnsupportedOperationException}. + * @see #isClone + */ + Trigger parent(); + + /** + * The function to be called. + *

    + * May throw {@code UnsupportedOperationException}. Within a trigger + * function or its handler, this is just the function being called. + */ + RegProcedure function(); + + /** + * When this trigger is to fire (before, after, or instead of the + * triggering event). + */ + Called called(); + + /** + * The event(s) for which the trigger can fire. + */ + Set events(); + + /** + * The scope (per-statement or per-row) of this trigger. + */ + Scope scope(); + + /** + * For which {@code session_replication_role} modes the trigger fires. + */ + ReplicationRole enabled(); + + /** + * True if the trigger is internally generated (usually to enforce the + * constraint identified by {@link #constraint}). + */ + boolean internal(); + + /** + * The referenced table if this trigger pertains to a referential integrity + * constraint, otherwise null. + */ + RegClass constraintRelation(); + + /** + * The index supporting a unique, primary key, referential integrity, or + * exclusion constraint, null if this trigger is not for such a constraint. + */ + RegClass constraintIndex(); + + /** + * The constraint associated with the trigger, null if none. + * @return null, no {@code Constraint} catalog object is implemented yet + */ + Constraint constraint(); + + /** + * True for a constraint trigger that is deferrable. + */ + boolean deferrable(); + + /** + * True for a constraint trigger initially deferred. + */ + boolean initiallyDeferred(); + + /** + * The columns of interest (as a {@link Projection} of {@link #relation}'s + * columns) if the trigger is column-specific, otherwise null. + */ + Projection columns(); + + /** + * Any additional {@code String} arguments to pass to the trigger function. + */ + List arguments(); + + /** + * A {@code pg_node_tree} representation of a boolean expression restricting + * when this trigger can fire, or null if none. 
+ */ + SQLXML when(); + + /** + * Name by which an ephemeral table showing prior row values can be queried + * via SPI by the function for an {@code AFTER} trigger whose + * {@link #events events} include {@code UPDATE} or {@code DELETE}. + */ + Simple tableOld(); + + /** + * Name by which an ephemeral table showing new row values can be queried + * via SPI by the function for an {@code AFTER} trigger whose + * {@link #events events} include {@code UPDATE} or {@code INSERT}. + */ + Simple tableNew(); + + /** + * True if this trigger is a clone. + *

    + * This information will be available to a trigger function or its handler, + * even if the actual {@link #parent parent} trigger is not. + */ + boolean isClone(); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/TupleDescriptor.java b/pljava-api/src/main/java/org/postgresql/pljava/model/TupleDescriptor.java new file mode 100644 index 000000000..f3cff5a08 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/TupleDescriptor.java @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.SQLException; +import java.sql.SQLSyntaxErrorException; // javadoc + +import java.util.List; + +import org.postgresql.pljava.TargetList.Projection; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Conceptually, a {@code TupleDescriptor} is a list of {@code Attribute}, with + * a {@code RegType} that identifies its corresponding row type. + *

    + * The row type might be just {@code RECORD}, though, representing a + * transient, unregistered type. + *

    + * The {@code Attribute} instances may then correspond to nothing that exists in + * {@code pg_attribute}, in which case they will be 'virtual' instances whose + * {@code CatalogObject.Addressed} methods don't work, but which simply hold a + * reference to the {@code TupleDescriptor} they came from instead. + *

    + * A {@code TupleDescriptor} may also contain attribute defaults and/or + * constraints. These would be less often of interest in Java; if there is + * a need to make them available, rather than complicating + * {@code TupleDescriptor}, it will probably be more natural to make them + * available by methods on {@code Attribute}. + */ +public interface TupleDescriptor extends Projection +{ + /** + * @deprecated As a subinterface of {@link Projection Projection}, + * a {@code TupleDescriptor} already is a {@code List}, and there + * is no need for this method to simply return its own receiver. + */ + @Deprecated(forRemoval=true) + default List attributes() + { + return this; + } + + /** + * If this tuple descriptor is not ephemeral, returns the PostgreSQL type + * that identifies it. + *

    + * If the descriptor is for a known composite type in the PostgreSQL + * catalog, this method returns that type. + *

    + * If the descriptor has been created programmatically and interned, this + * method returns the type + * {@link RegType#RECORD RECORD}.{@link RegType#modifier(int) modifier(n)} + * where n was uniquely assigned by PostgreSQL when the + * descriptor was interned, and will reliably refer to this tuple descriptor + * for the duration of the session. + *

    + * For any ephemeral descriptor passed around in code without being + * interned, this method returns plain {@link RegType#RECORD RECORD}, which + * is useless for identifying the tuple structure. + */ + RegType rowType(); + + /** + * Gets an attribute by name. + *

    + * This API should be considered scaffolding or preliminary, until an API + * can be designed that might offer a convenient usage idiom without + * presupposing something like a name-to-attribute map in every decriptor. + *

    + * This default implementation simply does {@code project(name).get(0)}. + * Code that will do so repeatedly might be improved by doing so once and + * retaining the result. + * @throws SQLSyntaxErrorException 42703 if no attribute name matches + * @deprecated A one-by-one lookup-by-name API forces the implementation to + * cater to an inefficient usage pattern, when callers will often have a + * number of named attributes to look up, which can be done more efficiently + * in one go; see the methods of {@link Projection Projection}. + */ + @Deprecated(forRemoval=true) + default Attribute get(Simple name) throws SQLException + { + return project(name).get(0); + } + + /** + * Equivalent to {@code get(Simple.fromJava(name))}. + *

    + * This API should be considered scaffolding or preliminary, until an API + * can be designed that might offer a convenient usage idiom without + * presupposing something like a name-to-attribute map in every descriptor. + * @throws SQLSyntaxErrorException 42703 if no attribute name matches + * @deprecated A one-by-one lookup-by-name API forces the implementation to + * cater to an inefficient usage pattern, when callers will often have a + * number of named attributes to look up, which can be done more efficiently + * in one go; see the methods of {@link Projection Projection}. + */ + @Deprecated(forRemoval=true) + default Attribute get(String name) throws SQLException + { + return get(Simple.fromJava(name)); + } + + /** + * Return this descriptor unchanged if it is already interned in + * PostgreSQL's type cache, otherwise an equivalent new descriptor with + * a different {@link #rowType rowType} uniquely assigned to identify it + * for the duration of the session. + *

    + * PostgreSQL calls this operation "BlessTupleDesc", which updates the + * descriptor in place; in PL/Java code, the descriptor returned by this + * method should be used in place of the original. + */ + Interned intern(); + + /** + * A descriptor that either describes a known composite type in the + * catalogs, or has been interned in PostgreSQL's type cache, and has + * a distinct {@link #rowType rowType} that can be used to identify it + * for the duration of the session. + *

    + * Some operations, such as constructing a composite value for a function + * to return, require this. + */ + interface Interned extends TupleDescriptor + { + @Override + default Interned intern() + { + return this; + } + } + + /** + * A descriptor that has been constructed on the fly and has not been + * interned. + *

    + * For all such descriptors, {@link #rowType rowType} returns + * {@link RegType#RECORD RECORD}, which is of no use for identification. + * For some purposes (such as constructing a composite value for a function + * to return), an ephemeral descriptor must be interned before it can + * be used. + */ + interface Ephemeral extends TupleDescriptor + { + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/TupleTableSlot.java b/pljava-api/src/main/java/org/postgresql/pljava/model/TupleTableSlot.java new file mode 100644 index 000000000..ba492f308 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/TupleTableSlot.java @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.model; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter.As; +import org.postgresql.pljava.Adapter.AsLong; +import org.postgresql.pljava.Adapter.AsDouble; +import org.postgresql.pljava.Adapter.AsInt; +import org.postgresql.pljava.Adapter.AsFloat; +import org.postgresql.pljava.Adapter.AsShort; +import org.postgresql.pljava.Adapter.AsChar; +import org.postgresql.pljava.Adapter.AsByte; +import org.postgresql.pljava.Adapter.AsBoolean; + +/** + * A PostgreSQL abstraction that can present a variety of underlying tuple + * representations in a common way. + *

    + * PL/Java may take the liberty of extending this class to present even some + * other tuple-like things that are not native tuple forms to PostgreSQL. + *

    + * A readable instance that relies on PostgreSQL's "deforming" can be + * constructed over any supported flavor of underlying tuple. Retrieving + * its values can involve JNI calls to the support functions in PostgreSQL. + * Its writable counterpart is also what must be used for constructing a tuple + * on the fly; after its values/nulls have been set (pure Java), it can be + * flattened (at the cost of a JNI call) to return a pass-by-reference + * {@code Datum} usable as a composite function argument or return value. + *

    + * A specialized instance, with support only for reading, can be constructed + * over a PostgreSQL tuple in its widely-used 'heap' form. PL/Java knows that + * form well enough to walk it and retrieve values mostly without JNI calls. + *

    + * A {@code TupleTableSlot} is not safe for concurrent use by multiple threads, + * in the absence of appropriate synchronization. + */ +public interface TupleTableSlot +{ + TupleDescriptor descriptor(); + RegClass relation(); + + /* + * Idea: move these methods out of public API, as they aren't very + * efficient. Make them invocable internally via TargetList. As an interim + * measure, remove their "throws SQLException" clauses; the implementation + * hasn't been throwing those anyway, but wrapping them in a runtime + * version. (Which needs to get unwrapped eventually, somewhere suitable.) + */ + T get(Attribute att, As adapter); + long get(Attribute att, AsLong adapter); + double get(Attribute att, AsDouble adapter); + int get(Attribute att, AsInt adapter); + float get(Attribute att, AsFloat adapter); + short get(Attribute att, AsShort adapter); + char get(Attribute att, AsChar adapter); + byte get(Attribute att, AsByte adapter); + boolean get(Attribute att, AsBoolean adapter); + + T get(int idx, As adapter); + long get(int idx, AsLong adapter); + double get(int idx, AsDouble adapter); + int get(int idx, AsInt adapter); + float get(int idx, AsFloat adapter); + short get(int idx, AsShort adapter); + char get(int idx, AsChar adapter); + byte get(int idx, AsByte adapter); + boolean get(int idx, AsBoolean adapter); + + default T sqlGet(int idx, As adapter) + { + return get(idx - 1, adapter); + } + + default long sqlGet(int idx, AsLong adapter) + { + return get(idx - 1, adapter); + } + + default double sqlGet(int idx, AsDouble adapter) + { + return get(idx - 1, adapter); + } + + default int sqlGet(int idx, AsInt adapter) + { + return get(idx - 1, adapter); + } + + default float sqlGet(int idx, AsFloat adapter) + { + return get(idx - 1, adapter); + } + + default short sqlGet(int idx, AsShort adapter) + { + return get(idx - 1, adapter); + } + + default char sqlGet(int idx, AsChar adapter) + { + return get(idx - 1, adapter); + } + + default byte sqlGet(int idx, 
AsByte adapter) + { + return get(idx - 1, adapter); + } + + default boolean sqlGet(int idx, AsBoolean adapter) + { + return get(idx - 1, adapter); + } + + /** + * A form of {@code TupleTableSlot} consisting of a number of indexable + * elements all of the same type, described by the single {@code Attribute} + * of a one-element {@code TupleDescriptor}. + *

    + * This is one form in which a PostgreSQL array can be accessed. + *

    + * The {@code get} methods that take an {@code Attribute} are not especially + * useful with this type of slot, and will simply return its first element. + */ + interface Indexed extends TupleTableSlot + { + /** + * Count of the slot's elements (one greater than the maximum index + * that may be passed to {@code get}). + */ + int elements(); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/model/package-info.java b/pljava-api/src/main/java/org/postgresql/pljava/model/package-info.java new file mode 100644 index 000000000..abfe63218 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/model/package-info.java @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +/** + * Interfaces that model a useful subset of the PostgreSQL system catalogs + * and related PostgreSQL abstractions for convenient Java access. + *

    CatalogObject and its subinterfaces

    + *

    + * The bulk of this package consists of interfaces extending + * {@link CatalogObject CatalogObject}, corresponding to various database + * objects represented in the PostgreSQL system catalogs. + *

    + * In many of the PostgreSQL catalog tables, each row is identified by an + * integer {@code oid}. When a row in a catalog table represents an object of + * some kind, the {@code oid} of that row (plus an identifier for which table + * it is defined in) will be enough to identify that object. + *

    CatalogObject

    + *

    + * In most of the catalog tables, reference to another object is by its bare + * {@code oid}; the containing table is understood. For example, the + * {@code prorettype} attribute of a row in {@code pg_proc} (the catalog of + * procedures and functions) is a bare {@code oid}, understood to identify a row + * in {@code pg_type}, namely, the data type that the function returns. + *

    + * Such an {@code oid} standing alone, when the containing catalog is only + * implied in context, is represented in PL/Java by an instance of the root + * class {@link CatalogObject CatalogObject} itself. Such an object does not + * carry much information; it can be asked for its {@code oid}, and it can be + * combined with the {@code oid} of some catalog table to produce a + * {@link CatalogObject.Addressed CatalogObject.Addressed}. + *

    CatalogObject.Addressed

    + *

    + * When the {@code oid} of a row in some catalog table is combined with an + * identifier for which catalog table, the result is the explicit + * address of an object. Because catalog tables themselves are defined by rows + * in one particular catalog table ({@code pg_class}), all that is needed to + * identify one is the {@code oid} of its defining row in {@code pg_class}. + * Therefore, a pair of numbers {@code (classId, objectId)} is a complete + * "object address" for most types of object in PostgreSQL. The {@code classId} + * identifies a catalog table (by its row in {@code pg_class}), and therefore + * what kind of object is intended, and the {@code objectId} identifies + * the specific row in that catalog table, and therefore the specific object. + *

    + * Such an {@code oid} pair is represented in PL/Java by an instance of + * {@link CatalogObject.Addressed CatalogObject.Addressed}—or, more + * likely, one of its specific subinterfaces in this package corresponding to + * the type of object. A function, for example, may be identified by a + * {@link RegProcedure RegProcedure} instance ({@code classId} identifies the + * {@code pg_proc} table, {@code objectId} is the row for the function), and its + * return type by a {@link RegType RegType} instance ({@code classId} identifies + * the {@code pg_type} table, and {@code objectId} the row defining the data + * type). + *

    CatalogObject.Component

    + *

    + * The only current exception in PostgreSQL to the + * two-{@code oid}s-identify-an-object rule is for attributes (columns of tables + * or components of composite types), which are identified by three numbers, + * the {@code classId} and {@code objectId} of the parent object, plus a third + * number {@code subId} for the component's position in the parent. + * {@link Attribute Attribute}, therefore, is that rare subinterface that also + * implements {@link CatalogObject.Component CatalogObject.Component}. + *

    + * For the most part, that detail should be of no consequence to a user of this + * package, who will probably only ever obtain {@code Attribute} instances + * from a {@link TupleDescriptor TupleDescriptor}. + *

    CatalogObject instances are singletons

    + *

    + * Object instances in this catalog model are lazily-populated singletons + * that exist upon being mentioned, and thereafter reliably identify the same + * {@code (classId,objectId)} in the PostgreSQL catalogs. (Whether that + * {@code (classId,objectId)} continues to identify the "same" thing in + * PostgreSQL can be affected by data-definition commands being issued in + * the same or some other session.) An instance is born lightweight, with only + * its identifying triple of numbers. Its methods that further expose properties + * of the addressed object (including whether any such object even exists) + * do not obtain that information from the PostgreSQL system caches until + * requested, and may then cache it in Java until signaled by PostgreSQL that + * some catalog change has invalidated it. + *

    CharsetEncoding

    + *

    + * While not strictly a catalog object (PostgreSQL's supported encodings are + * a hard-coded set, not represented in the catalogs), they are exposed by + * {@link CharsetEncoding CharsetEncoding} instances that otherwise behave much + * like the modeled catalog objects, and are returned by the {@code encoding()} + * methods on {@link Database Database} and {@link RegCollation RegCollation}. + * The one in use on the server (an often-needed value) is exposed by the + * {@link CharsetEncoding#SERVER_ENCODING SERVER_ENCODING} static. + *

    Lifespan subinterfaces

    + * Some PL/Java objects correspond to certain native structures in PostgreSQL + * and therefore must not be used beyond the native structures' lifespan. + * {@link Lifespan Lifespan} abstractly models any object in PostgreSQL that + * can be used to define, and detect the end of, a native-object lifespan. + * Two interfaces in this package that extend it and model specific PostgreSQL + * objects with that ability are {@link MemoryContext MemoryContext} and + * {@link ResourceOwner ResourceOwner}. + *

    TupleTableSlot, TupleDescriptor, and Adapter

    + *

    + * {@code TupleTableSlot} in PostgreSQL is a flexible abstraction that can + * present several variant forms of native tuples to be manipulated with + * a common API. Modeled on that, {@link TupleTableSlot TupleTableSlot} is + * further abstracted, and can present a uniform API in PL/Java even to + * tuple-like things—anything with a sequence of typed, possibly named + * values—that might not be in the form of PostgreSQL native tuples. + *

    + * The key to the order, types, and names of the components of a tuple is + * its {@link TupleDescriptor TupleDescriptor}, which in broad strokes is little + * more than a {@code List} of {@link Attribute Attribute}. + *

    + * Given a tuple, and an {@code Attribute} that identifies its PostgreSQL data + * type, the job of accessing that value as some appropriate Java type falls to + * an {@link Adapter Adapter}, of which PL/Java provides a selection to cover + * common types, and there is + * a {@link org.postgresql.pljava.adt.spi service-provider interface} allowing + * independent development of others. + *

    + * PL/Java supplies simple adapters when a Java primitive or some existing + * standard Java class is clearly the appropriate mapping for a PostgreSQL type. + * Other than that (and excepting the model classes in this package), PL/Java + * avoids defining new Java classes to represent other PostgreSQL types. Such + * classes may already have been developed for an application, or may be found + * in existing Java driver libraries for PostgreSQL, such as PGJDBC or + * PGJDBC-NG. It would be unhelpful for PL/Java to offer another such, + * independent and incompatible, set. + *

    + * Instead, for PostgreSQL types that might not have an obvious, appropriate + * mapping to a standard Java type, or that might have more than one plausible + * mapping, PL/Java provides a set of functional interfaces in the + * package {@link org.postgresql.pljava.adt}. An {@code Adapter} (encapsulating + * internal details of a data type) can then expose the content in a documented, + * semantically clear form, to a simple application-supplied functional + * interface implementation or lambda that will produce a result of whatever + * Java type the application may already wish to use. + * + * @author Chapman Flack + */ +package org.postgresql.pljava.model; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.Lifespan; diff --git a/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/Lexicals.java b/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/Lexicals.java index 1f6338534..228613b64 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/Lexicals.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/Lexicals.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2022 Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -47,6 +47,44 @@ public abstract class Lexicals { private Lexicals() { } // do not instantiate + static + { + /* + * Reject a Java version affected by JDK-8309515 bug. 
+ */ + Boolean hasBug = null; + Pattern p1 = Pattern.compile("(?.)(?.)"); + Pattern p2 = Pattern.compile("(?.)(?.)"); + Matcher m = p1.matcher("xy"); + + if ( m.matches() && 0 == m.start("a") ) + { + m.usePattern(p2); + if ( m.matches() ) + { + switch ( m.start("a") ) + { + case 0: + hasBug = true; + break; + case 1: + hasBug = false; + break; + } + } + } + + if ( null == hasBug ) + throw new ExceptionInInitializerError( + "Unexpected result while testing for bug JDK-8309515"); + + if ( hasBug ) + throw new ExceptionInInitializerError( + "Java bug JDK-8309515 affects this version of Java. PL/Java " + + "requires a Java version earlier than 20 (when the bug first " + + "appears) or recent enough to have had the bug fixed."); + } + /** Allowed as the first character of a regular identifier by ISO. */ public static final Pattern ISO_REGULAR_IDENTIFIER_START = Pattern.compile( @@ -241,7 +279,7 @@ private Lexicals() { } // do not instantiate * engine, letting it handle the details. */ public static final Pattern NEWLINE = Pattern.compile( - "(?ms:$(?:(?except newline, for any Java-recognized newline. @@ -494,6 +532,16 @@ public String toString() return deparse(UTF_8); } + /** + * Whether this instance represents the name of something unnamed. + * @return false except where overridden + * @see None#isUnnamed + */ + public boolean isUnnamed() + { + return false; + } + /** * Ensure deserialization doesn't produce any unknown {@code Identifier} * subclass. 
@@ -506,7 +554,7 @@ private void readObject(ObjectInputStream in) in.defaultReadObject(); Class c = getClass(); if ( c != Simple.class && c != Foldable.class && c != Folding.class - && c != Pseudo.class && c != Operator.class + && c != Pseudo.class && c != None.class && c != Operator.class && c != Qualified.class ) throw new InvalidObjectException( "deserializing unknown Identifier subclass: " @@ -584,8 +632,9 @@ public static Simple from(String s, boolean quoted) * Concatenates one or more strings or identifiers to the end of * this identifier. *

    - * The arguments may be instances of {@code Simple} or of - * {@code CharSequence}, in any combination. + * The arguments may be instances of {@code Simple} (but not of + * {@link None None}) or of {@code CharSequence}, in any + * combination. *

    * The resulting identifier folds if this identifier and all * identifier arguments fold and the concatenation (with all @@ -599,7 +648,7 @@ public Simple concat(Object... more) for ( Object o : more ) { - if ( o instanceof Simple ) + if ( o instanceof Simple && ! (o instanceof None) ) { Simple si = (Simple)o; foldable = foldable && si.folds(); @@ -613,7 +662,7 @@ else if ( o instanceof CharSequence ) else throw new IllegalArgumentException( "arguments to Identifier.Simple.concat() must be " + - "Identifier.Simple or CharSequence"); + "Identifier.Simple (and not None) or CharSequence"); } if ( foldable ) @@ -631,6 +680,12 @@ else if ( o instanceof CharSequence ) * does not have the form of a regular identifier, or if it has that * form but does not match its pgFold-ed form (without quotes, PG * would have folded it in that case). + *

    + * The PostgreSQL catalogs can contain empty strings in some + * contexts where a name might not be provided (for example, when + * {@code pg_proc.proargnames} is present because some parameters + * have names but not all of them do). So this method can accept an + * empty string, returning the {@link None None} instance. * @param s name of the simple identifier, as found in a system * catalog. * @return an Identifier.Simple or subclass appropriate to the form @@ -638,6 +693,9 @@ else if ( o instanceof CharSequence ) */ public static Simple fromCatalog(String s) { + if ( "".equals(s) ) + return None.INSTANCE; + if ( PG_REGULAR_IDENTIFIER.matcher(s).matches() ) { if ( s.equals(Folding.pgFold(s)) ) @@ -828,6 +886,14 @@ private Simple(String nonFolded) m_nonFolded = nonFolded; } + /** + * Used only by {@link None} below. + */ + private Simple() + { + m_nonFolded = ""; + } + private static String checkLength(String s) { int cpc = s.codePointCount(0, s.length()); @@ -842,6 +908,8 @@ private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject(); + if ( this instanceof None ) + return; String diag = checkLength(m_nonFolded); if ( null != diag ) throw new InvalidObjectException(diag); @@ -1069,6 +1137,81 @@ private Object readResolve() throws ObjectStreamException } } + /** + * What is the name of an unnamed parameter or column? + */ + public static final class None extends Simple + { + private static final long serialVersionUID = 1L; + + public static final None INSTANCE = new None(); + + /** + * A {@code None} identifier never equals anything. + */ + @Override + public boolean equals(Object other) + { + return false; + } + + /** + * True. 
+ */ + @Override + public boolean isUnnamed() + { + return true; + } + + private None() + { + } + + private Object readResolve() throws ObjectStreamException + { + switch ( m_nonFolded ) + { + case "": return INSTANCE; + default: + throw new InvalidObjectException( + "not the string value of None: " + m_nonFolded); + } + } + + /** + * Returns this object if there are zero arguments; otherwise throws + * {@link IllegalArgumentException}. + */ + @Override + public Simple concat(Object... more) + { + if ( 0 == more.length ) + return this; + throw new IllegalArgumentException( + "may not concatenate anything to None"); + } + + /** + * Throws {@link UnsupportedOperationException}. + */ + @Override + public String deparse(Charset cs) + { + throw new UnsupportedOperationException( + "no valid deparse result for Identifier.Simple.None"); + } + + /** + * Returns the empty string. + */ + @Override + public String toString() + { + return ""; + } + } + /** * Class representing an Identifier that names a PostgreSQL operator. */ @@ -1473,6 +1616,10 @@ else if ( 0 != opStart ) private Qualified(Simple qualifier, T local) { + if ( qualifier instanceof None || local instanceof None ) + throw new IllegalArgumentException( + "no component of a qualified identifier may be None"); + m_qualifier = qualifier; m_local = requireNonNull(local); } @@ -1485,6 +1632,10 @@ private void readObject(ObjectInputStream in) throw new InvalidObjectException( "Identifier.Qualified deserialized with " + "null local part"); + if ( m_qualifier instanceof None || m_local instanceof None ) + throw new InvalidObjectException( + "Identifier.Qualified deserialized with None as " + + "a component"); } @Override diff --git a/pljava-api/src/test/java/CatalogTest.java b/pljava-api/src/test/java/CatalogTest.java new file mode 100644 index 000000000..15aea321f --- /dev/null +++ b/pljava-api/src/test/java/CatalogTest.java @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. 
+ * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import org.postgresql.pljava.model.RegNamespace; + +public class CatalogTest +{ + public boolean whatbits(RegNamespace n) + { + return n.grants().stream().anyMatch( + g -> g.usageGranted() && g.createGranted() ); + } +} diff --git a/pljava-api/src/test/java/LexicalsTest.java b/pljava-api/src/test/java/LexicalsTest.java index 174258115..c712ba9ab 100644 --- a/pljava-api/src/test/java/LexicalsTest.java +++ b/pljava-api/src/test/java/LexicalsTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2016-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -29,6 +29,8 @@ import static org.postgresql.pljava.sqlgen.Lexicals.ISO_AND_PG_IDENTIFIER_CAPTURING; +import static + org.postgresql.pljava.sqlgen.Lexicals.NEWLINE; import static org.postgresql.pljava.sqlgen.Lexicals.SEPARATOR; import static @@ -45,6 +47,22 @@ public class LexicalsTest extends TestCase { public LexicalsTest(String name) { super(name); } + public void testNewline() throws Exception + { + Matcher m = NEWLINE.matcher("abcd\nefgh"); + m.region(4, 9); + assertTrue("newline 0", m.lookingAt()); + assertTrue("newline 1", m.lookingAt()); + + m.reset("abcd\r\nefgh").region(4, 10); + assertTrue("newline 2", m.lookingAt()); + assertEquals("\r\n", m.group()); + + m.reset("abcd\n\refgh").region(4, 10); + assertTrue("newline 3", m.lookingAt()); + assertEquals("\n", m.group()); + } + public void testSeparator() throws Exception { Pattern allTheRest = Pattern.compile(".*", 
Pattern.DOTALL); @@ -291,7 +309,9 @@ public void testIdentifierSerialization() throws Exception Operator.from("!@#%*"), null, - null + null, + + Simple.fromCatalog("") }; orig[5] = (( Simple )orig[2]).withQualifier((Simple)orig[1]); diff --git a/pljava-examples/pom.xml b/pljava-examples/pom.xml index f371bedf3..1f4ab03bc 100644 --- a/pljava-examples/pom.xml +++ b/pljava-examples/pom.xml @@ -53,11 +53,17 @@ org/postgresql/pljava/example/*.java org/postgresql/pljava/example/annotation/*.java + org/postgresql/pljava/example/polyglot/*.java --processor-module-path ${basedir}/../pljava-api/target/pljava-api-${project.version}.jar + + + org.postgresql.pljava.annotation.processing.DDRProcessor + + @@ -187,7 +193,8 @@ function executeReport(report, locale) var packages = [ "org.postgresql.pljava.example", - "org.postgresql.pljava.example.annotation" + "org.postgresql.pljava.example.annotation", + "org.postgresql.pljava.example.polyglot" ]; if ( isProfileActive('saxon-examples') ) @@ -230,11 +237,13 @@ function executeReport(report, locale) * A special file manager that will rewrite the RELDOTS seen in * -linkoffline above. The options a file manager recognizes must be the * first ones in args; handleFirstOptions below returns at the first one - * the file manager doesn't know what to do with. + * the file manager doesn't know what to do with. Java 19 seems to have + * learned to pass the args to the file manager without the fuss here. 
*/ var rmgr = new org.postgresql.pljava.pgxs.RelativizingFileManager( smgr, Charset.forName(report.outputEncoding)); - rmgr.handleFirstOptions(args); + if ( 0 > v.compareTo(java.lang.Runtime.Version.parse("19-ea")) ) + rmgr.handleFirstOptions(args); var task = tool.getTask(null, rmgr, diagListener, null, args, null); if (task.call()) diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/CatalogObjects.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/CatalogObjects.java new file mode 100644 index 000000000..9e2087c8f --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/CatalogObjects.java @@ -0,0 +1,497 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.lang.reflect.Method; +import static java.lang.reflect.Modifier.isPublic; + +import java.sql.Connection; +import static java.sql.DriverManager.getConnection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLXML; +import java.sql.Statement; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import java.util.logging.Logger; +import java.util.logging.Level; +import static java.util.logging.Level.INFO; +import static java.util.logging.Level.WARNING; + +import static java.util.stream.Collectors.joining; +import static java.util.stream.Collectors.toList; +import java.util.stream.Stream; + +import org.postgresql.pljava.Adapter.As; 
+import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.TargetList.Cursor; +import org.postgresql.pljava.TargetList.Projection; + +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLAction; +import org.postgresql.pljava.annotation.SQLType; + +import org.postgresql.pljava.model.CatalogObject; +import org.postgresql.pljava.model.CatalogObject.Addressed; +import org.postgresql.pljava.model.CatalogObject.Named; +import org.postgresql.pljava.model.Portal; +import static org.postgresql.pljava.model.Portal.ALL; +import static org.postgresql.pljava.model.Portal.Direction.FORWARD; +import org.postgresql.pljava.model.ProceduralLanguage; +import org.postgresql.pljava.model.RegClass; +import org.postgresql.pljava.model.RegClass.Known; +import org.postgresql.pljava.model.RegProcedure; +import org.postgresql.pljava.model.RegType; +import org.postgresql.pljava.model.SlotTester; +import org.postgresql.pljava.model.Transform; +import org.postgresql.pljava.model.Trigger; +import org.postgresql.pljava.model.TupleTableSlot; + +/** + * A test that PL/Java's various {@link CatalogObject} implementations are + * usable. + *

    + * They rely on named attributes, in PostgreSQL's system catalogs, that are + * looked up at class initialization, so on a PostgreSQL version that may not + * supply all the expected attributes, the issue may not be detected until + * an affected {@code CatalogObject} subclass is first used. This test uses as + * many of them as it can. + */ +@SQLAction(requires="catalogClasses function", install= + "SELECT javatest.catalogClasses()" +) +@SQLAction(requires="catalogInval function", install= + "SELECT javatest.catalogInval()" +) +public class CatalogObjects { + static final Logger logr = Logger.getAnonymousLogger(); + + static void log(Level v, String m, Object... p) + { + logr.log(v, m, p); + } + + static final As CatObjAdapter; + static final As PrLangAdapter; + static final As RegClsAdapter; + static final As ,?> RegPrcAdapter; + static final As RegTypAdapter; + static final As TrnsfmAdapter; + + static + { + try + { + Connection conn = getConnection("jdbc:default:connection"); + + // Get access to the hacked-together interim testing API + SlotTester t = conn.unwrap(SlotTester.class); + + String cls = "org.postgresql.pljava.pg.adt.OidAdapter"; + + @SuppressWarnings("unchecked") Object _1 = + CatObjAdapter = + (As)t.adapterPlease(cls, "INSTANCE"); + @SuppressWarnings("unchecked") Object _2 = + PrLangAdapter = + (As)t.adapterPlease(cls,"PLANG_INSTANCE"); + RegClsAdapter = + (As)t.adapterPlease(cls, "REGCLASS_INSTANCE"); + RegPrcAdapter = + (As,?>)t.adapterPlease( + cls, "REGPROCEDURE_INSTANCE"); + RegTypAdapter = + (As)t.adapterPlease(cls, "REGTYPE_INSTANCE"); + TrnsfmAdapter = + (As)t.adapterPlease(cls, "TRANSFORM_INSTANCE"); + } + catch ( SQLException | ReflectiveOperationException e ) + { + throw new ExceptionInInitializerError(e); + } + } + + @Function(schema="javatest", provides="catalogInval function") + public static void catalogInval() throws SQLException + { + try ( + Connection conn = getConnection("jdbc:default:connection"); + Statement s = 
conn.createStatement(); + ) + { + SlotTester st = conn.unwrap(SlotTester.class); + CatalogObject.Addressed catObj; + String description1; + String description2; + boolean passing = true; + + s.executeUpdate("CREATE TABLE tbl_a ()"); + catObj = findObj(s, st, RegClsAdapter, + "SELECT CAST ('tbl_a' AS pg_catalog.regclass)"); + description1 = catObj.toString(); + s.executeUpdate("ALTER TABLE tbl_a RENAME TO tbl_b"); + description2 = catObj.toString(); + if ( ! description2.equals(description1.replace("tbl_a", "tbl_b")) ) + { + log(WARNING, "RegClass before/after rename: {0} / {1}", + description1, description2); + passing = false; + } + s.executeUpdate("DROP TABLE tbl_b"); + description1 = catObj.toString(); + if ( ! description2.matches("\\Q"+description1+"\\E(?<=]).*") ) + { + log(WARNING, "RegClass before/after drop: {1} / {0}", + description1, description2); + passing = false; + } + + s.executeQuery( + "SELECT sqlj.alias_java_language('lng_a', sandboxed => true)") + .next(); + catObj = findObj(s, st, PrLangAdapter, + "SELECT oid FROM pg_catalog.pg_language " + + "WHERE lanname OPERATOR(pg_catalog.=) 'lng_a'"); + description1 = catObj.toString(); + s.executeUpdate("ALTER LANGUAGE lng_a RENAME TO lng_b"); + description2 = catObj.toString(); + if ( ! description2.equals(description1.replace("lng_a", "lng_b")) ) + { + log(WARNING, + "ProceduralLanguage before/after rename: {0} / {1}", + description1, description2); + passing = false; + } + s.executeUpdate("DROP LANGUAGE lng_b"); + description1 = catObj.toString(); + if ( ! 
description2.matches("\\Q"+description1+"\\E(?<=]).*") ) + { + log(WARNING, "ProceduralLanguage before/after drop: {1} / {0}", + description1, description2); + passing = false; + } + + s.executeUpdate( + "CREATE FUNCTION fn_a() RETURNS INTEGER LANGUAGE SQL " + + "AS 'SELECT 1'"); + catObj = findObj(s, st, RegPrcAdapter, + "SELECT CAST ('fn_a()' AS pg_catalog.regprocedure)"); + description1 = catObj.toString(); + s.executeUpdate("ALTER FUNCTION fn_a RENAME TO fn_b"); + description2 = catObj.toString(); + if ( ! description2.equals(description1.replace("fn_a", "fn_b")) ) + { + log(WARNING, "RegProcedure before/after rename: {0} / {1}", + description1, description2); + passing = false; + } + s.executeUpdate("DROP FUNCTION fn_b"); + description1 = catObj.toString(); + if ( ! description2.matches("\\Q"+description1+"\\E(?<=]).*") ) + { + log(WARNING, "RegProcedure before/after drop: {1} / {0}", + description1, description2); + passing = false; + } + + s.executeUpdate("CREATE TYPE typ_a AS ()"); + catObj = findObj(s, st, RegTypAdapter, + "SELECT CAST ('typ_a' AS pg_catalog.regtype)"); + description1 = catObj.toString(); + s.executeUpdate("ALTER TYPE typ_a RENAME TO typ_b"); + description2 = catObj.toString(); + if ( ! description2.equals(description1.replace("typ_a", "typ_b")) ) + { + log(WARNING, "RegType before/after rename: {0} / {1}", + description1, description2); + passing = false; + } + s.executeUpdate("DROP TYPE typ_b"); + description1 = catObj.toString(); + if ( ! description2.matches("\\Q"+description1+"\\E(?<=]).*") ) + { + log(WARNING, "RegType before/after drop: {1} / {0}", + description1, description2); + passing = false; + } + + s.executeUpdate( // a completely bogus transform, don't use it! 
+ "CREATE TRANSFORM FOR pg_catalog.circle LANGUAGE sql" + + " (FROM SQL WITH FUNCTION time_support)"); + catObj = findObj(s, st, TrnsfmAdapter, + "SELECT CAST (trf.oid AS pg_catalog.oid)" + + " FROM pg_catalog.pg_transform AS trf" + + " JOIN pg_catalog.pg_language AS lan ON trflang = lan.oid" + + " WHERE lanname = 'sql'" + + " AND trftype = CAST ('circle' AS pg_catalog.regtype)"); + boolean exists1 = catObj.exists(); + s.executeUpdate( + "DROP TRANSFORM FOR pg_catalog.circle LANGUAGE sql"); + boolean exists2 = catObj.exists(); + if ( exists2 ) + { + log(WARNING, "Transform.exists() before/after drop: {0} / {1}", + exists1, exists2); + passing = false; + } + + if ( passing ) + log(INFO, "selective invalidation ok"); + } + } + + private static > T findObj( + Statement s, SlotTester st, As adapter, String query) + throws SQLException + { + try ( + Portal p = st.unwrapAsPortal(s.executeQuery(query)) + ) + { + return + p.tupleDescriptor().applyOver(p.fetch(FORWARD, 1), c0 -> c0 + .stream() + .map(c -> c.apply(adapter, o -> o)) + .findFirst().get()); + } + } + + @Function(schema="javatest", provides="catalogClasses function") + public static void catalogClasses() throws SQLException + { + String catalogRelationsQuery = + "SELECT" + + " oid" + + " FROM" + + " pg_catalog.pg_class" + + " WHERE" + + " relnamespace = CAST ('pg_catalog' AS pg_catalog.regnamespace)" + + " AND" + + " relkind = 'r'"; + + try ( + Connection conn = getConnection("jdbc:default:connection"); + Statement s = conn.createStatement(); + ) + { + SlotTester st = conn.unwrap(SlotTester.class); + + List knownRegClasses; + + try ( + Portal p = + st.unwrapAsPortal(s.executeQuery(catalogRelationsQuery)) + ) + { + Projection proj = p.tupleDescriptor(); + List tups = p.fetch(FORWARD, ALL); + + Class knownCls = Known.class; + + knownRegClasses = + proj.applyOver(tups, c0 -> c0.stream() + .map(c -> c.apply(RegClsAdapter, regcls -> regcls)) + .filter(knownCls::isInstance) + .map(knownCls::cast) + .collect(toList()) 
+ ); + } + + int passed = 0; + int untested = 0; + + for ( Known regc : knownRegClasses ) + { + String objectQuery = + "SELECT oid FROM " + regc.qualifiedName() + " LIMIT 1"; + + Class classUnderTest = null; + + try ( + Portal p = + st.unwrapAsPortal(s.executeQuery(objectQuery)) + ) + { + Projection proj = p.tupleDescriptor(); + List tups = p.fetch(FORWARD, ALL); + Optional cobj = + proj.applyOver(tups, c0 -> c0.stream() + .map(c -> c.apply(CatObjAdapter, o -> o)) + .findAny()); + + if ( ! cobj.isPresent() ) + { + log(INFO, + "database has no {0} objects " + + "for representation test", regc.name()); + ++ untested; + continue; + } + + Addressed aobj = cobj.get().of(regc); + + classUnderTest = aobj.getClass(); + + if ( aobj instanceof Named ) + { + if ( aobj instanceof Trigger ) // name() won't work here + aobj.exists(); + else + ((Named)aobj).name(); + ++ passed; + continue; + } + + log(INFO, + "{0} untested, not instance of Named " + + "(does implement {1})", + classUnderTest.getCanonicalName().substring( + 1 + classUnderTest.getPackageName().length()), + Arrays.stream(classUnderTest.getInterfaces()) + .map(Class::getSimpleName) + .collect(joining(", ")) + ); + ++ untested; + } + catch ( LinkageError e ) + { + Throwable t = e.getCause(); + if ( null == t ) + t = e; + log(WARNING, + "{0} failed initialization: {1}", + classUnderTest.getName().substring( + 1 + classUnderTest.getPackageName().length()), + t.getMessage()); + } + } + + log((knownRegClasses.size() == passed + untested)? INFO : WARNING, + "of {0} catalog representations, {1} worked " + + "and {2} could not be tested", + knownRegClasses.size(), passed, untested); + } + } + + private static boolean engulfs(Class a, Class b) + { + return a.isAssignableFrom(b) || a == b.getDeclaringClass(); + } + + static final Comparator> + partialByEngulfs = (a,b) -> engulfs(a,b) ? 1 : engulfs(b,a) ? 
-1 : 0; + + /** + * Given a PostgreSQL classid and objid, obtains the corresponding Java + * CatalogObject, then finds the no-parameter, non-void-returning methods + * of all the CatalogObject interfaces it implements, and returns a table + * with the results of calling those methods. + */ + @Function( + schema="javatest", + out={ "interface text", "method text", "result text", "exception text" } + ) + public static ResultSetProvider catalogIntrospect( + @SQLType("regclass") CatalogObject cls, CatalogObject obj) + throws SQLException + { + cls = cls.of(RegClass.CLASSID); + if ( ! ( cls instanceof Known ) ) + throw new SQLException( + "Not a supported known catalog class: " + cls); + + Known kcls = (Known)cls; + Addressed aobj = obj.of(kcls); + + Class clazz = aobj.getClass(); + + Stream s = + Stream.iterate( + (new Class[] { clazz }), (a -> 0 < a.length), a -> + ( + Arrays.stream(a) + .flatMap(c -> + Stream.concat( + (c.isInterface() ? + Stream.of() : Stream.of(c.getSuperclass())), + Arrays.stream(c.getInterfaces()) + ) + ) + .filter(Objects::nonNull) + .toArray(Class[]::new) + ) + ) + .flatMap(Arrays::stream) + .filter(c -> c.isInterface() && engulfs(CatalogObject.class, c)) + .sorted(partialByEngulfs.thenComparing(Class::getSimpleName)) + .distinct() + .filter(i -> CatalogObject.class.getModule().equals(i.getModule())) + .filter(i -> isPublic(i.getModifiers())) + .flatMap(i -> + { + return Arrays.stream(i.getMethods()) + .filter(m -> i == m.getDeclaringClass()); + }) + .filter(m -> void.class != m.getReturnType()) + .filter(m -> 0 == m.getParameterCount()) + .filter(m -> ! (m.isSynthetic())); + + Iterator itr = s.iterator(); + + return new ResultSetProvider.Large() + { + @Override public boolean assignRowValues(ResultSet r, long rownum) + throws SQLException + { + if ( ! 
itr.hasNext() ) + return false; + + Method m = itr.next(); + r.updateString(1, m.getDeclaringClass().getSimpleName()); + r.updateString(2, m.getName()); + + try + { + Object v = m.invoke(aobj); + String text; + if ( v instanceof SQLXML ) + text = ((SQLXML)v).getString(); + else + text = Objects.toString(v); + r.updateString(3, text); + } + catch ( Throwable t ) + { + String s = + Stream.iterate(t, Objects::nonNull, Throwable::getCause) + .dropWhile( + ReflectiveOperationException.class::isInstance) + .map(Object::toString) + .collect(joining("\n")); + r.updateString(4, s); + } + + return true; + } + + @Override public void close() { s.close(); } + }; + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/CharsetEncodings.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/CharsetEncodings.java new file mode 100644 index 000000000..dfed6caa7 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/CharsetEncodings.java @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.nio.charset.Charset; + +import java.sql.ResultSet; +import java.sql.SQLException; + +import java.util.Iterator; + +import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.annotation.Function; +import static + org.postgresql.pljava.annotation.Function.OnNullInput.RETURNS_NULL; +import static org.postgresql.pljava.annotation.Function.Effects.IMMUTABLE; +import org.postgresql.pljava.annotation.SQLAction; + +import org.postgresql.pljava.model.CharsetEncoding; +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; +import static org.postgresql.pljava.model.CharsetEncoding.clientEncoding; + +/** + * Example using the {@link CharsetEncoding CharsetEncoding} interface. + */ +public class CharsetEncodings implements ResultSetProvider.Large +{ + /** + * Enumerate PostgreSQL's known character set encodings, indicating for + * each one whether it is the server encoding, whether it's the client + * encoding, its PostgreSQL name, its corresponding Java + * {@link Charset Charset} name, and the Java module that provides it. 
+ */ + @Function( + schema = "javatest", + out = { + "server boolean", "client boolean", "server_usable boolean", + "ordinal int", "pg_name text", "icu_name text", + "java_name text", "module text" + } + ) + public static ResultSetProvider charsets() + { + return new CharsetEncodings(); + } + + /** + * Enumerate Java's known character set encodings, trying to map them to + * PostgreSQL encodings, and indicating for + * each one whether it is the server encoding, whether it's the client + * encoding, its PostgreSQL name, its corresponding Java + * {@link Charset Charset} name, and the Java module that provides it. + */ + @Function( + schema = "javatest", + out = { + "server boolean", "client boolean", "server_usable boolean", + "ordinal int", "pg_name text", "icu_name text", + "java_name text", "module text" + } + ) + public static ResultSetProvider java_charsets(boolean try_aliases) + { + return new JavaEncodings(try_aliases); + } + + @Override + public void close() + { + } + + @Override + public boolean assignRowValues(ResultSet receiver, long currentRow) + throws SQLException + { + /* + * Shamelessly exploit the fact that currentRow will be passed as + * consecutive values starting at zero and that's the same way PG + * encodings are numbered. 
+ */ + + CharsetEncoding cse; + + try + { + cse = CharsetEncoding.fromOrdinal((int)currentRow); + } + catch ( IllegalArgumentException e ) + { + return false; + } + + if ( SERVER_ENCODING == cse ) + receiver.updateBoolean("server", true); + if ( clientEncoding() == cse ) + receiver.updateBoolean("client", true); + if ( cse.usableOnServer() ) + receiver.updateBoolean("server_usable", true); + receiver.updateInt("ordinal", cse.ordinal()); + receiver.updateString("pg_name", cse.name()); + receiver.updateString("icu_name", cse.icuName()); + + Charset cs = cse.charset(); + if ( null == cs ) + return true; + + receiver.updateString("java_name", cs.name()); + receiver.updateString("module", cs.getClass().getModule().getName()); + + return true; + } + + static class JavaEncodings implements ResultSetProvider.Large + { + final Iterator iter = + Charset.availableCharsets().values().iterator(); + final boolean tryAliases; + + JavaEncodings(boolean tryAliases) + { + this.tryAliases = tryAliases; + } + + @Override + public void close() + { + } + + @Override + public boolean assignRowValues(ResultSet receiver, long currentRow) + throws SQLException + { + if ( ! iter.hasNext() ) + return false; + + Charset cs = iter.next(); + + receiver.updateString("java_name", cs.name()); + receiver.updateString("module", + cs.getClass().getModule().getName()); + + CharsetEncoding cse = null; + + try + { + cse = CharsetEncoding.fromName(cs.name()); + } + catch ( IllegalArgumentException e ) + { + } + + /* + * If the canonical Java name didn't match up with a PG encoding, + * try the first match found for any of the Java charset's aliases. + * This is not an especially dependable idea: the aliases are a Set, + * so they don't enumerate in a reproducible order, and some Java + * aliases are PG aliases for different charsets. 
+ */ + if ( null == cse && tryAliases ) + { + for ( String alias : cs.aliases() ) + { + try + { + cse = CharsetEncoding.fromName(alias); + break; + } + catch ( IllegalArgumentException e ) + { + } + } + } + + if ( null == cse ) + return true; + + if ( SERVER_ENCODING == cse ) + receiver.updateBoolean("server", true); + if ( clientEncoding() == cse ) + receiver.updateBoolean("client", true); + if ( cse.usableOnServer() ) + receiver.updateBoolean("server_usable", true); + receiver.updateInt("ordinal", cse.ordinal()); + receiver.updateString("pg_name", cse.name()); + receiver.updateString("icu_name", cse.icuName()); + + return true; + } + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ConditionalDDR.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ConditionalDDR.java index 0140f4372..d982be0be 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ConditionalDDR.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ConditionalDDR.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -11,6 +11,7 @@ */ package org.postgresql.pljava.example.annotation; +import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.SQLAction; /** @@ -24,38 +25,51 @@ * that are not tagged with an implementor name). The default setting of * {@code pljava.implementors} is simply {@code postgresql}. *

    - * In this example, an SQLAction (with the default implementor name PostgreSQL - * so it should always execute) tests some condition and, based on the result, - * adds {@code LifeIsGood} to the list of recognized implementor names. + * In this example, an {@code SQLAction} (with the default implementor name + * {@code PostgreSQL} so it should always execute) tests some condition and, + * based on the result, adds {@code LifeIsGood} to the list of recognized + * implementor names. *

    - * Later SQLActions with that implementor name should also be executed, while - * those with a different, unrecognized implementor should not. + * Later {@code SQLAction}s with that implementor name should also be executed, + * while those with a different, unrecognized implementor should not. *

    * That is what happens at deployment (or undeployment) time, when the * jar has been loaded into the target database and the deployment descriptor is * being processed. *

    - * The {@code provides} and {@code requires} attributes matter at + * The {@code provides} attributes matter at * compile time: they are hints to the DDR generator so it will be sure - * to write the SQLAction that tests the condition ahead of the ones that - * depend on the condition having been tested. The example illustrates that an - * SQLAction's {@code implementor} is treated as an implicit {@code requires}. - * Unlike an explicit one, it is weak: if there is nothing declared that - * {@code provides} it, that's not an error; affected SQLActions will just be - * placed as late in the generated DDR as other dependencies allow, in case - * something in the preceding actions will be setting those implementor tags. + * to write the {@code SQLAction} that tests the condition ahead of whatever + * depends on the condition having been tested. The example illustrates that + * {@code implementor} is treated also as an implicit {@code requires}. *

    - * The implicit {@code requires} derived from an {@code implementor} is also - * special in another way: it does not have its sense reversed when generating - * the "undeploy" actions of the deployment descriptor. Ordinary requirements - * do, so the dependent objects get dropped before the things they depend on. - * But the code for setting a conditional implementor tag has to be placed - * ahead of the uses of the tag, whether deploying or undeploying. + * Note: while ISO SQL/JRT specifies that an {@code } is an + * SQL identifier, which would match case-insensitively unless quoted, PL/Java + * treats {@code provides} elements as arbitrary strings that can only be + * matched with identical spelling and case. Therefore, the matching of the + * implicit {@code requires} of an {@code } and the explicit + * {@code provides} on an {@code SQLAction} depends on the {@code implementor} + * and {@code provides} values being supplied with identical spelling and case, *

    - * An {@code SQLAction} setting an implementor tag does not need to have any - * {@code remove=} actions. If it does not (the usual case), its + * The dependency created when matching {@code implementor} to {@code provides} + * differs in three ways from an explicit dependency between {@code requires} + * and {@code provides}: + *

    *

    * This example adds {@code LifeIsGood} ahead of the prior content of * {@code pljava.implementors}. Simply replacing the value would stop the @@ -64,8 +78,8 @@ * local, so it is reverted when the transaction completes. *

    * In addition to the goodness-of-life examples, this file also generates - * several statements setting PostgreSQL-version-based implementor tags that - * are relied on by various other examples in this directory. + * one or more statements setting PostgreSQL-version-based implementor names + * that are relied on by various other examples in this directory. */ @SQLAction(provides={"LifeIsGood","LifeIsNotGood"}, install= "SELECT CASE 42 WHEN 42 THEN " + @@ -78,51 +92,12 @@ ) @SQLAction(implementor="LifeIsGood", install= - "SELECT javatest.logmessage('INFO', 'Looking good!')" + "SELECT javatest.logmessage('INFO', 'ConditionalDDR looking good!')" ) @SQLAction(implementor="LifeIsNotGood", install= - "SELECT javatest.logmessage('WARNING', 'This should not be executed')" -) - -@SQLAction(provides="postgresql_ge_80300", install= - "SELECT CASE WHEN" + - " 80300 <= CAST(current_setting('server_version_num') AS integer)" + - " THEN set_config('pljava.implementors', 'postgresql_ge_80300,' || " + - " current_setting('pljava.implementors'), true) " + - "END" -) - -@SQLAction(provides="postgresql_ge_80400", install= - "SELECT CASE WHEN" + - " 80400 <= CAST(current_setting('server_version_num') AS integer)" + - " THEN set_config('pljava.implementors', 'postgresql_ge_80400,' || " + - " current_setting('pljava.implementors'), true) " + - "END" -) - -@SQLAction(provides="postgresql_ge_90000", install= - "SELECT CASE WHEN" + - " 90000 <= CAST(current_setting('server_version_num') AS integer)" + - " THEN set_config('pljava.implementors', 'postgresql_ge_90000,' || " + - " current_setting('pljava.implementors'), true) " + - "END" -) - -@SQLAction(provides="postgresql_ge_90100", install= - "SELECT CASE WHEN" + - " 90100 <= CAST(current_setting('server_version_num') AS integer)" + - " THEN set_config('pljava.implementors', 'postgresql_ge_90100,' || " + - " current_setting('pljava.implementors'), true) " + - "END" -) - -@SQLAction(provides="postgresql_ge_90300", install= - "SELECT 
CASE WHEN" + - " 90300 <= CAST(current_setting('server_version_num') AS integer)" + - " THEN set_config('pljava.implementors', 'postgresql_ge_90300,' || " + - " current_setting('pljava.implementors'), true) " + - "END" + "SELECT javatest.logmessage('WARNING', " + + " 'ConditionalDDR: This should not be executed')" ) @SQLAction(provides="postgresql_ge_100000", install= @@ -132,4 +107,28 @@ " current_setting('pljava.implementors'), true) " + "END" ) -public class ConditionalDDR { } +public class ConditionalDDR +{ + private ConditionalDDR() { } // do not instantiate + + /** + * Tests class names in the supplied order, returning false as soon as any + * cannot be found by the class loader(s) available to the examples jar, or + * true if all can be found. + */ + @Function(variadic = true, provides = "presentOnClassPath") + public static boolean presentOnClassPath(String[] className) + { + try + { + ClassLoader myLoader = ConditionalDDR.class.getClassLoader(); + for ( String cn : className ) + Class.forName(cn, false, myLoader); + return true; + } + catch ( ClassNotFoundException e ) + { + return false; + } + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Enumeration.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Enumeration.java index 2fcee7df1..359bb2871 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Enumeration.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Enumeration.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -21,16 +21,12 @@ /** * Confirms the mapping of PG enum and Java String, and arrays of each, as * parameter and return types. - *

    - * This example relies on {@code implementor} tags reflecting the PostgreSQL - * version, set up in the {@link ConditionalDDR} example. PostgreSQL before 8.3 - * did not have enum types. */ -@SQLAction(provides="mood type", implementor="postgresql_ge_80300", +@SQLAction(provides="mood type", install="CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy')", remove="DROP TYPE mood" ) -@SQLAction(implementor="postgresql_ge_80300", +@SQLAction( requires={"textToMood", "moodToText", "textsToMoods", "moodsToTexts"}, install={ "SELECT textToMood('happy')", @@ -41,26 +37,22 @@ ) public class Enumeration { - @Function(requires="mood type", provides="textToMood", type="mood", - implementor="postgresql_ge_80300") + @Function(requires="mood type", provides="textToMood", type="mood") public static String textToMood(String s) { return s; } - @Function(requires="mood type", provides="moodToText", - implementor="postgresql_ge_80300") + @Function(requires="mood type", provides="moodToText") public static String moodToText(@SQLType("mood")String s) { return s; } - @Function(requires="mood type", provides="textsToMoods", type="mood", - implementor="postgresql_ge_80300") + @Function(requires="mood type", provides="textsToMoods", type="mood") public static Iterator textsToMoods(String[] ss) { return Arrays.asList(ss).iterator(); } - @Function(requires="mood type", provides="moodsToTexts", - implementor="postgresql_ge_80300") + @Function(requires="mood type", provides="moodsToTexts") public static Iterator moodsToTexts(@SQLType("mood[]")String[] ss) { return Arrays.asList(ss).iterator(); diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/JDBC42_21.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/JDBC42_21.java index 1b0d35e28..dde2eaabf 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/JDBC42_21.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/JDBC42_21.java @@ 
-1,5 +1,5 @@ /* - * Copyright (c) 2018-2022 Tada AB and other contributors, as listed below. + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -11,23 +11,22 @@ */ package org.postgresql.pljava.example.annotation; +import java.sql.SQLException; + +import org.postgresql.pljava.SessionManager; + import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.example.annotation.ConditionalDDR; // for javadoc - /** * Exercise new mappings between date/time types and java.time classes * (JDBC 4.2 change 21). *

    * Defines a method {@link #javaSpecificationGE javaSpecificationGE} that may be * of use for other examples. - *

    - * Relies on PostgreSQL-version-specific implementor tags set up in the - * {@link ConditionalDDR} example. */ @SQLAction( - implementor="postgresql_ge_90300",requires="TypeRoundTripper.roundTrip", + requires="TypeRoundTripper.roundTrip", install={ " SELECT" + " CASE WHEN every(orig = roundtripped)" + @@ -138,9 +137,10 @@ public class JDBC42_21 * recent as the argument ('1.6', '1.7', '1.8', '9', '10', '11', ...). */ @Function(schema="javatest", provides="javaSpecificationGE") - public static boolean javaSpecificationGE(String want) + public static boolean javaSpecificationGE(String want) throws SQLException { - String got = System.getProperty("java.specification.version"); + String got = SessionManager.current().frozenSystemProperties() + .getProperty("java.specification.version"); if ( want.startsWith("1.") ) want = want.substring(2); if ( got.startsWith("1.") ) diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/MemoryContexts.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/MemoryContexts.java new file mode 100644 index 000000000..2e6be6496 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/MemoryContexts.java @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.Connection; +import static java.sql.DriverManager.getConnection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.Savepoint; +import java.sql.Statement; +import java.sql.SQLException; + +import java.util.Iterator; + +import java.util.stream.Stream; + +import org.postgresql.pljava.ResultSetProvider; + +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLAction; +import org.postgresql.pljava.annotation.SQLType; + +/** + * Functions to check that allocations are being made in the "upper" memory + * context as necessary when SPI has been used. + */ +public class MemoryContexts { + private MemoryContexts() + { + } + + private static Connection ensureSPIConnected() throws SQLException + { + Connection c = getConnection("jdbc:default:connection"); + try ( Statement s = c.createStatement() ) + { + s.execute("UPDATE javatest.foobar_1 SET stuff = 'a' WHERE FALSE"); + } + return c; + } + + /** + * Return an array result after connecting SPI, to ensure the result isn't + * allocated in SPI's short-lived memory context. + */ + @Function(schema = "javatest") + public static String[] nonSetArrayResult() throws SQLException + { + ensureSPIConnected(); + return new String[] { "Hello", "world" }; + } + + /** + * Return a coerced result after connecting SPI, to ensure the result isn't + * allocated in SPI's short-lived memory context. + *

    + * The mismatch of the Java type {@code int} and the PostgreSQL type + * {@code numeric} forces PL/Java to create a {@code Coerce} node applying + * a cast, the correct allocation of which is tested here. + */ + @Function(schema = "javatest", type = "numeric") + public static int nonSetCoercedResult() throws SQLException + { + ensureSPIConnected(); + return 42; + } + + /** + * Return a composite result after connecting SPI, to ensure the result + * isn't allocated in SPI's short-lived memory context. + */ + @Function(schema = "javatest", out = { "a text", "b text" }) + public static boolean nonSetCompositeResult(ResultSet out) + throws SQLException + { + ensureSPIConnected(); + out.updateString(1, "Hello"); + out.updateString(2, "world"); + return true; + } + + /** + * Return a fixed-length base UDT result after connecting SPI, to ensure + * the result isn't allocated in SPI's short-lived memory context. + */ + @Function(schema = "javatest") + public static ComplexScalar nonSetFixedUDTResult() throws SQLException + { + ensureSPIConnected(); + return new ComplexScalar(1.2, 3.4, "javatest.complexscalar"); + } + + /** + * Return a composite UDT result after connecting SPI, to ensure + * the result isn't allocated in SPI's short-lived memory context. + */ + @Function(schema = "javatest") + public static ComplexTuple nonSetCompositeUDTResult() throws SQLException + { + Connection c = ensureSPIConnected(); + try ( + Statement s = c.createStatement(); + ResultSet r = s.executeQuery( + "SELECT CAST ( '(1.2,3.4)' AS javatest.complextuple )") + ) + { + r.next(); + return r.getObject(1, ComplexTuple.class); + } + } + + /** + * Return a set-of (non-composite) result after connecting SPI, to ensure + * the result isn't allocated in SPI's short-lived memory context. 
+ */ + @Function(schema = "javatest") + public static Iterator setNonCompositeResult() + { + final Iterator it = Stream.of("a", "b", "c").iterator(); + return new Iterator<>() + { + @Override + public boolean hasNext() + { + try + { + ensureSPIConnected(); + return it.hasNext(); + } + catch ( SQLException e ) + { + throw new RuntimeException(e.getMessage(), e); + } + } + + @Override + public String next() + { + try + { + ensureSPIConnected(); + return it.next(); + } + catch ( SQLException e ) + { + throw new RuntimeException(e.getMessage(), e); + } + } + }; + } + + /** + * Return a set-of composite result after connecting SPI, to ensure + * the result isn't allocated in SPI's short-lived memory context. + */ + @Function(schema = "javatest", out = {"a text", "b text"}) + public static ResultSetProvider setCompositeResult() + { + return new ResultSetProvider.Large() + { + @Override + public boolean assignRowValues(ResultSet out, long currentRow) + throws SQLException + { + ensureSPIConnected(); + if ( currentRow > 2 ) + return false; + out.updateString(1, "a"); + out.updateString(2, "b"); + return true; + } + + @Override + public void close() + { + } + }; + } + + /** + * Prepare a statement after connecting SPI and use it later, to ensure + * important allocations are not in SPI's short-lived memory context. 
+ */ + @Function(schema = "javatest", out = {"a text", "b text"}) + public static ResultSetProvider preparedStatementContext() + throws SQLException + { + Connection c = ensureSPIConnected(); + final PreparedStatement ps = c.prepareStatement( + "SELECT " + + " to_char( " + + " extract(microseconds FROM statement_timestamp()) % 3999, " + + " ?)"); + ps.setString(1, "RN"); + + return new ResultSetProvider.Large() + { + @Override + public boolean assignRowValues(ResultSet out, long currentRow) + throws SQLException + { + ensureSPIConnected(); + if ( currentRow > 2 ) + return false; + try ( ResultSet rs = ps.executeQuery() ) + { + rs.next(); + out.updateString(1, rs.getString(1)); + ps.setString(1, "RN"); + return true; + } + } + + @Override + public void close() + { + } + }; + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/MishandledExceptions.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/MishandledExceptions.java new file mode 100644 index 000000000..95073d3f9 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/MishandledExceptions.java @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2025 + Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.Connection; +import static java.sql.DriverManager.getConnection; +import java.sql.SQLException; +import java.sql.Statement; + +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLType; + +/** + * Illustrates how not to handle an exception thrown by a call into PostgreSQL. + *

    + * Such an exception must either be rethrown (or result in some higher-level + * exception being rethrown) or cleared by rolling back the transaction or + * a previously-established savepoint. If it is simply caught and not propagated + * and the error condition is not cleared, no further calls into PostgreSQL + * functionality can be made within the containing transaction. + * + * @see Catching PostgreSQL exceptions + * in Java + */ +public interface MishandledExceptions +{ + /** + * Executes an SQL statement that produces an error (twice, if requested), + * catching the resulting exception but not propagating it or rolling back + * a savepoint; then throws an unrelated exception if succeed is false. + */ + @Function(schema = "javatest") + static String mishandle( + boolean twice, @SQLType(defaultValue="true")boolean succeed) + throws SQLException + { + String rslt = null; + do + { + try + ( + Connection c = getConnection("jdbc:default:connection"); + Statement s = c.createStatement(); + ) + { + s.execute("DO LANGUAGE \"no such language\" 'no such thing'"); + } + catch ( SQLException e ) + { + rslt = e.toString(); + /* nothing rethrown, nothing rolled back <- BAD PRACTICE */ + } + } + while ( ! (twice ^= true) ); + + if ( succeed ) + return rslt; + + throw new SQLException("unrelated"); + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Modules.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Modules.java new file mode 100644 index 000000000..b2e9826a1 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Modules.java @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.lang.module.ModuleDescriptor; + +import java.sql.ResultSet; +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.Objects; + +import java.util.stream.Stream; + +import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.annotation.Function; +import static org.postgresql.pljava.annotation.Function.Effects.STABLE; + +/** + * Example code to support querying for the modules in Java's boot layer. + */ +public class Modules implements ResultSetProvider.Large { + /** + * Returns information on the named modules in Java's boot module layer. + */ + @Function( + effects = STABLE, + out = { + "name pg_catalog.text", + "any_unqualified_exports boolean", + "any_unqualified_opens boolean" + } + ) + public static ResultSetProvider java_modules() + { + return new Modules( + ModuleLayer.boot().modules().stream().map(Module::getDescriptor) + .filter(Objects::nonNull)); + } + + private final Iterator iterator; + private final Runnable closer; + + private Modules(Stream s) + { + iterator = s.iterator(); + closer = s::close; + } + + @Override + public boolean assignRowValues(ResultSet receiver, long currentRow) + throws SQLException + { + if ( ! iterator.hasNext() ) + return false; + + ModuleDescriptor md = iterator.next(); + + receiver.updateString(1, md.name()); + + receiver.updateBoolean(2, + md.exports().stream().anyMatch(e -> ! e.isQualified())); + + receiver.updateBoolean(3, + md.isOpen() || + md.opens().stream().anyMatch(o -> ! 
o.isQualified())); + + return true; + } + + @Override + public void close() + { + closer.run(); + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/OnInterface.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/OnInterface.java new file mode 100644 index 000000000..e98a8cfb8 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/OnInterface.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import org.postgresql.pljava.annotation.Function; + +/** + * Illustrates PL/Java functions on an interface instead of a class. + *

    + * The SQL/JRT standard has always just said "class", but there is no technical + * obstacle to permitting a PL/Java function to be a static interface method, so + * that earlier restriction has been relaxed. + */ +public interface OnInterface +{ + /** + * Returns the answer. + */ + @Function(schema = "javatest") + static int answer() + { + return 42; + } + + interface A + { + /** + * Again the answer. + */ + @Function(schema = "javatest") + static int nestedAnswer() + { + return 42; + } + } + + class B + { + /** + * Still the answer. + */ + @Function(schema = "javatest") + public static int nestedClassAnswer() + { + return 42; + } + + public static class C + { + /** + * That answer again. + */ + @Function(schema = "javatest") + public static int moreNestedAnswer() + { + return 42; + } + } + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PGF1010962.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PGF1010962.java index 2b06f481e..747f2ef6d 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PGF1010962.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PGF1010962.java @@ -10,12 +10,8 @@ /** * A gnarly test of TupleDesc reference management, crafted by Johann Oskarsson * for bug report 1010962 on pgFoundry. - *

    - * This example relies on {@code implementor} tags reflecting the PostgreSQL - * version, set up in the {@link ConditionalDDR} example. Before PostgreSQL 8.4, - * there is no array of {@code RECORD}, which this test requires. */ -@SQLAction(requires="1010962 func", implementor="postgresql_ge_80400", +@SQLAction(requires="1010962 func", install={ "CREATE TYPE javatest.B1010962 AS ( b1_val float8, b2_val int)", @@ -51,8 +47,7 @@ public class PGF1010962 * @param receiver Looks polymorphic, but expects an array of A1010962 * @return 0 */ - @Function(schema="javatest", provides="1010962 func", - implementor="postgresql_ge_80400") + @Function(schema="javatest", provides="1010962 func") public static int complexParam( ResultSet receiver[] ) throws SQLException { diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java index e735376c2..f6be420a8 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Tada AB and other contributors, as listed below. + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -36,6 +36,7 @@ import java.io.IOException; +import java.util.List; import java.util.Map; import java.util.HashMap; @@ -65,11 +66,14 @@ import javax.xml.validation.SchemaFactory; import org.postgresql.pljava.Adjusting; +import static org.postgresql.pljava.Adjusting.XML.setFirstSupported; import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.MappedUDT; import org.postgresql.pljava.annotation.SQLAction; import org.postgresql.pljava.annotation.SQLType; +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; + import static org.postgresql.pljava.example.LoggerTest.logMessage; /* Imports needed just for the SAX flavor of "low-level XML echo" below */ @@ -112,16 +116,7 @@ "END" ) -@SQLAction(implementor="postgresql_ge_80400", - provides="postgresql_xml_ge84", - install= - "SELECT CASE (SELECT 1 FROM pg_type WHERE typname = 'xml') WHEN 1" + - " THEN set_config('pljava.implementors', 'postgresql_xml_ge84,' || " + - " current_setting('pljava.implementors'), true) " + - "END" -) - -@SQLAction(implementor="postgresql_xml_ge84", requires="echoXMLParameter", +@SQLAction(implementor="postgresql_xml", requires="echoXMLParameter", install= "WITH" + " s(how) AS (SELECT generate_series(1, 7))," + @@ -146,7 +141,7 @@ " r" ) -@SQLAction(implementor="postgresql_xml_ge84", requires="proxiedXMLEcho", +@SQLAction(implementor="postgresql_xml", requires="proxiedXMLEcho", install= "WITH" + " s(how) AS (SELECT unnest('{1,2,4,5,6,7}'::int[]))," + @@ -170,7 +165,7 @@ " r" ) -@SQLAction(implementor="postgresql_xml_ge84", requires="lowLevelXMLEcho", +@SQLAction(implementor="postgresql_xml", requires="lowLevelXMLEcho", install={ "SELECT" + " preparexmlschema('schematest', $$" + @@ -279,6 +274,43 @@ " WHERE extname = 'pljava'" } ) + +@SQLAction(implementor="postgresql_xml", + provides="xml_java_ge_22", 
requires="javaSpecificationGE", install= + "SELECT CASE WHEN" + + " javatest.javaSpecificationGE('22')" + + " THEN set_config('pljava.implementors', 'xml_java_ge_22,' || " + + " current_setting('pljava.implementors'), true) " + + "END" +) + +@SQLAction(implementor="xml_java_ge_22", requires="lowLevelXMLEcho", install= + "WITH" + + " s(how) AS (SELECT unnest('{5,6,7}'::int[]))," + + " r(isdoc) AS (" + + " SELECT" + + " javatest.lowlevelxmlecho(" + + /* + * A truly minimal DTD, , cannot be ignored by Java 22's SAX/DOM + * parser (though it can be, when using the StAX API). NullPointerException + * calling getActiveGrammar().isImmutable() is the result. Bug: JDK-8329295 + * Including either an externalID or an internal subset (like the empty [] + * here) avoids the issue. + */ + " ''::xml, how, params) IS DOCUMENT" + + " FROM" + + " s," + + " (SELECT null::void AS ignoreDTD) AS params" + + " )" + + "SELECT" + + " CASE WHEN every(isdoc)" + + " THEN javatest.logmessage('INFO', 'jdk.xml.dtd.support=ignore OK')" + + " ELSE javatest.logmessage('WARNING', 'jdk.xml.dtd.support=ignore NG')" + + " END " + + "FROM" + + " r" +) + @MappedUDT(schema="javatest", name="onexml", structure="c1 xml", implementor="postgresql_xml", comment="A composite type mapped by the PassXML example class") @@ -518,33 +550,51 @@ private static void prepareXMLTransform(String name, SQLXML source, int how, builtin ? 
TransformerFactory.newDefaultInstance() : TransformerFactory.newInstance(); - String exf = - "http://www.oracle.com/xml/jaxp/properties/enableExtensionFunctions"; - String ecl = "jdk.xml.transform.extensionClassLoader"; + + String legacy_pfx = "http://www.oracle.com/xml/jaxp/properties/"; + String java17_pfx = "jdk.xml."; + String exf_sfx = "enableExtensionFunctions"; + + String ecl_legacy = "jdk.xml.transform.extensionClassLoader"; + String ecl_java17 = "jdk.xml.extensionClassLoader"; + Source src = sxToSource(source, how, adjust); + try { - try - { - tf.setFeature(exf, enableExtensionFunctions); - } - catch ( TransformerConfigurationException e ) + Exception e; + + e = setFirstSupported(tf::setFeature, enableExtensionFunctions, + List.of(TransformerConfigurationException.class), null, + java17_pfx + exf_sfx, legacy_pfx + exf_sfx); + + if ( null != e ) { - logMessage("WARNING", - "non-builtin transformer: ignoring " + e.getMessage()); + if ( builtin ) + throw new SQLException( + "Configuring XML transformation: " + e.getMessage(), e); + else + logMessage("WARNING", + "non-builtin transformer: ignoring " + e.getMessage()); } if ( withJava ) { - try - { - tf.setAttribute(ecl, - Thread.currentThread().getContextClassLoader()); - } - catch ( IllegalArgumentException e ) + e = setFirstSupported(tf::setAttribute, + Thread.currentThread().getContextClassLoader(), + List.of(IllegalArgumentException.class), null, + ecl_java17, ecl_legacy); + + if ( null != e ) { - logMessage("WARNING", - "non-builtin transformer: ignoring " + e.getMessage()); + if ( builtin ) + throw new SQLException( + "Configuring XML transformation: " + + e.getMessage(), e); + else + logMessage("WARNING", + "non-builtin transformer: ignoring " + + e.getMessage()); } } @@ -594,8 +644,7 @@ public static SQLXML transformXML( * for setting the Transformer to use the server encoding. 
*/ if ( rlt instanceof StreamResult ) - t.setOutputProperty(ENCODING, - System.getProperty("org.postgresql.server.encoding")); + t.setOutputProperty(ENCODING, SERVER_ENCODING.charset().name()); else if ( Boolean.TRUE.equals(indent) ) logMessage("WARNING", "indent requested, but howout specifies a non-stream " + @@ -663,8 +712,7 @@ private static SQLXML echoSQLXML(SQLXML sx, int howin, int howout) * for setting the Transformer to use the server encoding. */ if ( howout < 5 ) - t.setOutputProperty(ENCODING, - System.getProperty("org.postgresql.server.encoding")); + t.setOutputProperty(ENCODING, SERVER_ENCODING.charset().name()); t.transform(src, rlt); } catch ( TransformerException te ) @@ -702,7 +750,7 @@ private static SQLXML echoSQLXML(SQLXML sx, int howin, int howout) * still be exercised by calling this method, explicitly passing * {@code adjust => NULL}. */ - @Function(schema="javatest", implementor="postgresql_xml_ge84", + @Function(schema="javatest", implementor="postgresql_xml", provides="lowLevelXMLEcho") public static SQLXML lowLevelXMLEcho( SQLXML sx, int how, @SQLType(defaultValue={}) ResultSet adjust) @@ -772,8 +820,7 @@ public static SQLXML lowLevelXMLEcho( *

    * Column names in the adjust row are case-insensitive versions of * the method names in {@link Adjusting.XML.Parsing}, and the value of each - * column should be of the appropriate type (at present, boolean for all of - * them). + * column should be of the appropriate type (if the method has a parameter). * @param adjust A row type as described above, possibly of no columns if no * adjustments are wanted * @param axp An instance of Adjusting.XML.Parsing @@ -789,8 +836,12 @@ T applyAdjustments(ResultSet adjust, T axp) for ( int i = 1; i <= n; ++i ) { String k = rsmd.getColumnLabel(i); - if ( "allowDTD".equalsIgnoreCase(k) ) + if ( "lax".equalsIgnoreCase(k) ) + axp.lax(adjust.getBoolean(i)); + else if ( "allowDTD".equalsIgnoreCase(k) ) axp.allowDTD(adjust.getBoolean(i)); + else if ( "ignoreDTD".equalsIgnoreCase(k) ) + axp.ignoreDTD(); else if ( "externalGeneralEntities".equalsIgnoreCase(k) ) axp.externalGeneralEntities(adjust.getBoolean(i)); else if ( "externalParameterEntities".equalsIgnoreCase(k) ) @@ -1046,12 +1097,9 @@ public static SQLXML mockedXMLEcho(String chars) /** * Text-typed variant of lowLevelXMLEcho (does not require XML type). - *

    - * It does declare a parameter default, limiting it to PostgreSQL 8.4 or - * later. */ @Function(schema="javatest", name="lowLevelXMLEcho", - type="text", implementor="postgresql_ge_80400") + type="text") public static SQLXML lowLevelXMLEcho_(@SQLType("text") SQLXML sx, int how, @SQLType(defaultValue={}) ResultSet adjust) throws SQLException diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PreJSR310.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PreJSR310.java index e430e25d1..726d46a5d 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PreJSR310.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PreJSR310.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Tada AB and other contributors, as listed below. + * Copyright (c) 2018-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -29,9 +29,6 @@ * Some tests of pre-JSR 310 date/time/timestamp conversions. *

    * For now, just {@code java.sql.Date}, thanks to issue #199. - *

    - * This example relies on {@code implementor} tags reflecting the PostgreSQL - * version, set up in the {@link ConditionalDDR} example. */ @SQLAction(provides="language java_tzset", install={ "SELECT sqlj.alias_java_language('java_tzset', true)" @@ -39,7 +36,7 @@ "DROP LANGUAGE java_tzset" }) -@SQLAction(implementor="postgresql_ge_90300", // needs LATERAL +@SQLAction( requires="issue199", install={ "SELECT javatest.issue199()" }) diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/RecordParameterDefaults.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/RecordParameterDefaults.java index 09d3dbbe8..291eb990b 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/RecordParameterDefaults.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/RecordParameterDefaults.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2018-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -19,7 +19,6 @@ import org.postgresql.pljava.ResultSetProvider; import org.postgresql.pljava.annotation.Function; -import org.postgresql.pljava.annotation.SQLAction; import org.postgresql.pljava.annotation.SQLType; /** @@ -28,21 +27,7 @@ * function. *

    * Also tests the proper DDR generation of defaults for such parameters. - *

    - * This example relies on {@code implementor} tags reflecting the PostgreSQL - * version, set up in the {@link ConditionalDDR} example. */ -@SQLAction( - provides = "paramtypeinfo type", // created in Triggers.java - install = { - "CREATE TYPE javatest.paramtypeinfo AS (" + - " name text, pgtypename text, javaclass text, tostring text" + - ")" - }, - remove = { - "DROP TYPE javatest.paramtypeinfo" - } -) public class RecordParameterDefaults implements ResultSetProvider { /** @@ -62,11 +47,11 @@ public class RecordParameterDefaults implements ResultSetProvider * */ @Function( - requires = "paramtypeinfo type", schema = "javatest", - implementor = "postgresql_ge_80400", // supports function param DEFAULTs - type = "javatest.paramtypeinfo" - ) + out = { + "name text", "pgtypename text", "javaclass text", "tostring text" + } + ) public static ResultSetProvider paramDefaultsRecord( @SQLType(defaultValue={})ResultSet params) throws SQLException @@ -87,7 +72,6 @@ public static ResultSetProvider paramDefaultsRecord( */ @Function( requires = "foobar tables", // created in Triggers.java - implementor = "postgresql_ge_80400", // supports function param DEFAULTs schema = "javatest" ) public static String paramDefaultsNamedRow( diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SPIActions.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SPIActions.java index dca34e5c7..baf3861e6 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SPIActions.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SPIActions.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -127,20 +127,26 @@ public static String getTimeAsString() throws SQLException { } } - static void log(String msg) { + static void log(String msg) throws SQLException { // GCJ has a somewhat serious bug (reported) // - if ("GNU libgcj".equals(System.getProperty("java.vm.name"))) { + if ("GNU libgcj" + .equals( + SessionManager.current().frozenSystemProperties() + .getProperty("java.vm.name"))) { System.out.print("INFO: "); System.out.println(msg); } else Logger.getAnonymousLogger().info(msg); } - static void warn(String msg) { + static void warn(String msg) throws SQLException { // GCJ has a somewhat serious bug (reported) // - if ("GNU libgcj".equals(System.getProperty("java.vm.name"))) { + if ("GNU libgcj" + .equals( + SessionManager.current().frozenSystemProperties() + .getProperty("java.vm.name"))) { System.out.print("WARNING: "); System.out.println(msg); } else diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SetOfRecordTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SetOfRecordTest.java index 13fce44d4..6b814b03a 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SetOfRecordTest.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SetOfRecordTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -26,13 +26,9 @@ * Example implementing the {@code ResultSetHandle} interface, to return * the {@link ResultSet} from any SQL {@code SELECT} query passed as a string * to the {@link #executeSelect executeSelect} function. - *

    - * This example relies on {@code implementor} tags reflecting the PostgreSQL - * version, set up in the {@link ConditionalDDR} example. Before PostgreSQL 8.4, - * there was no {@code =} or {@code DISTINCT FROM} operator between row types. */ -@SQLAction(requires="selecttorecords fn", implementor="postgresql_ge_80400", -install= +@SQLAction(requires="selecttorecords fn", +install={ " SELECT " + " CASE WHEN r IS DISTINCT FROM ROW('Foo'::varchar, 1::integer, 1.5::float, " + " 23.67::decimal(8,2), '2005-06-01'::date, '20:56'::time, " + @@ -45,8 +41,20 @@ " 'select ''Foo'', 1, 1.5::float, 23.67, ''2005-06-01'', " + " ''20:56''::time, ''192.168.0''') " + " AS r(t_varchar varchar, t_integer integer, t_float float, " + -" t_decimal decimal(8,2), t_date date, t_time time, t_cidr cidr)" -) +" t_decimal decimal(8,2), t_date date, t_time time, t_cidr cidr)", + +" SELECT " + +" CASE WHEN every(a IS NOT DISTINCT FROM b) " + +" THEN javatest.logmessage('INFO', 'nested/SPI SetOfRecordTest ok') " + +" ELSE javatest.logmessage('WARNING', 'nested/SPI SetOfRecordTest not ok') " + +" END " + +" FROM " + +" javatest.executeselecttorecords('" + +" SELECT " + +" javatest.executeselect(''select generate_series(1,1)''), " + +" javatest.executeselect(''select generate_series(1,1)'') " + +" ') AS t(a text, b text)" +}) public class SetOfRecordTest implements ResultSetHandle { @Function(schema="javatest", name="executeselecttorecords", diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java index 804ef9d83..bfdbf8c0f 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. 
+ * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -38,8 +38,8 @@ * also create a function and trigger that uses transition tables. *

    * This example relies on {@code implementor} tags reflecting the PostgreSQL - * version, set up in the {@link ConditionalDDR} example. Constraint triggers - * appear in PG 9.1, transition tables in PG 10. + * version, set up in the {@link ConditionalDDR} example. Transition tables + * appear in PG 10. */ @SQLAction( provides = "foobar tables", @@ -135,10 +135,8 @@ public static void examineRows(TriggerData td) /** * Throw exception if value to be inserted is 44. - * Constraint triggers first became available in PostgreSQL 9.1. */ @Function( - implementor = "postgresql_ge_90100", requires = "foobar tables", provides = "constraint triggers", schema = "javatest", diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/TupleTableSlotTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/TupleTableSlotTest.java new file mode 100644 index 000000000..deed3d3c3 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/TupleTableSlotTest.java @@ -0,0 +1,768 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.Connection; +import static java.sql.DriverManager.getConnection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import java.util.ArrayList; +import java.util.Arrays; +import static java.util.Arrays.deepToString; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import java.time.LocalDateTime; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.Adapter.AdapterException;//for now; not planned API +import org.postgresql.pljava.Adapter.As; +import org.postgresql.pljava.Adapter.AsLong; +import org.postgresql.pljava.Adapter.AsDouble; +import org.postgresql.pljava.Adapter.AsInt; +import org.postgresql.pljava.Adapter.AsFloat; +import org.postgresql.pljava.Adapter.AsShort; +import org.postgresql.pljava.Adapter.AsChar; +import org.postgresql.pljava.Adapter.AsByte; +import org.postgresql.pljava.Adapter.AsBoolean; +import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.TargetList; +import org.postgresql.pljava.TargetList.Cursor; +import org.postgresql.pljava.TargetList.Projection; + +import org.postgresql.pljava.annotation.Function; +import static + org.postgresql.pljava.annotation.Function.OnNullInput.RETURNS_NULL; +import org.postgresql.pljava.annotation.SQLAction; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.Portal; +import static org.postgresql.pljava.model.Portal.ALL; +import static org.postgresql.pljava.model.Portal.Direction.FORWARD; +import org.postgresql.pljava.model.SlotTester; +import org.postgresql.pljava.model.TupleDescriptor; +import 
org.postgresql.pljava.model.TupleTableSlot; + +/** + * A temporary test jig during TupleTableSlot development; intended + * to be used from a debugger. + */ +@SQLAction(requires = "modelToJDBC", install = +"WITH" + +" result AS (" + +" SELECT" + +" * " + +" FROM" + +" javatest.modelToJDBC(" + +" 'SELECT DISTINCT" + +" CAST ( relacl AS text ), relacl" + +" FROM" + +" pg_class" + +" WHERE" + +" relacl IS NOT NULL'," + +" 'org.postgresql.pljava.pg.adt.TextAdapter', 'INSTANCE'," + +" 'org.postgresql.pljava.pg.adt.GrantAdapter', 'LIST_INSTANCE'" + +" ) AS r(raw text, cooked text)" + +" )," + +" conformed AS (" + +" SELECT" + +" raw, translate(cooked, '[] ', '{}') AS cooked" + +" FROM" + +" result" + +" )" + +" SELECT" + +" CASE WHEN every(raw = cooked)" + +" THEN javatest.logmessage('INFO', 'AclItem[] ok')" + +" ELSE javatest.logmessage('WARNING', 'AclItem[] ng')" + +" END" + +" FROM" + +" conformed" +) +@SQLAction(requires = "modelToJDBC", install = +"WITH" + +" result AS (" + +" SELECT" + +" raw, cooked, CAST ( cooked AS numeric ) AS refried" + +" FROM" + +" javatest.modeltojdbc(" + +" 'SELECT" + +" CAST ( pow AS text ) AS txt, pow AS bin" + +" FROM" + +" generate_series(-20., 20., 1.) 
AS gs(p)," + +" (VALUES (1e-16), (1e-65)) AS pf(f)," + +" (VALUES (1.), (-1.)) AS sf(sgn)," + +" LATERAL (SELECT sgn*(37.821637 ^ (p + f))) AS s(pow)'," + +" 'org.postgresql.pljava.pg.adt.TextAdapter', 'INSTANCE'," + +" 'org.postgresql.pljava.pg.adt.NumericAdapter', 'BIGDECIMAL_INSTANCE'" + +" ) AS j(raw text, cooked text)" + +" )" + +" SELECT" + +" CASE WHEN every(raw = cooked OR raw = CAST ( refried AS text ))" + +" THEN javatest.logmessage('INFO', 'NUMERIC ok')" + +" ELSE javatest.logmessage('WARNING', 'NUMERIC ng')" + +" END" + +" FROM" + +" result" +) +@SQLAction(requires = "tupleTableSlotTest", install = +"SELECT" + +" javatest.tupletableslottest(" + +" 'SELECT most_common_vals FROM pg_catalog.pg_stats'," + +" 'org.postgresql.pljava.pg.adt.ArrayAdapter', 'TYPE_OBTAINING_INSTANCE')" +) +public class TupleTableSlotTest +{ + /* + * Collect some Adapter instances that are going to be useful in the code + * below. Is it necessary they be static final? No, they can be obtained at + * any time, but collecting these here will keep the example methods tidier + * below. + * + * These are "leaf" adapters: they work from the PostgreSQL types directly. + */ + static final AsLong < ?> INT8; + static final AsInt < ?> INT4; + static final AsShort < ?> INT2; + static final AsByte < ?> INT1; + static final AsDouble < ?> FLOAT8; + static final AsFloat < ?> FLOAT4; + static final AsBoolean< ?> BOOL; + + static final As TEXT; + static final As LDT; // for the PostgreSQL TIMESTAMP type + + /* + * Now some adapters that can be derived from leaf adapters by composing + * non-leaf adapters over them. + * + * By default, the Adapters for primitive types can't fetch a null + * value. There is no value in the primitive's value space that could + * unambiguously represent null, and a DBMS should not go and reuse an + * otherwise-valid value to also mean null, if you haven't said to. 
But in + * a case where that is what you want, it is simple to write an adapter with + * the wanted behavior and compose it over the original one. + */ + static final AsDouble F8_NaN; // primitive double using NaN for null + + /* + * Reference-typed adapters have no trouble with null values by default; + * they'll just produce Java null. But suppose it is more convenient to get + * an Optional instead of a LocalDateTime that might be null. + * An Adapter for that can be obtained by composition. + */ + static final As,?> LDT_O; + + /* + * A composing adapter expecting a reference type can also be composed + * over one that produces a primitive type. It will see the values + * automatically boxed. + * + * Corollary: should the desired behavior be not to produce Optional, + * but simply to enable null handling for a primitive type by producing + * its boxed form or null, just one absolutely trivial composing adapter + * could add that behavior over any primitive adapter. + */ + static final As ,?> INT8_O; + + /* + * Once properly-typed adapters for component types are in hand, + * getting properly-typed array adapters is straightforward. (In Java 10+, + * a person might prefer to set these up at run time in local variables, + * where var could be used instead of these longwinded declarations.) + * + * For fun, I8x1 will be built over INT8_O, so it will really produce + * Optional[] instead of long[]. F8x5 will be built over F8_NaN, so it + * will produce double[][][][][], but null elements won't be rejected, + * and will appear as NaN. DTx2 will be built over LDT_O, so it will really + * produce Optional[][]. 
+ */ + static final As[] ,?> I8x1; + static final As< int[][] ,?> I4x2; + static final As< short[][][] ,?> I2x3; + static final As< byte[][][][] ,?> I1x4; + static final As< double[][][][][] ,?> F8x5; + static final As< float[][][][][][] ,?> F4x6; + static final As< boolean[][][][][] ,?> Bx5; + static final As[][],?> DTx2; + + static + { + /* + * This is the very untidy part, while the planned Adapter manager API + * is not yet implemented. The extremely temporary adapterPlease method + * can be used to grovel some adapters out of PL/Java's innards, as long + * as the name of a class and a static final field is known. + * + * The adapter manager will have generic methods to obtain adapters with + * specific compile-time types. The adapterPlease method, not so much. + * It needs to be used with ugly casts. + */ + try + { + Connection conn = getConnection("jdbc:default:connection"); + SlotTester t = conn.unwrap(SlotTester.class); + + String cls = "org.postgresql.pljava.pg.adt.Primitives"; + INT8 = (AsLong )t.adapterPlease(cls, "INT8_INSTANCE"); + INT4 = (AsInt )t.adapterPlease(cls, "INT4_INSTANCE"); + INT2 = (AsShort )t.adapterPlease(cls, "INT2_INSTANCE"); + INT1 = (AsByte )t.adapterPlease(cls, "INT1_INSTANCE"); + FLOAT8 = (AsDouble )t.adapterPlease(cls, "FLOAT8_INSTANCE"); + FLOAT4 = (AsFloat )t.adapterPlease(cls, "FLOAT4_INSTANCE"); + BOOL = (AsBoolean)t.adapterPlease(cls, "BOOLEAN_INSTANCE"); + + cls = "org.postgresql.pljava.pg.adt.TextAdapter"; + + /* + * SuppressWarnings must appear on a declaration, making it hard to + * apply here, an initial assignment to a final field declared + * earlier. But making this the declaration of a new local variable, + * with the actual wanted assignment as a "side effect", works. + * (The "unnamed variable" _ previewed in Java 21 would be ideal.) 
+ */ + @SuppressWarnings("unchecked") Object _1 = + TEXT = (As)t.adapterPlease(cls, "INSTANCE"); + + cls = "org.postgresql.pljava.pg.adt.DateTimeAdapter$JSR310"; + + @SuppressWarnings("unchecked") Object _2 = + LDT = + (As)t.adapterPlease(cls, "TIMESTAMP_INSTANCE"); + } + catch ( SQLException | ReflectiveOperationException e ) + { + throw new ExceptionInInitializerError(e); + } + + /* + * Other than those stopgap uses of adapterPlease, the rest is + * not so bad. Instantiate some composing adapters over the leaf + * adapters already obtained: + */ + + F8_NaN = new NullReplacingDouble(FLOAT8, Double.NaN); + LDT_O = new AsOptional<>(LDT); + INT8_O = new AsOptional<>(INT8); + + /* + * (Those composing adapters should be provided by PL/Java and known + * to the adapter manager so it can compose them for you. For now, + * they are just defined in this example file, showing that client + * code can easily supply its own.) + * + * Java array-of-array adapters of various dimensionalities are + * easily built from the adapters chosen for their component types. + */ + + I8x1 = INT8_O .a1() .build(); // array of Optional + I4x2 = INT4 .a2() .build(); + I2x3 = INT2 .a2() .a1() .build(); + I1x4 = INT1 .a4() .build(); + F8x5 = F8_NaN .a4() .a1() .build(); // 5D F8 array, null <-> NaN + F4x6 = FLOAT4 .a4() .a2() .build(); + Bx5 = BOOL .a4() .a1() .build(); + DTx2 = LDT_O .a2() .build(); // 2D of optional LDT + } + + /** + * Test {@link TargetList} and its functional API for retrieving values. 
+ */ + @Function(schema="javatest") + public static Iterator targetListTest() + throws SQLException, ReflectiveOperationException + { + try ( + Connection conn = getConnection("jdbc:default:connection"); + Statement s = conn.createStatement(); + ) + { + SlotTester t = conn.unwrap(SlotTester.class); + + String query = + "SELECT" + + " to_char(stamp, 'DAY') AS day," + + " stamp" + + " FROM" + + " generate_series(" + + " timestamp 'epoch', timestamp 'epoch' + interval 'P6D'," + + " interval 'P1D'" + + " ) AS s(stamp)"; + + try ( Portal p = t.unwrapAsPortal(s.executeQuery(query)) ) + { + Projection proj = p.tupleDescriptor(); + + /* + * A quick glance shows this project(...) to be unneeded, as the + * query's TupleDescriptor already has exactly these columns in + * this order, and could be used below directly. On the other + * hand, this line will keep things working if someone later + * changes the query, reordering these columns or adding + * to them, and it may give a more explanatory exception if + * a change to the query does away with an expected column. + */ + proj = proj.project("day", "stamp"); + + List fetched = p.fetch(FORWARD, ALL); + + List results = new ArrayList<>(); + + proj.applyOver(fetched, c -> + { + /* + * This loop demonstrates a straightforward use of two + * Adapters and a lambda with two parameters to go through + * the retrieved rows. + * + * Note that applyOver does not, itself, iterate over the + * rows; it supplies a Cursor object that can be iterated to + * do that. This gives the lambda body of applyOver more + * control over how that will happen. + * + * The Cursor object is mutated during iteration so the + * same object represents each row in turn; the iteration + * variable is simply the Cursor object itself, so does not + * need to be used. Once the "unnamed variable" _ is more + * widely available (Java 21 has it, with --enable-preview), + * it will be the obvious choice for the iteration variable + * here. 
+ * + * Within the loop, the cursor represents the single current + * row as far as its apply(...) methods are concerned. + * + * Other patterns, such as the streams API, can also be used + * (starting with a stream of the cursor object itself, + * again for each row), but can involve more fuss when + * checked exceptions are involved. + */ + for ( Cursor __ : c ) + { + c.apply(TEXT, LDT, // the adapters + ( v0, v1 ) -> // the fetched values + results.add(v0 + " | " + v1.getDayOfWeek()) + ); + } + + /* + * This equivalent loop uses two lambdas in curried style + * to do the same processing of the same two columns. That + * serves no practical need in this example; a perfectly + * good method signature for two reference columns was seen + * above. This loop illustrates the technique for combining + * the available methods when there isn't one that exactly + * fits the number and types of the target columns. + */ + for ( Cursor __ : c ) + { + c.apply(TEXT, + v0 -> + c.apply(LDT, + v1 -> + results.add(v0 + " | " + v1.getDayOfWeek()) + ) + ); + } + + return null; + }); + + return results.iterator(); + } + } + } + + /** + * Test retrieval of a PostgreSQL array as a multidimensional Java array. 
+ */ + @Function(schema="javatest") + public static Iterator javaMultiArrayTest() + throws SQLException, ReflectiveOperationException + { + Connection conn = getConnection("jdbc:default:connection"); + SlotTester t = conn.unwrap(SlotTester.class); + + String query = + "VALUES (" + + " CAST ( '{1,2}' AS int8 [] ), " + + " CAST ( '{{1},{2}}' AS int4 [] ), " + + " CAST ( '{{{1,2,3}}}' AS int2 [] ), " + + " CAST ( '{{{{1},{2},{3}}}}' AS \"char\" [] ), " + // ASCII + " CAST ( '{{{{{1,2,3}}}}}' AS float8 [] ), " + + " CAST ( '{{{{{{1},{2},{3}}}}}}' AS float4 [] ), " + + " CAST ( '{{{{{t},{f},{t}}}}}' AS boolean [] ), " + + " CAST ( '{{''epoch''}}' AS timestamp [] ) " + + "), (" + + " '{NULL}', NULL, NULL, NULL, '{{{{{1,NULL,3}}}}}', NULL, NULL," + + " '{{NULL}}'" + + ")"; + + Portal p = t.unwrapAsPortal(conn.createStatement().executeQuery(query)); + Projection proj = p.tupleDescriptor(); + + List tups = p.fetch(FORWARD, ALL); + + List result = new ArrayList<>(); + + /* + * Then just use the right adapter for each column. + */ + proj.applyOver(tups, c -> + { + for ( Cursor __ : c ) + { + c.apply(I8x1, I4x2, I2x3, I1x4, F8x5, F4x6, Bx5, DTx2, + ( v0, v1, v2, v3, v4, v5, v6, v7 ) -> + result.addAll(List.of( + Arrays.toString(v0), deepToString(v1), deepToString(v2), + deepToString(v3), deepToString(v4), deepToString(v5), + deepToString(v6), deepToString(v7), + v7[0][0].orElse(LocalDateTime.MAX).getMonth() + "" + )) + ); + } + return null; + }); + + return result.iterator(); + } + + /** + * An adapter to compose over another one, adding some wanted behavior. + * + * There should eventually be a built-in set of composing adapters like + * this available for ready use, and automatically composed for you by an + * adapter manager when you say "I want an adapter for this PG type to this + * Java type and behaving this way." + * + * Until then, let this illustrate the simplicity of writing one. 
+ */ + public static class NullReplacingDouble extends AsDouble + { + private final double replacement; + + @Override + public boolean canFetchNull() { return true; } + + @Override + public double fetchNull(Attribute a) + { + return replacement; + } + + // It would be nice to let this method be omitted and this behavior + // assumed, in a composing adapter with the same type for return and + // parameter. Maybe someday. + public double adapt(Attribute a, double value) + { + return value; + } + + private static final Adapter.Configuration config = + Adapter.configure(NullReplacingDouble.class, null); + + NullReplacingDouble(AsDouble over, double valueForNull) + { + super(config, over); + replacement = valueForNull; + } + } + + /** + * Another example of a useful composing adapter that should eventually be + * part of a built-in set. + */ + public static class AsOptional extends As,T> + { + // canFetchNull isn't needed; its default in As is true. + + @Override + public Optional fetchNull(Attribute a) + { + return Optional.empty(); + } + + public Optional adapt(Attribute a, T value) + { + return Optional.of(value); + } + + private static final Adapter.Configuration config = + Adapter.configure(AsOptional.class, null); + + /* + * This adapter may be composed over any Adapter, including those + * of primitive types as well as the reference-typed As. When + * constructed over a primitive-returning adapter, values will be boxed + * when passed to adapt(). + */ + AsOptional(Adapter over) + { + super(config, over, null); + } + } + + /** + * A surprisingly useful composing adapter that should eventually be + * part of a built-in set. + *

    + * Surprisingly useful, because although it "does" nothing, composing it + * over any primitive adapter produces one that returns the boxed form, and + * Java null for SQL null. + */ + public static class Identity extends As + { + // the inherited fetchNull returns null, which is just right + + public T adapt(Attribute a, T value) + { + return value; + } + + private static final Adapter.Configuration config = + Adapter.configure(Identity.class, null); + + /* + * Another choice could be to restrict 'over' to extend Primitive, as + * there isn't much point composing this adapter over one of reference + * type ... unless you want Java null for SQL null and the 'over' + * adapter produces something else. + */ + Identity(Adapter over) + { + super(config, over, null); + } + } + + /** + * Test retrieving results from a query using the PG-model API and returning + * them to the caller using the legacy JDBC API. + * @param query a query producing some number of columns + * @param adapters an array of strings, twice the number of columns, + * supplying a class name and static field name for the ugly temporary + * {@code adapterPlease} method, one such pair for each result column + */ + @Function( + schema = "javatest", type = "pg_catalog.record", variadic = true, + onNullInput = RETURNS_NULL, provides = "modelToJDBC" + ) + public static ResultSetProvider modelToJDBC(String query, String[] adapters) + throws SQLException, ReflectiveOperationException + { + Connection conn = getConnection("jdbc:default:connection"); + SlotTester t = conn.unwrap(SlotTester.class); + Portal p = t.unwrapAsPortal(conn.createStatement().executeQuery(query)); + TupleDescriptor td = p.tupleDescriptor(); + + if ( adapters.length != 2 * td.size() ) + throw new SQLException(String.format( + "query makes %d columns so 'adapters' should have %d " + + "elements, not %d", td.size(), 2*td.size(), adapters.length)); + + if ( Arrays.stream(adapters).anyMatch(Objects::isNull) ) + throw new 
SQLException("adapters array has null element"); + + As[] resolved = new As[ td.size() ]; + + for ( int i = 0 ; i < resolved.length ; ++ i ) + { + Adapter a = + t.adapterPlease(adapters[i<<1], adapters[(i<<1) + 1]); + if ( a instanceof As ) + resolved[i] = (As)a; + else + resolved[i] = new Identity(a); + } + + return new ResultSetProvider.Large() + { + @Override + public boolean assignRowValues(ResultSet out, long currentRow) + throws SQLException + { + if ( 0 == currentRow ) + { + int rcols = out.getMetaData().getColumnCount(); + if ( td.size() != rcols ) + throw new SQLException(String.format( + "query makes %d columns but result descriptor " + + "has %d", td.size(), rcols)); + } + + /* + * This example will fetch one tuple at a time here in the + * ResultSetProvider. This is a low-level interface to Postgres. + * In the SFRM_ValuePerCall protocol that ResultSetProvider + * supports, a fresh call from Postgres is made to retrieve each + * row. The Portal lives in a memory context that persists + * across the multiple calls, but the fetch result tups only + * exist in a child of the SPI context set up for each call. + * So here we only fetch as many tups as we can use to make one + * result row. + * + * If the logic involved fetching a bunch of rows and processing + * those into Java representations with no further dependence on + * the native tuples, then of course that could be done all in + * advance. 
+ */ + List tups = p.fetch(FORWARD, 1); + if ( 0 == tups.size() ) + return false; + + TupleTableSlot tts = tups.get(0); + + for ( int i = 0 ; i < resolved.length ; ++ i ) + { + Object o = tts.get(i, resolved[i]); + try + { + out.updateObject(1 + i, o); + } + catch ( SQLException e ) + { + try + { + out.updateObject(1 + i, o.toString()); + } + catch ( SQLException e2 ) + { + e.addSuppressed(e2); + throw e; + } + } + } + + return true; + } + + @Override + public void close() + { + p.close(); + } + }; + } + + /** + * A temporary test jig during TupleTableSlot development; intended + * to be used from a debugger. + */ + @Function(schema="javatest", provides="tupleTableSlotTest") + public static void tupleTableSlotTest( + String query, String adpClass, String adpInstance) + throws SQLException, ReflectiveOperationException + { + new TupleTableSlotTest().testWith(query, adpClass, adpInstance); + } + + As adpL; + AsLong adpJ; + AsDouble adpD; + AsInt adpI; + AsFloat adpF; + AsShort adpS; + AsChar adpC; + AsByte adpB; + AsBoolean adpZ; + + void testWith(String query, String adpClass, String adpInstance) + throws SQLException, ReflectiveOperationException + { + Connection c = getConnection("jdbc:default:connection"); + SlotTester t = c.unwrap(SlotTester.class); + + ResultSet rs = c.createStatement().executeQuery(query); + Portal p = t.unwrapAsPortal(rs); + TupleDescriptor td = p.tupleDescriptor(); + + List tups = p.fetch(FORWARD, ALL); + + int ntups = tups.size(); + + boolean firstTime = true; + + int form = 8; // set with debugger, 8 selects reference-typed adpL + + boolean go; // true until set false by debugger each time through loop + + /* + * Results from adapters of assorted types. 
+ */ + long jj = 0; + double dd = 0; + int ii = 0; + float ff = 0; + short ss = 0; + char cc = 0; + byte bb = 0; + boolean zz = false; + Object ll = null; + + for ( TupleTableSlot tts : tups ) + { + if ( firstTime ) + { + firstTime = false; + Adapter a = t.adapterPlease(adpClass, adpInstance); + if ( a instanceof As ) + adpL = (As)a; + else if ( a instanceof AsLong ) + adpJ = (AsLong)a; + else if ( a instanceof AsDouble ) + adpD = (AsDouble)a; + else if ( a instanceof AsInt ) + adpI = (AsInt)a; + else if ( a instanceof AsFloat ) + adpF = (AsFloat)a; + else if ( a instanceof AsShort ) + adpS = (AsShort)a; + else if ( a instanceof AsChar ) + adpC = (AsChar)a; + else if ( a instanceof AsByte ) + adpB = (AsByte)a; + else if ( a instanceof AsBoolean ) + adpZ = (AsBoolean)a; + } + + for ( Attribute att : tts.descriptor() ) + { + go = true; + while ( go ) + { + go = false; + try + { + switch ( form ) + { + case 0: jj = tts.get(att, adpJ); break; + case 1: dd = tts.get(att, adpD); break; + case 2: ii = tts.get(att, adpI); break; + case 3: ff = tts.get(att, adpF); break; + case 4: ss = tts.get(att, adpS); break; + case 5: cc = tts.get(att, adpC); break; + case 6: bb = tts.get(att, adpB); break; + case 7: zz = tts.get(att, adpZ); break; + case 8: ll = tts.get(att, adpL); break; + } + } + catch ( AdapterException e ) + { + System.out.println(e); + } + } + } + } + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/TypeRoundTripper.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/TypeRoundTripper.java index df613c266..8c3151556 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/TypeRoundTripper.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/TypeRoundTripper.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2018-2023 Tada AB and other contributors, as listed below. 
* * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -36,8 +36,6 @@ import org.postgresql.pljava.annotation.SQLAction; import org.postgresql.pljava.annotation.SQLType; -import org.postgresql.pljava.example.annotation.ConditionalDDR; // for javadoc - /** * A class to simplify testing of PL/Java's mappings between PostgreSQL and * Java/JDBC types. @@ -94,11 +92,8 @@ * (VALUES (timestamptz '2017-08-21 18:25:29.900005Z')) AS p(orig), * roundtrip(p) AS (roundtripped timestamptz); * - *

    - * This example relies on {@code implementor} tags reflecting the PostgreSQL - * version, set up in the {@link ConditionalDDR} example. */ -@SQLAction(implementor = "postgresql_ge_90300", // funcs see earlier FROM items +@SQLAction( requires = {"TypeRoundTripper.roundTrip", "point mirror type"}, install = { " SELECT" + @@ -309,8 +304,7 @@ private TypeRoundTripper() { } @Function( schema = "javatest", type = "RECORD", - provides = "TypeRoundTripper.roundTrip", - implementor = "postgresql_ge_80400" // supports function param DEFAULTs + provides = "TypeRoundTripper.roundTrip" ) public static boolean roundTrip( ResultSet in, @SQLType(defaultValue="") String classname, diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UDTScalarIOTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UDTScalarIOTest.java index 8bc9affd9..659a5e2ab 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UDTScalarIOTest.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UDTScalarIOTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2016-2025 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -15,6 +15,8 @@ import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.net.MalformedURLException; @@ -100,9 +102,11 @@ public class UDTScalarIOTest implements SQLData s_utfgedicht = new byte[bb.limit()]; bb.get(s_utfgedicht); - s_url = new URL("http://tada.github.io/pljava/"); + s_url = new URI("http://tada.github.io/pljava/").toURL(); } - catch ( CharacterCodingException | MalformedURLException e ) + catch ( + CharacterCodingException | + URISyntaxException | MalformedURLException e ) { throw new RuntimeException(e); } diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UnicodeRoundTripTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UnicodeRoundTripTest.java index 4f2c0ec47..c317dab25 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UnicodeRoundTripTest.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UnicodeRoundTripTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -35,11 +35,10 @@ * if {@code matched} is false or the original and returned arrays or strings * do not match as seen in SQL. *

    - * This example relies on {@code implementor} tags reflecting the PostgreSQL - * version, set up in the {@link ConditionalDDR} example, and also sets its own. + * This example sets an {@code implementor} tag based on a PostgreSQL condition, + * as further explained in the {@link ConditionalDDR} example. */ -@SQLAction(provides="postgresql_unicodetest", - implementor="postgresql_ge_90000", install= +@SQLAction(provides="postgresql_unicodetest", install= "SELECT CASE" + " WHEN 'UTF8' = current_setting('server_encoding')" + " THEN set_config('pljava.implementors', 'postgresql_unicodetest,' ||" + @@ -49,50 +48,43 @@ @SQLAction(requires="unicodetest fn", implementor="postgresql_unicodetest", install= -" with " + -" usable_codepoints ( cp ) as ( " + -" select generate_series(1,x'd7ff'::int) " + -" union all " + -" select generate_series(x'e000'::int,x'10ffff'::int) " + +" WITH " + +" usable_codepoints ( cp ) AS ( " + +" SELECT generate_series(1,x'd7ff'::int) " + +" UNION ALL " + +" SELECT generate_series(x'e000'::int,x'10ffff'::int) " + " ), " + -" test_inputs ( groupnum, cparray, s ) as ( " + -" select " + -" cp / 1024 as groupnum, " + -" array_agg(cp order by cp), string_agg(chr(cp), '' order by cp) " + -" from usable_codepoints " + -" group by groupnum " + +" test_inputs ( groupnum, cparray, s ) AS ( " + +" SELECT " + +" cp / 1024 AS groupnum, " + +" array_agg(cp ORDER BY cp), string_agg(chr(cp), '' ORDER BY cp) " + +" FROM usable_codepoints " + +" GROUP BY groupnum " + " ), " + -" test_outputs as ( " + -" select groupnum, cparray, s, unicodetest(s, cparray) as roundtrip " + -" from test_inputs " + +" test_outputs AS ( " + +" SELECT groupnum, cparray, s, unicodetest(s, cparray) AS roundtrip " + +" FROM test_inputs " + " ), " + -" test_failures as ( " + -" select * " + -" from test_outputs " + -" where " + -" cparray != (roundtrip).cparray or s != (roundtrip).s " + -" or not (roundtrip).matched " + +" test_failures AS ( " + +" SELECT * " + +" FROM test_outputs " + 
+" WHERE " + +" cparray != (roundtrip).cparray OR s != (roundtrip).s " + +" OR NOT (roundtrip).matched " + " ), " + -" test_summary ( n_failing_groups, first_failing_group ) as ( " + -" select count(*), min(groupnum) from test_failures " + +" test_summary ( n_failing_groups, first_failing_group ) AS ( " + +" SELECT count(*), min(groupnum) FROM test_failures " + " ) " + -" select " + -" case when n_failing_groups > 0 then " + +" SELECT " + +" CASE WHEN n_failing_groups > 0 THEN " + " javatest.logmessage('WARNING', n_failing_groups || " + " ' 1k codepoint ranges had mismatches, first is block starting 0x' || " + " to_hex(1024 * first_failing_group)) " + -" else " + +" ELSE " + " javatest.logmessage('INFO', " + " 'all Unicode codepoint ranges roundtripped successfully.') " + -" end " + -" from test_summary" -) -@SQLAction( - install= - "CREATE TYPE unicodetestrow AS " + - "(matched boolean, cparray integer[], s text)", - remove="DROP TYPE unicodetestrow", - provides="unicodetestrow type" +" END " + +" FROM test_summary" ) public class UnicodeRoundTripTest { /** @@ -111,8 +103,8 @@ public class UnicodeRoundTripTest { * @param rs OUT (matched, cparray, s) as described above * @return true to indicate the OUT tuple is not null */ - @Function(type="unicodetestrow", - requires="unicodetestrow type", provides="unicodetest fn") + @Function(out={"matched boolean", "cparray integer[]", "s text"}, + provides="unicodetest fn") public static boolean unicodetest(String s, int[] ints, ResultSet rs) throws SQLException { boolean ok = true; diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/VarlenaUDTTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/VarlenaUDTTest.java index ae1be8991..c9982490e 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/VarlenaUDTTest.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/VarlenaUDTTest.java @@ -1,5 +1,5 @@ /* - * 
Copyright (c) 2015- Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -29,14 +29,12 @@ * characters. That makes it easy to test how big a value gets correctly stored * and retrieved. It should be about a GB, but in issue 52 was failing at 32768 * because of a narrowing assignment in the native code. - *

    - * This example relies on {@code implementor} tags reflecting the PostgreSQL - * version, set up in the {@link ConditionalDDR} example. */ -@SQLAction(requires="varlena UDT", implementor="postgresql_ge_80300", install= +@SQLAction(requires="varlena UDT", install= " SELECT CASE v::text = v::javatest.VarlenaUDTTest::text " + -" WHEN true THEN javatest.logmessage('INFO', 'works for ' || v) " + -" ELSE javatest.logmessage('WARNING', 'fails for ' || v) " + +" WHEN true " + +" THEN javatest.logmessage('INFO', 'VarlenaUDTTest works for ' || v) " + +" ELSE javatest.logmessage('WARNING', 'VarlenaUDTTest fails for ' || v) " + " END " + " FROM (VALUES (('32767')), (('32768')), (('65536')), (('1048576'))) " + " AS t ( v )" diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/XMLRenderedTypes.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/XMLRenderedTypes.java index 871e96445..812233d15 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/XMLRenderedTypes.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/XMLRenderedTypes.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2019-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -27,19 +27,23 @@ *

    * Everything mentioning the type XML here needs a conditional implementor tag * in case of being loaded into a PostgreSQL instance built without that type. - * The {@code pg_node_tree} type appears in 9.1. */ -@SQLAction(implementor="postgresql_ge_90100", - provides="postgresql_xml_ge91", - install= - "SELECT CASE (SELECT 1 FROM pg_type WHERE typname = 'xml') WHEN 1" + - " THEN set_config('pljava.implementors', 'postgresql_xml_ge91,' || " + - " current_setting('pljava.implementors'), true) " + - "END" +@SQLAction(implementor="postgresql_xml", requires="pgNodeTreeAsXML", install= +"WITH" + +" a(t) AS (SELECT adbin FROM pg_catalog.pg_attrdef LIMIT 1)" + +" SELECT" + +" CASE WHEN pgNodeTreeAsXML(t) IS DOCUMENT" + +" THEN javatest.logmessage('INFO', 'pgNodeTreeAsXML ok')" + +" ELSE javatest.logmessage('WARNING', 'pgNodeTreeAsXML ng')" + +" END" + +" FROM a" ) public class XMLRenderedTypes { - @Function(schema="javatest", implementor="postgresql_xml_ge91") + @Function( + schema="javatest", implementor="postgresql_xml", + provides="pgNodeTreeAsXML" + ) public static SQLXML pgNodeTreeAsXML(@SQLType("pg_node_tree") SQLXML pgt) throws SQLException { diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/DoSQL.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/DoSQL.java new file mode 100644 index 000000000..713bca3fd --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/DoSQL.java @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.polyglot; + +import java.sql.SQLException; + +import org.postgresql.pljava.PLJavaBasedLanguage.InlineBlocks; + +import org.postgresql.pljava.annotation.SQLAction; + +import org.postgresql.pljava.model.ProceduralLanguage; + +/* + * The imports above are the basics to make this a language handler. + * + * These imports below for JDBC / database access might not be so common in a + * real language handler; you'd expect it to focus on compiling/executing some + * client code, and the client code is where you'd expect to see what looks + * more like application logic like this. But this is a handler for a very + * simple language that only takes the given string and hands it to JDBC, so it + * does look a bit like application logic. + */ +import java.sql.Connection; +import static java.sql.DriverManager.getConnection; +import java.sql.Statement; +import org.postgresql.pljava.model.Portal; +import static org.postgresql.pljava.model.Portal.ALL; +import static org.postgresql.pljava.model.Portal.Direction.FORWARD; +import org.postgresql.pljava.model.SlotTester; // temporary development hack + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Example of a procedural language with only DO blocks, built atop PL/Java. 
+ */ +@SQLAction(requires = "pljavahandler language", install = { +"CREATE OR REPLACE FUNCTION javatest.dosql_validator(oid)" + +" RETURNS void" + +" LANGUAGE pljavahandler AS 'org.postgresql.pljava.example.polyglot.DoSQL'", + +"COMMENT ON FUNCTION javatest.dosql_validator(oid) IS " + +"'Validator function for the dosql procedural language'", + +"CREATE LANGUAGE dosql" + +" HANDLER sqlj.pljavaDispatchRoutine" + +" INLINE sqlj.pljavaDispatchInline" + +" VALIDATOR javatest.dosql_validator", + +"COMMENT ON LANGUAGE dosql IS " + +"'The dosql procedural language, which is implemented atop PL/Java, " + +"and supports inline code blocks that are just plain SQL, to be executed " + +"with any output discarded. COMMIT and ROLLBACK are recognized " + +"for transaction control.'", + +"DO LANGUAGE dosql 'SELECT javatest.logmessage(''INFO'', ''DoSQL ok'')'" +}, remove = { +"DROP LANGUAGE dosql", +"DROP FUNCTION javatest.dosql_validator(oid)" +}) +public class DoSQL implements InlineBlocks +{ + private final ProceduralLanguage pl; + + /** + * There must be a public constructor with a {@code ProceduralLanguage} + * parameter. + *

    + * The parameter can be ignored, or used to determine the name, oid, + * accessibility, or other details of the declared PostgreSQL language + * your handler class has been instantiated for. + */ + public DoSQL(ProceduralLanguage pl) + { + this.pl = pl; + } + + /** + * The sole method needed to implement inline code blocks. + *

    + * This implementation will recognize {@code COMMIT} or {@code ROLLBACK} + * and call the dedicated JDBC {@code Connection} methods for those, or + * otherwise just pass the string to {@code Statement.execute} and consume + * and discard any results. + */ + @Override + public void execute(String inlineSource, boolean atomic) throws SQLException + { + try ( + Connection c = getConnection("jdbc:default:connection"); + Statement s = c.createStatement() + ) + { + Matcher m = COMMIT_OR_ROLLBACK.matcher(inlineSource); + if ( m.matches() ) + { + if ( -1 != m.start(1) ) + c.commit(); + else + c.rollback(); + return; + } + + /* + * Not COMMIT or ROLLBACK, just hand it to execute() and consume + * any results. + */ + + SlotTester st = c.unwrap(SlotTester.class); + long count = 0; + + for ( + boolean isRS = s.execute(inlineSource); + -1 != count; + isRS = s.getMoreResults() + ) + { + if ( isRS ) + { + try ( Portal p = st.unwrapAsPortal(s.getResultSet()) ) + { + p.move(FORWARD, ALL); + } + } + else + count = s.getLargeUpdateCount(); + } + } + } + + static final Pattern COMMIT_OR_ROLLBACK = + Pattern.compile("^\\s*+(?i:(commit)|(rollback))\\s*+$"); +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/Glot64.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/Glot64.java new file mode 100644 index 000000000..d0b894a42 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/Glot64.java @@ -0,0 +1,819 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.polyglot; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.CharacterCodingException; +import static java.nio.charset.StandardCharsets.US_ASCII; + +import java.sql.SQLException; +import java.sql.SQLSyntaxErrorException; + +import java.util.Base64; +import java.util.BitSet; +import java.util.Objects; +import static java.util.Optional.ofNullable; + +import static java.util.stream.Collectors.toList; + +import org.postgresql.pljava.PLJavaBasedLanguage.InlineBlocks; +import org.postgresql.pljava.PLJavaBasedLanguage.ReturningSets; +import org.postgresql.pljava.PLJavaBasedLanguage.Routines; +import org.postgresql.pljava.PLJavaBasedLanguage.Routine; +import org.postgresql.pljava.PLJavaBasedLanguage.Template; +import org.postgresql.pljava.PLJavaBasedLanguage.Triggers; +import org.postgresql.pljava.PLJavaBasedLanguage.TriggerFunction; +import org.postgresql.pljava.PLJavaBasedLanguage.TriggerTemplate; +import org.postgresql.pljava.PLJavaBasedLanguage.UsingTransforms; + +import org.postgresql.pljava.TargetList.Projection; + +import org.postgresql.pljava.annotation.SQLAction; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.ProceduralLanguage; +import org.postgresql.pljava.model.ProceduralLanguage.PLJavaBased; +import org.postgresql.pljava.model.RegProcedure; +import org.postgresql.pljava.model.RegProcedure.Call; +import org.postgresql.pljava.model.RegProcedure.Lookup; +import org.postgresql.pljava.model.Transform; +import org.postgresql.pljava.model.Transform.FromSQL; +import org.postgresql.pljava.model.Transform.ToSQL; +import org.postgresql.pljava.model.Trigger; +import 
org.postgresql.pljava.model.Trigger.ForTrigger; +import org.postgresql.pljava.model.TupleDescriptor; +import org.postgresql.pljava.model.TupleTableSlot; + +/** + * Example of a procedural language implemented atop PL/Java. + *

    + * Glot64 has a couple of features rarely found in a PostgreSQL PL. First, you + * can't use it to do anything, other than print some text to standard output. + * That's the server's standard output, which you probably won't even see unless + * running the server with standard output to your terminal, as you might run a + * test instance under PL/Java's test harness. On a production server, what is + * written to standard output may well go nowhere at all, and then it may truly + * be said your Glot64 routines do nothing at all. + *

    + * Second, Glot64 has the rare property that the compiled form of its code is + * easier to read than the source. That's because when you + * {@code CREATE FUNCTION} or {@code CREATE PROCEDURE} in Glot64, you write + * the {@code AS} part in Base64. It gets 'compiled' by decoding it to ASCII. + * (Unless it is malformed Base64 or doesn't decode to ASCII. Then your routine + * gets rejected by the validator. Better luck next time.) Then, when you call + * the function or procedure, the 'compiled' code is written to standard output. + * Therefore, + *

    
    + * CREATE FUNCTION hello() RETURNS void
    + *   LANGUAGE glot64
    + *   AS 'SGVsbG8sIHdvcmxkIQo=';
    + *
    + * defines a function that writes "Hello, world!" when you call it. + *

    + * However, Glot64 writes several other things to standard output ahead of the + * output of the routine itself. That is the real purpose: to illustrate how + * PL/Java's language handler API is arranged, and how the information about the + * parameters and result type (or types) will be presented to your code. + *

    + * You can declare a Glot64 function or procedure with any number and types of + * parameters and return type (or {@code OUT} parameters). Because your routine + * will not use any of the arguments or produce any result, it doesn't care how + * they are declared. By declaring Glot64 functions in several different ways, + * you can see, in the output messages, how the API presents that information. + */ +@SQLAction(requires = "pljavahandler language", install = { +"CREATE OR REPLACE FUNCTION javatest.glot64_validator(oid)" + +" RETURNS void" + +" LANGUAGE pljavahandler AS 'org.postgresql.pljava.example.polyglot.Glot64'", + +"COMMENT ON FUNCTION javatest.glot64_validator(oid) IS " + +"'Validator function for the glot64 procedural language'", + +"CREATE LANGUAGE glot64" + +" HANDLER sqlj.pljavaDispatchRoutine" + +" INLINE sqlj.pljavaDispatchInline" + +" VALIDATOR javatest.glot64_validator", + +"COMMENT ON LANGUAGE glot64 IS " + +"'The glot64 procedural language, which is implemented atop PL/Java, " + +"and supports functions, procedures, and inline code blocks'", + +"CREATE FUNCTION javatest.hello() RETURNS void" + +" LANGUAGE glot64 AS 'SGVsbG8sIHdvcmxkIQo='", + +"CREATE FUNCTION javatest.hello(anyelement) RETURNS anyelement" + +" LANGUAGE glot64 AS 'SGVsbG8sIHdvcmxkIQo='", + +"CREATE FUNCTION javatest.hello(a int4, b int4, OUT c int2, OUT d int2)" + +" LANGUAGE glot64 AS 'SGVsbG8sIHdvcmxkIQo='", + +"CREATE FUNCTION javatest.hello(text, VARIADIC \"any\") RETURNS text[]" + +" LANGUAGE glot64 AS 'SGVsbG8sIHdvcmxkIQo='", + +"CREATE PROCEDURE javatest.say_hello()" + +" LANGUAGE glot64 AS 'SGVsbG8sIHdvcmxkIQo='", + +"DO LANGUAGE glot64 'SGVsbG8sIHdvcmxkIQo='", + +"SELECT javatest.hello()", + +"SELECT javatest.hello()", + +"SELECT javatest.hello(42)", + +"SELECT javatest.hello(i), javatest.hello(r)" + +" FROM (VALUES (CAST (1 AS INTEGER), CAST (1.0 AS REAL)), (2, 2.0), (3, 3.0))" + +" AS t(i, r)", + +"CALL javatest.say_hello()", + +"CREATE FUNCTION 
javatest.glot64_trigger() RETURNS trigger" + +" LANGUAGE glot64 AS 'dHJpZ2dlciBkZW1vCg=='", + +"CREATE TRIGGER g64_as_d AFTER DELETE ON javatest.username_test" + +" REFERENCING OLD TABLE AS oldone FOR EACH STATEMENT" + +" EXECUTE FUNCTION javatest.glot64_trigger('ab', 'cd')", + +"CREATE CONSTRAINT TRIGGER g64_ar_iu AFTER INSERT OR UPDATE" + +" ON javatest.username_test FOR EACH ROW" + +" EXECUTE FUNCTION javatest.glot64_trigger('ef', 'gh')", + +"INSERT INTO javatest.username_test VALUES ('Wilhelm Glot', '64')", + +"UPDATE javatest.username_test SET name = 'Glot, Wilhelm'" + +" WHERE username = '64'", + +"DELETE FROM javatest.username_test WHERE username = '64'", + +"CREATE FUNCTION javatest.fromline(internal) RETURNS internal" + +" IMMUTABLE LANGUAGE glot64 AS 'ZnJvbVNRTA=='", + +"CREATE FUNCTION javatest.toline(internal) RETURNS line" + +" IMMUTABLE LANGUAGE glot64 AS 'dG9TUUw='", + +"CREATE FUNCTION javatest.frombox(internal) RETURNS internal" + +" IMMUTABLE LANGUAGE glot64 AS 'ZnJvbVNRTA=='", + +"CREATE FUNCTION javatest.tolseg(internal) RETURNS lseg" + +" IMMUTABLE LANGUAGE glot64 AS 'dG9TUUw='", + +"CREATE TRANSFORM FOR line LANGUAGE glot64 (" + +" FROM SQL WITH FUNCTION javatest.fromline," + +" TO SQL WITH FUNCTION javatest.toline )", + +"CREATE TRANSFORM FOR box LANGUAGE glot64 (" + +" FROM SQL WITH FUNCTION javatest.frombox )", + +"CREATE TRANSFORM FOR lseg LANGUAGE glot64 (" + +" TO SQL WITH FUNCTION javatest.tolseg )", + +"CREATE FUNCTION javatest.usingtransforms() RETURNS void" + +" TRANSFORM FOR TYPE line, FOR TYPE box, FOR TYPE lseg" + +" LANGUAGE glot64 AS 'SSBjb3VsZCB1c2UgdHJhbnNmb3JtcyEK'", + +"SELECT javatest.usingtransforms()", + +"CREATE FUNCTION setof3() RETURNS SETOF INT" + +" LANGUAGE glot64 AS 'Mw==' /* 3 */", + +"SELECT javatest.setof3() LIMIT 2" +}, remove = { +"DROP FUNCTION javatest.setof3()", +"DROP FUNCTION javatest.usingtransforms()", +"DROP TRANSFORM FOR lseg LANGUAGE glot64", +"DROP TRANSFORM FOR box LANGUAGE glot64", +"DROP 
TRANSFORM FOR line LANGUAGE glot64", +"DROP FUNCTION javatest.tolseg(internal)", +"DROP FUNCTION javatest.frombox(internal)", +"DROP FUNCTION javatest.toline(internal)", +"DROP FUNCTION javatest.fromline(internal)", +"DROP TRIGGER g64_ar_iu ON javatest.username_test", +"DROP TRIGGER g64_as_d ON javatest.username_test", +"DROP FUNCTION javatest.glot64_trigger()", +"DROP PROCEDURE javatest.say_hello()", +"DROP FUNCTION javatest.hello(text,VARIADIC \"any\")", +"DROP FUNCTION javatest.hello(int4,int4)", +"DROP FUNCTION javatest.hello(anyelement)", +"DROP FUNCTION javatest.hello()", +"DO LANGUAGE glot64 'QnllIGJ5ZSEK'", +"DROP LANGUAGE glot64", +"DROP FUNCTION javatest.glot64_validator(oid)" +}) +public class Glot64 +implements InlineBlocks, Routines, ReturningSets, Triggers, UsingTransforms +{ + private final ProceduralLanguage pl; + + /** + * There must be a public constructor with a {@code ProceduralLanguage} + * parameter. + *

    + * The parameter can be ignored, or used to determine the name, oid, + * accessibility, or other details of the declared PostgreSQL language + * your handler class has been instantiated for. + */ + public Glot64(ProceduralLanguage pl) + { + this.pl = pl; + } + + /** + * The sole method needed to implement inline code blocks. + *

    + * This one merely writes the 'compiled' source text to standard output. + * @param inlineSource the source text to be executed as the inline code + * block. + * @param atomic true if top-level transaction control must be disallowed + * within the block. PL/Java will already handle propagating this value to + * underlying PostgreSQL SPI calls your code might make, but it is also + * available here in case your language has compilation choices it can make + * based on that information. + */ + @Override + public void execute(String inlineSource, boolean atomic) throws SQLException + { + System.out.printf("%s inline code block (atomic: %s):\n", pl, atomic) + .print(compile(inlineSource)); + } + + /** + * This and {@link #additionalChecks additionalChecks} are the two methods + * involved in implementing a validator for the language. + *

    + * Each method should simply return normally if its checks pass, or throw + * an (ideally informative) exception if not. The work is split into two + * methods (which need not both be supplied) because PostgreSQL does not + * guarantee that the validator fully ran at the time of creating any + * routine. Therefore, while PL/Java will normally call both methods during + * validation when a function or procedure is being created, it also will + * call {@link #essentialChecks essentialChecks} at runtime, in advance of + * calling {@link #prepare prepare}, so this is the place to put checks that + * are necessary to support assumptions {@code prepare} relies on. + *

    + * For Glot64, this method does nothing. The only check needed at validation + * time is whether the source text successfully 'compiles', and the + * {@code prepare} method will have to compile the code anyway, so including + * that check here would be redundant. It can be included in + * {@code additionalChecks}, so a user has useful feedback at create time. + * @param subject the proposed Glot64 routine to be validated + * @param checkBody whether to perform all checks. When false, depending on + * details of the language being implemented, some checks may need to be + * skipped. PostgreSQL can call validators with {@code checkBody} false at + * odd times, such as during {@code pg_restore} or {@code pg_upgrade}, when + * not everything in the database may be as the full suite of checks would + * expect. + */ + @Override + public void essentialChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + System.out.printf("%s essentialChecks: checkBody %s\n", + subject, checkBody); + } + + /** + * This and {@link #essentialChecks essentialChecks} are the two methods + * involved in implementing a validator for the language. + *

    + * Each method should simply return normally if its checks pass, or throw + * an (ideally informative) exception if not. See + * {@link #essentialChecks essentialChecks} for more on why there are two + * methods. + *

    + * For Glot64, this is the only method that really checks anything, namely, + * that the source text can be 'compiled'. That is work the {@code prepare} + * method must do when it is called anyway, so there is nothing to gain by + * having it redundantly done in {@code essentialChecks}. Doing it here at + * {@code CREATE} time, though, gives helpful feedback to a user. + * @param subject the proposed Glot64 routine to be validated + * @param checkBody whether to perform all checks. When false, depending on + * details of the language being implemented, some checks may need to be + * skipped. PostgreSQL can call validators with {@code checkBody} false at + * odd times, such as during {@code pg_restore} or {@code pg_upgrade}, when + * not everything in the database may be as the full suite of checks would + * expect. + */ + @Override + public void additionalChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + System.out.printf("%s additionalChecks: checkBody %s: ", + subject, checkBody); + + /* + * For Glot64, 'compiling' is purely a matter of string transformation + * and has no interaction with database state, so the judgment call can + * be made (as here) to include this check even when checkBody is false. + */ + String compiled = compile(subject.src()); + + if ( subject.returnsSet() ) + { + try + { + Integer.parseInt(compiled); + } + catch ( NumberFormatException e ) + { + throw new SQLSyntaxErrorException( + "the body of a Glot64 set-returning function must compile" + + " to an integer", "42P13", e); + } + } + + System.out.printf("ok\n"); + } + + /** + * Prepares an executable template for a routine (a function or procedure). + *

    + * The parameter to this method is a {@link RegProcedure}, carrying a + * {@link PLJavaBased PLJavaBased} memo. The {@code RegProcedure} exposes + * the PostgreSQL catalog information for the routine, and the memo provides + * some more information computed and cached by PL/Java. However, at this + * stage, no information from any specific call site is presented. + *

    + * For example, the memo describes the number, names, and types of the + * routine's inputs (all of its declared parameters that have mode + * {@code IN}, {@code INOUT}, or {@code VARIADIC}) and outputs (the ones + * with mode {@code INOUT}, {@code OUT}, or {@code TABLE}), each set + * presented in the simple form of a {@link TupleDescriptor}. + *

    + * Because this information is based only on the routine's declaration, + * these tuple descriptors are called {@code inputsTemplate} and + * {@code outputsTemplate}. They may contain entries with polymorphic types + * that will be resolved to concrete types at a given call site (and + * possibly to different concrete types at a different call site). The + * {@link BitSet BitSet}s {@code unresolvedInputs} and + * {@code unresolvedOutputs} indicate which positions must be resolved + * later. If the bit sets are empty, the template {@code TupleDescriptor}s + * are already complete descriptors of the inputs and outputs that will be + * seen at call sites. + *

    + * This method should precompute whatever it can based on the routine's + * catalog declaration and the template tuple descriptors only, and return + * a {@link Template} instance, which must depend only on this information, + * as it will be cached with the {@code RegProcedure} itself, independently + * of any call site. + *

    + * At a call site, the {@code Template} instance's {@code specialize} method + * will be called on a {@link Lookup Lookup} object (conventionally called + * {@code flinfo}) representing the call site. At that stage, more specific + * information is available, such as fully-resolved {@code TupleDescriptor}s + * for the inputs and outputs, and which argument expressions at that call + * site have stable values that will not vary in successive calls made at + * that site. The {@code specialize} method should use that information to + * generate and return a {@link Routine Routine}, a fully-resolved object + * with a {@code call} method ready to be cached at that call site and used + * for (possibly many) calls made there. + *

    + * When the routine has no polymorphic inputs or outputs, as reported by + * empty {@code unresolvedInputs} and {@code unresolvedOutputs} bit sets + * at the {@code prepare} stage, a final {@code Routine} can be generated + * at that stage, and the {@code Template} returned by {@code prepare} can + * simply return it unconditionally (unless it wants to look at which + * input expressions can be treated as stable). + *

    + * For each call at a given call site, a {@link Call Call} instance will be + * passed (conventionally as {@code fcinfo}) to the generated + * {@code Routine}'s {@code call} method. The {@code Call} object bears the + * actual {@link TupleTableSlot}s from which the routine will fetch its + * arguments and to which it will (XXX when implemented) store its + * result(s). + */ + @Override + public Template prepare(RegProcedure target, PLJavaBased memo) + throws SQLException + { + BitSet unresolvedIn = memo.unresolvedInputs(); + BitSet unresolvedOut = memo.unresolvedOutputs(); + + System.out.printf( + "%s prepare():\n" + + "inputsTemplate : %s\n" + + "unresolvedInputs : %s\n" + + "outputsTemplate : %s\n" + + "unresolvedOutputs: %s\n" + + "transforms : %s\n", + + target, + + memo.inputsTemplate() + .stream().map(Attribute::type).collect(toList()), + + unresolvedIn, + + /* + * Unlike inputsTemplate, outputsTemplate can return null. That can + * happen for two reasons: (1) the routine is declared VOID and no + * outputs are needed, or (2) it is declared RECORD and will rely on + * an output column definition list at every call site, so there is + * no outputsTemplate to examine in advance. + */ + ofNullable(memo.outputsTemplate()) + .map(t -> + t.stream() + .map(Attribute::type) + .map(Object::toString) + .collect(toList()) + .toString()) + .orElse("null"), + + /* + * It's also possible for unresolvedOutputs to be null, in the + * declared-RECORD-so-nothing-is-known-yet case. (In the VOID case, + * it will just be an empty BitSet, meaning no outputs need to be + * resolved, just as an empty BitSet would mean any other time. That + * makes it simple to test for canSkipResolution, as shown below.) 
+ */ + Objects.toString(unresolvedOut), + Objects.toString(memo.transforms()) + ); + + boolean canSkipResolution = + unresolvedIn.isEmpty() + && null != unresolvedOut && unresolvedOut.isEmpty(); + + /* + * For this 'language', all compilation can be done early; it does not + * need to see resolved type descriptors from flinfo at a call site. + */ + String compiled = compile(target.src()); + + /* + * This will be the Template object, cached with the RegProcedure. + */ + return flinfo -> + { + /* + * It might be interesting to know which arguments are 'stable' at + * this 'flinfo' call site, meaning they will have the same values + * in any number of upcoming calls at this site. In a realistic + * case, there might be certain arguments we'd be interested in + * precomputing values from, and we can use a BitSet to indicate + * which arguments we'd like to know the stability of, and the set + * returned from inputsStable will show the subset of those + * positions where stable expressions have been passed. For this + * example, we'll start by setting all bits [0,nargs) and thus ask + * about all the arguments. + */ + TupleDescriptor inDescriptor = flinfo.inputsDescriptor(); + int nargs = inDescriptor.size(); + BitSet maybeStable = new BitSet(nargs); + maybeStable.set(0, nargs); + + /* + * Precompute something specific to this call site + * that can be baked into the returned Routine. + */ + int id = System.identityHashCode(flinfo); + + System.out.printf( + "%s Template.specialize():\n" + + "precomputed id : %x\n" + + "inputsDescriptor : %s\n" + + "inputsAreSpread : %s\n" + + "stableInputs : %s\n" + + "outputsDescriptor: %s\n", + + target, id, + + inDescriptor.stream().map(Attribute::type).collect(toList()), + + flinfo.inputsAreSpread(), + + flinfo.stableInputs(maybeStable), + + /* + * Above, outputsTemplate could return null for two reasons. 
+ * The second reason no longer applies; if the routine is + * declared RECORD and this call site has no column definition + * list, outputsDescriptor throws an exception. But a null + * return is still possible in the VOID case. + * + * Why not an empty descriptor for VOID? An empty descriptor + * really occurs if a function returns t where t is a + * zero-column composite type. Odd thing to do, but allowed. + */ + ofNullable(flinfo.outputsDescriptor()) + .map(d -> + d.stream() + .map(Attribute::type) + .map(Object::toString) + .collect(toList()) + .toString()) + .orElse("null") + ); + + /* + * This will be the Routine object, cached with the call site. + */ + return fcinfo -> + { + Call.Context cx = fcinfo.context(); + + String subifc = ofNullable(cx) + .map(c -> c.getClass().getInterfaces()[0].getSimpleName()) + .orElse("null"); + + String maybeAtomic = + (cx instanceof Call.Context.CallContext) + ? String.format("atomic: %s\n", + ((Call.Context.CallContext)cx).atomic()) + : ""; + + System.out.printf( + "%s Routine.call():\n" + + "precomputed id: %x\n" + + "collation: %s\n" + + "context: %s\n%s" + + "result:\n%s", + target, id, + fcinfo.collation(), + subifc, maybeAtomic, + compiled // here we 'execute' the 'compiled' routine :) + ); + }; + }; + } + + /** + * Prepares a template for a set-returning Glot64 function. + *

    + * The source of any set-returning Glot64 function must "compile" to + * the string representation of an integer. + *

    + * The generated routine will ignore any arguments, and produce a number of + * rows (of, for now, nothing, as {@code TupleTableSlot} isn't writable yet) + * equal to the integer. If the integer is negative, the return of a single + * (non-set) result is exercised. + */ + @Override + public SRFTemplate prepareSRF(RegProcedure target, PLJavaBased memo) + throws SQLException + { + int rowsToReturn = Integer.parseInt(compile(target.src())); + + return (SRFTemplate.ValuePerCall) flinfo -> + { + return fcinfo -> + { + return new SRFNext() + { + private int rowsLeft = rowsToReturn; + + @Override + public void close() + { + System.out.println("ValuePerCall result closed"); + } + + @Override + public SRFNext.Result nextResult(Call fcinfo) + { + if ( 0 > rowsLeft ) + return SRFNext.Result.SINGLE; + if ( 0 == rowsLeft ) + return SRFNext.Result.END; + -- rowsLeft; + return SRFNext.Result.MULTIPLE; + } + }; + }; + }; + } + + @Override + public void essentialTriggerChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + System.out.printf("essentialTriggerChecks: "); + essentialChecks(subject, memo, checkBody); + } + + @Override + public void additionalTriggerChecks( + RegProcedure subject, PLJavaBased memo, boolean checkBody) + throws SQLException + { + System.out.printf("additionalTriggerChecks: "); + additionalChecks(subject, memo, checkBody); + } + + @Override + public TriggerTemplate prepareTrigger( + RegProcedure target, PLJavaBased memo) + throws SQLException + { + System.out.printf( + "%s prepareTrigger():\n", + target + ); + + String compiled = compile(target.src()); + + return trigger -> + { + System.out.printf( + "%s TriggerTemplate.specialize():\n" + + "name : %s\n" + + "relation : %s\n" + + "function : %s\n" + + "called : %s\n" + + "events : %s\n" + + "scope : %s\n" + + "enabled : %s\n" + + "internal : %s\n" + + "arguments : %s\n" + + "columns : %s\n" + + "when : %s\n" + + "tableOld : %s\n" + + "tableNew : %s\n" + + 
"isClone : %s\n" + + "constraint: %s\n" + + "deferrable: %s\n" + + "initiallyDeferred: %s\n" + + "constraintTable : %s\n" + + "constraintIndex : %s\n", + target, + trigger.name(), trigger.relation(), trigger.function(), + trigger.called(), trigger.events(), trigger.scope(), + trigger.enabled(), trigger.internal(), + trigger.arguments(), + projectionListNames(trigger.columns()), + ofNullable(trigger.when()) + .map(xml -> + { + try + { + return xml.getString(); + } + catch ( SQLException e ) + { + return e.toString(); + } + }).orElse("null"), + trigger.tableOld(), + trigger.tableNew(), + trigger.isClone(), + trigger.constraint(), + trigger.deferrable(), trigger.initiallyDeferred(), + trigger.constraintRelation(), trigger.constraintIndex() + ); + + /* + * Precompute something specific to this trigger + * that can be baked into the returned TriggerFunction. + */ + String triggerName = + trigger.name() + " on " + trigger.relation().qualifiedName(); + + return triggerData -> + { + System.out.printf( + "%s TriggerFunction.apply():\n" + + "precomputed name: %s\n" + + "called : %s\n" + + "event : %s\n" + + "scope : %s\n" + + "relation : %s\n" + + "trigger : %s\n" + + "triggerTuple : %s\n" + + "newTuple : %s\n" + + "updatedCols : %s\n" + + "result:\n%s", + target, triggerName, + triggerData.called(), triggerData.event(), + triggerData.scope(), triggerData.relation(), + triggerData.trigger(), + slotListNamesTypes(triggerData.triggerTuple()), + slotListNamesTypes(triggerData.newTuple()), + projectionListNames(triggerData.updatedColumns()), + compiled // here we 'execute' the 'compiled' routine :) + ); + + return null; // in real life this suppresses triggering event + }; + }; + } + + /** + * Checks that t is a transform usable with this language. + *

    + * The toy requirements imposed here are that a {@code fromSQL} function + * must be implemented in this language and have a {@code src} string that + * compiles to {@code "fromSQL"}, and likewise a {@code toSQL} function must + * be implemented in this language and compile to {@code "toSQL"}. + */ + @Override + public void essentialTransformChecks(Transform t) throws SQLException + { + System.out.printf("%s essentialTransformChecks: ", t); + + RegProcedure fs = t.fromSQL(); + RegProcedure ts = t.toSQL(); + + if ( ! fs.isValid() ) + { + /* + * This transform specifies to use the PL's default from-SQL + * conversion for this type. An exception should be thrown here + * if there is no such usable default. + */ + System.out.printf(String.format( + "will use PL's default from-SQL treatment for %s\n", t.type())); + } + else if ( fs.language() != pl || ! "fromSQL".equals(compile(fs.src())) ) + throw new SQLSyntaxErrorException(String.format( + "%s for use as a fromSQL function for %s must be implemented " + + "in %s and compile to string \"fromSQL\"", fs, pl, pl), + "42P17"); + + if ( ! ts.isValid() ) + { + /* + * This transform specifies to use the PL's default to-SQL + * conversion for this type. An exception should be thrown here + * if there is no such usable default. + */ + System.out.printf(String.format( + "will use PL's default to-SQL treatment for %s\n", t.type())); + } + else if ( ts.language() != pl || ! "toSQL".equals(compile(ts.src())) ) + throw new SQLSyntaxErrorException(String.format( + "%s for use as a toSQL function for %s must be implemented " + + "in %s and compile to string \"toSQL\"", ts, pl, pl), + "42P17"); + + System.out.printf("ok\n"); + return; + } + + /** + * This method handles 'compiling' Glot64 source code (which is Base64) + * into its 'compiled' form, which is ASCII and easier to read than the + * source. + *

    + * It is factored out here so it can also be conveniently used at validation + * time. + *

    + * The longwinded style with explicit {@code newEncoder}/{@code newDecoder} + * calls is used to get strict checking (instead of lax character + * substitution) from the encoder/decoder, to give the most, shall we say, + * thorough feedback to the user. + */ + public static String compile(String sourceText) throws SQLException + { + try + { + CharBuffer cb = CharBuffer.wrap(sourceText); + ByteBuffer bb = US_ASCII.newEncoder().encode(cb); + bb = Base64.getDecoder().decode(bb); + cb = US_ASCII.newDecoder().decode(bb); + return cb.toString(); + } + catch ( CharacterCodingException | IllegalArgumentException e ) + { + throw new SQLSyntaxErrorException( + "compiling glot64 code: " + e, "42601", e); + } + } + + private static String projectionListNames(Projection td) + { + return + ofNullable(td) + .map(d -> + d.stream() + .map(Attribute::name) + .map(Object::toString) + .collect(toList()) + .toString()) + .orElse("null"); + } + + private static String slotListNamesTypes(TupleTableSlot tts) + { + return + ofNullable(tts) + .map(s -> + s.descriptor().stream() + .map(a->a.name() + ":" + a.type().qualifiedName()) + .collect(toList()) + .toString()) + .orElse("null"); + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/package-info.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/package-info.java new file mode 100644 index 000000000..a9085f9fb --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/polyglot/package-info.java @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ + +/** + * Examples that illustrate how to build new Procedural Language implementations + * in PL/Java. + *

    + * The {@code SQLAction} annotation here creates the {@code pljavahandler} + * language needed for the other examples, and will eventually disappear when + * the creation actions are incorporated into PL/Java's installation. + * @author Chapman Flack + */ +@SQLAction(provides="pljavahandler language", install={ +"DO LANGUAGE plpgsql '" + +" DECLARE" + +" qbin text;" + +" BEGIN" + +" SELECT quote_literal(probin) INTO STRICT qbin FROM pg_proc" + +" WHERE oid = ''sqlj.java_call_handler()''::regprocedure;" + +" EXECUTE ''" + +" CREATE OR REPLACE FUNCTION sqlj.pljavaDispatchValidator(pg_catalog.oid)" + +" RETURNS pg_catalog.void" + +" LANGUAGE C AS '' || qbin || '', ''''pljavaDispatchValidator''''" + +" '';" + +" EXECUTE ''" + +" CREATE OR REPLACE FUNCTION sqlj.pljavaDispatchValidator()" + +" RETURNS pg_catalog.language_handler" + +" LANGUAGE C AS '' || qbin || '', ''''pljavaDispatchValidator''''" + +" '';" + +" EXECUTE ''" + +" CREATE OR REPLACE FUNCTION sqlj.pljavaDispatchRoutine()" + +" RETURNS pg_catalog.language_handler" + +" LANGUAGE C AS '' || qbin || '', ''''pljavaDispatchRoutine''''" + +" '';" + +" EXECUTE ''" + +" CREATE OR REPLACE FUNCTION sqlj.pljavaDispatchInline(pg_catalog.internal)" + +" RETURNS pg_catalog.void" + +" LANGUAGE C AS '' || qbin || '', ''''pljavaDispatchInline''''" + +" '';" + +"END'", + +"COMMENT ON FUNCTION sqlj.pljavaDispatchValidator(oid) IS " + +"'The validator function for the \"PL/Java handler\" language (in which one " + +"can only write functions that are validators of actual procedural languages " + +"implemented atop PL/Java).'", + +"COMMENT ON FUNCTION sqlj.pljavaDispatchValidator() IS " + +"'The call handler for the \"PL/Java handler\" language (in which one " + +"can only write functions that are validators of actual procedural languages " + +"implemented atop PL/Java). 
The C entry point is the same as for the " + +"validator handler, which works because the only functions that can be " + +"written in this \"language\" are validators.'", + +"COMMENT ON FUNCTION sqlj.pljavaDispatchRoutine() IS " + +"'The call handler that must be named (as HANDLER) in CREATE LANGUAGE for " + +"a procedural language implemented atop PL/Java. (PostgreSQL requires every " + +"CREATE LANGUAGE to include HANDLER, but PL/Java allows a language to " + +"simply not implement the Routines interface, if it is only intended for " + +"InlineBlocks.)'", + +"COMMENT ON FUNCTION sqlj.pljavaDispatchInline(pg_catalog.internal) IS " + +"'The handler that must be named (as INLINE) in CREATE LANGUAGE for " + +"a procedural language implemented atop PL/Java, if that language is " + +"intended to support inline code blocks.'", + +"CREATE LANGUAGE pljavahandler" + +" HANDLER sqlj.pljavaDispatchValidator" + +" VALIDATOR sqlj.pljavaDispatchValidator", + +"COMMENT ON LANGUAGE pljavahandler IS " + +"'The PL/Java \"handler language\", used in implementing other procedural " + +"languages atop PL/Java. 
Only one kind of function can be written in this " + +"\"language\", namely, a validator function, and the AS string of such a " + +"validator function is simply the name of a Java class that must implement " + +"one or both of PLJavaBasedLanguage.Routines or " + +"PLJavaBasedLanguage.InlineBlocks, and that class will be used as the " + +"implementation of the new language.'" +}, remove={ +"DROP LANGUAGE pljavahandler", +"DROP FUNCTION sqlj.pljavaDispatchInline(internal)", +"DROP FUNCTION sqlj.pljavaDispatchRoutine()", +"DROP FUNCTION sqlj.pljavaDispatchValidator()", +"DROP FUNCTION sqlj.pljavaDispatchValidator(oid)" +}) +package org.postgresql.pljava.example.polyglot; + +import org.postgresql.pljava.annotation.SQLAction; diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/saxon/S9.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/saxon/S9.java index fc3fb5210..d14c8f5e8 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/saxon/S9.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/saxon/S9.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Tada AB and other contributors, as listed below. + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -111,6 +111,7 @@ import org.postgresql.pljava.ResultSetProvider; import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLAction; import org.postgresql.pljava.annotation.SQLType; import static org.postgresql.pljava.annotation.Function.OnNullInput.CALLED; @@ -288,6 +289,23 @@ * XQuery regular-expression methods provided here. 
* @author Chapman Flack */ +@SQLAction( + implementor = "postgresql_xml", // skip it all if no xml support + requires = "presentOnClassPath", + provides = "saxon9api", + install = + "SELECT CASE WHEN" + + " presentOnClassPath('net.sf.saxon.s9api.QName')" + + "THEN" + + " CAST(" + + " set_config('pljava.implementors', 'saxon9api,' || " + + " current_setting('pljava.implementors'), true)" + + " AS void" + + " )" + + "ELSE" + + " logMessage('INFO', 'Saxon examples skipped: s9api classes missing')" + + "END" +) public class S9 implements ResultSetProvider.Large { private S9( @@ -399,7 +417,7 @@ static class DocumentWrapUnwrap * @param sve SQL string value to use in a text node * @return XML content, the text node wrapped in a document node */ - @Function(schema="javatest") + @Function(implementor="saxon9api", schema="javatest") public static SQLXML xmltext(String sve) throws SQLException { SQLXML rx = s_dbc.createSQLXML(); @@ -457,6 +475,7 @@ public static SQLXML xmltext(String sve) throws SQLException * type to be cast to. */ @Function( + implementor="saxon9api", schema="javatest", type="pg_catalog.record", onNullInput=CALLED, @@ -615,6 +634,7 @@ public static boolean xmlcast( * namespace. */ @Function( + implementor="saxon9api", schema="javatest", onNullInput=CALLED, settings="IntervalStyle TO iso_8601" @@ -683,6 +703,7 @@ public static SQLXML xq_ret_content( * SQL value is null. */ @Function( + implementor="saxon9api", schema="javatest", onNullInput=CALLED, settings="IntervalStyle TO iso_8601" @@ -822,6 +843,7 @@ private static SQLXML returnContent( * for base64 or (the default, false) hexadecimal. */ @Function( + implementor="saxon9api", schema="javatest", onNullInput=CALLED, settings="IntervalStyle TO iso_8601" @@ -1431,10 +1453,7 @@ else if ( v instanceof XdmNode ) // XXX support SEQUENCE someday * preconfigured as the Syntax Rules dictate. * @param pt The single-row ResultSet representing the passed parameters * and context item, if any. 
- * @param nameToIndex A Map, supplied empty, that on return will map - * variable names for the dynamic context to column indices in {@code pt}. - * If a context item was supplied, its index will be entered in the map - * with the null key. + * @param namespaces namespace keys and values to be declared. */ private static XQueryCompiler createStaticContextWithPassedTypes( Binding.Assemblage pt, Iterable> namespaces) @@ -1485,10 +1504,10 @@ private static XQueryCompiler createStaticContextWithPassedTypes( /** * Check that something's type is "convertible to XML(SEQUENCE) - * according to the Syntax Rules of ... ." + * according to the Syntax Rules of ... {@code }." * That turns out not to be a very high bar; not much is excluded * by those rules except collection, row, structured, or - * reference typed s. + * reference typed {@code }s. * @param jdbcType The {@link Types JDBC type} to be checked. * @param what A string to include in the exception message if the * check fails. @@ -2983,7 +3002,7 @@ public void onGroupEnd(int groupNumber) * SQLFeatureNotSupportedException (0A000) if (in the current * implementation) w3cNewlines is false or omitted. */ - @Function(schema="javatest") + @Function(implementor="saxon9api", schema="javatest") public static boolean like_regex( String value, //strict String pattern, //strict @@ -3036,7 +3055,7 @@ public static boolean like_regex( * SQLFeatureNotSupportedException (0A000) if (in the current * implementation) usingOctets is true, or w3cNewlines is false or omitted. */ - @Function(schema="javatest") + @Function(implementor="saxon9api", schema="javatest") public static int occurrences_regex( String pattern, //strict @SQLType(name="\"in\"") String in, //strict @@ -3115,7 +3134,7 @@ public static int occurrences_regex( * SQLFeatureNotSupportedException (0A000) if (in the current * implementation) usingOctets is true, or w3cNewlines is false or omitted. 
*/ - @Function(schema="javatest") + @Function(implementor="saxon9api", schema="javatest") public static int position_regex( String pattern, //strict @SQLType(name="\"in\"") String in, //strict @@ -3197,7 +3216,7 @@ public static int position_regex( * SQLFeatureNotSupportedException (0A000) if (in the current * implementation) usingOctets is true, or w3cNewlines is false or omitted. */ - @Function(schema="javatest") + @Function(implementor="saxon9api", schema="javatest") public static String substring_regex( String pattern, //strict @SQLType(name="\"in\"") String in, //strict @@ -3298,7 +3317,7 @@ public static String substring_regex( * SQLFeatureNotSupportedException (0A000) if (in the current * implementation) usingOctets is true, or w3cNewlines is false or omitted. */ - @Function(schema="javatest") + @Function(implementor="saxon9api", schema="javatest") public static String translate_regex( String pattern, //strict @SQLType(name="\"in\"") String in, //strict diff --git a/pljava-packaging/build.xml b/pljava-packaging/build.xml index c85fc4785..65155b4a1 100644 --- a/pljava-packaging/build.xml +++ b/pljava-packaging/build.xml @@ -255,6 +255,26 @@ jos.close(); simple update is possible, just repeat the next entry, with the from-version changed. 
--> + + + + + + + + pgjdbc + + + org.postgresql + postgresql + [42.6.0,) + + + + 0 ; ) + { + Transform tr = toCheck [ i ]; + utImpl.essentialTransformChecks(tr); + } + return doInPG(() -> + { + for ( Transform tr : toCheck ) + { + tr.language(); // make sure it's cached for invalidation use + final_map.put(tr.type(), tr); + FromSQLMemo.addDependent(tr.fromSQL(), tr); + ToSQLMemo.addDependent(tr.toSQL(), tr); + } + return p.transforms(); + }); + }; + } + + /* computation methods */ + + private static PLPrincipal principal(ProceduralLanguageImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + if ( s.get(Att.LANPLTRUSTED, BOOLEAN_INSTANCE) ) + return new PLPrincipal.Sandboxed(o.name()); + return new PLPrincipal.Unsandboxed(o.name()); + } + + private static RegProcedure handler(ProceduralLanguageImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + s.get(Att.LANPLCALLFOID, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure inlineHandler( + ProceduralLanguageImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + s.get(Att.LANINLINE, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure validator(ProceduralLanguageImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + s.get(Att.LANVALIDATOR, REGPROCEDURE_INSTANCE); + return p; + } + + /* API methods */ + + @Override + public PLPrincipal principal() + { + try + { + MethodHandle h = m_slots[SLOT_PRINCIPAL]; + return (PLPrincipal)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure handler() + { + try + { + MethodHandle h = m_slots[SLOT_HANDLER]; + return 
(RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure inlineHandler() + { + try + { + MethodHandle h = m_slots[SLOT_INLINEHANDLER]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure validator() + { + try + { + MethodHandle h = m_slots[SLOT_VALIDATOR]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /** + * {@link SupportMemo SupportMemo} for attachment to + * a {@link RegProcedure RegProcedure} that serves as + * a {@link ProceduralLanguage ProceduralLanguage} handler. + */ + static class HandlerMemo + extends SupportMemo implements Handler + { + private HandlerMemo( + RegProcedure carrier, ProceduralLanguageImpl lang) + { + super(carrier, lang); + } + + static void addDependent( + RegProcedure proc, + ProceduralLanguageImpl lang) + { + SupportMemo.add(proc, lang, HandlerMemo.class, + () -> new HandlerMemo(proc, lang)); + } + } + + /** + * {@link SupportMemo SupportMemo} for attachment to + * a {@link RegProcedure RegProcedure} that serves as + * a {@link ProceduralLanguage ProceduralLanguage} inline handler. + */ + static class InlineHandlerMemo + extends SupportMemo + implements InlineHandler + { + private InlineHandlerMemo( + RegProcedure carrier, + ProceduralLanguageImpl lang) + { + super(carrier, lang); + } + + static void addDependent( + RegProcedure proc, + ProceduralLanguageImpl lang) + { + SupportMemo.add(proc, lang, InlineHandlerMemo.class, + () -> new InlineHandlerMemo(proc, lang)); + } + } + + /** + * {@link SupportMemo SupportMemo} for attachment to + * a {@link RegProcedure RegProcedure} that serves as + * a {@link ProceduralLanguage ProceduralLanguage} validator. 
+ */ + static class ValidatorMemo + extends SupportMemo implements Validator + { + private ValidatorMemo( + RegProcedure carrier, + ProceduralLanguageImpl lang) + { + super(carrier, lang); + } + + static void addDependent( + RegProcedure proc, + ProceduralLanguageImpl lang) + { + SupportMemo.add(proc, lang, ValidatorMemo.class, + () -> new ValidatorMemo(proc, lang)); + } + } + + /** + * Implementation of {@link PLJavaBased PLJavaBased} memo for attachment to + * a {@link RegProcedure RegProcedure} whose implementation is + * PL/Java-based. + */ + static class PLJavaMemo extends How implements PLJavaBased + { + Template m_routineTemplate; + + private PLJavaMemo(RegProcedureImpl carrier) + { + super(carrier); + } + + /* + * Discards a PLJavaMemo that has been retained with a null template. + * + * The validator logic in LookupImpl creates the usual linkages between + * a RegProcedure and its language and validator, but never installs a + * template in the RegProcedure's memo as the first actual call would. + * Often, that isn't noticeable, because a shared-invalidation event + * upon rollback if the validator rejected, or even upon successful + * entry of the routine, causes the incomplete memo to be discarded. + * + * It can happen, though, if a routine is created/validated and then + * used in the same transaction, that the incomplete memo with null + * template is still there. Here is a convenient method to get rid of + * it the same way shared-invalidation would. 
+ */ + void discardIncomplete() + { + assert null == m_routineTemplate : "discard memo Template non-null"; + List sps = new ArrayList<>(); + List postOps = new ArrayList<>(); + invalidate(sps, postOps); + assert 0 == sps.size() && 0 == postOps.size(); + } + + @Override + void invalidate(List sps, List postOps) + { + super.invalidate(sps, postOps); + ProceduralLanguageImpl pl = + (ProceduralLanguageImpl)m_carrier.language(); + pl.removeDependentRoutine(m_carrier); + } + + @Override + public TupleDescriptor inputsTemplate() + { + return m_carrier.inputsTemplate(); + } + + @Override + public BitSet unresolvedInputs() + { + return m_carrier.unresolvedInputs(); + } + + @Override + public TupleDescriptor outputsTemplate() + { + return m_carrier.outputsTemplate(); + } + + @Override + public BitSet unresolvedOutputs() + { + return m_carrier.unresolvedOutputs(); + } + + @Override + public List transforms() + { + return m_carrier.transforms(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegClassImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegClassImpl.java new file mode 100644 index 000000000..4895f49f1 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegClassImpl.java @@ -0,0 +1,1079 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; + +import java.sql.SQLException; +import java.sql.SQLXML; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; +import static org.postgresql.pljava.model.MemoryContext.CurrentMemoryContext; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static + org.postgresql.pljava.pg.CatalogObjectImpl.Factory.ForeignTableRelationId; + +import static org.postgresql.pljava.pg.ModelConstants.Anum_pg_class_reltype; +import static org.postgresql.pljava.pg.ModelConstants.RELOID; // syscache +import static org.postgresql.pljava.pg.ModelConstants.FOREIGNTABLEREL; // " +import static org.postgresql.pljava.pg.ModelConstants.CLASS_TUPLE_SIZE; + +import static org.postgresql.pljava.pg.TupleTableSlotImpl.heapTupleGetLightSlot; + +import org.postgresql.pljava.pg.adt.GrantAdapter; +import org.postgresql.pljava.pg.adt.NameAdapter; +import static org.postgresql.pljava.pg.adt.OidAdapter.AM_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGCLASS_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGTYPE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.SERVER_INSTANCE; +import static 
org.postgresql.pljava.pg.adt.OidAdapter.TABLESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.*; +import static org.postgresql.pljava.pg.adt.XMLAdapter.SYNTHETIC_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Qualified; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +/* + * Can get lots of information, including Form_pg_class rd_rel and + * TupleDesc rd_att, from the relcache. See CacheRegisterRelcacheCallback(). + * However, the relcache copy of the class tuple is cut off at CLASS_TUPLE_SIZE. + */ + +/** + * Implementation of the {@link RegClass RegClass} interface. + */ +class RegClassImpl extends Addressed +implements + Nonshared, Namespaced, Owned, + AccessControlled, RegClass +{ + /** + * Subclass that additionally implements + * {@link RegClass.Known RegClass.Known}. + */ + static class Known> + extends RegClassImpl implements RegClass.Known + { + } + + private static final Function s_initializer; + + /** + * Per-instance switch point, to be invalidated selectively + * by a relcache callback. 
+ */ + private final SwitchPoint[] m_cacheSwitchPoint; + + final SwitchPoint cacheSwitchPoint() + { + return m_cacheSwitchPoint[0]; + } + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return RELOID; + } + + /* Implementation of Named, Namespaced, Owned, AccessControlled */ + + private static Simple name(RegClassImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.RELNAME, NameAdapter.SIMPLE_INSTANCE); + } + + private static RegNamespace namespace(RegClassImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.RELNAMESPACE, REGNAMESPACE_INSTANCE); + } + + private static RegRole owner(RegClassImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.RELOWNER, REGROLE_INSTANCE); + } + + private static List grants(RegClassImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.RELACL, GrantAdapter.LIST_INSTANCE); + } + + /* Implementation of RegClass */ + + RegClassImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + m_cacheSwitchPoint = new SwitchPoint[] { new SwitchPoint() }; + } + + /** + * Called from {@code Factory}'s {@code invalidateRelation} to set up + * the invalidation of this relation's metadata. + *

    + * Adds this relation's {@code SwitchPoint} to the caller's list so that, + * if more than one is to be invalidated, that can be done in bulk. Adds to + * postOps any operations the caller should conclude with + * after invalidating the {@code SwitchPoint}. + */ + void invalidate(List sps, List postOps) + { + SwitchPoint sp = m_cacheSwitchPoint[0]; + if ( sp.unused() ) + return; + TupleDescriptor.Interned[] oldTDH = m_tupDescHolder; + sps.add(sp); + + /* + * Before invalidating the SwitchPoint, line up a new one (and a newly + * nulled tupDescHolder) for value-computing methods to find once the + * old SwitchPoint is invalidated. + */ + m_cacheSwitchPoint[0] = new SwitchPoint(); + m_tupDescHolder = null; + + /* + * After the old SwitchPoint gets invalidated, the old tupDescHolder, + * if any, can have its element nulled so the old TupleDescriptor can + * be collected without having to wait for the 'guardWithTest's it is + * bound into to be recomputed. + */ + if ( null != oldTDH ) + { + postOps.add(() -> + { + TupleDescImpl td = (TupleDescImpl)oldTDH[0]; + if ( null == td ) + return; + oldTDH[0] = null; + td.invalidate(); + }); + } + } + + /** + * Associated tuple descriptor, redundantly kept accessible here as well as + * opaquely bound into a {@code SwitchPointCache} method handle. + *

    + * This one-element array containing the descriptor is what gets bound into + * the handle, so the descriptor can be freed for GC at invalidation time + * (rather than waiting for the next tuple-descriptor request to replace + * the handle). Only accessed from {@code SwitchPointCache} computation + * methods or {@code TupleDescImpl} factory methods, all of which execute + * on the PG thread; no synchronization fuss needed. + *

    + * When null, no computation method has run (or none since invalidation), + * and the state is not known. Otherwise, the single element is the result + * to be returned by the {@code tupleDescriptor()} API method. + */ + TupleDescriptor.Interned[] m_tupDescHolder; + + /** + * Holder for the {@code RegType} corresponding to {@code type()}, + * only non-null during a call of {@code dualHandshake}. + */ + private RegType m_dual = null; + + /** + * Called by the corresponding {@code RegType} instance if it has just + * looked us up. + *

    + * Because the {@code SwitchPointCache} recomputation methods always execute + * on the PG thread, plain access to an instance field suffices here. + */ + void dualHandshake(RegType dual) + { + try + { + m_dual = dual; + dual = type(); + assert dual == m_dual : "RegType/RegClass handshake outcome"; + } + finally + { + m_dual = null; + } + } + + static final int SLOT_TUPLEDESCRIPTOR; + static final int SLOT_TYPE; + static final int SLOT_OFTYPE; + static final int SLOT_AM; + static final int SLOT_TABLESPACE; + static final int SLOT_TOASTRELATION; + static final int SLOT_HASINDEX; + static final int SLOT_ISSHARED; + static final int SLOT_PERSISTENCE; + static final int SLOT_KIND; + static final int SLOT_NATTRIBUTES; + static final int SLOT_CHECKS; + static final int SLOT_HASRULES; + static final int SLOT_HASTRIGGERS; + static final int SLOT_HASSUBCLASS; + static final int SLOT_ROWSECURITY; + static final int SLOT_FORCEROWSECURITY; + static final int SLOT_ISPOPULATED; + static final int SLOT_REPLIDENT; + static final int SLOT_ISPARTITION; + static final int SLOT_OPTIONS; + static final int SLOT_FOREIGN; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(RegClassImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> o.m_cacheSwitchPoint[0]) + .withSlots(o -> o.m_slots) + + .withCandidates( + CatalogObjectImpl.Addressed.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Addressed.class) + .withDependent("cacheTuple", SLOT_TUPLE) + + .withCandidates(RegClassImpl.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReceiverType(CatalogObjectImpl.Namespaced.class) + .withReturnType(null) + .withDependent( "namespace", SLOT_NAMESPACE) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + 
.withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent( "grants", SLOT_ACL) + + .withReceiverType(null) + .withDependent( "tupleDescriptor", SLOT_TUPLEDESCRIPTOR = i++) + .withDependent( "type", SLOT_TYPE = i++) + .withDependent( "ofType", SLOT_OFTYPE = i++) + .withDependent( "accessMethod", SLOT_AM = i++) + .withDependent( "tablespace", SLOT_TABLESPACE = i++) + .withDependent( "toastRelation", SLOT_TOASTRELATION = i++) + .withDependent( "hasIndex", SLOT_HASINDEX = i++) + .withDependent( "isShared", SLOT_ISSHARED = i++) + .withDependent( "persistence", SLOT_PERSISTENCE = i++) + .withDependent( "kind", SLOT_KIND = i++) + .withDependent( "nAttributes", SLOT_NATTRIBUTES = i++) + .withDependent( "checks", SLOT_CHECKS = i++) + .withDependent( "hasRules", SLOT_HASRULES = i++) + .withDependent( "hasTriggers", SLOT_HASTRIGGERS = i++) + .withDependent( "hasSubclass", SLOT_HASSUBCLASS = i++) + .withDependent( "rowSecurity", SLOT_ROWSECURITY = i++) + .withDependent("forceRowSecurity", SLOT_FORCEROWSECURITY = i++) + .withDependent( "isPopulated", SLOT_ISPOPULATED = i++) + .withDependent( "replicaIdentity", SLOT_REPLIDENT = i++) + .withDependent( "isPartition", SLOT_ISPARTITION = i++) + .withDependent( "options", SLOT_OPTIONS = i++) + .withDependent( "foreign", SLOT_FOREIGN = i++) + + .build(); + NSLOTS = i; + } + + static class Att + { + static final Attribute RELNAME; + static final Attribute RELNAMESPACE; + static final Attribute RELOWNER; + static final Attribute RELACL; + static final Attribute RELOFTYPE; + static final Attribute RELAM; + static final Attribute RELTABLESPACE; + static final Attribute RELTOASTRELID; + static final Attribute RELHASINDEX; + static final Attribute RELISSHARED; + static final Attribute RELPERSISTENCE; + static final Attribute RELKIND; + static final Attribute RELNATTS; + static final Attribute RELCHECKS; + static final Attribute RELHASRULES; + static final Attribute RELHASTRIGGERS; + static final Attribute RELHASSUBCLASS; 
+ static final Attribute RELROWSECURITY; + static final Attribute RELFORCEROWSECURITY; + static final Attribute RELISPOPULATED; + static final Attribute RELREPLIDENT; + static final Attribute RELISPARTITION; + static final Attribute RELOPTIONS; + static final Attribute RELPARTBOUND; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "relname", + "relnamespace", + "relowner", + "relacl", + "reloftype", + "relam", + "reltablespace", + "reltoastrelid", + "relhasindex", + "relisshared", + "relpersistence", + "relkind", + "relnatts", + "relchecks", + "relhasrules", + "relhastriggers", + "relhassubclass", + "relrowsecurity", + "relforcerowsecurity", + "relispopulated", + "relreplident", + "relispartition", + "reloptions", + "relpartbound" + ).iterator(); + + RELNAME = itr.next(); + RELNAMESPACE = itr.next(); + RELOWNER = itr.next(); + RELACL = itr.next(); + RELOFTYPE = itr.next(); + RELAM = itr.next(); + RELTABLESPACE = itr.next(); + RELTOASTRELID = itr.next(); + RELHASINDEX = itr.next(); + RELISSHARED = itr.next(); + RELPERSISTENCE = itr.next(); + RELKIND = itr.next(); + RELNATTS = itr.next(); + RELCHECKS = itr.next(); + RELHASRULES = itr.next(); + RELHASTRIGGERS = itr.next(); + RELHASSUBCLASS = itr.next(); + RELROWSECURITY = itr.next(); + RELFORCEROWSECURITY = itr.next(); + RELISPOPULATED = itr.next(); + RELREPLIDENT = itr.next(); + RELISPARTITION = itr.next(); + RELOPTIONS = itr.next(); + RELPARTBOUND = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } + + /** + * A tiny class to just encapsulate the couple of extra attributes a foreign + * table has, as an alternative to a full-blown ForeignTable catalog object. + *

    + * This class eagerly populates both {@code server} and {@code options} when + * constructed, so the {@code RegClass} needs just one slot holding this. + */ + static class Foreign + { + private static final RegClass FT; + private static final Attribute FTSERVER; + private static final Attribute FTOPTIONS; + + static + { + FT = of(CLASSID, ForeignTableRelationId); + Iterator itr = FT.tupleDescriptor().project( + "ftserver", + "ftoptions" + ).iterator(); + + FTSERVER = itr.next(); + FTOPTIONS = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + + final ForeignServer server; + final Map options; + + Foreign(int oid) + { + ByteBuffer heapTuple = _searchSysCacheCopy1(FOREIGNTABLEREL, oid); + TupleTableSlot tts = heapTupleGetLightSlot( + FT.tupleDescriptor(), heapTuple, CurrentMemoryContext()); + + server = tts.get(FTSERVER, SERVER_INSTANCE); + options = tts.get(FTOPTIONS, ArrayAdapters.RELOPTIONS_INSTANCE); + } + } + + /* computation methods */ + + /** + * Return the tuple descriptor for this relation, wrapped in a one-element + * array, which is also stored in {@code m_tupDescHolder}. + *

    + * The tuple descriptor for a relation can be retrieved from the PostgreSQL + * {@code relcache}, or from the {@code typcache} if the relation has an + * associated type; it's the same descriptor, and the + * latter gets it from the former. Going through the {@code relcache} is + * fussier, involving the lock manager every time, while using the + * {@code typcache} can avoid that except in its cache-miss case. + *

    + * Here, for every relation other than {@code pg_class} itself, we will + * rely on the corresponding {@code RegType}, if there is one, to do + * the work. There is a bit + * of incest involved; it will construct the descriptor to rely on our + * {@code SwitchPoint} for invalidation, and will poke the wrapper array + * into our {@code m_tupDescHolder}. + *

    + * It does that last bit so that, even if the first query for a type's + * tuple descriptor is made through the {@code RegType}, we will also return + * it if a later request is made here, and all of the invalidation logic + * lives here; it is relation-cache invalidation that obsoletes a cataloged + * tuple descriptor. + *

    + * However, when the relation is {@code pg_class} itself, or is one + * of the relation kinds without an associated type entry, we rely + * on a bespoke JNI method to get the descriptor from the {@code relcache}. + * The {@code pg_class} case occurs when we are looking up the descriptor to + * interpret our own cache tuples, and the normal case's {@code type()} call + * won't work before that's available. + */ + private static TupleDescriptor.Interned[] tupleDescriptor(RegClassImpl o) + { + TupleDescriptor.Interned[] r = o.m_tupDescHolder; + + /* + * If not null, r is a value placed here by an invocation of + * tupleDescriptor() on the associated RegType, and we have not seen an + * invalidation since that happened (invalidations run on the PG thread, + * as do computation methods like this, so we've not missed anything). + * It is the value to return. + */ + if ( null != r ) + return r; + + /* + * In any case other than looking up our own tuple descriptor, we can + * use type() to find the associated RegType and let it, if valid, + * do the work. + */ + if ( CLASSID != o ) + { + RegType t = o.type(); + if ( t.isValid() ) + { + t.tupleDescriptor(); // side effect: writes o.m_tupDescHolder + return o.m_tupDescHolder; + } + } + + /* + * May be the bootstrap case, looking up the pg_class tuple descriptor, + * or just a relation kind that does not have an associate type entry. + * If we got here we need it, so we can call the Cataloged constructor + * directly, rather than fromByteBuffer (which would first check whether + * we need it, and bump its reference count only if so). Called + * directly, the constructor expects the count already bumped, which + * the _tupDescBootstrap method will have done for us. 
+ */ + ByteBuffer bb = _tupDescBootstrap(o.oid()); + bb.order(nativeOrder()); + r = new TupleDescriptor.Interned[] {new TupleDescImpl.Cataloged(bb, o)}; + return o.m_tupDescHolder = r; + } + + private static RegType type(RegClassImpl o) throws SQLException + { + /* + * If this is a handshake occurring when the corresponding RegType + * has just looked *us* up, we are done. + */ + if ( null != o.m_dual ) + return o.m_dual; + + /* + * Otherwise, look up the corresponding RegType, and do the same + * handshake in reverse. Either way, the connection is set up + * bidirectionally with one cache lookup starting from either. That + * can avoid extra work in operations (like TupleDescriptor caching) + * that may touch both objects, without complicating their code. + * + * Because the fetching of pg_attribute's tuple descriptor + * necessarily passes through this point, and attributes don't know + * what their names are until it has, use the attribute number here. + */ + TupleTableSlot s = o.cacheTuple(); + RegType t = s.get( + s.descriptor().sqlGet(Anum_pg_class_reltype), REGTYPE_INSTANCE); + + /* + * Regular relations have a valid reltype, but other kinds of RegClass + * (index, toast table) do not. 
+ */ + if ( t.isValid() ) + ((RegTypeImpl)t).dualHandshake(o); + + return t; + } + + private static RegType ofType(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELOFTYPE, REGTYPE_INSTANCE); + } + + private static AccessMethod accessMethod(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELAM, AM_INSTANCE); + } + + private static Tablespace tablespace(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELTABLESPACE, TABLESPACE_INSTANCE); + } + + private static RegClass toastRelation(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELTOASTRELID, REGCLASS_INSTANCE); + } + + private static boolean hasIndex(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELHASINDEX, BOOLEAN_INSTANCE); + } + + private static boolean isShared(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELISSHARED, BOOLEAN_INSTANCE); + } + + private static Persistence persistence(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return persistenceFromCatalog( + s.get(Att.RELPERSISTENCE, INT1_INSTANCE)); + } + + private static Kind kind(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return kindFromCatalog( + s.get(Att.RELKIND, INT1_INSTANCE)); + } + + private static short nAttributes(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELNATTS, INT2_INSTANCE); + } + + private static short checks(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELCHECKS, INT2_INSTANCE); + } + + private static boolean hasRules(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELHASRULES, BOOLEAN_INSTANCE); + } + + private static boolean 
hasTriggers(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELHASTRIGGERS, BOOLEAN_INSTANCE); + } + + private static boolean hasSubclass(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELHASSUBCLASS, BOOLEAN_INSTANCE); + } + + private static boolean rowSecurity(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELROWSECURITY, BOOLEAN_INSTANCE); + } + + private static boolean forceRowSecurity(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.RELFORCEROWSECURITY, BOOLEAN_INSTANCE); + } + + private static boolean isPopulated(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELISPOPULATED, BOOLEAN_INSTANCE); + } + + private static ReplicaIdentity replicaIdentity(RegClassImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return replicaIdentityFromCatalog( + s.get(Att.RELREPLIDENT, INT1_INSTANCE)); + } + + private static boolean isPartition(RegClassImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELISPARTITION, BOOLEAN_INSTANCE); + } + + private static Map options(RegClassImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.RELOPTIONS, ArrayAdapters.RELOPTIONS_INSTANCE); + } + + private static Foreign foreign(RegClassImpl o) + throws SQLException + { + if ( Kind.FOREIGN_TABLE != o.kind() ) + return null; + return new Foreign(o.oid()); + } + + /* API methods */ + + @Override + public TupleDescriptor.Interned tupleDescriptor() + { + try + { + MethodHandle h = m_slots[SLOT_TUPLEDESCRIPTOR]; + return ((TupleDescriptor.Interned[])h.invokeExact(this, h))[0]; + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType type() + { + try + { + MethodHandle h = m_slots[SLOT_TYPE]; + return (RegType)h.invokeExact(this, 
h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType ofType() + { + try + { + MethodHandle h = m_slots[SLOT_OFTYPE]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public AccessMethod accessMethod() + { + try + { + MethodHandle h = m_slots[SLOT_AM]; + return (AccessMethod)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + // filenode + + @Override + public Tablespace tablespace() + { + try + { + MethodHandle h = m_slots[SLOT_TABLESPACE]; + return (Tablespace)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /* Of limited interest ... estimates used by planner + * + int pages(); + float tuples(); + int allVisible(); + */ + + @Override + public RegClass toastRelation() + { + try + { + MethodHandle h = m_slots[SLOT_TOASTRELATION]; + return (RegClass)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean hasIndex() + { + try + { + MethodHandle h = m_slots[SLOT_HASINDEX]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean isShared() + { + try + { + MethodHandle h = m_slots[SLOT_ISSHARED]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Persistence persistence() + { + try + { + MethodHandle h = m_slots[SLOT_PERSISTENCE]; + return (Persistence)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Kind kind() + { + try + { + MethodHandle h = m_slots[SLOT_KIND]; + return (Kind)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public short nAttributes() + { + try + { + MethodHandle h = m_slots[SLOT_NATTRIBUTES]; + return (short)h.invokeExact(this, h); + } + 
catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public short checks() + { + try + { + MethodHandle h = m_slots[SLOT_CHECKS]; + return (short)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean hasRules() + { + try + { + MethodHandle h = m_slots[SLOT_HASRULES]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean hasTriggers() + { + try + { + MethodHandle h = m_slots[SLOT_HASTRIGGERS]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean hasSubclass() + { + try + { + MethodHandle h = m_slots[SLOT_HASSUBCLASS]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean rowSecurity() + { + try + { + MethodHandle h = m_slots[SLOT_ROWSECURITY]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean forceRowSecurity() + { + try + { + MethodHandle h = m_slots[SLOT_FORCEROWSECURITY]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean isPopulated() + { + try + { + MethodHandle h = m_slots[SLOT_ISPOPULATED]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public ReplicaIdentity replicaIdentity() + { + try + { + MethodHandle h = m_slots[SLOT_REPLIDENT]; + return (ReplicaIdentity)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean isPartition() + { + try + { + MethodHandle h = m_slots[SLOT_ISPARTITION]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + // rewrite + // frozenxid + // minmxid + + 
@Override + public Map options() + { + try + { + MethodHandle h = m_slots[SLOT_OPTIONS]; + return (Map)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public SQLXML partitionBound() + { + /* + * Because of the JDBC rules that an SQLXML instance lasts no longer + * than one transaction and can only be read once, it is not a good + * candidate for caching. We will just fetch a new one from the cached + * tuple as needed. + */ + TupleTableSlot s = cacheTuple(); + return s.get(Att.RELPARTBOUND, SYNTHETIC_INSTANCE); + } + + @Override + public ForeignServer foreignServer() + { + try + { + MethodHandle h = m_slots[SLOT_FOREIGN]; + Foreign f = (Foreign)h.invokeExact(this, h); + return null == f ? null : f.server; + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Map foreignOptions() + { + try + { + MethodHandle h = m_slots[SLOT_FOREIGN]; + Foreign f = (Foreign)h.invokeExact(this, h); + return null == f ? null : f.options; + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + private static Persistence persistenceFromCatalog(byte b) + { + switch ( b ) + { + case (byte)'p': return Persistence.PERMANENT; + case (byte)'u': return Persistence.UNLOGGED; + case (byte)'t': return Persistence.TEMPORARY; + } + throw unchecked(new SQLException( + "unrecognized Persistence type '" + (char)b + "' in catalog", + "XX000")); + } + + private static Kind kindFromCatalog(byte b) + { + switch ( b ) + { + case (byte)'r': return Kind.TABLE; + case (byte)'i': return Kind.INDEX; + case (byte)'S': return Kind.SEQUENCE; + case (byte)'t': return Kind.TOAST; + case (byte)'v': return Kind.VIEW; + case (byte)'m': return Kind.MATERIALIZED_VIEW; + case (byte)'c': return Kind.COMPOSITE_TYPE; + case (byte)'f': return Kind.FOREIGN_TABLE; + case (byte)'p': return Kind.PARTITIONED_TABLE; + case (byte)'I': return Kind.PARTITIONED_INDEX; + } + throw unchecked(new SQLException( + "unrecognized Kind type '" + 
(char)b + "' in catalog", + "XX000")); + } + + private static ReplicaIdentity replicaIdentityFromCatalog(byte b) + { + switch ( b ) + { + case (byte)'d': return ReplicaIdentity.DEFAULT; + case (byte)'n': return ReplicaIdentity.NOTHING; + case (byte)'f': return ReplicaIdentity.ALL; + case (byte)'i': return ReplicaIdentity.INDEX; + } + throw unchecked(new SQLException( + "unrecognized ReplicaIdentity type '" + (char)b + "' in catalog", + "XX000")); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegCollationImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegCollationImpl.java new file mode 100644 index 000000000..55f486e99 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegCollationImpl.java @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.COLLOID; // syscache + +import org.postgresql.pljava.pg.adt.EncodingAdapter; +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.NameAdapter.AS_STRING_INSTANCE; +import static 
org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.BOOLEAN_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.INT1_INSTANCE; +import org.postgresql.pljava.pg.adt.TextAdapter; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link RegCollation RegCollation} interface. + */ +class RegCollationImpl extends Addressed +implements Nonshared, Namespaced, Owned, RegCollation +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return COLLOID; + } + + /* Implementation of Named, Namespaced, Owned */ + + private static Simple name(RegCollationImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.COLLNAME, SIMPLE_INSTANCE); + } + + private static RegNamespace namespace(RegCollationImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.COLLNAMESPACE, REGNAMESPACE_INSTANCE); + } + + private static RegRole owner(RegCollationImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.COLLOWNER, REGROLE_INSTANCE); + } + + /* Implementation of RegCollation */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. 
+ */ + RegCollationImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static final int SLOT_ENCODING; + static final int SLOT_COLLATE; + static final int SLOT_CTYPE; + static final int SLOT_PROVIDER; + static final int SLOT_VERSION; + static final int SLOT_DETERMINISTIC; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(RegCollationImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(RegCollationImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Namespaced.class) + .withDependent( "namespace", SLOT_NAMESPACE) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + + .withReceiverType(null) + .withDependent( "encoding", SLOT_ENCODING = i++) + .withDependent( "collate", SLOT_COLLATE = i++) + .withDependent( "ctype", SLOT_CTYPE = i++) + .withDependent( "provider", SLOT_PROVIDER = i++) + .withDependent( "version", SLOT_VERSION = i++) + .withDependent("deterministic", SLOT_DETERMINISTIC = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. 
+ */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute COLLNAME; + static final Attribute COLLNAMESPACE; + static final Attribute COLLOWNER; + static final Attribute COLLENCODING; + static final Attribute COLLCOLLATE; + static final Attribute COLLCTYPE; + static final Attribute COLLPROVIDER; + static final Attribute COLLVERSION; + static final Attribute COLLISDETERMINISTIC; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "collname", + "collnamespace", + "collowner", + "collencoding", + "collcollate", + "collctype", + "collprovider", + "collversion", + "collisdeterministic" + ).iterator(); + + COLLNAME = itr.next(); + COLLNAMESPACE = itr.next(); + COLLOWNER = itr.next(); + COLLENCODING = itr.next(); + COLLCOLLATE = itr.next(); + COLLCTYPE = itr.next(); + COLLPROVIDER = itr.next(); + COLLVERSION = itr.next(); + COLLISDETERMINISTIC = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods */ + + private static CharsetEncoding encoding(RegCollationImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.COLLENCODING, EncodingAdapter.INSTANCE); + } + + private static String collate(RegCollationImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.COLLCOLLATE, AS_STRING_INSTANCE); + } + + private static String ctype(RegCollationImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.COLLCTYPE, AS_STRING_INSTANCE); + } + + private static Provider provider(RegCollationImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + byte p = s.get(Att.COLLPROVIDER, INT1_INSTANCE); + switch ( p ) + { + case (byte)'d': + return Provider.DEFAULT; + case (byte)'c': + return Provider.LIBC; + case (byte)'i': + return Provider.ICU; + default: + throw new UnsupportedOperationException(String.format( + "Unrecognized collation 
provider value %#x", p)); + } + } + + private static String version(RegCollationImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.COLLVERSION, TextAdapter.INSTANCE); + } + + private static boolean deterministic(RegCollationImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.COLLISDETERMINISTIC, BOOLEAN_INSTANCE); + } + + /* API methods */ + + @Override + public CharsetEncoding encoding() + { + try + { + MethodHandle h = m_slots[SLOT_ENCODING]; + return (CharsetEncoding)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public String collate() + { + try + { + MethodHandle h = m_slots[SLOT_COLLATE]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public String ctype() + { + try + { + MethodHandle h = m_slots[SLOT_CTYPE]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Provider provider() // since PG 10 + { + try + { + MethodHandle h = m_slots[SLOT_PROVIDER]; + return (Provider)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public String version() // since PG 10 + { + try + { + MethodHandle h = m_slots[SLOT_VERSION]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean deterministic() // since PG 12 + { + try + { + MethodHandle h = m_slots[SLOT_DETERMINISTIC]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegConfigImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegConfigImpl.java new file mode 100644 index 000000000..da2e74033 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegConfigImpl.java @@ -0,0 +1,136 @@ +/* + * 
Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.TSCONFIGOID; // syscache + +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link RegConfig RegConfig} interface. 
+ */ +class RegConfigImpl extends Addressed +implements Nonshared, Namespaced, Owned, RegConfig +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return TSCONFIGOID; + } + + /* Implementation of Named, Namespaced, Owned */ + + private static Simple name(RegConfigImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.CFGNAME, SIMPLE_INSTANCE); + } + + private static RegNamespace namespace(RegConfigImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.CFGNAMESPACE, REGNAMESPACE_INSTANCE); + } + + private static RegRole owner(RegConfigImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.CFGOWNER, REGROLE_INSTANCE); + } + + /* Implementation of RegConfig */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. 
+ */ + RegConfigImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static + { + s_initializer = + new Builder<>(RegConfigImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(RegConfigImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Namespaced.class) + .withDependent( "namespace", SLOT_NAMESPACE) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + + .build() + .compose(CatalogObjectImpl.Addressed.s_initializer); + } + + static class Att + { + static final Attribute CFGNAME; + static final Attribute CFGNAMESPACE; + static final Attribute CFGOWNER; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "cfgname", + "cfgnamespace", + "cfgowner" + ).iterator(); + + CFGNAME = itr.next(); + CFGNAMESPACE = itr.next(); + CFGOWNER = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegDictionaryImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegDictionaryImpl.java new file mode 100644 index 000000000..da562900a --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegDictionaryImpl.java @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.TSDICTOID; // syscache + +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link RegDictionary RegDictionary} interface. 
+ */ +class RegDictionaryImpl extends Addressed +implements Nonshared, Namespaced, Owned, RegDictionary +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return TSDICTOID; + } + + /* Implementation of Named, Namespaced, Owned */ + + private static Simple name(RegDictionaryImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.DICTNAME, SIMPLE_INSTANCE); + } + + private static RegNamespace namespace(RegDictionaryImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.DICTNAMESPACE, REGNAMESPACE_INSTANCE); + } + + private static RegRole owner(RegDictionaryImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.DICTOWNER, REGROLE_INSTANCE); + } + + /* Implementation of RegDictionary */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. 
+ */ + RegDictionaryImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static + { + s_initializer = + new Builder<>(RegDictionaryImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(RegDictionaryImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Namespaced.class) + .withDependent( "namespace", SLOT_NAMESPACE) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + + .build() + .compose(CatalogObjectImpl.Addressed.s_initializer); + } + + static class Att + { + static final Attribute DICTNAME; + static final Attribute DICTNAMESPACE; + static final Attribute DICTOWNER; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "dictname", + "dictnamespace", + "dictowner" + ).iterator(); + + DICTNAME = itr.next(); + DICTNAMESPACE = itr.next(); + DICTOWNER = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegNamespaceImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegNamespaceImpl.java new file mode 100644 index 000000000..ad573fbe4 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegNamespaceImpl.java @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.List; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.NAMESPACEOID; // syscache + +import org.postgresql.pljava.pg.adt.GrantAdapter; +import org.postgresql.pljava.pg.adt.NameAdapter; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link RegNamespace RegNamespace} interface. 
+ */ +class RegNamespaceImpl extends Addressed +implements + Nonshared, Named, Owned, + AccessControlled, RegNamespace +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return NAMESPACEOID; + } + + /* Implementation of Named, Owned, AccessControlled */ + + private static Simple name(RegNamespaceImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.NSPNAME, NameAdapter.SIMPLE_INSTANCE); + } + + private static RegRole owner(RegNamespaceImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.NSPOWNER, REGROLE_INSTANCE); + } + + private static List grants(RegNamespaceImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.NSPACL, GrantAdapter.LIST_INSTANCE); + } + + /* Implementation of RegNamespace */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. + */ + RegNamespaceImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static + { + s_initializer = + new Builder<>(RegNamespaceImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(RegNamespaceImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent( "grants", SLOT_ACL) + + .build() + /* + * Add these slot initializers after what Addressed does. 
+ */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + } + + static class Att + { + static final Attribute NSPNAME; + static final Attribute NSPOWNER; + static final Attribute NSPACL; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "nspname", + "nspowner", + "nspacl" + ).iterator(); + + NSPNAME = itr.next(); + NSPOWNER = itr.next(); + NSPACL = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegOperatorImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegOperatorImpl.java new file mode 100644 index 000000000..62671bb48 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegOperatorImpl.java @@ -0,0 +1,457 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.OPEROID; // syscache + +import static org.postgresql.pljava.pg.adt.NameAdapter.OPERATOR_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGOPERATOR_INSTANCE; 
+import static org.postgresql.pljava.pg.adt.OidAdapter.REGPROCEDURE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGTYPE_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.INT1_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.BOOLEAN_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Operator; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link RegOperator RegOperator} interface. + */ +class RegOperatorImpl extends Addressed +implements Nonshared, Namespaced, Owned, RegOperator +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return OPEROID; + } + + /* Implementation of Named, Namespaced, Owned */ + + private static Operator name(RegOperatorImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.OPRNAME, OPERATOR_INSTANCE); + } + + private static RegNamespace namespace(RegOperatorImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.OPRNAMESPACE, REGNAMESPACE_INSTANCE); + } + + private static RegRole owner(RegOperatorImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.OPROWNER, REGROLE_INSTANCE); + } + + /* Implementation of RegOperator */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. 
+ */ + RegOperatorImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static final int SLOT_KIND; + static final int SLOT_CANMERGE; + static final int SLOT_CANHASH; + static final int SLOT_LEFTOPERAND; + static final int SLOT_RIGHTOPERAND; + static final int SLOT_RESULT; + static final int SLOT_COMMUTATOR; + static final int SLOT_NEGATOR; + static final int SLOT_EVALUATOR; + static final int SLOT_RESTRICTIONESTIMATOR; + static final int SLOT_JOINESTIMATOR; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(RegOperatorImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(RegOperatorImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Namespaced.class) + .withDependent( "namespace", SLOT_NAMESPACE) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + + .withReceiverType(null) + .withDependent( "kind", SLOT_KIND = i++) + .withDependent( "canMerge", SLOT_CANMERGE = i++) + .withDependent( "canHash", SLOT_CANHASH = i++) + .withDependent( "leftOperand", SLOT_LEFTOPERAND = i++) + .withDependent( "rightOperand", SLOT_RIGHTOPERAND = i++) + .withDependent( "result", SLOT_RESULT = i++) + .withDependent( "commutator", SLOT_COMMUTATOR = i++) + .withDependent( "negator", SLOT_NEGATOR = i++) + .withDependent( "evaluator", SLOT_EVALUATOR = i++) + .withDependent( + "restrictionEstimator", SLOT_RESTRICTIONESTIMATOR = i++) + .withDependent("joinEstimator", SLOT_JOINESTIMATOR = i++) + + .build() + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute OPRNAME; + static final Attribute OPRNAMESPACE; + static final Attribute OPROWNER; + static final 
Attribute OPRKIND; + static final Attribute OPRCANMERGE; + static final Attribute OPRCANHASH; + static final Attribute OPRLEFT; + static final Attribute OPRRIGHT; + static final Attribute OPRRESULT; + static final Attribute OPRCOM; + static final Attribute OPRNEGATE; + static final Attribute OPRCODE; + static final Attribute OPRREST; + static final Attribute OPRJOIN; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "oprname", + "oprnamespace", + "oprowner", + "oprkind", + "oprcanmerge", + "oprcanhash", + "oprleft", + "oprright", + "oprresult", + "oprcom", + "oprnegate", + "oprcode", + "oprrest", + "oprjoin" + ).iterator(); + + OPRNAME = itr.next(); + OPRNAMESPACE = itr.next(); + OPROWNER = itr.next(); + OPRKIND = itr.next(); + OPRCANMERGE = itr.next(); + OPRCANHASH = itr.next(); + OPRLEFT = itr.next(); + OPRRIGHT = itr.next(); + OPRRESULT = itr.next(); + OPRCOM = itr.next(); + OPRNEGATE = itr.next(); + OPRCODE = itr.next(); + OPRREST = itr.next(); + OPRJOIN = itr.next(); + + assert ! 
itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods */ + + private static Kind kind(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + byte b = s.get(Att.OPRKIND, INT1_INSTANCE); + switch ( b ) + { + case (byte)'b': + return Kind.INFIX; + case (byte)'l': + return Kind.PREFIX; + case (byte)'r': + @SuppressWarnings("deprecation") + Kind k = Kind.POSTFIX; + return k; + default: + throw new UnsupportedOperationException(String.format( + "Unrecognized operator kind value %#x", b)); + } + } + + private static boolean canMerge(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.OPRCANMERGE, BOOLEAN_INSTANCE); + } + + private static boolean canHash(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.OPRCANHASH, BOOLEAN_INSTANCE); + } + + private static RegType leftOperand(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.OPRLEFT, REGTYPE_INSTANCE); + } + + private static RegType rightOperand(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.OPRRIGHT, REGTYPE_INSTANCE); + } + + private static RegType result(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.OPRRESULT, REGTYPE_INSTANCE); + } + + private static RegOperator commutator(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.OPRCOM, REGOPERATOR_INSTANCE); + } + + private static RegOperator negator(RegOperatorImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.OPRNEGATE, REGOPERATOR_INSTANCE); + } + + private static RegProcedure evaluator(RegOperatorImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + s.get(Att.OPRCODE, 
REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure + restrictionEstimator(RegOperatorImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = + (RegProcedure) + s.get(Att.OPRREST, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure + joinEstimator(RegOperatorImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + s.get(Att.OPRJOIN, REGPROCEDURE_INSTANCE); + return p; + } + + /* API methods */ + + @Override + public Kind kind() + { + try + { + MethodHandle h = m_slots[SLOT_KIND]; + return (Kind)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean canMerge() + { + try + { + MethodHandle h = m_slots[SLOT_CANMERGE]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean canHash() + { + try + { + MethodHandle h = m_slots[SLOT_CANHASH]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType leftOperand() + { + try + { + MethodHandle h = m_slots[SLOT_LEFTOPERAND]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType rightOperand() + { + try + { + MethodHandle h = m_slots[SLOT_RIGHTOPERAND]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType result() + { + try + { + MethodHandle h = m_slots[SLOT_RESULT]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegOperator commutator() + { + try + { + MethodHandle h = m_slots[SLOT_COMMUTATOR]; + return 
(RegOperator)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegOperator negator() + { + try + { + MethodHandle h = m_slots[SLOT_NEGATOR]; + return (RegOperator)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure evaluator() + { + try + { + MethodHandle h = m_slots[SLOT_EVALUATOR]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure restrictionEstimator() + { + try + { + MethodHandle h = m_slots[SLOT_RESTRICTIONESTIMATOR]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure joinEstimator() + { + try + { + MethodHandle h = m_slots[SLOT_JOINESTIMATOR]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegProcedureImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegProcedureImpl.java new file mode 100644 index 000000000..af8757b27 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegProcedureImpl.java @@ -0,0 +1,1306 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; +import java.sql.SQLXML; + +import java.util.BitSet; +import java.util.EnumSet; +import java.util.Iterator; +import java.util.List; +import java.util.Set; + +import java.util.concurrent.CopyOnWriteArraySet; + +import java.util.function.Function; +import java.util.function.Supplier; + +import java.util.stream.IntStream; + +import org.postgresql.pljava.annotation.Function.Effects; +import org.postgresql.pljava.annotation.Function.OnNullInput; +import org.postgresql.pljava.annotation.Function.Parallel; +import org.postgresql.pljava.annotation.Function.Security; + +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; +import org.postgresql.pljava.internal.Checked; +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.model.*; +import org.postgresql.pljava.model.RegProcedure.Memo; +import org.postgresql.pljava.model.ProceduralLanguage.PLJavaBased; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.CatalogObjectImpl.Factory.PROCOID_CB; +import static org.postgresql.pljava.pg.ModelConstants.PROCOID; // syscache +import org.postgresql.pljava.pg.ProceduralLanguageImpl.PLJavaMemo; +import static org.postgresql.pljava.pg.TupleDescImpl.synthesizeDescriptor; + +import static org.postgresql.pljava.pg.adt.ArrayAdapter + .FLAT_STRING_LIST_INSTANCE; +import org.postgresql.pljava.pg.adt.GrantAdapter; +import 
static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.PLANG_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGPROCEDURE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGTYPE_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.BOOLEAN_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.FLOAT4_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.INT1_INSTANCE; +import org.postgresql.pljava.pg.adt.TextAdapter; +import static org.postgresql.pljava.pg.adt.XMLAdapter.SYNTHETIC_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link RegProcedure RegProcedure} interface. + */ +class RegProcedureImpl> extends Addressed> +implements + Nonshared>, Namespaced, Owned, + AccessControlled, RegProcedure +{ + private static final Function s_initializer; + + /** + * Count of instances subject to invalidation. + *

    + * Only accessed in invalidate and SP.onFirstUse, both on the PG thread. + */ + private static int s_instances; + + private static class SP extends SwitchPoint + { + @Override + protected void onFirstUse() + { + if ( 1 == ++ s_instances ) + sysCacheInvalArmed(PROCOID_CB, true); + } + } + + private final SwitchPoint[] m_sp; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known> classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return PROCOID; + } + + /* Implementation of Named, Namespaced, Owned, AccessControlled */ + + private static Simple name(RegProcedureImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.PRONAME, SIMPLE_INSTANCE); + } + + private static RegNamespace namespace(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.PRONAMESPACE, REGNAMESPACE_INSTANCE); + } + + private static RegRole owner(RegProcedureImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.PROOWNER, REGROLE_INSTANCE); + } + + private static List grants(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.PROACL, GrantAdapter.LIST_INSTANCE); + } + + /* Implementation of RegProcedure */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. 
+ */ + RegProcedureImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + m_sp = new SwitchPoint[] { new SP() }; + } + + @Override + void invalidate(List sps, List postOps) + { + SwitchPoint sp = m_sp[0]; + if ( sp.unused() ) + return; + sps.add(sp); + m_sp[0] = new SP(); + if ( 0 == -- s_instances ) + sysCacheInvalArmed(PROCOID_CB, false); + + M why = m_why; + PLJavaMemo how = m_how; + boolean dependsOnTransforms = m_dependsOnTransforms; + m_why = null; + m_how = null; + m_dependsOnTransforms = false; + if ( why instanceof AbstractMemo ) + ((AbstractMemo)why).invalidate(sps, postOps); + if ( null != how ) + how.invalidate(sps, postOps); + if ( dependsOnTransforms ) + TransformImpl.removeDependentRoutine(this, transforms()); + } + + static final int SLOT_LANGUAGE; + static final int SLOT_COST; + static final int SLOT_ROWS; + static final int SLOT_VARIADICTYPE; + static final int SLOT_SUPPORT; + static final int SLOT_KIND; + static final int SLOT_SECURITY; + static final int SLOT_LEAKPROOF; + static final int SLOT_ONNULLINPUT; + static final int SLOT_RETURNSSET; + static final int SLOT_EFFECTS; + static final int SLOT_PARALLEL; + static final int SLOT_RETURNTYPE; + static final int SLOT_ARGTYPES; + static final int SLOT_ALLARGTYPES; + static final int SLOT_ARGMODES; + static final int SLOT_ARGNAMES; + static final int SLOT_TRANSFORMTYPES; + static final int SLOT_SRC; + static final int SLOT_BIN; + static final int SLOT_CONFIG; + + /* + * Slots for some additional computed values that are not exposed in API + * but will be useful here in the internals. 
+ */ + static final int SLOT_INPUTSTEMPLATE; + static final int SLOT_UNRESOLVEDINPUTS; + static final int SLOT_OUTPUTSTEMPLATE; + static final int SLOT_UNRESOLVEDOUTPUTS; + static final int SLOT_TRANSFORMS; + + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(RegProcedureImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> o.m_sp[0]) + .withSlots(o -> o.m_slots) + + .withCandidates( + CatalogObjectImpl.Addressed.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Addressed.class) + .withDependent("cacheTuple", SLOT_TUPLE) + + .withCandidates(RegProcedureImpl.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Namespaced.class) + .withDependent( "namespace", SLOT_NAMESPACE) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent( "grants", SLOT_ACL) + + .withReceiverType(null) + .withDependent( "language", SLOT_LANGUAGE = i++) + .withDependent( "cost", SLOT_COST = i++) + .withDependent( "rows", SLOT_ROWS = i++) + .withDependent( "variadicType", SLOT_VARIADICTYPE = i++) + .withDependent( "support", SLOT_SUPPORT = i++) + .withDependent( "kind", SLOT_KIND = i++) + .withDependent( "security", SLOT_SECURITY = i++) + .withDependent( "leakproof", SLOT_LEAKPROOF = i++) + .withDependent( "onNullInput", SLOT_ONNULLINPUT = i++) + .withDependent( "returnsSet", SLOT_RETURNSSET = i++) + .withDependent( "effects", SLOT_EFFECTS = i++) + .withDependent( "parallel", SLOT_PARALLEL = i++) + .withDependent( "returnType", SLOT_RETURNTYPE = i++) + .withDependent( "argTypes", SLOT_ARGTYPES = i++) + .withDependent( "allArgTypes", SLOT_ALLARGTYPES = i++) + .withDependent( "argModes", SLOT_ARGMODES = i++) + .withDependent( 
"argNames", SLOT_ARGNAMES = i++) + .withDependent("transformTypes", SLOT_TRANSFORMTYPES = i++) + .withDependent( "src", SLOT_SRC = i++) + .withDependent( "bin", SLOT_BIN = i++) + .withDependent( "config", SLOT_CONFIG = i++) + + .withDependent( "inputsTemplate", SLOT_INPUTSTEMPLATE = i++) + .withDependent( "unresolvedInputs", SLOT_UNRESOLVEDINPUTS = i++) + .withDependent( "outputsTemplate", SLOT_OUTPUTSTEMPLATE = i++) + .withDependent("unresolvedOutputs", SLOT_UNRESOLVEDOUTPUTS = i++) + .withDependent( "transforms", SLOT_TRANSFORMS = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. + */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute PRONAME; + static final Attribute PRONAMESPACE; + static final Attribute PROOWNER; + static final Attribute PROACL; + static final Attribute PROLANG; + static final Attribute PROCOST; + static final Attribute PROROWS; + static final Attribute PROVARIADIC; + static final Attribute PROSUPPORT; + static final Attribute PROKIND; + static final Attribute PROSECDEF; + static final Attribute PROLEAKPROOF; + static final Attribute PROISSTRICT; + static final Attribute PRORETSET; + static final Attribute PROVOLATILE; + static final Attribute PROPARALLEL; + static final Attribute PRORETTYPE; + static final Attribute PROARGTYPES; + static final Attribute PROALLARGTYPES; + static final Attribute PROARGMODES; + static final Attribute PROARGNAMES; + static final Attribute PROTRFTYPES; + static final Attribute PROSRC; + static final Attribute PROBIN; + static final Attribute PROCONFIG; + static final Attribute PROARGDEFAULTS; + static final Attribute PROSQLBODY; + + static + { + Iterator itr = attNames( + "proname", + "pronamespace", + "proowner", + "proacl", + "prolang", + "procost", + "prorows", + "provariadic", + "prosupport", + "prokind", + "prosecdef", + "proleakproof", + "proisstrict", + "proretset", + "provolatile", + "proparallel", + 
"prorettype", + "proargtypes", + "proallargtypes", + "proargmodes", + "proargnames", + "protrftypes", + "prosrc", + "probin", + "proconfig", + "proargdefaults" + ).alsoIf(PG_VERSION_NUM >= 140000, + "prosqlbody" + ).project(CLASSID.tupleDescriptor()); + + PRONAME = itr.next(); + PRONAMESPACE = itr.next(); + PROOWNER = itr.next(); + PROACL = itr.next(); + PROLANG = itr.next(); + PROCOST = itr.next(); + PROROWS = itr.next(); + PROVARIADIC = itr.next(); + PROSUPPORT = itr.next(); + PROKIND = itr.next(); + PROSECDEF = itr.next(); + PROLEAKPROOF = itr.next(); + PROISSTRICT = itr.next(); + PRORETSET = itr.next(); + PROVOLATILE = itr.next(); + PROPARALLEL = itr.next(); + PRORETTYPE = itr.next(); + PROARGTYPES = itr.next(); + PROALLARGTYPES = itr.next(); + PROARGMODES = itr.next(); + PROARGNAMES = itr.next(); + PROTRFTYPES = itr.next(); + PROSRC = itr.next(); + PROBIN = itr.next(); + PROCONFIG = itr.next(); + PROARGDEFAULTS = itr.next(); + PROSQLBODY = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } + + /* mutable non-API fields that will only be used on the PG thread */ + + /* + * This is the idea behind the API memo() method. + * + * A Why memo can be retrieved with the memo() method. The method does not + * synchronize. It is documented to return a valid result only in certain + * circumstances, which an individual Why subinterface should detail. A Why + * memo's type is constrained by the type parameter M. + * + * As the only foreseeable How memo for now is PLJavaMemo, that field is + * of fixed type for now. PLJavaBased documents that it is valid when + * the dispatcher passes it to a language-handler method along with + * a RegProcedure. The dispatcher, while allowing the handler to run + * in another thread, is always on "the PG thread" when manipulating m_how. 
+ */ + M m_why; + PLJavaMemo m_how; + + /* + * This flag is only set in ProceduralLanguageImpl.transformsFor when it is + * about to return a nonempty list of transforms and has registered this + * routine as depending on them. It is checked and cleared in invalidate + * above. If it is set, invalidate can safely use transforms() to retrieve + * the list, which is in the cache slot until actual invalidation of the + * SwitchPoint. + */ + boolean m_dependsOnTransforms = false; + + /* + * Computation methods for ProceduralLanguage.PLJavaBased API methods + * that happen to be implemented here for now. + */ + + static final EnumSet s_parameterModes = + EnumSet.of(ArgMode.IN, ArgMode.INOUT, ArgMode.VARIADIC); + + static final EnumSet s_resultModes = + EnumSet.of(ArgMode.INOUT, ArgMode.OUT, ArgMode.TABLE); + + static final BitSet s_noBits = new BitSet(0); + + private static TupleDescriptor inputsTemplate(RegProcedureImpl o) + throws SQLException + { + List names = o.argNames(); + List types = o.allArgTypes(); + + if ( null == types ) + { + types = o.argTypes(); + return synthesizeDescriptor(types, names, null); + } + + List modes = o.argModes(); + BitSet select = new BitSet(modes.size()); + IntStream.range(0, modes.size()) + .filter(i -> s_parameterModes.contains(modes.get(i))) + .forEach(select::set); + + return synthesizeDescriptor(types, names, select); + } + + private static BitSet unresolvedInputs(RegProcedureImpl o) + throws SQLException + { + TupleDescriptor td = o.inputsTemplate(); + BitSet unr = new BitSet(0); + IntStream.range(0, td.size()) + .filter(i -> td.get(i).type().needsResolution()) + .forEach(unr::set); + return unr; + } + + private static TupleDescriptor outputsTemplate(RegProcedureImpl o) + throws SQLException + { + RegTypeImpl returnType = (RegTypeImpl)o.returnType(); + + if ( RegType.VOID == returnType ) + return null; + + if ( RegType.RECORD != returnType ) + return returnType.notionalDescriptor(); + + /* + * For plain unmodified RECORD, there's 
more work to do. If there are + * declared outputs, gin up a descriptor from those. If there aren't, + * this can only be a function that relies on every call site supplying + * a column definition list; return null. + */ + List modes = o.argModes(); + if ( null == modes ) + return null; // Nothing helpful here. Must rely on call site. + + BitSet select = new BitSet(modes.size()); + IntStream.range(0, modes.size()) + .filter(i -> s_resultModes.contains(modes.get(i))) + .forEach(select::set); + + if ( select.isEmpty() ) + return null; // No INOUT/OUT/TABLE cols; still need call site. + + /* + * Build a descriptor from the INOUT/OUT/TABLE types and names. + */ + + List types = o.allArgTypes(); + List names = o.argNames(); + + return synthesizeDescriptor(types, names, select); + } + + private static BitSet unresolvedOutputs(RegProcedureImpl o) + throws SQLException + { + TupleDescriptor td = o.outputsTemplate(); + if ( null == td ) + return RegType.VOID == o.returnType() ? s_noBits : null; + BitSet unr = new BitSet(0); + IntStream.range(0, td.size()) + .filter(i -> td.get(i).type().needsResolution()) + .forEach(unr::set); + return unr; + } + + private static Checked.Supplier,SQLException> transforms( + RegProcedureImpl o) + throws SQLException + { + List types = o.transformTypes(); + if ( null == types || types.isEmpty() ) + return () -> null; + + ProceduralLanguageImpl pl = (ProceduralLanguageImpl)o.language(); + return pl.transformsFor(types, o); + } + + /* computation methods for API */ + + private static ProceduralLanguage language(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PROLANG, PLANG_INSTANCE); + } + + private static float cost(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PROCOST, FLOAT4_INSTANCE); + } + + private static float rows(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PROROWS, 
FLOAT4_INSTANCE); + } + + private static RegType variadicType(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PROVARIADIC, REGTYPE_INSTANCE); + } + + private static RegProcedure support(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + s.get(Att.PROSUPPORT, REGPROCEDURE_INSTANCE); + return p; + } + + private static Kind kind(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + byte b = s.get(Att.PROKIND, INT1_INSTANCE); + switch ( b ) + { + case (byte)'f': + return Kind.FUNCTION; + case (byte)'p': + return Kind.PROCEDURE; + case (byte)'a': + return Kind.AGGREGATE; + case (byte)'w': + return Kind.WINDOW; + default: + throw new UnsupportedOperationException(String.format( + "Unrecognized procedure/function kind value %#x", b)); + } + } + + private static Security security(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + if ( s.get(Att.PROSECDEF, BOOLEAN_INSTANCE) ) + return Security.DEFINER; + return Security.INVOKER; + } + + private static boolean leakproof(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PROLEAKPROOF, BOOLEAN_INSTANCE); + } + + private static OnNullInput onNullInput(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + if ( s.get(Att.PROISSTRICT, BOOLEAN_INSTANCE) ) + return OnNullInput.RETURNS_NULL; + return OnNullInput.CALLED; + } + + private static boolean returnsSet(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PRORETSET, BOOLEAN_INSTANCE); + } + + private static Effects effects(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + byte b = s.get(Att.PROVOLATILE, INT1_INSTANCE); + switch ( b ) + { + case (byte)'i': + return Effects.IMMUTABLE; 
+ case (byte)'s': + return Effects.STABLE; + case (byte)'v': + return Effects.VOLATILE; + default: + throw new UnsupportedOperationException(String.format( + "Unrecognized procedure/function volatility value %#x", b)); + } + } + + private static Parallel parallel(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + byte b = s.get(Att.PROPARALLEL, INT1_INSTANCE); + switch ( b ) + { + case (byte)'s': + return Parallel.SAFE; + case (byte)'r': + return Parallel.RESTRICTED; + case (byte)'u': + return Parallel.UNSAFE; + default: + throw new UnsupportedOperationException(String.format( + "Unrecognized procedure/function parallel safety value %#x",b)); + } + } + + private static RegType returnType(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PRORETTYPE, REGTYPE_INSTANCE); + } + + private static List argTypes(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.PROARGTYPES, + ArrayAdapters.REGTYPE_LIST_INSTANCE); + } + + private static List allArgTypes(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.PROALLARGTYPES, + ArrayAdapters.REGTYPE_LIST_INSTANCE); + } + + private static List argModes(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.PROARGMODES, + ArrayAdapters.ARGMODE_LIST_INSTANCE); + } + + private static List argNames(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.PROARGNAMES, + ArrayAdapters.TEXT_NAME_LIST_INSTANCE); + } + + private static List transformTypes(RegProcedureImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.PROTRFTYPES, + ArrayAdapters.REGTYPE_LIST_INSTANCE); + } + + private static String src(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PROSRC, 
TextAdapter.INSTANCE); + } + + private static String bin(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.PROBIN, TextAdapter.INSTANCE); + } + + private static List config(RegProcedureImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return + s.get(Att.PROCONFIG, FLAT_STRING_LIST_INSTANCE); + } + + /* + * API-like methods not actually exposed as RegProcedure API. + * There are exposed on the RegProcedure.Memo.How subinterface + * ProceduralLanguage.PLJavaBased. These implementations could + * conceivably be moved to the implementation of that, so that + * not all RegProcedure instances would haul around five extra slots. + */ + public TupleDescriptor inputsTemplate() + { + try + { + MethodHandle h = m_slots[SLOT_INPUTSTEMPLATE]; + return (TupleDescriptor)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + public BitSet unresolvedInputs() + { + try + { + MethodHandle h = m_slots[SLOT_UNRESOLVEDINPUTS]; + BitSet unr = (BitSet)h.invokeExact(this, h); + return (BitSet)unr.clone(); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + public TupleDescriptor outputsTemplate() + { + try + { + MethodHandle h = m_slots[SLOT_OUTPUTSTEMPLATE]; + return (TupleDescriptor)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + public BitSet unresolvedOutputs() + { + try + { + MethodHandle h = m_slots[SLOT_UNRESOLVEDOUTPUTS]; + BitSet unr = (BitSet)h.invokeExact(this, h); + return null == unr ? 
null : (BitSet)unr.clone(); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + public List transforms() + { + try + { + MethodHandle h = m_slots[SLOT_TRANSFORMS]; + Checked.Supplier,SQLException> s = + (Checked.Supplier,SQLException>) + h.invokeExact(this, h); + return s.get(); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /* API methods */ + + @Override + public ProceduralLanguage language() + { + try + { + MethodHandle h = m_slots[SLOT_LANGUAGE]; + return (ProceduralLanguage)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public float cost() + { + try + { + MethodHandle h = m_slots[SLOT_COST]; + return (float)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public float rows() + { + try + { + MethodHandle h = m_slots[SLOT_ROWS]; + return (float)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType variadicType() + { + try + { + MethodHandle h = m_slots[SLOT_VARIADICTYPE]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure support() + { + try + { + MethodHandle h = m_slots[SLOT_SUPPORT]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Kind kind() + { + try + { + MethodHandle h = m_slots[SLOT_KIND]; + return (Kind)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Security security() + { + try + { + MethodHandle h = m_slots[SLOT_SECURITY]; + return (Security)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean leakproof() + { + try + { + MethodHandle h = m_slots[SLOT_LEAKPROOF]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + 
} + } + + @Override + public OnNullInput onNullInput() + { + try + { + MethodHandle h = m_slots[SLOT_ONNULLINPUT]; + return (OnNullInput)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean returnsSet() + { + try + { + MethodHandle h = m_slots[SLOT_RETURNSSET]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Effects effects() + { + try + { + MethodHandle h = m_slots[SLOT_EFFECTS]; + return (Effects)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public Parallel parallel() + { + try + { + MethodHandle h = m_slots[SLOT_PARALLEL]; + return (Parallel)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType returnType() + { + try + { + MethodHandle h = m_slots[SLOT_RETURNTYPE]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public List argTypes() + { + try + { + MethodHandle h = m_slots[SLOT_ARGTYPES]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public List allArgTypes() + { + try + { + MethodHandle h = m_slots[SLOT_ALLARGTYPES]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public List argModes() + { + try + { + MethodHandle h = m_slots[SLOT_ARGMODES]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public List argNames() + { + try + { + MethodHandle h = m_slots[SLOT_ARGNAMES]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public SQLXML argDefaults() + { + /* + * Because of the JDBC rules that an SQLXML instance lasts no longer + * than one transaction and can only 
be read once, it is not a good + * candidate for caching. We will just fetch a new one from the cached + * tuple as needed. + */ + TupleTableSlot s = cacheTuple(); + return s.get(Att.PROARGDEFAULTS, SYNTHETIC_INSTANCE); + } + + @Override + public List transformTypes() + { + try + { + MethodHandle h = m_slots[SLOT_TRANSFORMTYPES]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public String src() + { + try + { + MethodHandle h = m_slots[SLOT_SRC]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public String bin() + { + try + { + MethodHandle h = m_slots[SLOT_BIN]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public SQLXML sqlBody() + { + /* + * Because of the JDBC rules that an SQLXML instance lasts no longer + * than one transaction and can only be read once, it is not a good + * candidate for caching. We will just fetch a new one from the cached + * tuple as needed. + */ + if ( null == Att.PROSQLBODY ) // missing in this PG version + return null; + + TupleTableSlot s = cacheTuple(); + return s.get(Att.PROSQLBODY, SYNTHETIC_INSTANCE); + } + + @Override + public List config() + { + try + { + MethodHandle h = m_slots[SLOT_CONFIG]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public M memo() + { + /* + * See the m_why declaration comments on this lack of synchronization. + */ + return m_why; + } + + /** + * Abstract superclass of both {@link Why Why} and {@link How How} memo + * implementations. + */ + public static abstract class AbstractMemo + { + protected AbstractMemo() + { + assert threadMayEnterPG() : "AbstractMemo thread"; + } + + abstract void invalidate(List sps, List postOps); + + /** + * Abstract base class for a {@link Why Why} memo implementation. 
+ */ + public static abstract class Why> + extends AbstractMemo implements Memo.Why + { + /** + * The {@code RegProcedure} instance carrying this memo. + */ + protected final RegProcedureImpl m_carrier; + + protected Why(RegProcedure carrier) + { + @SuppressWarnings("unchecked") + RegProcedureImpl narrowed = (RegProcedureImpl)carrier; + if ( null != narrowed.m_why ) + throw new AssertionError("carrier already has why memo"); + m_carrier = narrowed; + } + + public RegProcedureImpl apply() + { + assert threadMayEnterPG() : "AbstractMemo.Why thread"; + assert null == m_carrier.m_why : "carrier memo became nonnull"; + + @SuppressWarnings("unchecked") + M self = (M)this; + + m_carrier.m_why = self; + return m_carrier; + } + + void invalidate(List sps, List postOps) + { + m_carrier.m_why = null; + } + } + + /** + * Abstract base class for a {@link How How} memo implementation. + */ + public static abstract class How> + extends AbstractMemo implements Memo.How + { + /** + * The {@code RegProcedure} instance carrying this memo. 
+ */ + protected final RegProcedureImpl m_carrier; + + protected How(RegProcedure carrier) + { + RegProcedureImpl narrowed = (RegProcedureImpl)carrier; + if ( null != narrowed.m_how ) + throw new AssertionError("carrier already has how memo"); + m_carrier = narrowed; + } + + public RegProcedureImpl apply() + { + assert threadMayEnterPG() : "AbstractMemo.how thread"; + assert null == m_carrier.m_how : "carrier memo became nonnull"; + + // generalize later if there is ever any other possibility + PLJavaMemo self = (PLJavaMemo)this; + + m_carrier.m_how = self; + return m_carrier; + } + + void invalidate(List sps, List postOps) + { + m_carrier.m_how = null; + } + } + } + + /** + * Abstract superclass of a {@code Why} memo used on routines that play + * specific support roles for other catalog objects (such as a + * {@code Handler} or {@code Validator} for a {@code ProceduralLanguage} + * or a {@code FromSQL} or {@code ToSQL} for a {@code Transform>}, where + * dependent objects should be invalidated if the support routine is. + *

    + * Because a support routine can be depended on by more than one object + * (multiple languages, say, can share the same handler or validator + * routines), the memo carries a {@code Set} of dependent objects, not + * just a single reference. The {@code Set} implementation is chosen on + * an expectation of rare mutations and relatively small sets. + *

    + * A concrete subclass should supply an appropriately-typed static + * {@code addDependent} method that delegates to the protected + * {@link #add add} method here. The static {@code removeDependent} + * method of this class can be invoked directly (typically qualified + * by the concrete subclass name, for consistency with the + * {@code addDependent} method). + */ + static abstract class SupportMemo< + M extends Memo.Why, + A extends CatalogObjectImpl.Addressed + > + extends AbstractMemo.Why + { + private final Set m_dependents; + + protected SupportMemo( + RegProcedure carrier, A dep) + { + super(carrier); + m_dependents = new CopyOnWriteArraySet<>(Set.of(dep)); + } + + /** + * Has the effect of {@code super.invalidate} (simply nulling + * the carrier {@code RegProcedure}'s reference to this memo), + * and additionally calls + * {@link CatalogObjectImpl.Addressed#invalidate invalidate} + * on each recorded dependent A object. + */ + @Override + void invalidate(List sps, List postOps) + { + super.invalidate(sps, postOps); + m_dependents.forEach(a -> a.invalidate(sps, postOps)); + } + + /** + * Removes dep as a recorded dependency on + * proc, with no effect if proc isn't carrying + * a memo that extends this class or if its dependency set does not + * contain dep. + */ + static < + M extends Memo.Why, + A extends CatalogObjectImpl.Addressed + > + void removeDependent(RegProcedure proc, A dep) + { + M memo = proc.memo(); + if ( memo instanceof SupportMemo ) + ((SupportMemo)memo).m_dependents.remove(dep); + } + + /** + * Adds dep as a recorded dependency on proc, + * using an existing memo corresponding to type T + * if present, or getting a new one from supplier and + * applying it. + *

    + * The supplier will typically be a lambda that passes + * proc and dep to the constructor of + * the concrete subclass of this class. + *

    + * No action will be taken if proc is the invalid + * instance. It is not expected that proc will already + * be carrying a memo of some other type; an exception will result + * if it is. + */ + protected static < + O extends Memo.Why, + M extends Memo.Why, + T extends SupportMemo, + A extends CatalogObjectImpl.Addressed + > + void add( + RegProcedure proc, A dep, + Class witness, Supplier supplier) + { + if ( ! proc.isValid() ) + return; + O memo = proc.memo(); + if ( witness.isInstance(memo) ) + { + @SuppressWarnings("unchecked") + SupportMemo sm = (SupportMemo)memo; + sm.m_dependents.add(dep); + } + else + supplier.get().apply(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegRoleImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegRoleImpl.java new file mode 100644 index 000000000..9a28a9e3c --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegRoleImpl.java @@ -0,0 +1,384 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.nio.file.attribute.GroupPrincipal; +import java.nio.file.attribute.UserPrincipal; + +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.List; + +import org.postgresql.pljava.RolePrincipal; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.AUTHOID; // syscache +import static org.postgresql.pljava.pg.ModelConstants.AUTHMEMMEMROLE; +import static org.postgresql.pljava.pg.ModelConstants.AUTHMEMROLEMEM; + +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.BOOLEAN_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.INT4_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * Implementation of the {@link RegRole RegRole} interface. + *

    + * That this class can in fact be cast to {@link RegRole.Grantee Grantee} is an + * unadvertised implementation detail. + */ +class RegRoleImpl extends Addressed +implements + Shared, Named, + AccessControlled, RegRole.Grantee +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return AUTHOID; + } + + /* Implementation of Named, AccessControlled */ + + private static Simple name(RegRoleImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.ROLNAME, SIMPLE_INSTANCE); + } + + private static List grants(RegRoleImpl o) + { + throw notyet("CatCList support needed"); + } + + /* Implementation of RegRole */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. + */ + RegRoleImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static final int SLOT_MEMBEROF; + static final int SLOT_SUPERUSER; + static final int SLOT_INHERIT; + static final int SLOT_CREATEROLE; + static final int SLOT_CREATEDB; + static final int SLOT_CANLOGIN; + static final int SLOT_REPLICATION; + static final int SLOT_BYPASSRLS; + static final int SLOT_CONNECTIONLIMIT; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(RegRoleImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(RegRoleImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent( "grants", SLOT_ACL) + + .withReceiverType(null) + .withDependent( "memberOf", SLOT_MEMBEROF = i++) + 
.withDependent( "superuser", SLOT_SUPERUSER = i++) + .withDependent( "inherit", SLOT_INHERIT = i++) + .withDependent( "createRole", SLOT_CREATEROLE = i++) + .withDependent( "createDB", SLOT_CREATEDB = i++) + .withDependent( "canLogIn", SLOT_CANLOGIN = i++) + .withDependent( "replication", SLOT_REPLICATION = i++) + .withDependent( "bypassRLS", SLOT_BYPASSRLS = i++) + .withDependent("connectionLimit", SLOT_CONNECTIONLIMIT = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. + */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute ROLNAME; + static final Attribute ROLSUPER; + static final Attribute ROLINHERIT; + static final Attribute ROLCREATEROLE; + static final Attribute ROLCREATEDB; + static final Attribute ROLCANLOGIN; + static final Attribute ROLREPLICATION; + static final Attribute ROLBYPASSRLS; + static final Attribute ROLCONNLIMIT; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "rolname", + "rolsuper", + "rolinherit", + "rolcreaterole", + "rolcreatedb", + "rolcanlogin", + "rolreplication", + "rolbypassrls", + "rolconnlimit" + ).iterator(); + + ROLNAME = itr.next(); + ROLSUPER = itr.next(); + ROLINHERIT = itr.next(); + ROLCREATEROLE = itr.next(); + ROLCREATEDB = itr.next(); + ROLCANLOGIN = itr.next(); + ROLREPLICATION = itr.next(); + ROLBYPASSRLS = itr.next(); + ROLCONNLIMIT = itr.next(); + + assert ! 
itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods */ + + private static List memberOf(RegRoleImpl o) + { + throw notyet("CatCList support needed"); + } + + private static boolean superuser(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLSUPER, BOOLEAN_INSTANCE); + } + + private static boolean inherit(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLINHERIT, BOOLEAN_INSTANCE); + } + + private static boolean createRole(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLCREATEROLE, BOOLEAN_INSTANCE); + } + + private static boolean createDB(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLCREATEDB, BOOLEAN_INSTANCE); + } + + private static boolean canLogIn(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLCANLOGIN, BOOLEAN_INSTANCE); + } + + private static boolean replication(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLREPLICATION, BOOLEAN_INSTANCE); + } + + private static boolean bypassRLS(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLBYPASSRLS, BOOLEAN_INSTANCE); + } + + private static int connectionLimit(RegRoleImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.ROLCONNLIMIT, INT4_INSTANCE); + } + + /* API methods */ + + @Override + public List memberOf() + { + try + { + MethodHandle h = m_slots[SLOT_MEMBEROF]; + return (List)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean superuser() + { + try + { + MethodHandle h = m_slots[SLOT_SUPERUSER]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean inherit() 
+ { + try + { + MethodHandle h = m_slots[SLOT_INHERIT]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean createRole() + { + try + { + MethodHandle h = m_slots[SLOT_CREATEROLE]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean createDB() + { + try + { + MethodHandle h = m_slots[SLOT_CREATEDB]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean canLogIn() + { + try + { + MethodHandle h = m_slots[SLOT_CANLOGIN]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean replication() + { + try + { + MethodHandle h = m_slots[SLOT_REPLICATION]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public boolean bypassRLS() + { + try + { + MethodHandle h = m_slots[SLOT_BYPASSRLS]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public int connectionLimit() + { + try + { + MethodHandle h = m_slots[SLOT_CONNECTIONLIMIT]; + return (int)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /* Implementation of RegRole.Grantee */ + + /* + * As it turns out, PostgreSQL doesn't use a notion like Identifier.Pseudo + * for the name of the public grantee. It uses the ordinary, folding name + * "public" and reserves it, forbidding that any actual role have any name + * that matches it according to the usual folding rules. So, construct that + * name here. + */ + private static final Simple s_public_name = Simple.fromCatalog("public"); + + @Override + public Simple nameAsGrantee() + { + return isPublic() ? 
s_public_name : name(); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/RegTypeImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/RegTypeImpl.java new file mode 100644 index 000000000..c088c240c --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/RegTypeImpl.java @@ -0,0 +1,1436 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; + +import java.sql.SQLType; +import java.sql.SQLException; +import java.sql.SQLXML; + +import java.util.Iterator; +import java.util.List; + +import java.util.function.Function; +import java.util.function.Supplier; + +import org.postgresql.pljava.TargetList.Projection; + +import static org.postgresql.pljava.internal.SwitchPointCache.doNotCache; +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.CatalogObjectImpl.Factory.TYPEOID_CB; + +import static org.postgresql.pljava.pg.ModelConstants.TYPEOID; // syscache +import static org.postgresql.pljava.pg.ModelConstants.alignmentFromCatalog; +import static org.postgresql.pljava.pg.ModelConstants.storageFromCatalog; +import static org.postgresql.pljava.pg.TupleDescImpl.synthesizeDescriptor; + +import org.postgresql.pljava.pg.adt.GrantAdapter; +import org.postgresql.pljava.pg.adt.NameAdapter; +import 
org.postgresql.pljava.pg.adt.OidAdapter; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGCLASS_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGCOLLATION_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGNAMESPACE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGPROCEDURE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGTYPE_INSTANCE; +import org.postgresql.pljava.pg.adt.TextAdapter; +import static org.postgresql.pljava.pg.adt.XMLAdapter.SYNTHETIC_INSTANCE; +import static org.postgresql.pljava.pg.adt.Primitives.*; + +import org.postgresql.pljava.annotation.BaseUDT.Alignment; +import org.postgresql.pljava.annotation.BaseUDT.Storage; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Qualified; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +/* + * Can get lots of information, including TupleDesc, domain constraints, etc., + * from the typcache. A typcache entry is immortal but bits of it can change. + * So it may be safe to keep a reference to the entry forever, but detect when + * bits have changed. See in particular tupDesc_identifier. + * + * Many of the attributes of pg_type are available in the typcache. But + * lookup_type_cache() does not have a _noerror version. If there is any doubt + * about the existence of a type to be looked up, one must either do a syscache + * lookup first anyway, or have a plan to catch an undefined_object error. + * Same if you happen to look up a type still in the "only a shell" stage. + * At that rate, may as well rely on the syscache for all the pg_type info. + */ + +/** + * Implementation of the {@link RegType RegType} interface. 
+ */ +abstract class RegTypeImpl extends Addressed +implements + Nonshared, Namespaced, Owned, + AccessControlled, RegType +{ + /** + * Per-instance switch point, to be invalidated selectively + * by a syscache callback. + *

    + * Only {@link NoModifier NoModifier} carries one; derived instances of + * {@link Modified Modified} or {@link Blessed Blessed} return that one. + */ + abstract SwitchPoint cacheSwitchPoint(); + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return TYPEOID; + } + + /* Implementation of Named, Namespaced, Owned, AccessControlled */ + + private static Simple name(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return + t.get(Att.TYPNAME, NameAdapter.SIMPLE_INSTANCE); + } + + private static RegNamespace namespace(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPNAMESPACE, REGNAMESPACE_INSTANCE); + } + + private static RegRole owner(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPOWNER, REGROLE_INSTANCE); + } + + private static List grants(RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPACL, GrantAdapter.LIST_INSTANCE); + } + + /* Implementation of RegType */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. + */ + RegTypeImpl(MethodHandle[] slots) + { + super(slots); + } + + /** + * Holder for the {@code RegClass} corresponding to {@code relation()}, + * only non-null during a call of {@code dualHandshake}. + */ + private RegClass m_dual = null; + + /** + * A lazily-populated synthetic tuple descriptor with a single element + * of this type. + */ + private TupleDescriptor m_singleton; + + /** + * Called by the corresponding {@code RegClass} instance if it has just + * looked us up. + *

    + * Because the {@code SwitchPointCache} recomputation methods always execute + * on the PG thread, plain access to an instance field does the trick here. + */ + void dualHandshake(RegClass dual) + { + try + { + m_dual = dual; + dual = relation(); + assert dual == m_dual : "RegClass/RegType handshake outcome"; + } + finally + { + m_dual = null; + } + } + + static final Function s_initializer; + + static final int SLOT_TUPLEDESCRIPTOR; + static final int SLOT_NOTIONALDESC; // defined even for non-row type + static final int SLOT_LENGTH; + static final int SLOT_BYVALUE; + static final int SLOT_TYPE; + static final int SLOT_CATEGORY; + static final int SLOT_PREFERRED; + static final int SLOT_DEFINED; + static final int SLOT_DELIMITER; + static final int SLOT_RELATION; + static final int SLOT_ELEMENT; + static final int SLOT_ARRAY; + static final int SLOT_INPUT; + static final int SLOT_OUTPUT; + static final int SLOT_RECEIVE; + static final int SLOT_SEND; + static final int SLOT_MODIFIERINPUT; + static final int SLOT_MODIFIEROUTPUT; + static final int SLOT_ANALYZE; + static final int SLOT_SUBSCRIPT; + static final int SLOT_ALIGNMENT; + static final int SLOT_STORAGE; + static final int SLOT_NOTNULL; + static final int SLOT_BASETYPE; + static final int SLOT_DIMENSIONS; + static final int SLOT_COLLATION; + static final int SLOT_DEFAULTTEXT; + + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(RegTypeImpl.class) + .withLookup(lookup().in(RegTypeImpl.class)) + .withSwitchPoint(RegTypeImpl::cacheSwitchPoint) + .withSlots(o -> o.m_slots) + + .withCandidates( + CatalogObjectImpl.Addressed.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Addressed.class) + .withDependent("cacheTuple", SLOT_TUPLE) + + .withCandidates(RegTypeImpl.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent("name", SLOT_NAME) + 
.withReceiverType(CatalogObjectImpl.Namespaced.class) + .withReturnType(null) + .withDependent("namespace", SLOT_NAMESPACE) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent("owner", SLOT_OWNER) + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent("grants", SLOT_ACL) + + .withReceiverType(null) + .withSwitchPoint(o -> + { + RegClassImpl c = (RegClassImpl)o.relation(); + if ( c.isValid() ) + return c.cacheSwitchPoint(); + return o.cacheSwitchPoint(); + }) + .withDependent( + "tupleDescriptorCataloged", SLOT_TUPLEDESCRIPTOR = i++) + .withDependent("notionalDescriptor", SLOT_NOTIONALDESC = i++) + + .withSwitchPoint(RegTypeImpl::cacheSwitchPoint) + .withDependent( "length", SLOT_LENGTH = i++) + .withDependent( "byValue", SLOT_BYVALUE = i++) + .withDependent( "type", SLOT_TYPE = i++) + .withDependent( "category", SLOT_CATEGORY = i++) + .withDependent( "preferred", SLOT_PREFERRED = i++) + .withDependent( "defined", SLOT_DEFINED = i++) + .withDependent( "delimiter", SLOT_DELIMITER = i++) + .withDependent( "relation", SLOT_RELATION = i++) + .withDependent( "element", SLOT_ELEMENT = i++) + .withDependent( "array", SLOT_ARRAY = i++) + .withDependent( "input", SLOT_INPUT = i++) + .withDependent( "output", SLOT_OUTPUT = i++) + .withDependent( "receive", SLOT_RECEIVE = i++) + .withDependent( "send", SLOT_SEND = i++) + .withDependent( "modifierInput", SLOT_MODIFIERINPUT = i++) + .withDependent( "modifierOutput", SLOT_MODIFIEROUTPUT = i++) + .withDependent( "analyze", SLOT_ANALYZE = i++) + .withDependent( "subscript", SLOT_SUBSCRIPT = i++) + .withDependent( "alignment", SLOT_ALIGNMENT = i++) + .withDependent( "storage", SLOT_STORAGE = i++) + .withDependent( "notNull", SLOT_NOTNULL = i++) + .withDependent( "baseType", SLOT_BASETYPE = i++) + .withDependent( "dimensions", SLOT_DIMENSIONS = i++) + .withDependent( "collation", SLOT_COLLATION = i++) + .withDependent( "defaultText", SLOT_DEFAULTTEXT = i++) + + .build(); + NSLOTS = i; + } + + 
static class Att + { + static final Projection TYPBASETYPE_TYPTYPMOD; + + static final Attribute TYPNAME; + static final Attribute TYPNAMESPACE; + static final Attribute TYPOWNER; + static final Attribute TYPACL; + static final Attribute TYPLEN; + static final Attribute TYPBYVAL; + static final Attribute TYPTYPE; + static final Attribute TYPCATEGORY; + static final Attribute TYPISPREFERRED; + static final Attribute TYPISDEFINED; + static final Attribute TYPDELIM; + static final Attribute TYPRELID; + static final Attribute TYPELEM; + static final Attribute TYPARRAY; + static final Attribute TYPINPUT; + static final Attribute TYPOUTPUT; + static final Attribute TYPRECEIVE; + static final Attribute TYPSEND; + static final Attribute TYPMODIN; + static final Attribute TYPMODOUT; + static final Attribute TYPANALYZE; + static final Attribute TYPALIGN; + static final Attribute TYPSTORAGE; + static final Attribute TYPNOTNULL; + static final Attribute TYPNDIMS; + static final Attribute TYPCOLLATION; + static final Attribute TYPDEFAULT; + static final Attribute TYPDEFAULTBIN; + static final Attribute TYPSUBSCRIPT; + + static + { + AttNames itr = attNames( + "typbasetype", // these two are wanted + "typtypmod", // together, first, below + "typname", + "typnamespace", + "typowner", + "typacl", + "typlen", + "typbyval", + "typtype", + "typcategory", + "typispreferred", + "typisdefined", + "typdelim", + "typrelid", + "typelem", + "typarray", + "typinput", + "typoutput", + "typreceive", + "typsend", + "typmodin", + "typmodout", + "typanalyze", + "typalign", + "typstorage", + "typnotnull", + "typndims", + "typcollation", + "typdefault", + "typdefaultbin" + ).alsoIf(PG_VERSION_NUM >= 140000, + "typsubscript" + ).project(CLASSID.tupleDescriptor()); + + TYPBASETYPE_TYPTYPMOD = itr.project(itr.next(), itr.next()); + + TYPNAME = itr.next(); + TYPNAMESPACE = itr.next(); + TYPOWNER = itr.next(); + TYPACL = itr.next(); + TYPLEN = itr.next(); + TYPBYVAL = itr.next(); + TYPTYPE = itr.next(); 
+ TYPCATEGORY = itr.next(); + TYPISPREFERRED = itr.next(); + TYPISDEFINED = itr.next(); + TYPDELIM = itr.next(); + TYPRELID = itr.next(); + TYPELEM = itr.next(); + TYPARRAY = itr.next(); + TYPINPUT = itr.next(); + TYPOUTPUT = itr.next(); + TYPRECEIVE = itr.next(); + TYPSEND = itr.next(); + TYPMODIN = itr.next(); + TYPMODOUT = itr.next(); + TYPANALYZE = itr.next(); + TYPALIGN = itr.next(); + TYPSTORAGE = itr.next(); + TYPNOTNULL = itr.next(); + TYPNDIMS = itr.next(); + TYPCOLLATION = itr.next(); + TYPDEFAULT = itr.next(); + TYPDEFAULTBIN = itr.next(); + TYPSUBSCRIPT = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods for non-API internal slots */ + + private static TupleDescriptor notionalDescriptor(RegTypeImpl o) + { + assert RECORD != o && VOID != o : "called on type " + o; + + for ( RegType t = o ; t.isValid() ; t = t.baseType() ) + { + TupleDescriptor td = t.tupleDescriptor(); + if ( null != td ) + return td; + } + + return new TupleDescImpl.OfType(o); + } + + /* computation methods for API */ + + /** + * Obtain the tuple descriptor for an ordinary cataloged composite type. + *

    + * Every such type has a corresponding {@link RegClass RegClass}, which has + * the {@code SwitchPoint} that will govern the descriptor's invalidation, + * and a one-element array in which the descriptor should be stored. This + * method returns the array. + */ + private static TupleDescriptor.Interned[] + tupleDescriptorCataloged(RegTypeImpl o) + { + RegClassImpl c = (RegClassImpl)o.relation(); + + /* + * If this is not a composite type, c won't be valid, and our API + * contract is to return null (which means, here, return {null}). + */ + if ( ! c.isValid() ) + return new TupleDescriptor.Interned[] { null }; + + TupleDescriptor.Interned[] r = c.m_tupDescHolder; + + /* + * If c is RegClass.CLASSID itself, it has the descriptor by now + * (bootstrapped at the latest during the above relation() call, + * if it wasn't there already). + */ + if ( RegClass.CLASSID == c ) + { + assert null != r && null != r[0] : + "RegClass TupleDescriptor bootstrap outcome"; + return r; + } + + assert null == r : "RegClass has tuple descriptor when RegType doesn't"; + + /* + * Otherwise, do the work here, and store the descriptor in r. + * Can pass -1 for the modifier; Blessed types do not use this method. + */ + + ByteBuffer b = _lookupRowtypeTupdesc(o.oid(), -1); + assert null != b : "cataloged composite type tupdesc lookup"; + b.order(nativeOrder()); + r = new TupleDescriptor.Interned[]{ new TupleDescImpl.Cataloged(b, c) }; + return c.m_tupDescHolder = r; + } + + private static TupleDescriptor.Interned[] tupleDescriptorBlessed(Blessed o) + { + TupleDescriptor.Interned[] r = new TupleDescriptor.Interned[1]; + ByteBuffer b = _lookupRowtypeTupdesc(o.oid(), o.modifier()); + + /* + * If there is no registered tuple descriptor for this typmod, return an + * empty value to the current caller, but do not cache it; a later call + * could find one has been registered. 
+ */ + if ( null == b ) + { + doNotCache(); + return r; + } + + b.order(nativeOrder()); + r[0] = new TupleDescImpl.Blessed(b, o); + return o.m_tupDescHolder = r; + } + + private static short length(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPLEN, INT2_INSTANCE); + } + + private static boolean byValue(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPBYVAL, BOOLEAN_INSTANCE); + } + + private static Type type(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return typeFromCatalog( + t.get(Att.TYPTYPE, INT1_INSTANCE)); + } + + private static char category(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return (char) + (0xff & t.get(Att.TYPCATEGORY, INT1_INSTANCE)); + } + + private static boolean preferred(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPISPREFERRED, BOOLEAN_INSTANCE); + } + + private static boolean defined(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPISDEFINED, BOOLEAN_INSTANCE); + } + + private static byte delimiter(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPDELIM, INT1_INSTANCE); + } + + private static RegClass relation(RegTypeImpl o) throws SQLException + { + /* + * If this is a handshake occurring when the corresponding RegClass + * has just looked *us* up, we are done. + */ + if ( null != o.m_dual ) + return o.m_dual; + + /* + * Otherwise, look up the corresponding RegClass, and do the same + * handshake in reverse. Either way, the connection is set up + * bidirectionally with one cache lookup starting from either. That + * can avoid extra work in operations (like TupleDescriptor caching) + * that may touch both objects, without complicating their code. 
+ */ + TupleTableSlot t = o.cacheTuple(); + RegClass c = t.get(Att.TYPRELID, REGCLASS_INSTANCE); + + if ( c.isValid() ) + ((RegClassImpl)c).dualHandshake(o); + + return c; + } + + private static RegType element(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPELEM, REGTYPE_INSTANCE); + } + + private static RegType array(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPARRAY, REGTYPE_INSTANCE); + } + + private static RegProcedure input(RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + t.get(Att.TYPINPUT, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure output(RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + t.get(Att.TYPOUTPUT, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure receive(RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + t.get(Att.TYPRECEIVE, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure send(RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + t.get(Att.TYPSEND, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure modifierInput(RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + t.get(Att.TYPMODIN, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure modifierOutput( + RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); 
+ @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + t.get(Att.TYPMODOUT, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure analyze(RegTypeImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure p = (RegProcedure) + t.get(Att.TYPANALYZE, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure subscript(RegTypeImpl o) + throws SQLException + { + RegProcedure p; + + if ( null == Att.TYPSUBSCRIPT ) // missing in this PG version + p = of(RegProcedure.CLASSID, InvalidOid); + else + { + TupleTableSlot t = o.cacheTuple(); + p = t.get(Att.TYPSUBSCRIPT, REGPROCEDURE_INSTANCE); + } + + @SuppressWarnings("unchecked") // XXX add memo magic here + RegProcedure narrowed = (RegProcedure)p; + + return narrowed; + } + + private static Alignment alignment(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return alignmentFromCatalog( + t.get(Att.TYPALIGN, INT1_INSTANCE)); + } + + private static Storage storage(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return storageFromCatalog( + t.get(Att.TYPSTORAGE, INT1_INSTANCE)); + } + + private static boolean notNull(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPNOTNULL, BOOLEAN_INSTANCE); + } + + private static RegType baseType(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return Att.TYPBASETYPE_TYPTYPMOD + .applyOver(t, c -> + c.apply(OidAdapter.INT4_INSTANCE, INT4_INSTANCE, + ( oid, mod ) -> + CatalogObjectImpl.Factory.formMaybeModifiedType(oid, mod))); + } + + private static int dimensions(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPNDIMS, INT4_INSTANCE); + } + + private static RegCollation collation(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); 
+ return t.get(Att.TYPCOLLATION, REGCOLLATION_INSTANCE); + } + + private static String defaultText(RegTypeImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.TYPDEFAULT, TextAdapter.INSTANCE); + } + + /* API-like methods only used internally for now */ + + /** + * So that {@code TupleTableSlot} may be used uniformly as the API for + * Java <-> PostgreSQL data type conversions, let every type except + * unmodified {@code RECORD} or {@code VOID} have a "notional" + * {@code TupleDescriptor}. + *

    + * For a cataloged or interned row type, or a domain over a cataloged row + * type, it is that type's {@link #tupleDescriptor tupleDescriptor} (or that + * of the transitive base type, in the case of a domain). Such a descriptor + * will be of type {@link TupleDescriptor.Interned Interned}. Otherwise, + * it is a {@link TupleDescriptor.Ephemeral} whose one, unnamed, attribute + * has this type. + *

    + * The caller is expected to have checked for {@code RECORD} or
+ * {@code VOID} and not to call this method on those types.
+ */
+ public TupleDescriptor notionalDescriptor()
+ {
+ try
+ {
+ MethodHandle h = m_slots[SLOT_NOTIONALDESC];
+ return (TupleDescriptor)h.invokeExact(this, h);
+ }
+ catch ( Throwable t )
+ {
+ throw unchecked(t);
+ }
+ }
+
+ /* API methods */
+
+ @Override
+ public TupleDescriptor.Interned tupleDescriptor()
+ {
+ try
+ {
+ MethodHandle h = m_slots[SLOT_TUPLEDESCRIPTOR];
+ return ((TupleDescriptor.Interned[])h.invokeExact(this, h))[0];
+ }
+ catch ( Throwable t )
+ {
+ throw unchecked(t);
+ }
+ }
+
+ @Override
+ public short length()
+ {
+ try
+ {
+ MethodHandle h = m_slots[SLOT_LENGTH];
+ return (short)h.invokeExact(this, h);
+ }
+ catch ( Throwable t )
+ {
+ throw unchecked(t);
+ }
+ // also available in the typcache, FWIW
+ }
+
+ @Override
+ public boolean byValue()
+ {
+ try
+ {
+ MethodHandle h = m_slots[SLOT_BYVALUE];
+ return (boolean)h.invokeExact(this, h);
+ }
+ catch ( Throwable t )
+ {
+ throw unchecked(t);
+ }
+ // also available in the typcache, FWIW
+ }
+
+ @Override
+ public Type type()
+ {
+ try
+ {
+ MethodHandle h = m_slots[SLOT_TYPE];
+ return (Type)h.invokeExact(this, h);
+ }
+ catch ( Throwable t )
+ {
+ throw unchecked(t);
+ }
+ // also available in the typcache, FWIW
+ }
+
+ @Override
+ public char category()
+ {
+ try
+ {
+ MethodHandle h = m_slots[SLOT_CATEGORY];
+ return (char)h.invokeExact(this, h);
+ }
+ catch ( Throwable t )
+ {
+ throw unchecked(t);
+ }
+ }
+
+ @Override
+ public boolean preferred()
+ {
+ try
+ {
+ MethodHandle h = m_slots[SLOT_PREFERRED];
+ return (boolean)h.invokeExact(this, h);
+ }
+ catch ( Throwable t )
+ {
+ throw unchecked(t);
+ }
+ }
+
+ @Override
+ public boolean defined()
+ {
+ try
+ {
+ MethodHandle h = m_slots[SLOT_DEFINED];
+ return (boolean)h.invokeExact(this, h);
+ }
+ catch ( Throwable t )
+ {
+ throw unchecked(t);
+ }
+ }
+
+ @Override
+ public byte delimiter()
+ {
+ 
try + { + MethodHandle h = m_slots[SLOT_DELIMITER]; + return (byte)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegClass relation() + { + try + { + MethodHandle h = m_slots[SLOT_RELATION]; + return (RegClass)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + // also available in the typcache, FWIW + } + + @Override + public RegType element() + { + try + { + MethodHandle h = m_slots[SLOT_ELEMENT]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + // also available in the typcache, FWIW + } + + @Override + public RegType array() + { + try + { + MethodHandle h = m_slots[SLOT_ARRAY]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure input() + { + try + { + MethodHandle h = m_slots[SLOT_INPUT]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure output() + { + try + { + MethodHandle h = m_slots[SLOT_OUTPUT]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure receive() + { + try + { + MethodHandle h = m_slots[SLOT_RECEIVE]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure send() + { + try + { + MethodHandle h = m_slots[SLOT_SEND]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure modifierInput() + { + try + { + MethodHandle h = m_slots[SLOT_MODIFIERINPUT]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure modifierOutput() + { + try + { + MethodHandle h = 
m_slots[SLOT_MODIFIEROUTPUT]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure analyze() + { + try + { + MethodHandle h = m_slots[SLOT_ANALYZE]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure subscript() + { + try + { + MethodHandle h = m_slots[SLOT_SUBSCRIPT]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + // also available in the typcache, FWIW + } + + @Override + public Alignment alignment() + { + try + { + MethodHandle h = m_slots[SLOT_ALIGNMENT]; + return (Alignment)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + // also available in the typcache, FWIW + } + + @Override + public Storage storage() + { + try + { + MethodHandle h = m_slots[SLOT_STORAGE]; + return (Storage)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + // also available in the typcache, FWIW + } + + @Override + public boolean notNull() + { + try + { + MethodHandle h = m_slots[SLOT_NOTNULL]; + return (boolean)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType baseType() + { + try + { + MethodHandle h = m_slots[SLOT_BASETYPE]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public int dimensions() + { + try + { + MethodHandle h = m_slots[SLOT_DIMENSIONS]; + return (int)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegCollation collation() + { + try + { + MethodHandle h = m_slots[SLOT_COLLATION]; + return (RegCollation)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + // also available in the typcache, FWIW + } + + @Override + public SQLXML 
defaultBin() + { + /* + * Because of the JDBC rules that an SQLXML instance lasts no longer + * than one transaction and can only be read once, it is not a good + * candidate for caching. We will just fetch a new one from the cached + * tuple as needed. + */ + TupleTableSlot s = cacheTuple(); + return s.get(Att.TYPDEFAULTBIN, SYNTHETIC_INSTANCE); + } + + @Override + public String defaultText() + { + try + { + MethodHandle h = m_slots[SLOT_DEFAULTTEXT]; + return (String)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /** + * Return the expected zero value for {@code subId}. + *

    + * For keying the {@code CacheMap}, we sneak type modifiers in there + * (PG types do not otherwise use {@code subId}), but that's an + * implementation detail that could be done a different way if upstream + * ever decided to have subIds for types, and having it show in the address + * triple of a modified type could be surprising to an old PostgreSQL hand. + */ + @Override + public int subId() + { + return 0; + } + + /** + * Return the type modifier. + *

    + * In this implementation, where we snuck it in as the third component + * of the cache key, sneak it back out. + */ + @Override + public int modifier() + { + int m = super.subId(); + if ( -1 == m ) + return 0; + return m; + } + + /** + * Return a synthetic tuple descriptor with a single element of this type. + */ + public TupleDescriptor singletonTupleDescriptor() + { + TupleDescriptor td = m_singleton; + if ( null != td ) + return td; + /* + * In case of a race, the synthetic tuple descriptors will be + * equivalent anyway. + */ + return m_singleton = new TupleDescImpl.OfType(this); + } + + /** + * Represents a type that has been mentioned without an accompanying type + * modifier (or with the 'unspecified' value -1 for its type modifier). + */ + static class NoModifier extends RegTypeImpl + { + /** + * Count of instances subject to invalidation. + *

    + * Only accessed in invalidate and SP.onFirstUse, both on the PG thread. + */ + private static int s_instances; + + private static class SP extends SwitchPoint + { + @Override + protected void onFirstUse() + { + if ( 1 == ++ s_instances ) + sysCacheInvalArmed(TYPEOID_CB, true); + } + } + + private final SwitchPoint[] m_sp; + + @Override + SwitchPoint cacheSwitchPoint() + { + return m_sp[0]; + } + + NoModifier() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + m_sp = new SwitchPoint[] { new SP() }; + } + + @Override + void invalidate(List sps, List postOps) + { + SwitchPoint sp = m_sp[0]; + if ( sp.unused() ) + return; + sps.add(sp); + m_sp[0] = new SP(); + if ( 0 == -- s_instances ) + sysCacheInvalArmed(TYPEOID_CB, false); + } + + @Override + public int modifier() + { + return -1; + } + + @Override + public RegType modifier(int typmod) + { + if ( -1 == typmod ) + return this; + return + CatalogObjectImpl.Factory.formMaybeModifiedType(oid(), typmod); + } + + @Override + public RegType withoutModifier() + { + return this; + } + } + + /** + * Subclass that additionally implements + * {@link RegType.Unresolved RegType.Unresolved}. + */ + static class Unresolved extends NoModifier implements RegType.Unresolved + { + } + + /** + * Represents a type that is not {@code RECORD} and has a type modifier that + * is not the unspecified value. + *

    + * When the {@code RECORD} type appears in PostgreSQL with a type modifier, + * that is a special case; see {@link Blessed Blessed}. + */ + static class Modified extends RegTypeImpl + { + private final NoModifier m_base; + + @Override + SwitchPoint cacheSwitchPoint() + { + return m_base.m_sp[0]; + } + + Modified(NoModifier base) + { + super(base.m_slots); + m_base = base; // must keep it live, not only share its slots + } + + @Override + public RegType modifier(int typmod) + { + if ( modifier() == typmod ) + return this; + return m_base.modifier(typmod); + } + + @Override + public RegType withoutModifier() + { + return m_base; + } + + /** + * Whether a just-mentioned modified type "exists" depends on whether + * its unmodified type exists and has a modifier input function. + *

    + * No attempt is made here to verify that the modifier value is one that + * the modifier input/output functions would produce or accept. + */ + @Override + public boolean exists() + { + return m_base.exists() && modifierInput().isValid(); + } + + @Override + public String toString() + { + String prefix = super.toString(); + return prefix + "(" + modifier() + ")"; + } + } + + /** + * Represents the "row type" of a {@link TupleDescriptor TupleDescriptor} + * that has been programmatically constructed and interned ("blessed"). + *

    + * Such a type is represented in PostgreSQL as the type {@code RECORD} + * with a type modifier assigned uniquely for the life of the backend. + */ + static class Blessed extends RegTypeImpl + { + /** + * Associated tuple descriptor, redundantly kept accessible here as well + * as opaquely bound into a {@code SwitchPointCache} method handle. + *

    + * A {@code Blessed} type has no associated {@code RegClass}, so
+ * a slot for the descriptor is provided here. No invalidation events
+ * are expected for a blessed type, but the one-element array form here
+ * matches that used in {@code RegClass} for cataloged descriptors, to
+ * avoid multiple cases in the code. Only accessed from
+ * {@code SwitchPointCache} computation methods and
+ * {@code TupleDescImpl} factory methods, all of which execute on the PG
+ * thread; no synchronization fuss needed.
+ *

    + * When null, no computation method has run, and the state is not known. + * Otherwise, the single element is the result to be returned by + * the {@code tupleDescriptor()} API method. + */ + TupleDescriptor.Interned[] m_tupDescHolder; + private final MethodHandle[] m_moreSlots; + private static final Function s_initializer; + private static final int SLOT_TDBLESSED; + private static final int NSLOTS; + + static + { + int i = 0; + s_initializer = + new Builder<>(Blessed.class) + .withLookup(lookup().in(RegTypeImpl.class)) + .withSwitchPoint(Blessed::cacheSwitchPoint) + .withSlots(o -> o.m_moreSlots) + .withCandidates(RegTypeImpl.class.getDeclaredMethods()) + .withDependent("tupleDescriptorBlessed", SLOT_TDBLESSED = i++) + .build(); + NSLOTS = i; + } + + @Override + SwitchPoint cacheSwitchPoint() + { + return ((NoModifier)RECORD).m_sp[0]; + } + + Blessed() + { + super(((RegTypeImpl)RECORD).m_slots); + // RECORD is static final, no other effort needed to keep it live + m_moreSlots = s_initializer.apply(new MethodHandle[NSLOTS]); + } + + /** + * The tuple descriptor registered in the type cache for this 'blessed' + * type, or null if none. + *

    + * A null value is not sticky; it would be possible to 'mention' a + * blessed type with a not-yet-used typmod, which could then later exist + * after a tuple descriptor has been interned. (Such usage would be odd, + * though; typically one will obtain a blessed instance from an existing + * tuple descriptor.) + */ + @Override + public TupleDescriptor.Interned tupleDescriptor() + { + try + { + MethodHandle h = m_moreSlots[SLOT_TDBLESSED]; + return ((TupleDescriptor.Interned[])h.invokeExact(this, h))[0]; + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegType modifier(int typmod) + { + throw new UnsupportedOperationException( + "may not alter the type modifier of an interned row type"); + } + + @Override + public RegType withoutModifier() + { + return RECORD; + } + + /** + * Whether a just-mentioned blessed type "exists" depends on whether + * there is a tuple descriptor registered for it in the type cache. + *

    + * A false value is not sticky; it would be possible to 'mention' a + * blessed type with a not-yet-used typmod, which could then later exist + * after a tuple descriptor has been interned. (Such usage would be odd, + * though; typically one will obtain a blessed instance from an existing + * tuple descriptor.) + */ + @Override + public boolean exists() + { + return null != tupleDescriptor(); + } + + @Override + public String toString() + { + String prefix = super.toString(); + return prefix + "[" + modifier() + "]"; + } + } + + private static Type typeFromCatalog(byte b) + { + switch ( b ) + { + case (byte)'b': return Type.BASE; + case (byte)'c': return Type.COMPOSITE; + case (byte)'d': return Type.DOMAIN; + case (byte)'e': return Type.ENUM; + case (byte)'m': return Type.MULTIRANGE; + case (byte)'p': return Type.PSEUDO; + case (byte)'r': return Type.RANGE; + } + throw unchecked(new SQLException( + "unrecognized Type type '" + (char)b + "' in catalog", "XX000")); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/ResourceOwnerImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/ResourceOwnerImpl.java new file mode 100644 index 000000000..ff041d5c7 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/ResourceOwnerImpl.java @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; + +import org.postgresql.pljava.internal.CacheMap; +import org.postgresql.pljava.internal.DualState; +import static org.postgresql.pljava.internal.DualState.m; +import org.postgresql.pljava.internal.LifespanImpl; + +import org.postgresql.pljava.model.ResourceOwner; + +import static org.postgresql.pljava.pg.DatumUtils.asReadOnlyNativeOrder; +import static org.postgresql.pljava.pg.DatumUtils.fetchPointer; +import static org.postgresql.pljava.pg.DatumUtils.storePointer; + +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_DATUM; + +/** + * A PostgreSQL {@code ResourceOwner}, one of the things that can serve as + * a PL/Java {@code Lifespan}. + *

    + * The designer of this PostgreSQL object believed strongly in encapsulation, + * so very strongly that there is not any C header exposing its structure, + * and any operations to be exposed here will have to be calls through JNI. + * While a {@code ResourceOwner} does have a name (which will appear in log + * messages involving it), there's not even an exposed API to retrieve that. + * So this object will be not much more than a stub, known by its address + * and capable of serving as a PL/Java lifespan. + */ +public class ResourceOwnerImpl extends LifespanImpl +implements ResourceOwner, LifespanImpl.Addressed +{ + static final ByteBuffer[] s_knownOwners; + + static final CacheMap s_map = + CacheMap.newThreadConfined(() -> ByteBuffer.allocate(SIZEOF_DATUM)); + + static + { + ByteBuffer[] bs = EarlyNatives._window(ByteBuffer.class); + /* + * The first one windows CurrentResourceOwner. Set the correct byte + * order but do not make it read-only; operations may be provided + * for setting it. + */ + bs[0] = bs[0].order(nativeOrder()); + /* + * The rest are made native-ordered and read-only. + */ + for ( int i = 1; i < bs.length; ++ i ) + bs[i] = asReadOnlyNativeOrder(bs[i]); + s_knownOwners = bs; + } + + static ResourceOwner known(int which) + { + ByteBuffer global = s_knownOwners[which]; + return doInPG(() -> + { + long rso = fetchPointer(global, 0); + if ( 0 == rso ) + return null; + + return fromAddress(rso); + }); + } + + public static ResourceOwner fromAddress(long address) + { + assert threadMayEnterPG() : m("ResourceOwner thread"); + + /* + * Cache strongly; see LifespanImpl javadoc. + */ + return s_map.stronglyCache( + b -> + { + if ( 4 == SIZEOF_DATUM ) + b.putInt((int)address); + else + b.putLong(address); + }, + b -> new ResourceOwnerImpl(b) + ); + } + + /** + * Specialized method intended, so far, only for + * {@code PgSavepoint}'s use. + *

    + * Only to be called on the PG thread. + */ + public static long getCurrentRaw() + { + assert threadMayEnterPG() : m("ResourceOwner thread"); + return fetchPointer(s_knownOwners[0], 0); + } + + /** + * Even more specialized method intended, so far, only for + * {@code PgSavepoint}'s use. + *

    + * Only to be called on the PG thread. + */ + public static void setCurrentRaw(long owner) + { + assert threadMayEnterPG() : m("ResourceOwner thread"); + storePointer(s_knownOwners[0], 0, owner); + } + + /* + * Called only from JNI. + */ + private static void callback(long nativePointer) + { + CacheMap.Entry e = s_map.find( + b -> + { + if ( 4 == SIZEOF_DATUM ) + b.putInt((int)nativePointer); + else + b.putLong(nativePointer); + } + ); + + if ( null == e ) + return; + + ResourceOwnerImpl r = e.get(); + if ( null == r ) + return; + + r.invalidate(); + e.remove(); + } + + /** + * The {@code ByteBuffer} keying this object. + *

    + * As described for {@code CatalogObjectImpl}, as we'd like to be able + * to retrieve the address, and that's what's in the ByteBuffer that is + * held as the key in the CacheMap anyway, just keep a reference to that + * here. We must treat it as read-only, even if it hasn't officially + * been made that way. + *

    + * The contents are needed only for non-routine operations like + * {@code toString}, where an extra {@code fetchPointer} doesn't + * break the bank. + */ + private final ByteBuffer m_key; + private boolean m_valid = true; + + private ResourceOwnerImpl(ByteBuffer key) + { + m_key = key; + } + + @Override // Addressed + public long address() + { + if ( m_valid ) + return fetchPointer(m_key, 0); + throw new IllegalStateException( + "address may not be taken of invalidated ResourceOwner"); + } + + @Override + public String toString() + { + return String.format("%s[%#x]", + super.toString(), fetchPointer(m_key, 0)); + } + + private void invalidate() + { + lifespanRelease(); + m_valid = false; + // nothing else to do here. + } + + private static class EarlyNatives + { + /** + * Returns an array of ByteBuffer, one covering each PostgreSQL + * known resource owner global, in the same order as the arbitrary + * indices defined in the API class CatalogObject.Factory, which are + * what will be passed to the known() method. + *

    + * Takes a {@code Class} argument, to save the native + * code a lookup. + */ + private static native ByteBuffer[] _window( + Class component); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/TablespaceImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/TablespaceImpl.java new file mode 100644 index 000000000..d3d2b8811 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/TablespaceImpl.java @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import java.util.function.Function; + +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.TABLESPACEOID; // syscache + +import org.postgresql.pljava.pg.adt.GrantAdapter; +import static org.postgresql.pljava.pg.adt.NameAdapter.SIMPLE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGROLE_INSTANCE; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +/** + * Implementation of the {@link Tablespace Tablespace} interface. 
+ */ +class TablespaceImpl extends Addressed +implements + Shared, Named, Owned, + AccessControlled, Tablespace +{ + private static final Function s_initializer; + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return TABLESPACEOID; + } + + /* Implementation of Named, Owned, AccessControlled */ + + private static Simple name(TablespaceImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.SPCNAME, SIMPLE_INSTANCE); + } + + private static RegRole owner(TablespaceImpl o) throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.SPCOWNER, REGROLE_INSTANCE); + } + + private static List grants(TablespaceImpl o) + throws SQLException + { + TupleTableSlot t = o.cacheTuple(); + return t.get(Att.SPCACL, GrantAdapter.LIST_INSTANCE); + } + + /* Implementation of Tablespace */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. 
+ */ + TablespaceImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + } + + static final int SLOT_OPTIONS; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(TablespaceImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> s_globalPoint[0]) + .withSlots(o -> o.m_slots) + .withCandidates(TablespaceImpl.class.getDeclaredMethods()) + + .withReceiverType(CatalogObjectImpl.Named.class) + .withReturnType(Unqualified.class) + .withDependent( "name", SLOT_NAME) + .withReturnType(null) + .withReceiverType(CatalogObjectImpl.Owned.class) + .withDependent( "owner", SLOT_OWNER) + .withReceiverType(CatalogObjectImpl.AccessControlled.class) + .withDependent( "grants", SLOT_ACL) + + .withReceiverType(null) + .withDependent("options", SLOT_OPTIONS = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. + */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute SPCNAME; + static final Attribute SPCOWNER; + static final Attribute SPCACL; + static final Attribute SPCOPTIONS; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "spcname", + "spcowner", + "spcacl", + "spcoptions" + ).iterator(); + + SPCNAME = itr.next(); + SPCOWNER = itr.next(); + SPCACL = itr.next(); + SPCOPTIONS = itr.next(); + + assert ! 
itr.hasNext() : "attribute initialization miscount"; + } + } + + /* computation methods */ + + private static Map options(TablespaceImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.SPCOPTIONS, ArrayAdapters.RELOPTIONS_INSTANCE); + } + + /* API methods */ + + @Override + public Map options() + { + try + { + MethodHandle h = m_slots[SLOT_OPTIONS]; + return (Map)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/TargetListImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/TargetListImpl.java new file mode 100644 index 000000000..092b573c6 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/TargetListImpl.java @@ -0,0 +1,1917 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import org.postgresql.pljava.Adapter.AdapterException; +import org.postgresql.pljava.Adapter.As; +import org.postgresql.pljava.Adapter.AsBoolean; +import org.postgresql.pljava.Adapter.AsByte; +import org.postgresql.pljava.Adapter.AsChar; +import org.postgresql.pljava.Adapter.AsDouble; +import org.postgresql.pljava.Adapter.AsFloat; +import org.postgresql.pljava.Adapter.AsInt; +import org.postgresql.pljava.Adapter.AsLong; +import org.postgresql.pljava.Adapter.AsShort; +import org.postgresql.pljava.TargetList; +import org.postgresql.pljava.TargetList.Cursor; +import org.postgresql.pljava.TargetList.Projection; + +import + org.postgresql.pljava.internal.AbstractNoSplitList.IteratorNonSpliterator; + +import org.postgresql.pljava.model.Attribute; +import 
org.postgresql.pljava.model.TupleDescriptor; +import org.postgresql.pljava.model.TupleTableSlot; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +import java.lang.ref.WeakReference; + +import java.sql.SQLException; + +import java.util.AbstractSequentialList; +import java.util.Arrays; +import static java.util.Arrays.copyOfRange; +import java.util.BitSet; +import java.util.Collection; +import java.util.IntSummaryStatistics; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.NoSuchElementException; +import static java.util.Objects.checkFromToIndex; +import static java.util.Objects.checkIndex; +import static java.util.Objects.requireNonNull; +import java.util.Spliterator; +import static java.util.Spliterator.IMMUTABLE; +import static java.util.Spliterator.NONNULL; +import static java.util.Spliterator.ORDERED; +import static java.util.Spliterator.SIZED; +import java.util.Spliterators; +import static java.util.Spliterators.spliteratorUnknownSize; + +import java.util.function.IntUnaryOperator; + +import static java.util.stream.Collectors.joining; +import java.util.stream.IntStream; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +/** + * Implementation of {@link TargetList TargetList}. + */ + /* + * This abstract base class in fact implements neither TargetList nor + * Projection. + * + * It always holds a TupleDescriptor and a BitSet. In the concrete subclasses + * that represent a subset of the attributes with no repetition or permutation, + * the BitSet is all there is. Subclasses that need to represent repetition + * (TargetList only) or permutation (TargetList or Projection) also include + * a mapping array. + * + * Bits in the BitSet always (even in multiply-derived projections) correspond + * to indices in the original TupleDescriptor, streamlining contains() tests. 
+ */ +abstract class TargetListImpl extends AbstractSequentialList +{ + protected static final Projection EMPTY = new P(null, new BitSet()); + + protected final TupleDescriptor m_tdesc; + protected final BitSet m_bitset; + + protected TargetListImpl(TupleDescriptor tdesc, BitSet bitset) + { + m_tdesc = tdesc; + m_bitset = bitset; // not cloned here; caller should ensure no aliasing + } + + @Override // Collection + public boolean contains(Object o) + { + if ( ! (o instanceof AttributeImpl) ) + return false; + AttributeImpl ai = (AttributeImpl)o; + return m_bitset.get(ai.subId() - 1) && m_tdesc.contains(ai); + } + + @Override // List + public int indexOf(Object o) // override in M where reordering is possible + { + if ( ! contains(o) ) + return -1; + int index = ((AttributeImpl)o).subId() - 1; + return (int)m_bitset.stream().takeWhile(i -> i < index).count(); + } + + @Override // List + public int lastIndexOf(Object o) + { + return indexOf(o); // override in MT where o could appear more than once + } + + @Override + public ListIterator listIterator() + { + return listIterator(0); + } + + @Override + public ListIterator listIterator(int index) + { + checkIndex(index, size() + 1); // ListIterator can point beyond end + int attno = m_bitset.stream().skip(index).findFirst().orElse(-1); + return new BLI(m_tdesc, m_bitset, attno, index); + } + + @Override // Collection + public int size() + { + return m_bitset.cardinality(); + } + + public R applyOver( + Iterable tuples, Cursor.Function f) + throws X, SQLException + { + return TargetListImpl.applyOver((TargetList)this, tuples, f); + } + + public R applyOver( + TupleTableSlot tuple, Cursor.Function f) + throws X, SQLException + { + return TargetListImpl.applyOver((TargetList)this, tuple, f); + } + + public Projection project(Simple... names) + { + return project(m_tdesc, (TargetList)this, m_bitset, names); + } + + public Projection project(Attribute... 
attrs) + { + return project(m_tdesc, (TargetList)this, m_bitset.length(), attrs); + } + + public Projection project(int... indices) + { + if ( null == indices ) + throw new NullPointerException("project() indices null"); + return project(Flavor.ZEROBASED, indices.length, i -> indices[i]); + } + + public Projection sqlProject(int... indices) + { + if ( null == indices ) + throw new NullPointerException("sqlProject() indices null"); + return project(Flavor.SQL, indices.length, i -> indices[i]); + } + + public Projection project(short... indices) + { + if ( null == indices ) + throw new NullPointerException("project() indices null"); + return project(Flavor.ZEROBASED, indices.length, i -> indices[i]); + } + + public Projection sqlProject(short... indices) + { + if ( null == indices ) + throw new NullPointerException("sqlProject() indices null"); + return project(Flavor.SQL, indices.length, i -> indices[i]); + } + + public Projection project(BitSet indices) + { + return project(Flavor.ZEROBASED, indices); + } + + public Projection sqlProject(BitSet indices) + { + return project(Flavor.SQL, indices); + } + + abstract Projection project(Flavor flavor, int n, IntUnaryOperator indices); + + abstract Projection project(Flavor flavor, BitSet indices); + + /** + * Whether, when building a {@link TargetList TargetList} + * or {@link Projection} from numeric indices, to treat the indices as + * {@link #ZEROBASED ZEROBASED} or {@link #SQL SQL} (one-based). 
+ */ + enum Flavor + { + ZEROBASED( + 0, "project", + "project() indices must be distinct, >= 0, and < %d: %s") + { + BitSet cloned(BitSet b) + { + return (BitSet)b.clone(); + } + }, + SQL( + 1, "sqlProject", + "sqlProject() indices must be distinct, > 0, and <= %d: %s") + { + BitSet cloned(BitSet b) + { + return b.get(1, b.length()); + } + }; + + int offset; + String method; + String checkFormat; + + Flavor(int offset, String method, String checkFormat) + { + this.offset = offset; + this.method = method; + this.checkFormat = checkFormat; + } + + abstract BitSet cloned(BitSet b); + + /* + * On success, returns the max of the supplied indices, minus offset. + * Returns -1 for the empty set. + */ + int check(int size, int inLen, IntUnaryOperator indices) + { + if ( 0 == inLen ) + return -1; + + IntSummaryStatistics s = + IntStream.range(0, inLen).map(indices).distinct() + .summaryStatistics(); + int max = s.getMax(); + + if ( s.getCount() < inLen || inLen > size + || s.getMin() < offset || size + offset <= max ) + throw new IllegalArgumentException( + String.format(checkFormat, size, + IntStream.range(0, inLen).map(indices) + .mapToObj(Integer::toString) + .collect(joining(",")))); + + return max - offset; + } + + int check(int size, BitSet indices) + { + if ( null == indices ) + throw new NullPointerException(method + "() indices null"); + + if ( indices.isEmpty() ) + return -1; + + int max = indices.length() - 1; + int min = indices.nextSetBit(0); + + if ( min < offset || size + offset <= max ) + throw new IllegalArgumentException( + String.format(checkFormat, size, indices)); + + return max - offset; + } + } + + static final class P extends TargetListImpl implements Projection + { + protected P(TupleDescriptor tdesc, BitSet bitset) + { + /* + * Nothing here prevents construction of an instance with bits for + * all of tdesc's columns. 
The idea is never to return such a thing + * (just return tdesc itself for that case), but one may be + * constructed as a temporary within the static methods here that + * TupleDescriptor uses. + */ + super(tdesc, bitset); + } + + @Override // List + public Projection subList(int fromIndex, int toIndex) + { + int n = size(); + if ( 0 == fromIndex && n == toIndex ) + return this; + checkFromToIndex(fromIndex, toIndex, n); + if ( fromIndex == toIndex ) + return EMPTY; + + BitSet newBits = new BitSet(m_bitset.length()); + + m_bitset.stream().skip(fromIndex).limit(toIndex - fromIndex) + .forEach(newBits::set); + + return new P(m_tdesc, newBits); + } + + @Override + Projection project(Flavor flavor, int inLen, IntUnaryOperator indices) + { + final int offset = flavor.offset; + + int n = size(); + int max = flavor.check(n, inLen, indices); + if ( -1 == max ) + return EMPTY; + + boolean increasing = increasing(inLen, indices); + + if ( increasing ) // no permutation involved, make a P instance + { + if ( inLen == n ) // n distinct increasing values 0..n-1 + return this; // can only be this exactly + + BitSet newBits = new BitSet(m_bitset.length()); + + for ( + int i = 0, // index in supplied indices + j = 0, // index 1st col in this proj + v = m_bitset.nextSetBit(0)// tupledesc index of 1st col + ; + v >= 0 // nextSetBit returns -1 when done + ; + ++ j, // next col in this projection + v = m_bitset.nextSetBit(v + 1) + ) + { + if ( j < indices.applyAsInt(i)-offset )//j not a wanted col + continue; + newBits.set(v); // set tupledesc index in new set + if ( ++ i == inLen ) // next wanted index + break; + } + + return new P(m_tdesc, newBits); + } + + /* + * The indices are not strictly increasing; make MP instance with + * a map array to represent permutation. + * + * First expand this current projection's tupledesc indices + * from BitSet into array form. 
+ */ + short[] td_indices = new short [ n ]; + + for ( + int i = 0, + v = m_bitset.nextSetBit(0) + ; + v >= 0 + ; + ++ i, + v = m_bitset.nextSetBit(++v) + ) + { + td_indices[i] = (short)v; + } + + /* + * Now construct a new BitSet and map array for an MP instance + */ + BitSet newBits = new BitSet(td_indices[max]); + short[] map = new short [ inLen ]; + for ( int i = 0; i < map.length; ++ i ) + { + newBits.set(map[i] = td_indices[indices.applyAsInt(i)-offset]); + } + + return new MP(m_tdesc, newBits, map); + } + + @Override + Projection project(Flavor flavor, BitSet indices) + { + final int offset = flavor.offset; + + int n = size(); + int max = flavor.check(n, indices); + if ( -1 == max ) + return EMPTY; + + if ( indices.cardinality() == n ) + return this; + + BitSet newBits = new BitSet(m_bitset.length()); + + for ( + int i = 0, + v = m_bitset.nextSetBit(0), + w = indices.nextSetBit(0) + ; + v >= 0 + ; + ++ i, + v = m_bitset.nextSetBit(v + 1) + ) + { + if ( i < w - offset ) + continue; + newBits.set(v); + w = indices.nextSetBit(w); + if ( w < 0 ) + break; + } + + return new P(m_tdesc, newBits); + } + } + + abstract static class M extends TargetListImpl + { + protected final short[] m_map; + + M(TupleDescriptor tdesc, BitSet bits, short[] map) + { + super(tdesc, bits); + m_map = map; + } + + @Override // Collection + public int size() + { + return m_map.length; + } + + @Override // List + public Attribute get(int index) + { + checkIndex(index, m_map.length); + return m_tdesc.get(m_map[index]); + } + + @Override // List + public int indexOf(Object o) + { + if ( ! contains(o) ) + return -1; + int index = ((AttributeImpl)o).subId() - 1; + for ( int i = 0; i < m_map.length; ++ i ) + if ( index == m_map[i] ) + return i; + + throw new AssertionError("contains vs. 
indexOf"); + } + + @Override + public ListIterator listIterator(int index) + { + checkIndex(index, size() + 1); + return new MLI(m_tdesc, m_map, index); + } + + @Override // List + public TargetList subList(int fromIndex, int toIndex) + { + if ( 0 == fromIndex && m_map.length == toIndex ) + return (TargetList)this; + checkFromToIndex(fromIndex, toIndex, m_map.length); + if ( fromIndex == toIndex ) + return EMPTY; + + BitSet newBits = new BitSet(m_bitset.length()); + short[] map = copyOfRange(m_map, fromIndex, toIndex); + + boolean increasing = true; + boolean duplicates = false; + + for ( short mapped : map ) + { + if ( newBits.get(mapped) ) + duplicates = true; + if ( mapped < newBits.length() - 1 ) + increasing = false; + newBits.set(mapped); + } + + if ( duplicates ) + return new MT(m_tdesc, newBits, map); + + if ( increasing ) + return new P(m_tdesc, newBits); + + return new MP(m_tdesc, newBits, map); + } + + @Override + Projection project(Flavor flavor, int inLen, IntUnaryOperator indices) + { + final int offset = flavor.offset; + + int n = size(); + int max = flavor.check(n, inLen, indices); + if ( -1 == max ) + return EMPTY; + + if ( ( inLen == n ) && increasing(inLen, indices) + && this instanceof Projection ) + return (Projection)this; + + BitSet newBits = new BitSet(m_map[max]); + short[] map = new short [ inLen ]; + + boolean increasing = true; + boolean duplicates = false; + + for ( int i = 0 ; i < inLen ; ++ i ) + { + short mapped = m_map[indices.applyAsInt(i) - offset]; + if ( newBits.get(mapped) ) + duplicates = true; + if ( mapped < newBits.length() - 1 ) + increasing = false; + newBits.set(mapped); + map[i] = mapped; + } + + if ( duplicates ) + throw new IllegalArgumentException( + flavor.method + "() result would have repeated attributes" + + " and not be a Projection"); + + if ( increasing ) + return new P(m_tdesc, newBits); + + return new MP(m_tdesc, newBits, map); + } + + @Override + Projection project(Flavor flavor, BitSet indices) + { + final 
int offset = flavor.offset; + + int n = size(); + int max = flavor.check(n, indices); + if ( -1 == max ) + return EMPTY; + + BitSet newBits = new BitSet(m_bitset.length()); + short[] map = new short [ indices.cardinality() ]; + + boolean increasing = true; + boolean duplicates = false; + + for ( + int i = 0, + v = indices.nextSetBit(0) + ; + v >= 0 + ; + ++i, + v = m_bitset.nextSetBit(v + 1) + ) + { + short mapped = m_map[v - offset]; + if ( mapped < newBits.length() - 1 ) + increasing = false; + if ( newBits.get(mapped) ) + duplicates = true; + newBits.set(mapped); + map[i] = mapped; + } + + if ( duplicates ) + throw new IllegalArgumentException( + flavor.method + "() result would have repeated attributes" + + " and not be a Projection"); + + if ( increasing ) + return new P(m_tdesc, newBits); + + return new MP(m_tdesc, newBits, map); + } + } + + static final class MP extends M implements Projection + { + MP(TupleDescriptor tdesc, BitSet bits, short[] map) + { + super(tdesc, bits, map); + } + + @Override // List + public Projection subList(int fromIndex, int toIndex) + { + return (Projection)super.subList(fromIndex, toIndex); + } + } + + static final class MT extends M implements TargetList + { + MT(TupleDescriptor tdesc, BitSet bits, short[] map) + { + super(tdesc, bits, map); + } + + @Override // List + public int lastIndexOf(Object o) + { + if ( ! contains(o) ) + return -1; + int index = ((AttributeImpl)o).subId() - 1; + for ( int i = m_map.length; i --> 0; ) + if ( index == m_map[i] ) + return i; + + throw new AssertionError("contains vs. 
lastIndexOf"); + } + } + + static boolean increasing(int nValues, IntUnaryOperator values) + { + if ( nValues < 2 ) + return true; + for ( int i = 1; i < nValues; ++ i ) + if ( values.applyAsInt(i) <= values.applyAsInt(i-1) ) + return false; + return true; + } + + static Projection subList(TupleDescriptor src, int fromIndex, int toIndex) + { + int n = src.size(); + + if ( 0 == fromIndex && n == toIndex ) + return src; + checkFromToIndex(fromIndex, toIndex, n); + if ( fromIndex == toIndex ) + return EMPTY; + BitSet newBits = new BitSet(toIndex); + newBits.set(fromIndex, toIndex); + return new P(src, newBits); + } + + static Projection project(TupleDescriptor src, int... indices) + { + if ( null == indices ) + throw new NullPointerException("project() indices null"); + return project(Flavor.ZEROBASED, src, indices.length, i -> indices[i]); + } + + static Projection sqlProject(TupleDescriptor src, int... indices) + { + if ( null == indices ) + throw new NullPointerException("sqlProject() indices null"); + return project(Flavor.SQL, src, indices.length, i -> indices[i]); + } + + static Projection project(TupleDescriptor src, short... indices) + { + if ( null == indices ) + throw new NullPointerException("project() indices null"); + return project(Flavor.ZEROBASED, src, indices.length, i -> indices[i]); + } + + static Projection sqlProject(TupleDescriptor src, short... indices) + { + if ( null == indices ) + throw new NullPointerException("sqlProject() indices null"); + return project(Flavor.SQL, src, indices.length, i -> indices[i]); + } + + static Projection project(TupleDescriptor src, Simple... names) + { + int n = src.size(); + BitSet b = new BitSet(n); + b.set(0, n); + return project(src, src, b, names); + } + + static Projection project(TupleDescriptor src, Attribute... 
attrs) + { + return project(src, src, src.size(), attrs); + } + + private static Projection project( + Flavor flavor, TupleDescriptor src, int inLen, IntUnaryOperator indices) + { + final int offset = flavor.offset; + + int n = src.size(); + int max = flavor.check(n, inLen, indices); + if ( -1 == max ) + return EMPTY; + + if ( ( inLen == n ) && increasing(inLen, indices) ) + return src; + + BitSet newBits = new BitSet(max); + short[] map = new short [ inLen ]; + + boolean increasing = true; + + for ( int i = 0 ; i < inLen ; ++ i ) + { + int idx = indices.applyAsInt(i) - offset; + if ( idx < newBits.length() - 1 ) + increasing = false; + newBits.set(idx); + map[i] = (short)idx; + } + + if ( increasing ) + return new P(src, newBits); + + return new MP(src, newBits, map); + } + + private static Projection project( + TupleDescriptor base, TargetList proxy, BitSet proxyHas, + Simple... names) + { + if ( requireNonNull(names, "project() names null").length == 0 ) + return EMPTY; + + /* + * An exception could be thrown here if names.length > n, but that + * condition ensures the later exception for names left unmatched + * will have to be thrown, and as long as that's going to happen + * anyway, the extra work to see just what names didn't match + * produces a more helpful message. 
+ */ + + BitSet namesYetToMatch = new BitSet(names.length); + namesYetToMatch.set(0, names.length); + + BitSet newBits = new BitSet(proxyHas.length()); + short[] map = new short [ names.length ]; + + boolean increasing = true; + int jMax = -1; + +outer: for ( + int i = proxyHas.nextSetBit(0); + 0 <= i; + i = proxyHas.nextSetBit(i+1) + ) + { + Simple name = base.get(i).name(); + + for ( + int j = namesYetToMatch.nextSetBit(0); + 0 <= j; + j = namesYetToMatch.nextSetBit(j+1) + ) + { + if ( name.equals(names[j]) ) + { + if ( j < jMax ) + increasing = false; + else + jMax = j; + newBits.set(i); + map[j] = (short)i; + namesYetToMatch.clear(j); + if ( namesYetToMatch.isEmpty() ) + break outer; + break; + } + } + } + + if ( ! namesYetToMatch.isEmpty() ) + throw new IllegalArgumentException(String.format( + "project() left unmatched by name: %s", + Arrays.toString( + namesYetToMatch.stream().mapToObj(i->names[i]) + .toArray(Simple[]::new) + ))); + + return project(base, proxy, newBits, map, increasing); + } + + private static Projection project( + TupleDescriptor base, TargetList proxy, + int highestProxyAttrPlus1, Attribute... attrs) + { + if ( requireNonNull(attrs, "project() attrs null").length == 0 ) + return EMPTY; + + BitSet attrsYetToMatch = new BitSet(attrs.length); + attrsYetToMatch.set(0, attrs.length); + + BitSet newBits = new BitSet(highestProxyAttrPlus1); + short[] map = new short [ attrs.length ]; + + boolean increasing = true; + + for ( int i = 0 ; i < attrs.length ; ++ i ) + { + Attribute attr = attrs[i]; + if ( ! proxy.contains(attr) ) + continue; + int idx = attr.subId() - 1; + if ( newBits.get(idx) ) // it's a duplicate + continue; + if ( idx < newBits.length() - 1 ) + increasing = false; + newBits.set(idx); + map[i] = (short)idx; + attrsYetToMatch.clear(i); + } + + if ( ! 
attrsYetToMatch.isEmpty() ) + throw new IllegalArgumentException(String.format( + "project() extraneous attributes: %s", + Arrays.toString( + attrsYetToMatch.stream().mapToObj(i->attrs[i]) + .toArray(Attribute[]::new)))); + + return project(base, proxy, newBits, map, increasing); + } + + static Projection project(TupleDescriptor src, BitSet indices) + { + return project(Flavor.ZEROBASED, src, indices); + } + + static Projection sqlProject(TupleDescriptor src, BitSet indices) + { + return project(Flavor.SQL, src, indices); + } + + private static Projection project( + Flavor flavor, TupleDescriptor src, BitSet indices) + { + int n = src.size(); + int max = flavor.check(n, indices); + if ( -1 == max ) + return EMPTY; + + if ( indices.cardinality() == n ) + return src; + + return new P(src, flavor.cloned(indices)); + } + + /* + * A factored-out epilogue. If we have generated newBits/map representing + * n distinct attributes and n was proxy.size(), then proxy was a Projection + * to start with and may be what to return. + */ + private static Projection project( + TupleDescriptor base, TargetList proxy, + BitSet newBits, short[] map, boolean increasing) + { + if ( map.length == proxy.size() ) + { + if ( increasing ) + { + if ( proxy instanceof P || proxy instanceof TupleDescriptor ) + return (Projection)proxy; + } + else if ( proxy instanceof MP ) + if ( Arrays.equals(map, ((MP)proxy).m_map) ) + return (Projection)proxy; + } + + return increasing ? 
new P(base, newBits) : new MP(base, newBits, map); + } + + static R applyOver( + TargetList tl, Iterable tuples, Cursor.Function f) + throws X, SQLException + { + try + { + return f.apply(new CursorImpl(tl, tuples)); + } + catch ( AdapterException e ) + { + throw e.unwrap(SQLException.class); + } + } + + static R applyOver( + TargetList tl, TupleTableSlot tuple, Cursor.Function f) + throws X, SQLException + { + try + { + return f.apply(new CursorImpl(tl, tuple)); + } + catch ( AdapterException e ) + { + throw e.unwrap(SQLException.class); + } + } + + abstract static class ALI implements ListIterator + { + protected final TupleDescriptor m_tdesc; + /* + * Invariant on m_idx: except transiently during an operation, it + * doesn't point to the item last returned. It points where the + * *next* item will come from if fetching in the same direction. + * It is incremented/decremented after every item fetch. + * After fetching everything possible backward, it has an otherwise + * invalid value, -1. After fetching everything possible forward, it + * has an otherwise invalid value, the underlying source's length. + * These are, in fact, the values previousIndex() or nextIndex(), + * respectively, will return in those cases. + * Any forward operation that follows a previous() begins by + * incrementing this index (for real, if next(), or notionally, for + * hasNext or nextIndex); likewise, any backward operation that + * follows a next() begins by (really or notionally) decrementing + * it. + * The constructor should be called passing idx and forward so chosen + * that a call of nextIndex() will produce the caller's desired result. + * That can be accomplished either by passing the intended index itself + * and forward=true, or the intended index minus one and forward=false. + * See the BLI constructor for where both approaches can be useful for + * edge cases. 
+ */ + protected int m_idx; + protected boolean m_forward; + + ALI(TupleDescriptor td, int idx, boolean forward) + { + m_tdesc = td; + m_idx = idx; + m_forward = forward; + } + + @Override + public boolean hasPrevious() + { + return m_idx >= (m_forward ? 1 : 0); + } + + @Override + public int nextIndex() + { + return m_forward ? m_idx : m_idx + 1; + } + + @Override + public int previousIndex() + { + return m_forward ? m_idx - 1 : m_idx; + } + + @Override + public void remove() + { + throw new UnsupportedOperationException("ListIterator.remove"); + } + + @Override + public void set(Attribute e) + { + throw new UnsupportedOperationException("ListIterator.set"); + } + + @Override + public void add(Attribute e) + { + throw new UnsupportedOperationException("ListIterator.add"); + } + } + + static class BLI extends ALI + { + private final BitSet m_bitset; + /* + * The bit index last returned by the bitset's nextSetBit or + * previousSetBit method and used to make a return value from + * next() or previous(). This is not the m_idx'th set bit, because + * m_idx is left as the index to be used next in the same direction. + * A *change* of direction will bump m_idx back into correspondence with + * this value, and the value can be reused (and then m_idx will be + * bumped again and left pointing past it in the new direction). + * BitSet's nextSetBit and previousSetBit methods can return -1 when + * no such bit exists in either direction, but none of the iterator + * options should store such a value here. They should simply leave + * the last-used value here, and adjust m_idx and m_forward so that it + * will be reused if the direction changes. + * On construction, the caller may pass -1 if listIterator(index) has + * been called with index the otherwise-invalid value equal to + * bits.length. 
For that case, we pass idx and forward=true to + * the superclass constructor, and initialize m_attno here to + * bits.length() - 1, so that value can be used for the first backward + * fetch. In all other cases, the super constructor gets idx - 1 and + * forward=false, so the value stored here will be used for the first + * forward fetch. The only way a -1 value can be stored here is in + * the constructor, if the bitset is empty. + */ + private int m_attno; + + BLI(TupleDescriptor td, BitSet bits, int attno, int idx) + { + super(td, -1 == attno ? idx : idx - 1, -1 == attno); + m_bitset = bits; + m_attno = -1 != attno ? attno : bits.length() - 1; + } + + @Override + public boolean hasNext() + { + if ( -1 == m_attno ) + return false; + if ( m_forward ) + return -1 != m_bitset.nextSetBit(m_attno + 1); + /* + * Existing direction is backward, so next() would be a direction + * change, and the valid value in m_attno is what it would use. + */ + return true; + } + + @Override + public Attribute next() + { + int attno = m_attno; + if ( ! m_forward ) + { + m_forward = true; + ++ m_idx; + } + else if ( -1 != attno ) + { + attno = m_bitset.nextSetBit(attno + 1); + if ( -1 != attno ) + m_attno = attno; + } + + if ( -1 == attno ) + throw new NoSuchElementException(); + + ++ m_idx; + return m_tdesc.get(attno); + } + + @Override + public Attribute previous() + { + int attno = m_attno; + if ( m_forward ) + { + m_forward = false; + -- m_idx; + } + else if ( -1 != attno ) + { + attno = m_bitset.previousSetBit(attno - 1); + if ( -1 != attno ) + m_attno = attno; + } + + if ( -1 == attno ) + throw new NoSuchElementException(); + + -- m_idx; + return m_tdesc.get(attno); + } + } + + static class MLI extends ALI + { + private final short[] m_map; + + MLI(TupleDescriptor td, short[] map, int idx) + { + super(td, idx, true); + m_map = map; + } + + @Override + public boolean hasNext() + { + return m_map.length > (m_forward ? 
m_idx : m_idx + 1); + } + + @Override + public Attribute next() + { + if ( ! m_forward ) + { + m_forward = true; + ++ m_idx; + } + + if ( m_idx > m_map.length - 1 ) + throw new NoSuchElementException(); + + return m_tdesc.get(m_map[m_idx ++]); + } + + @Override + public Attribute previous() + { + if ( m_forward ) + { + m_forward = false; + -- m_idx; + } + + if ( m_idx < 0 ) + throw new NoSuchElementException(); + + return m_tdesc.get(m_map[m_idx --]); + } + } + + /** + * Implementation of {@link TargetList.Cursor TargetList.Cursor}. + */ + static class CursorImpl implements TargetList.Cursor, AutoCloseable + { + private final TargetList m_tlist; + private final int m_targets; + private Iterable m_slots; + private TupleTableSlot m_currentSlot; + private int m_currentTarget; + private int m_nestLevel; + private WeakReference m_activeIterator; + + CursorImpl(TargetList tlist, Iterable slots) + { + m_tlist = tlist; + m_targets = tlist.size(); + m_slots = requireNonNull(slots, "applyOver() tuples null"); + } + + CursorImpl(TargetList tlist, TupleTableSlot slot) + { + m_tlist = tlist; + m_targets = tlist.size(); + m_currentSlot = requireNonNull(slot, "applyOver() tuple null"); + } + + @Override // Iterable + public Iterator iterator() + { + if ( 0 < m_nestLevel ) + throw new IllegalStateException( + "Cursor.iterator() called within a curried CursorFunction"); + + /* + * Only one Iterator should be active at a time. There is nothing in + * Iterator's API to indicate when one is no longer active (its user + * might just stop iterating it), so just keep track of whether an + * earlier-created one is still around and, if so, sabotage it. 
+ */ + WeakReference iRef = m_activeIterator; + if ( null != iRef ) + { + Itr i = iRef.get(); + if ( null != i ) + { + i.slot_iter = new Iterator() + { + @Override + public boolean hasNext() + { + throw new IllegalStateException( + "another iterator for this Cursor has been " + + "started"); + } + @Override + public TupleTableSlot next() + { + hasNext(); + return null; + } + }; + } + } + + if ( null == m_slots ) + { + m_slots = List.of(m_currentSlot); + m_currentSlot = null; + } + + Itr i = new Itr(); + m_activeIterator = new WeakReference<>(i); + return i; + } + + @Override // Cursor + public Stream stream() + { + Iterator itr = iterator(); + Spliterator spl; + int chr = IMMUTABLE | NONNULL | ORDERED; + long est = Long.MAX_VALUE; + + if ( m_slots instanceof Collection ) + { + est = ((Collection)m_slots).size(); + chr |= SIZED; + } + + spl = new IteratorNonSpliterator<>(itr, est, chr); + + return StreamSupport.stream(spl, false); + } + + class Itr implements Iterator + { + private Iterator slot_iter = m_slots.iterator(); + + @Override + public boolean hasNext() + { + return slot_iter.hasNext(); + } + + @Override + public Cursor next() + { + m_currentSlot = slot_iter.next(); + m_currentTarget = 0; + return CursorImpl.this; + } + } + + @Override // Iterator + public boolean hasNext() + { + return m_currentTarget < m_targets; + } + + @Override // Iterator + public Attribute next() + { + if ( m_currentTarget < m_targets ) + return m_tlist.get(m_currentTarget++); + + throw new NoSuchElementException( + "fewer Attributes in TargetList than parameters to assign"); + } + + private CursorImpl nest() + { + ++ m_nestLevel; + return this; + } + + @Override // AutoCloseable + public void close() + { + if ( 0 == -- m_nestLevel ) + m_currentTarget = 0; + } + + @Override + public R apply( + L0 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + return f.apply(); + } + } + + @Override + public R apply( + As a0, + L1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + 
{ + A v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + As a0, As a1, + L2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + As a0, As a1, As a2, + L3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + C v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + As a0, As a1, As a2, As a3, + L4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + C v2 = m_currentSlot.get(next(), a2); + D v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + As a0, As a1, As a2, As a3, + As a4, + L5 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + C v2 = m_currentSlot.get(next(), a2); + D v3 = m_currentSlot.get(next(), a3); + E v4 = m_currentSlot.get(next(), a4); + return f.apply(v0, v1, v2, v3, v4); + } + } + + @Override + public R apply( + As a0, As a1, As a2, As a3, + As a4, As a5, + L6 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + C v2 = m_currentSlot.get(next(), a2); + D v3 = m_currentSlot.get(next(), a3); + E v4 = m_currentSlot.get(next(), a4); + F v5 = m_currentSlot.get(next(), a5); + return f.apply(v0, v1, v2, v3, v4, v5); + } + } + + @Override + public R apply( + As a0, As a1, As a2, As a3, + As a4, As a5, As a6, + L7 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + C v2 = m_currentSlot.get(next(), a2); + D v3 = 
m_currentSlot.get(next(), a3); + E v4 = m_currentSlot.get(next(), a4); + F v5 = m_currentSlot.get(next(), a5); + G v6 = m_currentSlot.get(next(), a6); + return f.apply(v0, v1, v2, v3, v4, v5, v6); + } + } + + @Override + public R apply( + As a0, As a1, As a2, As a3, + As a4, As a5, As a6, As a7, + L8 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + C v2 = m_currentSlot.get(next(), a2); + D v3 = m_currentSlot.get(next(), a3); + E v4 = m_currentSlot.get(next(), a4); + F v5 = m_currentSlot.get(next(), a5); + G v6 = m_currentSlot.get(next(), a6); + H v7 = m_currentSlot.get(next(), a7); + return f.apply(v0, v1, v2, v3, v4, v5, v6, v7); + } + } + + @Override + public R apply( + As a0, As a1, As a2, As a3, + As a4, As a5, As a6, As a7, + As a8, As a9, As aa, As ab, + As ac, As ad, As ae, As af, + L16 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + A v0 = m_currentSlot.get(next(), a0); + B v1 = m_currentSlot.get(next(), a1); + C v2 = m_currentSlot.get(next(), a2); + D v3 = m_currentSlot.get(next(), a3); + E v4 = m_currentSlot.get(next(), a4); + F v5 = m_currentSlot.get(next(), a5); + G v6 = m_currentSlot.get(next(), a6); + H v7 = m_currentSlot.get(next(), a7); + I v8 = m_currentSlot.get(next(), a8); + J v9 = m_currentSlot.get(next(), a9); + K va = m_currentSlot.get(next(), aa); + L vb = m_currentSlot.get(next(), ab); + M vc = m_currentSlot.get(next(), ac); + N vd = m_currentSlot.get(next(), ad); + O ve = m_currentSlot.get(next(), ae); + P vf = m_currentSlot.get(next(), af); + return f.apply( + v0, v1, v2, v3, v4, v5, v6, v7, + v8, v9, va, vb, vc, vd, ve, vf); + } + } + + @Override + public R apply( + AsLong a0, + J1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + long v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsLong a0, AsLong a1, + J2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + long 
v0 = m_currentSlot.get(next(), a0); + long v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + AsLong a0, AsLong a1, AsLong a2, + J3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + long v0 = m_currentSlot.get(next(), a0); + long v1 = m_currentSlot.get(next(), a1); + long v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsLong a0, AsLong a1, AsLong a2, AsLong a3, + J4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + long v0 = m_currentSlot.get(next(), a0); + long v1 = m_currentSlot.get(next(), a1); + long v2 = m_currentSlot.get(next(), a2); + long v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + AsDouble a0, + D1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + double v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsDouble a0, AsDouble a1, + D2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + double v0 = m_currentSlot.get(next(), a0); + double v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + AsDouble a0, AsDouble a1, AsDouble a2, + D3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + double v0 = m_currentSlot.get(next(), a0); + double v1 = m_currentSlot.get(next(), a1); + double v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsDouble a0, AsDouble a1, AsDouble a2, AsDouble a3, + D4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + double v0 = m_currentSlot.get(next(), a0); + double v1 = m_currentSlot.get(next(), a1); + double v2 = m_currentSlot.get(next(), a2); + double v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + AsInt a0, + I1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + int v0 
= m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsInt a0, AsInt a1, + I2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + int v0 = m_currentSlot.get(next(), a0); + int v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + AsInt a0, AsInt a1, AsInt a2, + I3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + int v0 = m_currentSlot.get(next(), a0); + int v1 = m_currentSlot.get(next(), a1); + int v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsInt a0, AsInt a1, AsInt a2, AsInt a3, + I4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + int v0 = m_currentSlot.get(next(), a0); + int v1 = m_currentSlot.get(next(), a1); + int v2 = m_currentSlot.get(next(), a2); + int v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + AsFloat a0, + F1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + float v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsFloat a0, AsFloat a1, + F2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + float v0 = m_currentSlot.get(next(), a0); + float v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + AsFloat a0, AsFloat a1, AsFloat a2, + F3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + float v0 = m_currentSlot.get(next(), a0); + float v1 = m_currentSlot.get(next(), a1); + float v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsFloat a0, AsFloat a1, AsFloat a2, AsFloat a3, + F4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + float v0 = m_currentSlot.get(next(), a0); + float v1 = m_currentSlot.get(next(), a1); + float v2 = m_currentSlot.get(next(), a2); + float v3 = m_currentSlot.get(next(), a3); + 
return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + AsShort a0, + S1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + short v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsShort a0, AsShort a1, + S2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + short v0 = m_currentSlot.get(next(), a0); + short v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + AsShort a0, AsShort a1, AsShort a2, + S3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + short v0 = m_currentSlot.get(next(), a0); + short v1 = m_currentSlot.get(next(), a1); + short v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsShort a0, AsShort a1, AsShort a2, AsShort a3, + S4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + short v0 = m_currentSlot.get(next(), a0); + short v1 = m_currentSlot.get(next(), a1); + short v2 = m_currentSlot.get(next(), a2); + short v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + AsChar a0, + C1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + char v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsChar a0, AsChar a1, + C2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + char v0 = m_currentSlot.get(next(), a0); + char v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + AsChar a0, AsChar a1, AsChar a2, + C3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + char v0 = m_currentSlot.get(next(), a0); + char v1 = m_currentSlot.get(next(), a1); + char v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsChar a0, AsChar a1, AsChar a2, AsChar a3, + C4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + 
char v0 = m_currentSlot.get(next(), a0); + char v1 = m_currentSlot.get(next(), a1); + char v2 = m_currentSlot.get(next(), a2); + char v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + AsByte a0, + B1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + byte v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsByte a0, AsByte a1, + B2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + byte v0 = m_currentSlot.get(next(), a0); + byte v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + AsByte a0, AsByte a1, AsByte a2, + B3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + byte v0 = m_currentSlot.get(next(), a0); + byte v1 = m_currentSlot.get(next(), a1); + byte v2 = m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsByte a0, AsByte a1, AsByte a2, AsByte a3, + B4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + byte v0 = m_currentSlot.get(next(), a0); + byte v1 = m_currentSlot.get(next(), a1); + byte v2 = m_currentSlot.get(next(), a2); + byte v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + + @Override + public R apply( + AsBoolean a0, + Z1 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + boolean v0 = m_currentSlot.get(next(), a0); + return f.apply(v0); + } + } + + @Override + public R apply( + AsBoolean a0, AsBoolean a1, + Z2 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + boolean v0 = m_currentSlot.get(next(), a0); + boolean v1 = m_currentSlot.get(next(), a1); + return f.apply(v0, v1); + } + } + + @Override + public R apply( + AsBoolean a0, AsBoolean a1, AsBoolean a2, + Z3 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + boolean v0 = m_currentSlot.get(next(), a0); + boolean v1 = m_currentSlot.get(next(), a1); + boolean v2 = 
m_currentSlot.get(next(), a2); + return f.apply(v0, v1, v2); + } + } + + @Override + public R apply( + AsBoolean a0, AsBoolean a1, AsBoolean a2, AsBoolean a3, + Z4 f) + throws X + { + try ( CursorImpl unnest = nest() ) + { + boolean v0 = m_currentSlot.get(next(), a0); + boolean v1 = m_currentSlot.get(next(), a1); + boolean v2 = m_currentSlot.get(next(), a2); + boolean v3 = m_currentSlot.get(next(), a3); + return f.apply(v0, v1, v2, v3); + } + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/TransformImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/TransformImpl.java new file mode 100644 index 000000000..4103e1f0a --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/TransformImpl.java @@ -0,0 +1,373 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.lookup; + +import java.nio.ByteBuffer; + +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.List; +import java.util.Set; + +import java.util.concurrent.CopyOnWriteArraySet; + +import java.util.function.Function; + +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; +import org.postgresql.pljava.internal.SwitchPointCache.Builder; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.model.*; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.CatalogObjectImpl.Factory.TRFOID_CB; +import static 
org.postgresql.pljava.pg.ModelConstants.TRFOID; // syscache +import static org.postgresql.pljava.pg.ModelConstants.TRFTYPELANG; // syscache +import static org.postgresql.pljava.pg.TupleTableSlotImpl.heapTupleGetLightSlot; +import org.postgresql.pljava.pg.RegProcedureImpl.SupportMemo; + +import static org.postgresql.pljava.pg.adt.OidAdapter.PLANG_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGPROCEDURE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.REGTYPE_INSTANCE; +import static org.postgresql.pljava.pg.adt.OidAdapter.TRANSFORM_INSTANCE; + +/** + * Implementation of the {@link Transform Transform} interface. + */ +class TransformImpl extends Addressed +implements Nonshared, Transform +{ + private static final Function s_initializer; + + /** + * Count of instances subject to invalidation. + *

    + * Only accessed in invalidate and SP.onFirstUse, both on the PG thread. + */ + private static int s_instances; + + private static class SP extends SwitchPoint + { + @Override + protected void onFirstUse() + { + if ( 1 == ++ s_instances ) + sysCacheInvalArmed(TRFOID_CB, true); + } + } + + private final SwitchPoint[] m_sp; + + /** + * Looks up a single {@code Transform} given a type and procedural language. + *

    + * Only to be called "on the PG thread". + * @return a {@code Transform} if found, otherwise null. + */ + static Transform fromTypeLang(RegType type, ProceduralLanguage lang) + { + assert threadMayEnterPG() : "Transform.fromTypeLang thread"; + + /* + * All we need here is the transform's oid, which a custom native + * method could obtain more cheaply without copying the tuple, but + * _searchSysCacheCopy2 can do the job without adding yet another JNI + * method. We will allocate in the current context, assumed to be + * short-lived, context and use heapTupleGetLightSlot(..., false) to let + * the context take care of cleanup, as no reference to this slot will + * escape this call. + */ + ByteBuffer heapTuple = + _searchSysCacheCopy2(TRFTYPELANG, type.oid(), lang.oid()); + if ( null == heapTuple ) + return null; + + TupleDescImpl td = (TupleDescImpl)CLASSID.tupleDescriptor(); + TupleTableSlot s = heapTupleGetLightSlot(td, heapTuple, null, false); + return s.get(Att.OID, TRANSFORM_INSTANCE); + } + + /* Implementation of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + @Override + int cacheId() + { + return TRFOID; + } + + /* Implementation of Transform */ + + /** + * Merely passes the supplied slots array to the superclass constructor; all + * initialization of the slots will be the responsibility of the subclass. 
+ */ + TransformImpl() + { + super(s_initializer.apply(new MethodHandle[NSLOTS])); + m_sp = new SwitchPoint[] { new SP() }; + } + + @Override + void invalidate(List sps, List postOps) + { + SwitchPoint sp = m_sp[0]; + if ( sp.unused() ) + return; + sps.add(sp); + m_sp[0] = new SP(); + if ( 0 == -- s_instances ) + sysCacheInvalArmed(TRFOID_CB, false); + + boolean languageCached = m_languageCached; + m_languageCached = false; + if ( languageCached ) + ((ProceduralLanguageImpl)language()).removeKnownTransform(this); + + Iterator> itr = m_dependentRoutines.iterator(); + m_dependentRoutines.clear(); // CopyOnWriteArraySet iterator still good + itr.forEachRemaining(p -> p.invalidate(sps, postOps)); + + FromSQLMemo.removeDependent(fromSQL(), this); + ToSQLMemo.removeDependent(toSQL(), this); + } + + static final int SLOT_TYPE; + static final int SLOT_LANG; + static final int SLOT_FROMSQL; + static final int SLOT_TOSQL; + static final int NSLOTS; + + static + { + int i = CatalogObjectImpl.Addressed.NSLOTS; + s_initializer = + new Builder<>(TransformImpl.class) + .withLookup(lookup()) + .withSwitchPoint(o -> o.m_sp[0]) + .withSlots(o -> o.m_slots) + + .withCandidates( + CatalogObjectImpl.Addressed.class.getDeclaredMethods()) + .withReceiverType(CatalogObjectImpl.Addressed.class) + .withDependent("cacheTuple", SLOT_TUPLE) + + .withCandidates(TransformImpl.class.getDeclaredMethods()) + .withReceiverType(null) + .withDependent( "type", SLOT_TYPE = i++) + .withDependent("language", SLOT_LANG = i++) + .withDependent( "fromSQL", SLOT_FROMSQL = i++) + .withDependent( "toSQL", SLOT_TOSQL = i++) + + .build() + /* + * Add these slot initializers after what Addressed does. 
+ */ + .compose(CatalogObjectImpl.Addressed.s_initializer); + NSLOTS = i; + } + + static class Att + { + static final Attribute OID; // used in fromTypeLang() above + static final Attribute TRFTYPE; + static final Attribute TRFLANG; + static final Attribute TRFFROMSQL; + static final Attribute TRFTOSQL; + + static + { + Iterator itr = CLASSID.tupleDescriptor().project( + "oid", + "trftype", + "trflang", + "trffromsql", + "trftosql" + ).iterator(); + + OID = itr.next(); + TRFTYPE = itr.next(); + TRFLANG = itr.next(); + TRFFROMSQL = itr.next(); + TRFTOSQL = itr.next(); + + assert ! itr.hasNext() : "attribute initialization miscount"; + } + } + + /* mutable non-API data used only on the PG thread */ + + private final Set> + m_dependentRoutines = new CopyOnWriteArraySet<>(); + + private boolean m_languageCached = false; // needed in invalidate + + static void addDependentRoutine(RegProcedureImpl p, List ts) + { + for ( Transform t : ts ) + ((TransformImpl)t).m_dependentRoutines.add(p); + } + + static void removeDependentRoutine(RegProcedureImpl p,List ts) + { + for ( Transform t : ts ) + ((TransformImpl)t).m_dependentRoutines.remove(p); + } + + /* computation methods */ + + private static RegType type(TransformImpl o) throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + return s.get(Att.TRFTYPE, REGTYPE_INSTANCE); + } + + private static ProceduralLanguage language(TransformImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + o.m_languageCached = true; + return s.get(Att.TRFLANG, PLANG_INSTANCE); + } + + private static RegProcedure fromSQL(TransformImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") + RegProcedure p = + (RegProcedure)s.get(Att.TRFFROMSQL, REGPROCEDURE_INSTANCE); + return p; + } + + private static RegProcedure toSQL(TransformImpl o) + throws SQLException + { + TupleTableSlot s = o.cacheTuple(); + @SuppressWarnings("unchecked") + RegProcedure p = + 
(RegProcedure)s.get(Att.TRFTOSQL, REGPROCEDURE_INSTANCE); + return p; + } + + /* API methods */ + + @Override + public RegType type() + { + try + { + MethodHandle h = m_slots[SLOT_TYPE]; + return (RegType)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public ProceduralLanguage language() + { + try + { + MethodHandle h = m_slots[SLOT_LANG]; + return (ProceduralLanguage)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure fromSQL() + { + try + { + MethodHandle h = m_slots[SLOT_FROMSQL]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + @Override + public RegProcedure toSQL() + { + try + { + MethodHandle h = m_slots[SLOT_TOSQL]; + return (RegProcedure)h.invokeExact(this, h); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /** + * {@link SupportMemo SupportMemo} for attachment to + * a {@link RegProcedure RegProcedure} that serves as + * a {@link #fromSQL() fromSQL} function. + */ + static class FromSQLMemo + extends SupportMemo implements FromSQL + { + private FromSQLMemo( + RegProcedure carrier, Transform dep) + { + super(carrier, (TransformImpl)dep); + } + + static void addDependent( + RegProcedure proc, Transform dep) + { + SupportMemo.add(proc, (TransformImpl)dep, FromSQLMemo.class, + () -> new FromSQLMemo(proc, dep)); + } + } + + /** + * {@link SupportMemo SupportMemo} for attachment to + * a {@link RegProcedure RegProcedure} that serves as + * a {@link #toSQL() toSQL} function. 
+ */ + static class ToSQLMemo + extends SupportMemo implements ToSQL + { + private ToSQLMemo( + RegProcedure carrier, Transform dep) + { + super(carrier, (TransformImpl)dep); + } + + static void addDependent( + RegProcedure proc, Transform dep) + { + SupportMemo.add(proc, (TransformImpl)dep, ToSQLMemo.class, + () -> new ToSQLMemo(proc, dep)); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/TriggerImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/TriggerImpl.java new file mode 100644 index 000000000..4214327f6 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/TriggerImpl.java @@ -0,0 +1,548 @@ +/* + * Copyright (c) 2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.annotation.Native; + +import java.nio.ByteBuffer; +import java.nio.ShortBuffer; + +import java.nio.charset.CharacterCodingException; + +import java.sql.SQLException; +import java.sql.SQLXML; + +import static java.util.Collections.unmodifiableSet; +import java.util.EnumSet; +import java.util.Iterator; +import java.util.List; +import java.util.Set; + +import org.postgresql.pljava.TargetList.Projection; + +import org.postgresql.pljava.adt.spi.Datum; + +import org.postgresql.pljava.annotation.Trigger.Called; +import org.postgresql.pljava.annotation.Trigger.Event; +import org.postgresql.pljava.annotation.Trigger.Scope; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import org.postgresql.pljava.internal.Checked; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.jdbc.SQLXMLImpl; + +import org.postgresql.pljava.model.*; +import static 
org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; + +import org.postgresql.pljava.pg.CatalogObjectImpl.*; + +import static + org.postgresql.pljava.pg.CatalogObjectImpl.Addressed._sysTableGetByOid; + +import static org.postgresql.pljava.pg.DatumUtils.asAlwaysCopiedDatum; +import static org.postgresql.pljava.pg.DatumUtils.fetchPointer; +import static org.postgresql.pljava.pg.DatumUtils.mapCString; +import static org.postgresql.pljava.pg.DatumUtils.mapFixedLength; + +import org.postgresql.pljava.pg.LookupImpl.CallImpl.TriggerDataImpl; + +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_DATUM; +import static org.postgresql.pljava.pg.ModelConstants.Anum_pg_trigger_oid; +import static org.postgresql.pljava.pg.ModelConstants.TriggerOidIndexId; + +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgname; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgfoid; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgtype; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgenabled; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgisinternal; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgisclone; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgconstrrelid; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgconstrindid; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgconstraint; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgdeferrable; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tginitdeferred; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgnargs; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgnattr; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgattr; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgargs; +import static 
org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgqual; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgoldtable; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_TRG_tgnewtable; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Implements {@code Trigger}. + *

    + * This implementation, at least at first, will have an unusual limitation: + * its accessor methods (other than those of + * {@link CatalogObject.Addressed Addressed}) may only work + * when called by a trigger function or its language handler within the scope + * of the function's preparation and execution. Some may be unimplemented even + * then, as noted in the documentation of the methods themselves. + *

    + * That spares it from having to deal with getting the content from + * {@code pg_trigger}, or cache lifetime, or invalidation; it can operate from + * the copy PostgreSQL supplies for the trigger function call, during the scope + * of the call. + *

    + * At least for now, then, it simply extends {@code CatalogObjectImpl} directly + * rather than {@code CatalogObjectImpl.Addressed}, needing none of the caching + * machinery in the latter. + */ +class TriggerImpl extends CatalogObjectImpl +implements Nonshared, Trigger +{ + @Native private static final char TRIGGER_FIRES_ON_ORIGIN = 'O'; + @Native private static final char TRIGGER_FIRES_ALWAYS = 'A'; + @Native private static final char TRIGGER_FIRES_ON_REPLICA = 'R'; + @Native private static final char TRIGGER_DISABLED = 'D'; + + @Native private static final int TRIGGER_TYPE_ROW = 1 << 0; + @Native private static final int TRIGGER_TYPE_BEFORE = 1 << 1; + @Native private static final int TRIGGER_TYPE_INSERT = 1 << 2; + @Native private static final int TRIGGER_TYPE_DELETE = 1 << 3; + @Native private static final int TRIGGER_TYPE_UPDATE = 1 << 4; + @Native private static final int TRIGGER_TYPE_TRUNCATE = 1 << 5; + @Native private static final int TRIGGER_TYPE_INSTEAD = 1 << 6; + + @Native private static final int TRIGGER_TYPE_LEVEL_MASK = TRIGGER_TYPE_ROW; + @Native private static final int TRIGGER_TYPE_STATEMENT = 0; + + @Native private static final int TRIGGER_TYPE_TIMING_MASK = + TRIGGER_TYPE_BEFORE | TRIGGER_TYPE_INSTEAD; + @Native private static final int TRIGGER_TYPE_AFTER = 0; + + @Native private static final int TRIGGER_TYPE_EVENT_MASK = + TRIGGER_TYPE_INSERT | TRIGGER_TYPE_DELETE | + TRIGGER_TYPE_UPDATE | TRIGGER_TYPE_TRUNCATE; + + /* + * By inspection of the above, event bits are contiguous and can be shifted + * right by this amount to make a zero-based index of power sets, which is + * relied on below; if that changes, fix whatever needs fixing. 
+ */ + private static final int EVENT_SHIFT = 2; + + private static Set indexToSet(int index) + { + int type = index << EVENT_SHIFT; + EnumSet s = EnumSet.noneOf(Event.class); + if ( 0 != (type & TRIGGER_TYPE_INSERT) ) + s.add(Event.INSERT); + if ( 0 != (type & TRIGGER_TYPE_DELETE) ) + s.add(Event.DELETE); + if ( 0 != (type & TRIGGER_TYPE_UPDATE) ) + s.add(Event.UPDATE); + if ( 0 != (type & TRIGGER_TYPE_TRUNCATE) ) + s.add(Event.TRUNCATE); + return unmodifiableSet(s); + } + + private static final List> EVENT_SETS = List.of( + indexToSet( 0), indexToSet( 1), indexToSet( 2), indexToSet( 3), + indexToSet( 4), indexToSet( 5), indexToSet( 6), indexToSet( 7), + indexToSet( 8), indexToSet( 9), indexToSet(10), indexToSet(11), + indexToSet(12), indexToSet(13), indexToSet(14), indexToSet(15)); + + private static Set typeToSet(int type) + { + type &= TRIGGER_TYPE_EVENT_MASK; + return EVENT_SETS.get(type >>> EVENT_SHIFT); + } + + static + { + assert + typeToSet(TRIGGER_TYPE_INSERT).equals(EnumSet.of(Event.INSERT)) && + typeToSet(TRIGGER_TYPE_DELETE).equals(EnumSet.of(Event.DELETE)) && + typeToSet(TRIGGER_TYPE_UPDATE).equals(EnumSet.of(Event.UPDATE)) && + typeToSet(TRIGGER_TYPE_TRUNCATE).equals(EnumSet.of(Event.TRUNCATE)) + : "Trigger.events representation has changed"; + } + + private TriggerDataImpl m_td; + private ByteBuffer m_bb; + + /** + * Executes work in a scope during which this instance is + * associated with the supplied {@link TriggerDataImpl TriggerDataImpl} + * instance and returns any result. + *

    + * Used by the dispatcher in a somewhat incestuous arrangement further + * described at {@link TriggerDataImpl#m_trigger}. + */ + + T withTriggerData(TriggerDataImpl td, Checked.Supplier work) + throws E + { + final Object[] save = {null, null}; + try + { + doInPG(() -> + { + save[0] = m_td; + save[1] = m_bb; + m_td = td; + m_bb = td.m_trigger; + }); + return work.get(); + } + finally + { + doInPG(() -> + { + m_td.m_trigger = m_bb = (ByteBuffer)save[1]; + m_td = (TriggerDataImpl)save[0]; + }); + } + } + + /* API methods of Addressed */ + + @Override + public RegClass.Known classId() + { + return CLASSID; + } + + /* + * The API javadoc does say the methods of Addressed will work even outside + * of the trigger-call context, and this is one of those, so give it a + * simple if nonoptimal implementation doing an index lookup to cover that. + */ + @Override + public boolean exists() + { + if ( null != m_bb ) + return true; + + ByteBuffer heapTuple; + TupleDescImpl td = (TupleDescImpl)CLASSID.tupleDescriptor(); + + try + { + return doInPG(() -> + null != _sysTableGetByOid( + CLASSID.oid(), oid(), Anum_pg_trigger_oid, + TriggerOidIndexId, td.address()) + ); + } + catch ( SQLException e ) + { + throw unchecked(e); + } + } + + /* API method of Named */ + + @Override + public Simple name() + { + if ( null == m_bb ) + throw notyet(); + + try + { + long p = fetchPointer(m_bb, OFFSET_TRG_tgname); + ByteBuffer b = mapCString(p); + return Simple.fromCatalog(SERVER_ENCODING.decode(b).toString()); + } + catch ( CharacterCodingException e ) + { + throw new AssertionError(e); + } + } + + /* API methods */ + + @Override + public RegClass relation() + { + if ( null == m_td ) + throw notyet(); + + return m_td.relation(); + } + + @Override + public Trigger parent() + { + throw notyet(); + } + + @Override + public RegProcedure function() + { + if ( null == m_bb ) + throw notyet(); + + int oid = m_bb.getInt(OFFSET_TRG_tgfoid); + + @SuppressWarnings("unchecked") + RegProcedure f = + 
(RegProcedure)of(RegProcedure.CLASSID, oid); + + return f; + } + + @Override + public Called called() + { + if ( null == m_bb ) + throw notyet(); + + int type = Short.toUnsignedInt(m_bb.getShort(OFFSET_TRG_tgtype)); + type &= TRIGGER_TYPE_TIMING_MASK; + switch ( type ) + { + case TRIGGER_TYPE_BEFORE : return Called.BEFORE; + case TRIGGER_TYPE_AFTER : return Called.AFTER; + case TRIGGER_TYPE_INSTEAD: return Called.INSTEAD_OF; + default: + throw new AssertionError("Trigger.called enum"); + } + } + + @Override + public Set events() + { + if ( null == m_bb ) + throw notyet(); + + int type = Short.toUnsignedInt(m_bb.getShort(OFFSET_TRG_tgtype)); + return typeToSet(type); + } + + @Override + public Scope scope() + { + if ( null == m_bb ) + throw notyet(); + + int type = Short.toUnsignedInt(m_bb.getShort(OFFSET_TRG_tgtype)); + type &= TRIGGER_TYPE_LEVEL_MASK; + switch ( type ) + { + case TRIGGER_TYPE_ROW : return Scope.ROW; + case TRIGGER_TYPE_STATEMENT: return Scope.STATEMENT; + default: + throw new AssertionError("Trigger.scope enum"); + } + } + + @Override + public ReplicationRole enabled() + { + if ( null == m_bb ) + throw notyet(); + + char c = (char)(0xff & m_bb.get(OFFSET_TRG_tgenabled)); + + switch ( c ) + { + case TRIGGER_FIRES_ON_ORIGIN : return ReplicationRole.ON_ORIGIN; + case TRIGGER_FIRES_ALWAYS : return ReplicationRole.ALWAYS; + case TRIGGER_FIRES_ON_REPLICA : return ReplicationRole.ON_REPLICA; + case TRIGGER_DISABLED : return ReplicationRole.DISABLED; + default: + throw new AssertionError("Trigger.enabled enum"); + } + } + + @Override + public boolean internal() + { + if ( null == m_bb ) + throw notyet(); + + return 0 != m_bb.get(OFFSET_TRG_tgisinternal); + } + + @Override + public RegClass constraintRelation() + { + if ( null == m_bb ) + throw notyet(); + + int oid = m_bb.getInt(OFFSET_TRG_tgconstrrelid); + return InvalidOid == oid ? 
null : of(RegClass.CLASSID, oid); + } + + @Override + public RegClass constraintIndex() + { + if ( null == m_bb ) + throw notyet(); + + int oid = m_bb.getInt(OFFSET_TRG_tgconstrindid); + return InvalidOid == oid ? null : of(RegClass.CLASSID, oid); + } + + @Override + public Constraint constraint() + { + if ( null == m_bb ) + throw notyet(); + + int oid = m_bb.getInt(OFFSET_TRG_tgconstraint); + return InvalidOid == oid ? null : of(Constraint.CLASSID, oid); + } + + @Override + public boolean deferrable() + { + if ( null == m_bb ) + throw notyet(); + + return 0 != m_bb.get(OFFSET_TRG_tgdeferrable); + } + + @Override + public boolean initiallyDeferred() + { + if ( null == m_bb ) + throw notyet(); + + return 0 != m_bb.get(OFFSET_TRG_tginitdeferred); + } + + @Override + public Projection columns() + { + if ( null == m_bb ) + throw notyet(); + + int nattr = Short.toUnsignedInt(m_bb.get(OFFSET_TRG_tgnattr)); + + if ( 0 == nattr ) + return null; + + long attvp = fetchPointer(m_bb, OFFSET_TRG_tgattr); + ByteBuffer attvb = mapFixedLength(attvp, nattr * Short.BYTES); + ShortBuffer sb = attvb.asShortBuffer(); + short[] attnums = new short [ nattr ]; + sb.get(attnums); + return relation().tupleDescriptor().sqlProject(attnums); + } + + @Override + public List arguments() + { + if ( null == m_bb ) + throw notyet(); + + int nargs = Short.toUnsignedInt(m_bb.get(OFFSET_TRG_tgnargs)); + + if ( 0 == nargs ) + return List.of(); + + long argvp = fetchPointer(m_bb, OFFSET_TRG_tgargs); + ByteBuffer argvb = mapFixedLength(argvp, nargs * SIZEOF_DATUM); + String[] argv = new String[nargs]; + for ( int i = 0 ; i < nargs ; ++ i ) + { + long p = fetchPointer(argvb, i * SIZEOF_DATUM); + ByteBuffer b = mapCString(p); + try + { + argv[i] = SERVER_ENCODING.decode(b).toString(); + } + catch ( CharacterCodingException e ) + { + throw new AssertionError(e); + } + } + return List.of(argv); + } + + @Override + public SQLXML when() + { + if ( null == m_bb ) + throw notyet(); + + long p = 
fetchPointer(m_bb, OFFSET_TRG_tgqual); + + if ( 0 == p ) + return null; + + ByteBuffer bb = mapCString(p); + + Datum.Input in = asAlwaysCopiedDatum(bb, 0, bb.limit()); + + try + { + return SQLXMLImpl.newReadable(in, RegType.PG_NODE_TREE, true); + } + catch ( SQLException e ) + { + throw unchecked(e); + } + } + + @Override + public Simple tableOld() + { + if ( null == m_bb ) + throw notyet(); + + long p = fetchPointer(m_bb, OFFSET_TRG_tgoldtable); + + if ( 0 == p ) + return null; + + ByteBuffer b = mapCString(p); + + try + { + return Simple.fromCatalog(SERVER_ENCODING.decode(b).toString()); + } + catch ( CharacterCodingException e ) + { + throw new AssertionError(e); + } + } + + @Override + public Simple tableNew() + { + if ( null == m_bb ) + throw notyet(); + + long p = fetchPointer(m_bb, OFFSET_TRG_tgnewtable); + + if ( 0 == p ) + return null; + + ByteBuffer b = mapCString(p); + + try + { + return Simple.fromCatalog(SERVER_ENCODING.decode(b).toString()); + } + catch ( CharacterCodingException e ) + { + throw new AssertionError(e); + } + } + + @Override + public boolean isClone() + { + if ( null == m_bb ) + throw notyet(); + + return 0 != m_bb.get(OFFSET_TRG_tgisclone); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/TupleDescImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/TupleDescImpl.java new file mode 100644 index 000000000..dcc8211d7 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/TupleDescImpl.java @@ -0,0 +1,854 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import org.postgresql.pljava.model.*; +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; +import static org.postgresql.pljava.model.RegType.RECORD; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; +import org.postgresql.pljava.internal.DualState; +import org.postgresql.pljava.internal.SwitchPointCache.SwitchPoint; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.pg.TargetListImpl; +import static org.postgresql.pljava.pg.CatalogObjectImpl.*; +import static org.postgresql.pljava.pg.ModelConstants.*; +import static org.postgresql.pljava.pg.DatumUtils.addressOf; +import static org.postgresql.pljava.pg.DatumUtils.asReadOnlyNativeOrder; + +import java.lang.invoke.MethodHandle; +import static java.lang.invoke.MethodHandles.constant; +import static java.lang.invoke.MethodHandles.lookup; +import static java.lang.invoke.MethodType.methodType; + +import static java.lang.Math.ceil; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import static java.nio.ByteOrder.nativeOrder; + +import java.nio.charset.CharsetEncoder; +import java.nio.charset.CoderResult; + +import java.sql.SQLException; +import java.sql.SQLSyntaxErrorException; + +import java.util.AbstractList; +import static java.util.Arrays.fill; +import java.util.BitSet; +import java.util.List; +import java.util.Map; + +import java.util.concurrent.ConcurrentHashMap; + +import java.util.function.BiFunction; +import java.util.function.IntFunction; +import 
java.util.function.IntSupplier; +import java.util.function.ToIntBiFunction; + +/** + * Implementation of {@link TupleDescriptor TupleDescriptor}. + *

    + * A {@link Cataloged Cataloged} descriptor corresponds to a known composite + * type declared in the PostgreSQL catalogs; its {@link #rowType rowType} method + * returns that type. A {@link Blessed Blessed} descriptor has been constructed + * on the fly and then interned in the type cache, such that the type + * {@code RECORD} and its type modifier value will identify it uniquely for + * the life of the backend; {@code rowType} will return the corresponding + * {@link RegTypeImpl.Blessed} instance. An {@link Ephemeral Ephemeral} + * descriptor has been constructed ad hoc and not interned; {@code rowType} will + * return {@link RegType#RECORD RECORD} itself, which isn't a useful identifier + * (many such ephemeral descriptors, all different, could exist at once). + * An ephemeral descriptor is only useful as long as a reference to it is held. + *

    + * A {@code Cataloged} descriptor can be obtained from the PG {@code relcache} + * or the {@code typcache}, should respond to cache invalidation for + * the corresponding relation, and is reference-counted, so the count should be + * incremented when cached here, and decremented/released if this instance + * goes unreachable from Java. + *

    + * A {@code Blessed} descriptor can be obtained from the PG {@code typcache} + * by {@code lookup_rowtype_tupdesc}. No invalidation logic is needed, as it + * will persist, and its identifying typmod will remain unique, for the life of + * the backend. It may or may not be reference-counted. + *

    + * An {@code Ephemeral} tuple descriptor may need to be copied out of + * a short-lived memory context where it is found, either into a longer-lived + * context (and invalidated when that context is), or onto the Java heap and + * used until GC'd. + */ +abstract class TupleDescImpl extends AbstractList +implements TupleDescriptor +{ + private final MethodHandle m_tdH; + private final Attribute[] m_attrs; + private final State m_state; + + /* + * Implementation of Projection + */ + + @Override // Projection + public Projection subList(int fromIndex, int toIndex) + { + return TargetListImpl.subList(this, fromIndex, toIndex); + } + + @Override // Projection + public Projection project(Simple... names) + { + return TargetListImpl.project(this, names); + } + + @Override // Projection + public Projection project(int... indices) + { + return TargetListImpl.project(this, indices); + } + + @Override // Projection + public Projection sqlProject(int... indices) + { + return TargetListImpl.sqlProject(this, indices); + } + + @Override // Projection + public Projection project(short... indices) + { + return TargetListImpl.project(this, indices); + } + + @Override // Projection + public Projection sqlProject(short... indices) + { + return TargetListImpl.sqlProject(this, indices); + } + + @Override // Projection + public Projection project(Attribute... 
attrs) + { + return TargetListImpl.project(this, attrs); + } + + @Override // Projection + public Projection project(BitSet indices) + { + return TargetListImpl.project(this, indices); + } + + @Override // Projection + public Projection sqlProject(BitSet indices) + { + return TargetListImpl.sqlProject(this, indices); + } + + @Override // TargetList + public R applyOver( + Iterable tuples, Cursor.Function f) + throws X, SQLException + { + return TargetListImpl.applyOver(this, tuples, f); + } + + @Override // TargetList + public R applyOver( + TupleTableSlot tuple, Cursor.Function f) + throws X, SQLException + { + return TargetListImpl.applyOver(this, tuple, f); + } + + private static final int s_perAttributeSize; + + /** + * A "getAndAdd" (with just plain memory effects, as it will only be used on + * the PG thread) tailored to the width of the tdrefcount field (which is, + * oddly, declared as C int rather than a specific-width type). + */ + private static final ToIntBiFunction s_getAndAddPlain; + + private static final MethodHandle s_everNull; + private static final MethodHandle s_throwInvalidated; + + private static ByteBuffer throwInvalidated() + { + throw new IllegalStateException( + "use of stale TupleDescriptor outdated by a DDL change"); + } + + static + { + assert Integer.BYTES == SIZEOF_Oid : "sizeof Oid"; + assert Integer.BYTES == SIZEOF_pg_attribute_atttypmod : "sizeof typmod"; + + s_perAttributeSize = SIZEOF_FORM_PG_ATTRIBUTE + + ( (PG_VERSION_NUM < 180000) ? 
0 : SIZEOF_CompactAttribute ); + + if ( 4 == SIZEOF_TUPLEDESC_TDREFCOUNT ) + { + s_getAndAddPlain = (b,i) -> + { + int count = b.getInt(OFFSET_TUPLEDESC_TDREFCOUNT); + b.putInt(OFFSET_TUPLEDESC_TDREFCOUNT, count + i); + return count; + }; + } + else + throw new ExceptionInInitializerError( + "Implementation needed for platform with " + + "sizeof TupleDesc->tdrefcount = " +SIZEOF_TUPLEDESC_TDREFCOUNT); + + s_everNull = constant(ByteBuffer.class, null); + + try + { + s_throwInvalidated = lookup().findStatic(TupleDescImpl.class, + "throwInvalidated", methodType(ByteBuffer.class)); + } + catch ( ReflectiveOperationException e ) + { + throw new ExceptionInInitializerError(e); + } + } + + private ByteBuffer bufferIfValid() + { + try + { + return (ByteBuffer)m_tdH.invokeExact(); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /** + * Called after the {@code SwitchPoint} has been invalidated. + *

    + * Only happens for a {@link Cataloged} descriptor, on the PG thread, as a + * consequence of invalidation of the {@link RegClass} that defines it. + */ + void invalidate() + { + assert threadMayEnterPG() : "TupleDescImpl slice thread"; + + m_state.release(); + fill(m_attrs, null); + } + + /** + * Address of the native tuple descriptor (not supported on + * an {@code Ephemeral} instance). + */ + long address() throws SQLException + { + try + { + m_state.pin(); + return m_state.address(); + } + finally + { + m_state.unpin(); + } + } + + /** + * Slice off the portion of the buffer representing one attribute. + *

    + * Only called by {@code AttributeImpl}. + */ + ByteBuffer slice(int index) + { + assert threadMayEnterPG() : "TupleDescImpl slice thread"; + + ByteBuffer td = bufferIfValid(); + + int len = SIZEOF_FORM_PG_ATTRIBUTE; + int off = OFFSET_TUPLEDESC_ATTRS + len * index; + len = ATTRIBUTE_FIXED_PART_SIZE; // TupleDesc hasn't got the whole thing + + /* + * Prior to PG 18, OFFSET_TUPLEDESC_ATTRS from the true beginning of + * the buffer is where the first Form_pg_attribute starts, so + * OFFSET_TUPLEDESC_ATTRS + len * index is the whole story. As of PG 18, + * the value we pick up for OFFSET_TUPLEDESC_ATTRS is actually the + * offset of compact_attrs, and there are natts of those before + * the first Form_pg_attribute, so we really want OFFSET_TUPLEDESC_ATTRS + * + len * index + (natts * SIZEOF_CompactAttribute). That final term is + * constant for this TupleDescriptor, so the constructor has simply set + * the buffer's position() to that value. When we slice off bnew below, + * it begins at that position, so our off as computed above is right. + * + * Java 13 has a slice(off, len) method that may be tidier; it would + * have to be passed off+position(). + */ + ByteBuffer bnew = td.slice(); + bnew.position(off).limit(off + len); + return bnew.slice().order(td.order()); + } + + /** + * Construct a descriptor given a {@code ByteBuffer} windowing a native one. + *

    + * Important: As of PostgreSQL 18, the offset to the first + * {@code Form_pg_attribute} slice is no longer fixed, but depends on the + * number of attributes. The number of attributes is, of course, known here + * in this constructor, but is not easily available to the + * {@link #slice slice} method as it is being called by the + * {@code Attribute} instances being constructed before the {@code m_attrs} + * field is assigned here. + *

    + * This will be handled by advancing the {@code ByteBuffer}'s + * {@code position} here by the necessary offset (which means, in general, + * the position will be some nonsensical place in the buffer, but will yield + * the desired attribute slice when {@code slice} computes an offset + * relative to it. All other accesses to {@code TupleDesc} fields + * must be made using absolute offsets, not position-relative ones. + * @param td ByteBuffer over a native TupleDesc + * @param sp SwitchPoint that the instance will rely on to detect + * invalidation, or null if invalidation will not be possible. + * @param useState whether a native TupleDesc is associated, and therefore a + * State object must be used to release it on unreachability of this object. + * @param ctor constructor to be used for each Attribute instance. (The + * Attribute constructors also determine, indirectly, what SwitchPoint, if + * any, the Attribute instances will rely on to detect invalidation.) + */ + private TupleDescImpl( + ByteBuffer td, SwitchPoint sp, boolean useState, + BiFunction ctor) + { + assert threadMayEnterPG() : "TupleDescImpl construction thread"; + + m_state = useState ? new State(this, td) : null; + + td = asReadOnlyNativeOrder(td); + MethodHandle c = constant(ByteBuffer.class, td); + m_tdH = (null == sp) ? c : sp.guardWithTest(c, s_throwInvalidated); + + Attribute[] attrs = + new Attribute [ (td.capacity() - OFFSET_TUPLEDESC_ATTRS) + / s_perAttributeSize ]; + + /* + * ATTENTION: as described in the javadoc, this leaves the buffer's + * position nonsensical for ordinary purposes, but will produce correct + * results in the slice method above. All other accesses to fields of + * the underlying TupleDesc must be absolute, not relative to this + * position. 
+ */ + if ( PG_VERSION_NUM >= 180000 ) + td.position(attrs.length * SIZEOF_CompactAttribute); + + for ( int i = 0 ; i < attrs.length ; ++ i ) + attrs[i] = ctor.apply(this, 1 + i); + + m_attrs = attrs; + } + + /** + * Constructor used only by OfType to produce a synthetic tuple descriptor + * with one element of a specified RegType. + */ + private TupleDescImpl(RegType type) + { + m_state = null; + m_tdH = s_everNull; + m_attrs = new Attribute[] { new AttributeImpl.OfType(this, type) }; + } + + /** + * Return a {@code TupleDescImpl} given a byte buffer that maps a PostgreSQL + * {@code TupleDesc} structure. + *

    + * This method is called from native code, and assumes the caller has not + * (or not knowingly) obtained the descriptor directly from the type cache, + * so if it is not reference-counted (its count is -1) it will be assumed + * unsafe to directly cache. In that case, if it represents a cataloged + * or interned ("blessed") descriptor, we will get one directly from the + * cache and return that, or if it is ephemeral, we will return one based + * on a defensive copy. + *

    + * If the descriptor is reference-counted, and we use it (that is, we do not + * find an existing version in our cache), we increment the reference count + * here. That does not have the effect of requesting leak warnings + * at the exit of PostgreSQL's current resource owner, because we have every + * intention of hanging on to it longer, until GC or an invalidation + * callback tells us not to. + *

    + * While we can just read the type oid, typmod, and reference count through + * the byte buffer, as long as the only caller is C code, it saves some fuss + * just to have it pass those values. If the C caller has the relation oid + * handy also, it can pass that as well and save a lookup here. + */ + private static TupleDescriptor fromByteBuffer( + ByteBuffer td, int typoid, int typmod, int reloid, int refcount) + { + TupleDescriptor.Interned result; + + td.order(nativeOrder()); + + /* + * Case 1: if the type is not RECORD, it's a cataloged composite type. + * Build an instance of Cataloged (unless the implicated RegClass has + * already got one). + */ + if ( RECORD.oid() != typoid ) + { + RegTypeImpl t = + (RegTypeImpl)Factory.formMaybeModifiedType(typoid, typmod); + + RegClassImpl c = + (RegClassImpl)( InvalidOid == reloid ? t.relation() + : Factory.staticFormObjectId(RegClass.CLASSID, reloid) ); + + assert c.isValid() : "Cataloged row type without matching RegClass"; + + if ( -1 == refcount ) // don't waste time on an ephemeral copy. + return c.tupleDescriptor(); // just go get the real one. + + TupleDescriptor.Interned[] holder = c.m_tupDescHolder; + if ( null != holder ) + { + result = holder[0]; + assert null != result : "disagree whether RegClass has desc"; + return result; + } + + holder = new TupleDescriptor.Interned[1]; + /* + * The constructor assumes the reference count has already been + * incremented to account for the reference constructed here. + */ + s_getAndAddPlain.applyAsInt(td, 1); + holder[0] = result = new Cataloged(td, c); + c.m_tupDescHolder = holder; + return result; + } + + /* + * Case 2: if RECORD with a modifier, it's an interned tuple type. + * Build an instance of Blessed (unless the implicated RegType has + * already got one). + */ + if ( -1 != typmod ) + { + RegTypeImpl.Blessed t = + (RegTypeImpl.Blessed)RECORD.modifier(typmod); + + if ( -1 == refcount ) // don't waste time on an ephemeral copy. 
+ return t.tupleDescriptor(); // just go get the real one. + + TupleDescriptor.Interned[] holder = t.m_tupDescHolder; + if ( null != holder ) + { + result = holder[0]; + assert null != result : "disagree whether RegType has desc"; + return result; + } + + holder = new TupleDescriptor.Interned[1]; + /* + * The constructor assumes the reference count has already been + * incremented to account for the reference constructed here. + */ + s_getAndAddPlain.applyAsInt(td, 1); + holder[0] = result = new Blessed(td, t); + t.m_tupDescHolder = holder; + return result; + } + + /* + * Case 3: it's RECORD with no modifier, an ephemeral tuple type. + * Build an instance of Ephemeral unconditionally, defensively copying + * the descriptor if it isn't reference-counted (which we assert it + * isn't). + */ + assert -1 == refcount : "can any ephemeral TupleDesc be refcounted?"; + return new Ephemeral(td); + } + + /** + * Copy a byte buffer (which may refer to native-managed memory) to one + * with JVM-managed backing memory. + *

    + * Acquiescing to JDK-8318966, it still has to be a direct buffer to avoid + * exceptions when checking alignment. But it will use off-heap memory + * managed by the JVM (reclaimed when the buffer is unreachable), and so + * will not depend on the lifespan of the source buffer. + */ + private static ByteBuffer asManagedNativeOrder(ByteBuffer bb) + { + ByteBuffer copy = + ByteBuffer.allocateDirect(bb.capacity()).put(bb).flip(); + return copy.order(nativeOrder()); + } + + @Override + public Attribute sqlGet(int index) + { + bufferIfValid(); // just for the check + return m_attrs[index - 1]; + } + + /* + * AbstractList implementation + */ + @Override + public int size() + { + return m_attrs.length; + } + + @Override + public Attribute get(int index) + { + bufferIfValid(); // just for the check + return m_attrs[index]; + } + + @Override // Collection + public boolean contains(Object o) + { + if ( ! (o instanceof AttributeImpl) ) + return false; + + AttributeImpl ai = (AttributeImpl)o; + int idx = ai.subId() - 1; + return ( idx < m_attrs.length ) && ( ai == m_attrs[idx] ); + } + + @Override // List + public int indexOf(Object o) + { + if ( ! contains(o) ) + return -1; + + return ((Attribute)o).subId() - 1; + } + + @Override // List + public int lastIndexOf(Object o) + { + return indexOf(o); + } + + /** + * An abstract base shared by the {@code Blessed} and {@code Ephemeral} + * concrete classes, which are populated with + * {@code AttributeImpl.Transient} instances. + *

    + * Supplies their implementation of {@code contains}. {@code OfType} is also + * populated with {@code AttributeImpl.Transient} instances, but it has an + * even more trivial {@code contains} method. + */ + abstract static class NonCataloged extends TupleDescImpl + { + NonCataloged( + ByteBuffer td, SwitchPoint sp, boolean useState, + BiFunction ctor) + { + super(td, sp, useState, ctor); + } + + @Override // Collection + public boolean contains(Object o) + { + if ( ! (o instanceof AttributeImpl.Transient) ) + return false; + + AttributeImpl ai = (AttributeImpl)o; + return this == ai.containingTupleDescriptor(); + } + } + + /** + * A tuple descriptor for a row type that appears in the catalog. + */ + static class Cataloged extends TupleDescImpl implements Interned + { + private final RegClass m_relation;// using its SwitchPoint, keep it live + + Cataloged(ByteBuffer td, RegClassImpl c) + { + /* + * Invalidation of a Cataloged tuple descriptor happens with the + * SwitchPoint attached to the RegClass. Every Cataloged descriptor + * from the cache had better be reference-counted, so unconditional + * true is passed for useState. + */ + super( + td, c.cacheSwitchPoint(), true, + (o, i) -> CatalogObjectImpl.Factory.formAttribute( + c.oid(), i, () -> new AttributeImpl.Cataloged(c)) + ); + + m_relation = c; // we need it alive for its SwitchPoint + } + + @Override + public RegType rowType() + { + return m_relation.type(); + } + } + + /** + * A tuple descriptor that is not in the catalog, but has been interned and + * can be identified by {@code RECORD} and a distinct type modifier for the + * life of the backend. 
+ */ + static class Blessed extends NonCataloged implements Interned + { + private final RegType m_rowType; // using its SwitchPoint, keep it live + + Blessed(ByteBuffer td, RegTypeImpl t) + { + /* + * A Blessed tuple descriptor has no associated RegClass, and is + * expected to live for the life of the backend without invalidation + * events, so we pass null for the SwitchPoint, and a constructor + * that will build AttributeImpl.Transient instances. + * + * If the caller, fromByteBuffer, saw a non-reference-counted + * descriptor, it grabbed one straight from the type cache instead. + * But sometimes, the one in PostgreSQL's type cache is + * non-reference counted, and that's ok, because that one will be + * good for the life of the process. So we do need to check, in this + * constructor, whether to pass true or false for useState. + * (Checking with getAndAddPlain(0) is a bit goofy, but it was + * already set up, matched to the field width, does the job.) + */ + super( + td, null, -1 != s_getAndAddPlain.applyAsInt(td, 0), + (o, i) -> new AttributeImpl.Transient(o, i) + ); + + m_rowType = t; + } + + @Override + public RegType rowType() + { + return m_rowType; + } + } + + /** + * A tuple descriptor that is not in the catalog, has not been interned, and + * is useful only so long as a reference is held. 
+ */ + static class Ephemeral extends NonCataloged + implements TupleDescriptor.Ephemeral + { + private Ephemeral(ByteBuffer td) + { + super( + asManagedNativeOrder(td), null, false, + (o, i) -> new AttributeImpl.Transient(o, i) + ); + } + + @Override + public RegType rowType() + { + return RECORD; + } + + @Override + public Interned intern() + { + TupleDescImpl sup = (TupleDescImpl)this; // bufferIfValid is private + + return doInPG(() -> + { + ByteBuffer td = sup.bufferIfValid(); + + ByteBuffer direct = ByteBuffer.allocateDirect( + td.capacity()).put(td.rewind()); + + int assigned = _assign_record_type_typmod(direct); + + /* + * That will have saved in the typcache an authoritative + * new copy of the descriptor. It will also have written + * the assigned modifier into the 'direct' copy of this + * descriptor, but this is still an Ephemeral instance, + * the wrong Java type. We need to return a new instance + * over the authoritative typcache copy. + */ + return RECORD.modifier(assigned).tupleDescriptor(); + }); + } + } + + /** + * A specialized, synthetic tuple descriptor representing a single column + * of the given {@code RegType}. + */ + static class OfType extends TupleDescImpl + implements TupleDescriptor.Ephemeral + { + OfType(RegType type) + { + super(type); + } + + @Override + public RegType rowType() + { + return RECORD; + } + + @Override + public Interned intern() + { + throw notyet(); + } + + @Override // Collection + public boolean contains(Object o) + { + return get(0) == o; + } + } + + /** + * Based on {@code SingleFreeTupleDesc}, but really does + * {@code ReleaseTupleDesc}. + *

    + * Decrements the reference count and, if it was 1 before decrementing, + * proceeds to the superclass method to free the descriptor. + */ + private static class State + extends DualState.SingleFreeTupleDesc + { + private final IntSupplier m_getAndDecrPlain; + + private State(TupleDescImpl referent, ByteBuffer td) + { + super(referent, null, addressOf(td)); + /* + * The only reference to this non-readonly ByteBuffer retained here + * is what's bound into this getAndDecr for the reference count. + */ + m_getAndDecrPlain = () -> s_getAndAddPlain.applyAsInt(td, -1); + } + + @Override + protected void javaStateUnreachable(boolean nativeStateLive) + { + if ( nativeStateLive && 1 == m_getAndDecrPlain.getAsInt() ) + super.javaStateUnreachable(nativeStateLive); + } + + private void release() + { + releaseFromJava(); + } + + private long address() + { + return guardedLong(); + } + } + + static Ephemeral synthesizeDescriptor( + List types, List names, BitSet selected) + { + int n = types.size(); + IntFunction toName; + if ( null == names ) + toName = i -> ""; + else + { + assert names.size() == n; + toName = i -> names.get(i).nonFolded(); + } + + if ( null != selected ) + assert selected.length() <= n; + else + { + selected = new BitSet(n); + selected.set(0, n); + } + + CharsetEncoder enc = SERVER_ENCODING.newEncoder(); + float maxbpc = enc.maxBytesPerChar(); + int alignmentModulus = ALIGNOF_INT; + int maxToAlign = alignmentModulus - 1; + int alignmask = maxToAlign; + int sizeTypeTypmodBool = 2 * Integer.BYTES + 1; + + int size = + selected.stream() + .map(i -> toName.apply(i).length()) + .map(len -> + sizeTypeTypmodBool + (int)ceil(len*maxbpc) + 1 + maxToAlign) + .reduce(0, Math::addExact); + + ByteBuffer direct = + ByteBuffer.allocateDirect(size) + .alignedSlice(ALIGNOF_INT).order(nativeOrder()); + + selected.stream().forEachOrdered(i -> + { + int pos = direct.position(); + int misalign = direct.alignmentOffset(pos, alignmentModulus); + pos += - misalign & alignmask; 
+ direct.position(pos); + + RegType t = types.get(i); + direct.putInt(t.oid()).putInt(t.modifier()); + + /* + * The C code will want a value for attndims, about which the docs + * for pg_attribute say: Presently, the number of dimensions of an + * array is not enforced, so any nonzero value effectively means + * "it's an array". + */ + direct.put(t.element().isValid() ? (byte)1 : (byte)0); + + pos = direct.position(); + CharBuffer cb = CharBuffer.wrap(toName.apply(i)); + CoderResult rslt = enc.encode(cb, direct, true); + if ( rslt.isUnderflow() ) + rslt = enc.flush(direct); + if ( ! rslt.isUnderflow() ) + throw new AssertionError("name to server encoding: " + rslt); + enc.reset(); + direct.put((byte)'\0'); + while ( '\0' != direct.get(pos) ) + ++ pos; + if ( ++ pos != direct.position() ) + throw new AssertionError("server encoding of name has NUL"); + }); + + int c = selected.cardinality(); + + return new Ephemeral(doInPG(() -> _synthesizeDescriptor(c, direct))); + } + + /** + * Call the PostgreSQL {@code typcache} function of the same name, but + * return the assigned typmod rather than {@code void}. + */ + private static native int _assign_record_type_typmod(ByteBuffer bb); + + private static native ByteBuffer _synthesizeDescriptor(int n,ByteBuffer bb); +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/TupleList.java b/pljava/src/main/java/org/postgresql/pljava/pg/TupleList.java new file mode 100644 index 000000000..f8b2ad878 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/TupleList.java @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.nio.ByteBuffer; +import java.nio.IntBuffer; +import java.nio.LongBuffer; + +import java.util.Iterator; +import java.util.List; +import java.util.RandomAccess; +import java.util.Spliterator; +import static java.util.Spliterator.IMMUTABLE; +import static java.util.Spliterator.NONNULL; +import static java.util.Spliterator.ORDERED; +import static java.util.Spliterator.SIZED; +import java.util.Spliterators.AbstractSpliterator; + +import java.util.function.Consumer; +import java.util.function.IntToLongFunction; + +import org.postgresql.pljava.internal.AbstractNoSplitList; +import + org.postgresql.pljava.internal.AbstractNoSplitList.IteratorNonSpliterator; +import org.postgresql.pljava.internal.DualState; +import org.postgresql.pljava.internal.DualState.Pinned; +import org.postgresql.pljava.internal.Invocation; + +import org.postgresql.pljava.model.MemoryContext; // for javadoc +import org.postgresql.pljava.model.TupleTableSlot; + +import static org.postgresql.pljava.pg.DatumUtils.asReadOnlyNativeOrder; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_DATUM; + +/* + * Plan: a group (maybe a class or interface with nested classes) of + * implementations that look like lists of TupleTableSlot over different kinds + * of result: + * - SPITupleTable (these: a tupdesc, and vals array of HeapTuple pointers) + * - CatCList (n_members and a members array of CatCTup pointers, where each + * CatCTup has a HeapTupleData and HeapTupleHeader nearly but not quite + * adjacent), must find tupdesc + * - Tuplestore ? (is this visible, or concealed behind SPI's cursors?) + * - Tuplesort ? (") + * - SFRM results? (Ah, SFRM_Materialize makes a Tuplestore.) 
+ * - will we ever see a "tuple table" ("which is a List of independent + * TupleTableSlots")? + */ + +/** + * Superinterface of one or more classes that can present a sequence of tuples, + * working from the forms in which PostgreSQL can present them. + */ +public interface TupleList extends List, AutoCloseable +{ + @Override + default void close() + { + } + + TupleList EMPTY = new Empty(); + + /** + * Returns a {@code Spliterator} that never splits. + *

    + * Because a {@code TupleList} is typically built on a single + * {@code TupleTableSlot} holding each tuple in turn, there can be no + * thought of parallel stream execution. + *

    + * Also, because a {@code TupleList} iterator may return the same + * {@code TupleTableSlot} repeatedly, stateful {@code Stream} operations + * such as {@code distinct} or {@code sorted} will make no sense applied + * to those objects. + */ + @Override + default public Spliterator spliterator() + { + return new IteratorNonSpliterator<>(iterator(), size(), + IMMUTABLE | NONNULL | ORDERED | SIZED); + } + + /** + * A permanently-empty {@link TupleList TupleList}. + */ + final static class Empty + extends AbstractNoSplitList implements TupleList + { + private Empty() + { + } + + @Override + public int size() + { + return 0; + } + + @Override + public TupleTableSlot get(int i) + { + throw new IndexOutOfBoundsException( + "Index " + i + " out of bounds for length 0"); + } + } + + /** + * A {@code TupleList} constructed atop a PostgreSQL {@code SPITupleTable}. + *

    + * The native table is allocated in a {@link MemoryContext} that will be + * deleted when {@code SPI_finish} is called on exit of the current + * {@code Invocation}. This class merely maps the native tuple table in + * place, and so will prevent later access. + */ + class SPI extends AbstractNoSplitList + implements TupleList, RandomAccess + { + private final State state; + private final TupleTableSlotImpl ttSlot; + private final int nTuples; + private final IntToLongFunction indexToPointer; + + private static class State + extends DualState.SingleSPIfreetuptable + { + private State(SPI r, long tt) + { + /* + * Each SPITupleTable is constructed in a context of its own + * that is a child of the SPI Proc context, and is used by + * SPI_freetuptable to efficiently free it. By rights, that + * context should be the Lifespan here, but that member of + * SPITupleTable is declared a private member "not intended for + * external callers" in the documentation. + * + * If that admonition is to be obeyed, a next-best choice is the + * current Invocation. As long as SPI connection continues to be + * managed automatically and disconnected when the invocation + * exits (and it makes its lifespanRelease call before + * disconnecting SPI, which it does), it should be safe enough. + */ + super(r, Invocation.current(), tt); + } + + private void close() + { + unlessReleased(() -> + { + releaseFromJava(); + }); + } + } + + /** + * Constructs an instance over an {@code SPITupleTable}. + * @param slot a TupleTableSlotImpl to use. The constructed object's + * iterator will return this slot repeatedly, with each tuple in turn + * stored into it. + * @param spiStructP address of the SPITupleTable structure itself, + * saved here to be freed if this object is closed or garbage-collected. + * @param htarray ByteBuffer over the consecutive HeapTuple pointers at + * spiStructP->vals. 
+ */ + SPI(TupleTableSlotImpl slot, long spiStructP, ByteBuffer htarray) + { + ttSlot = slot; + htarray = asReadOnlyNativeOrder(htarray); + state = new State(this, spiStructP); + + if ( 8 == SIZEOF_DATUM ) + { + LongBuffer tuples = htarray.asLongBuffer(); + nTuples = tuples.capacity(); + indexToPointer = tuples::get; + return; + } + else if ( 4 == SIZEOF_DATUM ) + { + IntBuffer tuples = htarray.asIntBuffer(); + nTuples = tuples.capacity(); + indexToPointer = tuples::get; + return; + } + else + throw new AssertionError("unsupported SIZEOF_DATUM"); + } + + @Override + public TupleTableSlot get(int index) + { + try ( Pinned p = state.pinnedNoChecked() ) + { + ttSlot.store_heaptuple( + indexToPointer.applyAsLong(index), false); + return ttSlot; + } + } + + @Override + public int size() + { + return nTuples; + } + + @Override + public void close() + { + state.close(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/TupleTableSlotImpl.java b/pljava/src/main/java/org/postgresql/pljava/pg/TupleTableSlotImpl.java new file mode 100644 index 000000000..aac490a6c --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/TupleTableSlotImpl.java @@ -0,0 +1,1117 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg; + +import java.lang.annotation.Native; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.IntBuffer; +import java.nio.LongBuffer; + +import java.util.List; + +import java.util.function.IntUnaryOperator; + +import java.sql.SQLException; + +import static java.util.Objects.checkIndex; +import static java.util.Objects.requireNonNull; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.Adapter.As; +import org.postgresql.pljava.Adapter.AsLong; +import org.postgresql.pljava.Adapter.AsDouble; +import org.postgresql.pljava.Adapter.AsInt; +import org.postgresql.pljava.Adapter.AsFloat; +import org.postgresql.pljava.Adapter.AsShort; +import org.postgresql.pljava.Adapter.AsChar; +import org.postgresql.pljava.Adapter.AsByte; +import org.postgresql.pljava.Adapter.AsBoolean; + +import org.postgresql.pljava.Lifespan; + +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.adt.spi.Datum.Accessor; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import org.postgresql.pljava.internal.DualState; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegClass; +import org.postgresql.pljava.model.TupleDescriptor; +import org.postgresql.pljava.model.TupleTableSlot; + +import static org.postgresql.pljava.pg.CatalogObjectImpl.notyet; + +import static org.postgresql.pljava.pg.DatumUtils.mapFixedLength; +import static org.postgresql.pljava.pg.DatumUtils.mapCString; +import static org.postgresql.pljava.pg.DatumUtils.asAlwaysCopiedDatum; +import static org.postgresql.pljava.pg.DatumUtils.asReadOnlyNativeOrder; 
+import static org.postgresql.pljava.pg.DatumUtils.inspectVarlena; +import static org.postgresql.pljava.pg.DatumUtils.Accessor.forDeformed; +import static org.postgresql.pljava.pg.DatumUtils.Accessor.forHeap; + +import static org.postgresql.pljava.pg.ModelConstants.HEAPTUPLESIZE; + +import static + org.postgresql.pljava.pg.CatalogObjectImpl.Factory.staticFormObjectId; + +import static org.postgresql.pljava.pg.ModelConstants.*; + +/* + * bool always 1 byte (see c.h). + * + * From PG 12: + * type, flags, nvalid, tupleDescriptor, *values, *isnull, mcxt, tid, tableOid + * flags: EMPTY SHOULDFREE SLOW FIXED + * + * Pre-PG 12 (= for fields present in both): + * type + * individual bool flags + * isempty, shouldFree, shouldFreeMin, slow, fixedTupleDescriptor + * HeapTuple tuple + * =tupleDescriptor + * =mcxt + * buffer + * =nvalid + * =*values + * =*isnull + * mintuple, minhdr, off + * + * tableOid is tuple->t_tableOid, tid is tuple->t_self. + * Can a tuple from a different descendant table then get loaded in the slot? + * Answer: yes. So tableOid can change per tuple. (See ExecStoreHeapTuple.) + * Fetching the tableOid is easy starting with PG 12 (it's right in the TTS + * struct). For PG < 12, a native method will be needed to inspect 'tuple' (or + * just return a ByteBuffer windowing it, to be inspected here). That native + * method will not need to be serialized onto the PG thread, as it only looks at + * an existing struct in memory. + * FWIW, *HeapTuple is a HeapTupleData, and a HeapTupleData has a t_len. + * heap_copytuple allocates HEAPTUPLESIZE + tuple->t_len. The HEAPTUPLESIZE + * covers the HeapTupleData that precedes the HeapTupleHeader; from the start + * of that it's t_len. They could be allocated separately but typically aren't. 
+ * (A HeapTuple in the form of a Datum is without the HeapTupleData part; see + * ExecStoreHeapTupleDatum, which just puts a transient HeapTupleData struct + * on the stack to point to the thing during the operation, deforms it, and + * stores it in virtual form.) + * + * (Also FWIW, to make a MinimalTuple from a HeapTuple, subtract + * MINIMAL_TUPLE_OFFSET from the latter's t_len; the result is the amount to + * allocate and the amount to copy and what goes in the result's t_len.) + * + * For now: support only FIXED/fixedTupleDescriptor slots. For those, the native + * code can create ByteBuffers and pass them all at once to the constructor for: + * the TTS struct itself, the values array, the isnull array, and the TupleDesc + * (this constructor can pass that straight to the TupleDesc constructor). If it + * later makes sense to support non-fixed slots, that will mean checking for + * changes, and possibly creating a new TupleDesc and new values/isnull buffers + * on the fly. + * + * A PostgreSQL TupleTableSlot can be configured with TTSOpsVirtual or + * TTSOpsHeapTuple (or others, not contemplated here). The Heap and Deformed + * subclasses here don't exactly mirror that distinction. What they are really + * distinguishing is which flavor of DatumUtils.Accessor will be used. + * + * That is, the Deformed subclass here relies on getsomeattrs and the + * tts_values/tts_isnull arrays of the slot (which are in fact available for any + * flavor of slot). The Heap subclass here overloads m_values and m_isnull to + * directly map the tuple data, rather than relying on tts_values and + * tts_isnull, so it can only work for slot flavors where such regions exist in + * the expected formats. 
In other words, a Deformed can be constructed over any + * flavor of PostgreSQL slot (and is the only choice if the slot is + * TTSOpsVirtual); a Heap is an alternative choice only available if the + * underlying slot is known to have the expected null bitmap and data layout, + * and may save the overhead of populating tts_isnull and tts_values arrays from + * the underlying tuple. It would still be possible in principle to exploit + * those arrays in the Heap case if they have been populated, to avoid + * repeatedly walking the tuple, but the Heap implementation here, as of this + * writing, doesn't. Perhaps some refactoring / renaming is needed, so Heap has + * its own instance fields for the directly accessed tuple regions, and the + * m_values / m_isnull in the superclass always map the tts_values / tts_isnull + * arrays? + */ + +/** + * Implementation of {@link TupleTableSlot TupleTableSlot}. + */ +public abstract class TupleTableSlotImpl +implements TupleTableSlot +{ + @Native private static final int OFFSET_HeapTupleData_t_len = 0; + @Native private static final int OFFSET_HeapTupleData_t_tableOid = 12; + + @Native private static final int SIZEOF_HeapTupleData_t_len = 4; + @Native private static final int SIZEOF_HeapTupleData_t_tableOid = 4; + + @Native private static final int OFFSET_HeapTupleHeaderData_t_infomask2= 18; + @Native private static final int OFFSET_HeapTupleHeaderData_t_infomask = 20; + @Native private static final int OFFSET_HeapTupleHeaderData_t_hoff = 22; + @Native private static final int OFFSET_HeapTupleHeaderData_t_bits = 23; + + @Native private static final int SIZEOF_HeapTupleHeaderData_t_infomask2 = 2; + @Native private static final int SIZEOF_HeapTupleHeaderData_t_infomask = 2; + @Native private static final int SIZEOF_HeapTupleHeaderData_t_hoff = 1; + + @Native private static final int HEAP_HASNULL = 1; // lives in infomask + @Native private static final int HEAP_HASEXTERNAL = 4; // lives in infomask + @Native private static final 
int HEAP_NATTS_MASK = 0x07FF; // infomask2 + + @Native private static final int OFFSET_NullableDatum_value = 0; + + protected final ByteBuffer m_tts; + /* These can be final only because non-FIXED slots aren't supported yet. */ + protected final TupleDescriptor m_tupdesc; + protected final ByteBuffer m_values; + protected final ByteBuffer m_isnull; + protected final Accessor[] m_accessors; + protected final Adapter[] m_adapters; + + /* + * Experimenting with yet another pattern for use of DualState. We will + * keep one here and be agnostic about its exact subtype. Methods that + * install a tuple in the slot will be expected to provide a DualState + * instance with this slot as its referent and encapsulating whatever object + * and behavior it needs for cleaning up. Pin/unpin should be done at + * outermost API-exposed methods, not by internal ones. + */ + DualState m_state; + + TupleTableSlotImpl( + ByteBuffer tts, TupleDescriptor tupleDesc, + ByteBuffer values, ByteBuffer isnull) + { + m_tts = null == tts ? null : asReadOnlyNativeOrder(tts); + m_tupdesc = tupleDesc; + /* + * From the Deformed constructor, this is the array of Datum elements. + * From the Heap constructor, it may be null. + */ + m_values = null == values ? null : asReadOnlyNativeOrder(values); + /* + * From the Deformed constructor, this is an array of one-byte booleans. + * From the Heap constructor, it may be null. + */ + m_isnull = null == isnull ? null : asReadOnlyNativeOrder(isnull); + m_adapters = new Adapter [ m_tupdesc.size() ]; + + @SuppressWarnings("unchecked") + Object dummy = + m_accessors = new Accessor [ m_adapters.length ]; + + /* + * A subclass constructor other than Deformed could pass null for tts, + * provided it overrides the inherited relation(), which relies on it. + */ + if ( null == m_tts ) + return; + + /* + * Verify (for now) that this is a FIXED TupleTableSlot. 
+ * JIT will specialize to the test that applies in this PG version + */ + if ( NOCONSTANT != OFFSET_TTS_FLAGS ) + { + if ( 0 != (TTS_FLAG_FIXED & m_tts.getChar(OFFSET_TTS_FLAGS)) ) + return; + } + else if ( NOCONSTANT != OFFSET_TTS_FIXED ) + { + if ( 0 != m_tts.get(OFFSET_TTS_FIXED) ) + return; + } + else + throw new UnsupportedOperationException( + "Cannot construct non-fixed TupleTableSlot (PG < 11)"); + throw new UnsupportedOperationException( + "Cannot construct non-fixed TupleTableSlot"); + } + + static Deformed newDeformed( + ByteBuffer tts, TupleDescriptor tupleDesc, + ByteBuffer values, ByteBuffer isnull) + { + return new Deformed(tts, tupleDesc, values, isnull); + } + + static NullableDatum newNullableDatum( + TupleDescriptor tupleDesc, ByteBuffer values) + { + return new NullableDatum(tupleDesc, values); + } + + /** + * Allocate a 'light' (no native TupleTableSlot struct) + * {@code TupleTableSlotImpl.Heap} object, given a tuple descriptor and + * a byte buffer that maps a single-chunk-allocated {@code HeapTuple} (one + * where the {@code HeapTupleHeader} directly follows the + * {@code HeapTupleData}) that's to be passed to {@code heap_freetuple} when + * no longer needed. + *

    + * If an optional {@code Lifespan} is supplied, the slot will be linked + * to it and invalidated when it expires. Otherwise, the tuple will be + * assumed allocated in an immortal memory context and freed upon the + * {@code javaStateUnreachable} or {@code javaStateReleased} events. + */ + static Heap heapTupleGetLightSlot( + TupleDescriptor td, ByteBuffer ht, Lifespan lifespan) + { + return heapTupleGetLightSlot(td, ht, lifespan, true); + } + + /** + * Allocate a 'light' (no native TupleTableSlot struct) + * {@code TupleTableSlotImpl.Heap} object, given a tuple descriptor and + * a pointer to a single-chunk-allocated {@code HeapTuple} (one where the + * {@code HeapTupleHeader} directly follows the {@code HeapTupleData}) that + * is not to be freed when no longer needed. + *

    + * The first and, so far, only use of this method is from + * {@code TriggerDataImpl} to present the old and/or new tuples for + * inspection in a trigger function. The Lifespan passed should persist + * no longer than the current function invocation, and no special action + * will be taken to free the tuple itself, which belongs to PostgreSQL. + */ + static Heap heapTupleGetLightSlotNoFree( + TupleDescriptor td, long p, Lifespan lifespan) + { + ByteBuffer ht = doInPG(() -> _mapHeapTuple(p)); + return heapTupleGetLightSlot(td, ht, lifespan, false); + } + + static Heap heapTupleGetLightSlot( + TupleDescriptor td, ByteBuffer ht, Lifespan lifespan, boolean free) + { + ht = asReadOnlyNativeOrder(ht); + + assert 4 == SIZEOF_HeapTupleData_t_len + : "sizeof HeapTupleData.t_len changed"; + int len = ht.getInt(OFFSET_HeapTupleData_t_len); + + assert ht.capacity() == len + HEAPTUPLESIZE + : "unexpected length for single-chunk HeapTuple"; + + int relOid = ht.getInt(OFFSET_HeapTupleData_t_tableOid); + + boolean disallowExternal = true; + + /* + * Following offsets are relative to the HeapTupleHeaderData struct. + * Could slice off a new ByteBuffer from HEAPTUPLESIZE here and use + * the offsets directly, but we'll just add HEAPTUPLESIZE to the offsets + * and save constructing that intermediate object. We will slice off + * values and nulls ByteBuffers further below. 
+ */ + + assert 2 == SIZEOF_HeapTupleHeaderData_t_infomask + : "sizeof HeapTupleHeaderData.t_infomask changed"; + short infomask = ht.getShort( + HEAPTUPLESIZE + OFFSET_HeapTupleHeaderData_t_infomask); + + assert 2 == SIZEOF_HeapTupleHeaderData_t_infomask2 + : "sizeof HeapTupleHeaderData.t_infomask2 changed"; + short infomask2 = ht.getShort( + HEAPTUPLESIZE + OFFSET_HeapTupleHeaderData_t_infomask2); + + assert 1 == SIZEOF_HeapTupleHeaderData_t_hoff + : "sizeof HeapTupleHeaderData.t_hoff changed"; + int hoff = + Byte.toUnsignedInt(ht.get( + HEAPTUPLESIZE + OFFSET_HeapTupleHeaderData_t_hoff)); + + if ( disallowExternal && 0 != ( infomask & HEAP_HASEXTERNAL ) ) + throw notyet("heapTupleGetLightSlot with external values in tuple"); + + int voff = hoff + HEAPTUPLESIZE; + + ByteBuffer values = mapFixedLength(ht, voff, ht.capacity() - voff); + ByteBuffer nulls = null; + + if ( 0 != ( infomask & HEAP_HASNULL ) ) + { + int nlen = ( td.size() + 7 ) / 8; + if ( nlen + OFFSET_HeapTupleHeaderData_t_bits > hoff ) + { + int attsReallyPresent = infomask2 & HEAP_NATTS_MASK; + nlen = ( attsReallyPresent + 7 ) / 8; + assert nlen + OFFSET_HeapTupleHeaderData_t_bits <= hoff + : "heap null bitmap length"; + } + nulls = mapFixedLength(ht, + HEAPTUPLESIZE + OFFSET_HeapTupleHeaderData_t_bits, nlen); + } + + Heap slot = new Heap( + staticFormObjectId(RegClass.CLASSID, relOid), td, values, nulls); + + slot.m_state = + free + ? new HTChunkState(slot, lifespan, ht) + : new BBOnlyState(slot, lifespan, ht); + + return slot; + } + + /** + * Return the index into {@code m_accessors} for this attribute, + * ensuring the elements at that index of {@code m_accessors} and + * {@code m_adapters} are set, or throw an exception if + * this {@code Attribute} doesn't belong to this slot's + * {@code TupleDescriptor}, or if the supplied {@code Adapter} can't + * fetch it. + *

    + * Most tests are skipped if the index is in range and {@code m_adapters} + * at that index already contains the supplied {@code Adapter}. + */ + protected int toIndex(Attribute att, Adapter adp) + { + int idx = att.subId() - 1; + + if ( 0 > idx || idx >= m_adapters.length + || m_adapters [ idx ] != requireNonNull(adp) ) + { + if ( ! m_tupdesc.contains(att) ) + { + throw new IllegalArgumentException( + "attribute " + att + " does not go with slot " + this); + } + + memoize(idx, att, adp); + } + + return idx; + } + + /** + * Return the {@code Attribute} at this index into the associated + * {@code TupleDescriptor}, + * ensuring the elements at that index of {@code m_accessors} and + * {@code m_adapters} are set, or throw an exception if + * this {@code Attribute} doesn't belong to this slot's + * {@code TupleDescriptor}, or if the supplied {@code Adapter} can't + * fetch it. + *

    + * Most tests are skipped if the index is in range and {@code m_adapters} + * at that index already contains the supplied {@code Adapter}. + */ + protected Attribute fromIndex(int idx, Adapter adp) + { + Attribute att = m_tupdesc.get(idx); + if ( m_adapters [ idx ] != requireNonNull(adp) ) + memoize(idx, att, adp); + return att; + } + + /** + * Called after verifying that att belongs to this slot's + * {@code TupleDescriptor}, that idx is its corresponding + * (zero-based) index, and that {@code m_adapters[idx]} does not already + * contain adp. + */ + protected void memoize(int idx, Attribute att, Adapter adp) + { + if ( ! adp.canFetch(att) ) + { + throw new IllegalArgumentException(String.format( + "cannot fetch attribute %s of type %s using %s", + att, att.type(), adp)); + } + + m_adapters [ idx ] = adp; + + if ( null == m_accessors [ idx ] ) + { + boolean byValue = att.byValue(); + short length = att.length(); + + m_accessors [ idx ] = selectAccessor(byValue, length); + } + } + + /** + * Selects appropriate {@code Accessor} for this {@code Layout} given + * byValue and length. + */ + protected abstract Accessor selectAccessor( + boolean byValue, short length); + + /** + * Returns the previously-selected {@code Accessor} for the item at the + * given index. + *

    + * The indirection's cost may be regrettable, but it simplifies the + * implementation of {@code Indexed}. + */ + protected Accessor accessor(int idx) + { + return m_accessors[idx]; + } + + /** + * Only to be called after idx is known valid + * from calling {@code toIndex}. + */ + protected abstract boolean isNull(int idx); + + /** + * Only to be called after idx is known valid + * from calling {@code toIndex}. + */ + protected abstract int toOffset(int idx); + + /** + * Implementation of {@link TupleTableSlot TupleTableSlot} for deformed + * layout. + */ + static class Deformed extends TupleTableSlotImpl + { + Deformed( + ByteBuffer tts, TupleDescriptor tupleDesc, + ByteBuffer values, ByteBuffer isnull) + { + super(tts, tupleDesc, values, requireNonNull(isnull)); + } + + @Override + protected int toIndex(Attribute att, Adapter adp) + { + int idx = super.toIndex(att, adp); + + getsomeattrs(idx); + return idx; + } + + @Override + protected Attribute fromIndex(int idx, Adapter adp) + { + Attribute att = super.fromIndex(idx, adp); + + getsomeattrs(idx); + return att; + } + + @Override + protected Accessor selectAccessor( + boolean byValue, short length) + { + return forDeformed(byValue, length); + } + + @Override + protected boolean isNull(int idx) + { + return 0 != m_isnull.get(idx); + } + + @Override + protected int toOffset(int idx) + { + return idx * SIZEOF_DATUM; + } + + /** + * Like PostgreSQL's {@code slot_getsomeattrs}, but {@code idx} here is + * zero-based (one will be added when it is passed to PostgreSQL). + */ + private void getsomeattrs(int idx) + { + int nValid; + if ( 2 == SIZEOF_TTS_NVALID ) + nValid = m_tts.getShort(OFFSET_TTS_NVALID); + else + { + assert 4 == SIZEOF_TTS_NVALID : "unexpected SIZEOF_TTS_NVALID"; + nValid = m_tts.getInt(OFFSET_TTS_NVALID); + } + if ( nValid <= idx ) + doInPG(() -> _getsomeattrs(m_tts, 1 + idx)); + } + } + + /** + * Implementation of {@link TupleTableSlot TupleTableSlot} for heap + * layout. 
+ */ + static class Heap extends TupleTableSlotImpl + { + protected final ByteBuffer m_hValues; + protected final ByteBuffer m_hIsNull; + protected final RegClass m_relation; + + Heap( + RegClass relation, TupleDescriptor tupleDesc, + ByteBuffer hValues, ByteBuffer hIsNull) + { + super(null, tupleDesc, null, null); + m_relation = requireNonNull(relation); + m_hValues = requireNonNull(hValues); + m_hIsNull = hIsNull; + } + + @Override + protected Accessor selectAccessor( + boolean byValue, short length) + { + return forHeap(byValue, length); + } + + @Override + protected boolean isNull(int idx) + { + if ( null == m_hIsNull ) + return false; + + // XXX we could have actual natts < m_tupdesc.size() + return 0 == ( m_hIsNull.get(idx >>> 3) & (1 << (idx & 7)) ); + } + + @Override + protected int toOffset(int idx) + { + int offset = 0; + List atts = m_tupdesc; + Attribute att; + + /* + * This logic is largely duplicated in Heap.Indexed.toOffsetNonFixed + * and will probably need to be changed there too if anything is + * changed here. + */ + for ( int i = 0 ; i < idx ; ++ i ) + { + if ( isNull(i) ) + continue; + + att = atts.get(i); + + int align = alignmentModulus(att.alignment()); + int len = att.length(); + + /* + * Skip the fuss of aligning if align isn't greater than 1. + * More interestingly, whether to align in the varlena case + * (length of -1) depends on whether the byte at the current + * offset is zero. Each outcome includes two subcases, for one + * of which it doesn't matter whether we align or not because + * the offset is already aligned, and for the other of which it + * does matter, so that determines the choice. If the byte seen + * there is zero, it might be a pad byte and require aligning, + * so align. See att_align_pointer in PG's access/tupmacs.h. 
+ */ + if ( align > 1 && ( -1 != len || 0 == m_hValues.get(offset) ) ) + offset += + - m_hValues.alignmentOffset(offset, align) & (align-1); + + if ( 0 <= len ) // a nonnegative length is used directly + offset += len; + else if ( -1 == len ) // find and skip the length of the varlena + offset += inspectVarlena(m_hValues, offset); + else if ( -2 == len ) // NUL-terminated value, skip past the NUL + { + while ( 0 != m_hValues.get(offset) ) + ++ offset; + ++ offset; + } + else + throw new AssertionError( + "cannot skip attribute with weird length " + len); + } + + att = atts.get(idx); + + int align = alignmentModulus(att.alignment()); + int len = att.length(); + /* + * Same alignment logic as above. + */ + if ( align > 1 && ( -1 != len || 0 == m_hValues.get(offset) ) ) + offset += -m_hValues.alignmentOffset(offset, align) & (align-1); + + return offset; + } + + @Override + ByteBuffer values() + { + return m_hValues; + } + + @Override + public RegClass relation() + { + return m_relation; + } + + /** + * Something that resembles a {@code Heap} tuple, but consists of + * a number of elements all of the same type, distinguished by index. + *

    + * Constructed with a one-element {@code TupleDescriptor} whose single + * {@code Attribute} describes the type of all elements. + *

    + * + */ + static class Indexed extends Heap implements TupleTableSlot.Indexed + { + private final int m_elements; + private final IntUnaryOperator m_toOffset; + + Indexed( + TupleDescriptor td, int elements, + ByteBuffer nulls, ByteBuffer values) + { + super(td.get(0).relation(), td, values, nulls); + assert elements >= 0 : "negative element count"; + assert null == nulls || nulls.capacity() == (elements+7)/8 + : "nulls length element count mismatch"; + m_elements = elements; + + Attribute att = td.get(0); + int length = att.length(); + int align = alignmentModulus(att.alignment()); + assert 0 == values.alignmentOffset(0, align) + : "misaligned ByteBuffer passed"; + int mask = align - 1; // make it a mask + if ( length < 0 ) // the non-fixed case + /* + * XXX without offset memoization of some kind, this will be + * a quadratic way of accessing elements, but that can be + * improved later. + */ + m_toOffset = i -> toOffsetNonFixed(i, length, mask); + else + { + int stride = length + ( -(length & mask) & mask ); + if ( null == nulls ) + m_toOffset = i -> i * stride; + else + m_toOffset = i -> (i - nullsPreceding(i)) * stride; + } + } + + @Override + public int elements() + { + return m_elements; + } + + @Override + protected Attribute fromIndex(int idx, Adapter adp) + { + checkIndex(idx, m_elements); + Attribute att = m_tupdesc.get(0); + if ( m_adapters [ 0 ] != requireNonNull(adp) ) + memoize(0, att, adp); + return att; + } + + @Override + protected int toOffset(int idx) + { + return m_toOffset.applyAsInt(idx); + } + + @Override + protected Accessor accessor(int idx) + { + return m_accessors[0]; + } + + private int nullsPreceding(int idx) + { + int targetByte = idx >>> 3; + int targetBit = 1 << ( idx & 7 ); + byte b = m_hIsNull.get(targetByte); + /* + * The nulls bitmask has 1 bits where values are *not* null. + * Java has a bitCount method that counts 1 bits. So the loop + * below will have an invert step before counting bits. 
That + * means we want to modify *this* byte to have 1 at the target + * position *and above*, so all those bits will invert to zero + * before we count them. The next step does that. + */ + b |= - targetBit; + int count = Integer.bitCount(Byte.toUnsignedInt(b) ^ 0xff); + for ( int i = 0; i < targetByte; ++ i ) + { + b = m_hIsNull.get(i); + count += Integer.bitCount(Byte.toUnsignedInt(b) ^ 0xff); + } + return count; + } + + /** + * Largely duplicates the superclass {@code toOffset} but + * specialized to only a single attribute type that is repeated. + *

    + * Only covers the non-fixed-length cases (length of -1 or -2). + * Assumes the byte buffer is already aligned such that offset 0 + * satisfies the alignment constraint. + *

    + * Important: align here is a mask; the caller + * has subtracted 1 from it, compared to the align value + * seen in the superclass implementation. + */ + private int toOffsetNonFixed(int idx, int len, int align) + { + int offset = 0; + + if ( null != m_hIsNull ) + idx -= nullsPreceding(idx); + + /* + * The following code is very similar to that in the superclass, + * other than having already converted align to a mask (changing + * the test below to align>0 where the superclass has align>1), + * and having already reduced idx by the preceding nulls. If any + * change is needed here, it is probably needed there too. + */ + for ( int i = 0 ; i < idx ; ++ i ) + { + if ( align > 0 + && ( -1 != len || 0 == m_hValues.get(offset) ) ) + offset += - (offset & align) & align; + + if ( -1 == len ) // find and skip the length of the varlena + offset += inspectVarlena(m_hValues, offset); + else if ( -2 == len ) // NUL-terminated, skip past the NUL + { + while ( 0 != m_hValues.get(offset) ) + ++ offset; + ++ offset; + } + else + throw new AssertionError( + "cannot skip attribute with weird length " + len); + } + + /* + * Same alignment logic as above. + */ + if ( align > 0 && ( -1 != len || 0 == m_hValues.get(offset) ) ) + offset += - (offset & align) & align; + + return offset; + } + } + } + + /** + * Implementation of {@link TupleTableSlot TupleTableSlot} for + * {@code NullableDatum} layout, as used for PL routine arguments. 
+ */ + static class NullableDatum extends TupleTableSlotImpl + { + NullableDatum(TupleDescriptor tupleDesc, ByteBuffer values) + { + super(null, tupleDesc, values, null); + } + + @Override + protected Accessor selectAccessor( + boolean byValue, short length) + { + return forDeformed(byValue, length); + } + + @Override + protected boolean isNull(int idx) + { + return 0 != m_values.get( + idx * SIZEOF_NullableDatum + OFFSET_NullableDatum_isnull); + } + + @Override + protected int toOffset(int idx) + { + return idx * SIZEOF_NullableDatum + OFFSET_NullableDatum_value; + } + + @Override + public RegClass relation() + { + return RegClass.CLASSID.invalid(); + } + } + + @Override + public RegClass relation() + { + int tableOid; + + if ( NOCONSTANT == OFFSET_TTS_TABLEOID ) + throw notyet("table Oid from TupleTableSlot in PostgreSQL < 12"); + + tableOid = m_tts.getInt(OFFSET_TTS_TABLEOID); + return staticFormObjectId(RegClass.CLASSID, tableOid); + } + + @Override + public TupleDescriptor descriptor() + { + return m_tupdesc; + } + + ByteBuffer values() + { + return m_values; + } + + void store_heaptuple(long ht, boolean shouldFree) + { + doInPG(() -> _store_heaptuple(m_tts, ht, shouldFree)); + } + + private static native void _getsomeattrs(ByteBuffer tts, int idx); + + private static native ByteBuffer _mapHeapTuple(long nativeAddress); + + private static native void _store_heaptuple( + ByteBuffer tts, long ht, boolean shouldFree); + + @Override + public T get(Attribute att, As adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public long get(Attribute att, AsLong adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public double get(Attribute att, 
AsDouble adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public int get(Attribute att, AsInt adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public float get(Attribute att, AsFloat adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public short get(Attribute att, AsShort adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public char get(Attribute att, AsChar adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public byte get(Attribute att, AsByte adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public boolean get(Attribute att, AsBoolean adapter) + { + int idx = toIndex(att, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(m_accessors[idx], values(), off, att); + } + + @Override + public T get(int idx, As adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + 
public long get(int idx, AsLong adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + public double get(int idx, AsDouble adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + public int get(int idx, AsInt adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + public float get(int idx, AsFloat adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + public short get(int idx, AsShort adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + public char get(int idx, AsChar adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + public byte get(int idx, AsByte adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), off, att); + } + + @Override + public boolean get(int idx, AsBoolean adapter) + { + Attribute att = fromIndex(idx, adapter); + + if ( isNull(idx) ) + return adapter.fetchNull(att); + + int off = toOffset(idx); + return adapter.fetch(accessor(idx), values(), 
off, att); + } + + private static class HTChunkState + extends DualState.BBHeapFreeTuple + { + private HTChunkState( + TupleTableSlotImpl referent, Lifespan span, ByteBuffer ht) + { + super(referent, span, ht); + } + } + + private static class BBOnlyState + extends DualState.SingleGuardedBB + { + private BBOnlyState( + TupleTableSlotImpl referent, Lifespan span, ByteBuffer ht) + { + super(referent, span, ht); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/ArrayAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/ArrayAdapter.java new file mode 100644 index 000000000..746f2b1dd --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/ArrayAdapter.java @@ -0,0 +1,403 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.lang.reflect.Type; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; +import java.nio.IntBuffer; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import java.util.List; +import static java.util.Objects.requireNonNull; + +import java.util.stream.IntStream; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.Adapter.Contract; + +import org.postgresql.pljava.adt.Array.AsFlatList; +import org.postgresql.pljava.adt.spi.Datum; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegClass; +import org.postgresql.pljava.model.RegType; +import static org.postgresql.pljava.model.RegType.ANYARRAY; +import org.postgresql.pljava.model.TupleDescriptor; +import 
org.postgresql.pljava.model.TupleTableSlot; + +import static org.postgresql.pljava.pg.CatalogObjectImpl.of; +import static org.postgresql.pljava.pg.DatumUtils.indexedTupleSlot; +import static org.postgresql.pljava.pg.DatumUtils.mapFixedLength; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_ArrayType_ndim; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_ArrayType_ndim; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_ArrayType_elemtype; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_ArrayType_elemtype; +import static + org.postgresql.pljava.pg.ModelConstants.SIZEOF_ArrayType_dataoffset; +import static + org.postgresql.pljava.pg.ModelConstants.OFFSET_ArrayType_dataoffset; +import static org.postgresql.pljava.pg.ModelConstants.OFFSET_ArrayType_DIMS; +import static org.postgresql.pljava.pg.ModelConstants.SIZEOF_ArrayType_DIM; +import static org.postgresql.pljava.pg.ModelConstants.VARHDRSZ; + +import static org.postgresql.pljava.pg.ModelConstants.MAXIMUM_ALIGNOF; + +/* + * The representation details are found in include/utils/array.h + */ + +/** + * Ancestor of adapters that can map a PostgreSQL array to some representation + * {@literal }. + * @param Java type to represent the entire array. + */ +public class ArrayAdapter extends Adapter.Array +{ + private static final Configuration s_config; + + /** + * An {@code ArrayAdapter} that maps any PostgreSQL array with element type + * compatible with {@link TextAdapter TextAdapter} to flat (disregarding the + * PostgreSQL array's dimensionality) {@code List} of {@code String}, + * with any null elements mapped to Java null. 
+ */ + public static final + ArrayAdapter> FLAT_STRING_LIST_INSTANCE; + + public static final + ArrayAdapter TYPE_OBTAINING_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration config = AccessController.doPrivileged( + (PrivilegedAction)() -> + configure(ArrayAdapter.class, Via.DATUM)); + + s_config = config; + + FLAT_STRING_LIST_INSTANCE = new ArrayAdapter<>( + TextAdapter.INSTANCE, AsFlatList.of(AsFlatList::nullsIncludedCopy)); + + TYPE_OBTAINING_INSTANCE = new ArrayAdapter( + Opaque.INSTANCE, new ElementTypeContract()); + } + + /** + * Constructs an array adapter given an adapter that returns a reference + * type {@literal } for the element type, and a corresponding array + * contract producing {@literal }. + */ + public ArrayAdapter( + Adapter.As element, Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code long} for the element type, and a corresponding array + * contract producing {@literal }. + */ + public ArrayAdapter( + Adapter.AsLong element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code double} for the element type, and a corresponding array + * contract producing {@literal }. + */ + public ArrayAdapter( + Adapter.AsDouble element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code int} for the element type, and a corresponding array + * contract producing {@literal }. 
+ */ + public ArrayAdapter( + Adapter.AsInt element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code float} for the element type, and a corresponding array + * contract producing {@literal }. + */ + public ArrayAdapter( + Adapter.AsFloat element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code short} for the element type, and a corresponding array + * contract producing {@literal }. + */ + public ArrayAdapter( + Adapter.AsShort element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code char} for the element type, and a corresponding array + * contract producing {@literal }. + */ + public ArrayAdapter( + Adapter.AsChar element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code byte} for the element type, and a corresponding array + * contract producing {@literal }. + */ + public ArrayAdapter( + Adapter.AsByte element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + /** + * Constructs an array adapter given an adapter that returns a primitive + * {@code boolean} for the element type, and a corresponding array + * contract producing {@literal }. 
+ */ + public ArrayAdapter( + Adapter.AsBoolean element, + Contract.Array> contract) + { + super(contract, element, null, s_config); + } + + ArrayAdapter( + Adapter.As element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsLong element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsDouble element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsInt element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsFloat element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsShort element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsChar element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsByte element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + ArrayAdapter( + Adapter.AsBoolean element, Type witness, + Contract.Array> contract) + { + super(contract, element, witness, s_config); + } + + /** + * Whether this adapter can be applied to the given PostgreSQL type. + *

    + * If not overridden, simply requires that pgType is an array + * type and that its declared element type is acceptable to {@code canFetch} + * of the configured element adapter. + */ + @Override + public boolean canFetch(RegType pgType) + { + RegType elementType = pgType.element(); + if ( elementType.isValid() && m_elementAdapter.canFetch(elementType) ) + return true; + return + ANYARRAY == pgType && Opaque.INSTANCE == m_elementAdapter; + } + + /** + * Returns the result of applying the configured element adapter and + * {@link Contract.Array array contract} to the contents of the array + * in. + */ + public T fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + try + { + in.pin(); + ByteBuffer bb = in.buffer().order(nativeOrder()); + + assert 4 == SIZEOF_ArrayType_ndim : "ArrayType.ndim size change"; + int nDims = bb.getInt(OFFSET_ArrayType_ndim); + + assert 4 == SIZEOF_ArrayType_elemtype + : "ArrayType.elemtype size change"; + RegType elementType = + of(RegType.CLASSID, bb.getInt(OFFSET_ArrayType_elemtype)); + + if ( ! m_elementAdapter.canFetch(elementType) ) + throw new IllegalArgumentException(String.format( + "cannot fetch array element of type %s using %s", + elementType, m_elementAdapter)); + + assert 4 == SIZEOF_ArrayType_dataoffset + : "ArrayType.dataoffset size change"; + int dataOffset = bb.getInt(OFFSET_ArrayType_dataoffset); + + boolean hasNulls = 0 != dataOffset; + + int dimsOffset = OFFSET_ArrayType_DIMS; + int dimsBoundsLength = 2 * nDims * SIZEOF_ArrayType_DIM; + + assert 4 == SIZEOF_ArrayType_DIM : "ArrayType dim size change"; + IntBuffer dimsAndBounds = + mapFixedLength(bb, dimsOffset, dimsBoundsLength).asIntBuffer(); + + int nItems = 0 == nDims ? 
0 : + IntStream.range(0, nDims).map(dimsAndBounds::get) + .reduce(1, Math::multiplyExact); + + ByteBuffer nulls; + + if ( hasNulls ) + { + int nullsOffset = dimsOffset + dimsBoundsLength; + int nullsLength = (nItems + 7) / 8; + nulls = mapFixedLength(bb, nullsOffset, nullsLength); + /* + * In the with-nulls case, PostgreSQL has supplied dataOffset. + * But it includes VARHDRSZ, and a VarlenaWrapper doesn't + * include that first word. + */ + dataOffset -= VARHDRSZ; + } + else + { + nulls = null; + /* + * In the no-nulls case, computing dataOffset is up to us. + */ + dataOffset = dimsOffset + dimsBoundsLength; + dataOffset += + - bb.alignmentOffset(dataOffset, MAXIMUM_ALIGNOF) + & (MAXIMUM_ALIGNOF - 1); + } + + ByteBuffer values = + mapFixedLength(bb, dataOffset, bb.capacity() - dataOffset); + + TupleTableSlot.Indexed tti = + indexedTupleSlot(elementType, nItems, nulls, values); + + int[] dimsBoundsArray = new int [ dimsAndBounds.capacity() ]; + dimsAndBounds.get(dimsBoundsArray); + + /* + * The accessible constructors ensured that m_elementAdapter and + * m_contract have compatible parameterized types. They were stored + * as raw types to avoid having extra type parameters on array + * adapters that are of no interest to code that makes use of them. + */ + @SuppressWarnings("unchecked") + T result = (T)m_contract.construct( + nDims, dimsBoundsArray, m_elementAdapter, tti); + + return result; + } + finally + { + in.unpin(); + in.close(); + } + } + + /** + * A contract that cannot retrieve any element, but returns the array's + * internally-recorded element type. 
+ */ + private static class ElementTypeContract + implements Contract.Array> + { + @Override + public RegType construct( + int nDims, int[] dimsAndBounds, Adapter.As adapter, + TupleTableSlot.Indexed slot) + throws SQLException + { + return slot.descriptor().get(0).type(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/ByteaAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/ByteaAdapter.java new file mode 100644 index 000000000..b0fbed3f6 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/ByteaAdapter.java @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.InputStream; +import java.io.IOException; + +import java.nio.ByteBuffer; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegType; + +/** + * PostgreSQL {@code bytea}. 
+ */ +public abstract class ByteaAdapter extends Adapter.Container +{ + private ByteaAdapter() // no instances + { + } + + public static final Bytes ARRAY_INSTANCE; + public static final Stream STREAM_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration[] configs = AccessController.doPrivileged( + (PrivilegedAction)() -> new Configuration[] + { + configure( Bytes.class, Via.DATUM), + configure(Stream.class, Via.DATUM) + }); + + ARRAY_INSTANCE = new Bytes(configs[0]); + STREAM_INSTANCE = new Stream(configs[1]); + } + + /** + * Adapter producing a Java byte array. + */ + public static class Bytes extends Adapter.As + { + private Bytes(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.BYTEA == pgType; + } + + public byte[] fetch(Attribute a, Datum.Input in) + throws SQLException + { + in.pin(); + try + { + ByteBuffer b = in.buffer(); + byte[] array = new byte [ b.limit() ]; + // Java >= 13: b.get(0, array) + b.rewind().get(array); + return array; + } + finally + { + in.unpin(); + } + } + } + + /** + * Adapter producing an {@code InputStream}. + */ + public static class Stream extends Adapter.As + { + private Stream(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.BYTEA == pgType; + } + + public InputStream fetch(Attribute a, Datum.Input in) + throws SQLException + { + return in.inputStream(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/DateTimeAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/DateTimeAdapter.java new file mode 100644 index 000000000..408828fbf --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/DateTimeAdapter.java @@ -0,0 +1,303 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.nio.ByteBuffer; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import java.time.LocalDate; +import java.time.LocalTime; +import java.time.OffsetTime; +import java.time.LocalDateTime; +import java.time.OffsetDateTime; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.Datetime; +import org.postgresql.pljava.adt.Timespan; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.model.Attribute; +import static org.postgresql.pljava.model.RegNamespace.PG_CATALOG; +import org.postgresql.pljava.model.RegType; + +import org.postgresql.pljava.model.SlotTester.Visible; // temporary for test jig + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * PostgreSQL date, time, timestamp, and interval types, available in various + * representations by implementing the corresponding functional interfaces + * to construct them. 
+ */ +public abstract class DateTimeAdapter extends Adapter.Container +{ + private DateTimeAdapter() // no instances + { + } + + private static final Configuration[] s_configs; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration[] configs = AccessController.doPrivileged( + (PrivilegedAction)() -> new Configuration[] + { + configure( Date.class, Via.INT32SX), + configure( Time.class, Via.INT64SX), + configure( TimeTZ.class, Via.DATUM ), + configure( Timestamp.class, Via.INT64SX), + configure(TimestampTZ.class, Via.INT64SX), + configure( Interval.class, Via.DATUM ) + }); + + s_configs = configs; + } + + /** + * Instances of the date/time/timestamp adapters using the JSR310 + * {@code java.time} types. + *

    + * A holder interface so these won't be instantiated unless wanted. + */ + public interface JSR310 extends Visible + { + Date DATE_INSTANCE = + new Date<>(Datetime.Date.AsLocalDate.INSTANCE); + + Time TIME_INSTANCE = + new Time<>(Datetime.Time.AsLocalTime.INSTANCE); + + TimeTZ TIMETZ_INSTANCE = + new TimeTZ<>(Datetime.TimeTZ.AsOffsetTime.INSTANCE); + + Timestamp TIMESTAMP_INSTANCE = + new Timestamp<>(Datetime.Timestamp.AsLocalDateTime.INSTANCE); + + TimestampTZ TIMESTAMPTZ_INSTANCE = + new TimestampTZ<>(Datetime.TimestampTZ.AsOffsetDateTime.INSTANCE); + + /* + * See org.postgresql.pljava.adt.Timespan.Interval for why a reference + * implementation for that type is missing here. + */ + } + + /** + * Adapter for the {@code DATE} type to the functional interface + * {@link Datetime.Date Datetime.Date}. + */ + public static class Date extends Adapter.As + { + private Datetime.Date m_ctor; + public Date(Datetime.Date ctor) + { + super(ctor, null, s_configs[0]); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.DATE == pgType; + } + + public T fetch(Attribute a, int in) + { + return m_ctor.construct(in); + } + } + + /** + * Adapter for the {@code TIME} type to the functional interface + * {@link Datetime.Time Datetime.Time}. + */ + public static class Time extends Adapter.As + { + private Datetime.Time m_ctor; + public Time(Datetime.Time ctor) + { + super(ctor, null, s_configs[1]); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.TIME == pgType; + } + + public T fetch(Attribute a, long in) + { + return m_ctor.construct(in); + } + } + + /** + * Adapter for the {@code TIME WITH TIME ZONE} type to the functional + * interface {@link Datetime.TimeTZ Datetime.TimeTZ}. 
+ */ + public static class TimeTZ extends Adapter.As + { + private Datetime.TimeTZ m_ctor; + public TimeTZ(Datetime.TimeTZ ctor) + { + super(ctor, null, s_configs[2]); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.TIMETZ == pgType; + } + + public T fetch(Attribute a, Datum.Input in) + throws IOException, SQLException + { + try + { + in.pin(); + ByteBuffer bb = in.buffer(); + long microsecondsSincePostgresEpoch = bb.getLong(); + int secondsWestOfPrimeMeridian = bb.getInt(); + return m_ctor.construct( + microsecondsSincePostgresEpoch, secondsWestOfPrimeMeridian); + } + finally + { + in.unpin(); + in.close(); + } + } + } + + /** + * Adapter for the {@code TIMESTAMP} type to the functional + * interface {@link Datetime.Timestamp Datetime.Timestamp}. + */ + public static class Timestamp extends Adapter.As + { + private Datetime.Timestamp m_ctor; + public Timestamp(Datetime.Timestamp ctor) + { + super(ctor, null, s_configs[3]); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.TIMESTAMP == pgType; + } + + public T fetch(Attribute a, long in) + { + return m_ctor.construct(in); + } + } + + /** + * Adapter for the {@code TIMESTAMP WITH TIME ZONE} type to the functional + * interface {@link Datetime.TimestampTZ Datetime.TimestampTZ}. + */ + public static class TimestampTZ extends Adapter.As + { + private Datetime.TimestampTZ m_ctor; + public TimestampTZ(Datetime.TimestampTZ ctor) + { + super(ctor, null, s_configs[4]); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.TIMESTAMPTZ == pgType; + } + + public T fetch(Attribute a, long in) + { + return m_ctor.construct(in); + } + } + + /** + * Adapter for the {@code INTERVAL} type to the functional + * interface {@link Timespan.Interval Timespan.Interval}. 
+ */ + public static class Interval extends Adapter.As + { + private static final Simple + s_name_INTERVAL = Simple.fromJava("interval"); + + private static RegType s_intervalType; + + private Timespan.Interval m_ctor; + public Interval(Timespan.Interval ctor) + { + super(ctor, null, s_configs[5]); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + /* + * There has to be some kind of rule for which data types deserve + * their own RegType constants. The date/time/timestamp ones all do + * because JDBC mentions them, but it doesn't mention interval. + * So just compare it by name here, unless the decision is made + * to have a RegType constant for it too. + */ + RegType intervalType = s_intervalType; + if ( null != intervalType ) // did we match the type and cache it? + return intervalType == pgType; + + if ( ! s_name_INTERVAL.equals(pgType.name()) + || PG_CATALOG != pgType.namespace() ) + return false; + + /* + * Hang onto this matching RegType for faster future checks. + * Because RegTypes are singletons, and reference writes can't + * be torn, this isn't evil as data races go. + */ + s_intervalType = pgType; + return true; + } + + public T fetch(Attribute a, Datum.Input in) + throws IOException, SQLException + { + try + { + in.pin(); + ByteBuffer bb = in.buffer(); + long microseconds = bb.getLong(); + int days = bb.getInt(); + int months = bb.getInt(); + return m_ctor.construct(microseconds, days, months); + } + finally + { + in.unpin(); + in.close(); + } + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/EncodingAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/EncodingAdapter.java new file mode 100644 index 000000000..d2df9b5da --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/EncodingAdapter.java @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; + +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.CharsetEncoding; +import org.postgresql.pljava.model.RegType; + +/** + * PostgreSQL character set encoding ({@code int4} in the catalogs) represented + * as {@code CharsetEncoding}. + */ +public class EncodingAdapter extends Adapter.As +{ + public static final EncodingAdapter INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration config = AccessController.doPrivileged( + (PrivilegedAction)() -> + configure(EncodingAdapter.class, Via.INT32SX)); + + INSTANCE = new EncodingAdapter(config); + } + + EncodingAdapter(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.INT4 == pgType; + } + + public CharsetEncoding fetch(Attribute a, int in) + throws SQLException, IOException + { + return -1 == in ? CharsetEncoding.ANY : CharsetEncoding.fromOrdinal(in); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/GrantAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/GrantAdapter.java new file mode 100644 index 000000000..1848710da --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/GrantAdapter.java @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.nativeOrder; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import java.util.List; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.Array.AsFlatList; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.CatalogObject.Grant; +import org.postgresql.pljava.model.RegType; + +import org.postgresql.pljava.pg.AclItem; + +/** + * PostgreSQL {@code aclitem} represented as {@link Grant Grant}. + */ +public class GrantAdapter extends Adapter.As +{ + public static final GrantAdapter INSTANCE; + + public static final ArrayAdapter> LIST_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration config = AccessController.doPrivileged( + (PrivilegedAction)() -> + configure(GrantAdapter.class, Via.DATUM)); + + INSTANCE = new GrantAdapter(config); + + LIST_INSTANCE = new ArrayAdapter<>(INSTANCE, + AsFlatList.of(AsFlatList::nullsIncludedCopy)); + } + + private GrantAdapter(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.ACLITEM == pgType; + } + + public Grant fetch(Attribute a, Datum.Input in) + throws IOException, SQLException + { + in.pin(); + try + { + ByteBuffer b = in.buffer().order(nativeOrder()); + return new AclItem.NonRole(b); + } + finally + { + in.unpin(); + in.close(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/MoneyAdapter.java 
b/pljava/src/main/java/org/postgresql/pljava/pg/adt/MoneyAdapter.java new file mode 100644 index 000000000..4694a354e --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/MoneyAdapter.java @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.Money; +import org.postgresql.pljava.model.Attribute; +import static org.postgresql.pljava.model.RegNamespace.PG_CATALOG; +import org.postgresql.pljava.model.RegType; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Adapter for the {@code MONEY} type to the functional interface {@link Money}. + */ +public abstract class MoneyAdapter extends Adapter.As +{ + private static final Simple s_name_MONEY = Simple.fromJava("money"); + private static RegType s_moneyType; + private final Money m_ctor; + + @SuppressWarnings("removal") // JEP 411 + private static final Configuration s_config = + AccessController.doPrivileged( + (PrivilegedAction)() -> + configure(MoneyAdapter.class, Via.INT64SX)); + + public MoneyAdapter(Money ctor) + { + super(ctor, null, s_config); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + /* + * There has to be some kind of rule for which data types deserve + * their own RegType constants. The date/time/timestamp ones all do + * because JDBC mentions them, but it doesn't mention interval. 
+ * So just compare it by name here, unless the decision is made + * to have a RegType constant for it too. + */ + RegType moneyType = s_moneyType; + if ( null != moneyType ) // did we match the type and cache it? + return moneyType == pgType; + + if ( ! s_name_MONEY.equals(pgType.name()) + || PG_CATALOG != pgType.namespace() ) + return false; + + /* + * Hang onto this matching RegType for faster future checks. + * Because RegTypes are singletons, and reference writes can't + * be torn, this isn't evil as data races go. + */ + s_moneyType = pgType; + return true; + } + + public T fetch(Attribute a, long scaledToInteger) + throws IOException, SQLException + { + return m_ctor.construct(scaledToInteger); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/NameAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/NameAdapter.java new file mode 100644 index 000000000..b06fa76c3 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/NameAdapter.java @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.nio.ByteBuffer; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.model.Attribute; +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; + +import org.postgresql.pljava.model.RegType; + +import static org.postgresql.pljava.pg.DatumUtils.mapCString; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Unqualified; + +/** + * PostgreSQL {@code name} type represented as + * {@code Lexicals.Identifier.Simple} or {@code Lexicals.Identifier.Operator}. + */ +public abstract class NameAdapter +extends Adapter.As +{ + public static final Simple SIMPLE_INSTANCE; + public static final Operator OPERATOR_INSTANCE; + public static final AsString AS_STRING_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration[] configs = AccessController.doPrivileged( + (PrivilegedAction)() -> new Configuration[] + { + configure( Simple.class, Via.DATUM), + configure(Operator.class, Via.DATUM), + configure(AsString.class, Via.DATUM) + }); + + SIMPLE_INSTANCE = new Simple(configs[0]); + OPERATOR_INSTANCE = new Operator(configs[1]); + AS_STRING_INSTANCE = new AsString(configs[2]); + } + + NameAdapter(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.NAME == pgType; + } + + /** + * Adapter for the {@code name} type, returning an + * {@link Identifier.Simple Identifier.Simple}. 
+ */ + public static class Simple extends NameAdapter + { + private Simple(Configuration c) + { + super(c); + } + + public Identifier.Simple fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + return Identifier.Simple.fromCatalog(decoded(in)); + } + } + + /** + * Adapter for the {@code name} type, returning an + * {@link Identifier.Operator Identifier.Operator}. + */ + public static class Operator extends NameAdapter + { + private Operator(Configuration c) + { + super(c); + } + + public Identifier.Operator fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + return Identifier.Operator.from(decoded(in)); + } + } + + /** + * Adapter for the {@code name} type, returning a Java {@code String}. + *

    + * This may be convenient for some casual uses, but a Java string will not + * observe any of the peculiar case-sensitivity rules of SQL identifiers. + */ + public static class AsString extends Adapter.As + { + private AsString(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.NAME == pgType; + } + + public String fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + return decoded(in); + } + } + + static final String decoded(Datum.Input in) throws SQLException, IOException + { + in.pin(); + try + { + ByteBuffer bnew = mapCString(in.buffer(), 0); + return SERVER_ENCODING.decode(bnew).toString(); + } + finally + { + in.unpin(); + in.close(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/NumericAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/NumericAdapter.java new file mode 100644 index 000000000..35f871763 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/NumericAdapter.java @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.math.BigDecimal; + +import java.nio.ShortBuffer; +import static java.nio.ByteOrder.nativeOrder; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.Numeric; +import org.postgresql.pljava.adt.Numeric.Kind; +import org.postgresql.pljava.adt.Numeric.AsBigDecimal; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegType; + +/** + * Adapter for the {@code NUMERIC} type to the functional interface + * {@link Numeric}. + */ +public class NumericAdapter extends Adapter.As +{ + private final Numeric m_ctor; + + @SuppressWarnings("removal") // JEP 411 + private static final Configuration s_config = + AccessController.doPrivileged( + (PrivilegedAction)() -> + configure(NumericAdapter.class, Via.DATUM)); + + public static final NumericAdapter BIGDECIMAL_INSTANCE = + new NumericAdapter<>(AsBigDecimal.INSTANCE); + + public NumericAdapter(Numeric ctor) + { + super(ctor, null, s_config); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.NUMERIC == pgType; + } + + public T fetch(Attribute a, Datum.Input in) throws SQLException + { + in.pin(); + try + { + ShortBuffer b = + in.buffer().order(nativeOrder()).asShortBuffer(); + + /* + * Magic numbers used below are not exposed in .h files, but + * only found in PostgreSQL's utils/adt/numeric.c. Most are used + * naked here, rather than named, if they aren't needed in many + * places and the usage is clear in context. 
Regression tests + * are the only way to confirm they are right anyway. + */ + + short header = b.get(); + + boolean isShort = 0 != (header & 0x8000); + + Kind k; + + switch ( header & 0xF000 ) + { + case 0xC000: k = Kind.NAN; break; + case 0xD000: k = Kind.POSINFINITY; break; + case 0xF000: k = Kind.NEGINFINITY; break; + default: + int displayScale; + int weight; + + if ( isShort ) + { + k = 0 != (header & 0x2000) ? Kind.NEGATIVE : Kind.POSITIVE; + displayScale = (header & 0x1F80) >>> 7; + weight = ( (header & 0x007F) ^ 0x0040 ) - 0x0040;// sign ext + } + else + { + k = 0 != (header & 0x4000) ? Kind.NEGATIVE : Kind.POSITIVE; + displayScale = header & 0x3FFF; + weight = b.get(); + } + + short[] base10000Digits = new short [ b.remaining() ]; + b.get(base10000Digits); + + return m_ctor.construct( + k, displayScale, weight, base10000Digits); + } + + return m_ctor.construct(k, 0, 0, new short[0]); + } + finally + { + in.unpin(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/OidAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/OidAdapter.java new file mode 100644 index 000000000..23c39b58d --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/OidAdapter.java @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2022-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import static java.util.Arrays.stream; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.model.Attribute; + +import org.postgresql.pljava.model.*; + +import static org.postgresql.pljava.pg.CatalogObjectImpl.of; + +/** + * PostgreSQL {@code oid} type represented as + * {@code CatalogObject} or one of its {@code Addressed} subtypes. + */ +public class OidAdapter +extends Adapter.As +{ + public static final OidAdapter INSTANCE; + public static final Int4 INT4_INSTANCE; + public static final Addressed REGCLASS_INSTANCE; + public static final Addressed REGCOLLATION_INSTANCE; + public static final Addressed REGCONFIG_INSTANCE; + public static final Addressed REGDICTIONARY_INSTANCE; + public static final Addressed REGNAMESPACE_INSTANCE; + public static final Addressed REGOPERATOR_INSTANCE; + public static final Procedure REGPROCEDURE_INSTANCE; + public static final Addressed REGROLE_INSTANCE; + public static final Addressed REGTYPE_INSTANCE; + public static final Addressed CONSTRAINT_INSTANCE; + public static final Addressed DATABASE_INSTANCE; + public static final Addressed EXTENSION_INSTANCE; + public static final Addressed PLANG_INSTANCE; + public static final Addressed TRANSFORM_INSTANCE; + public static final Addressed AM_INSTANCE; + public static final Addressed TABLESPACE_INSTANCE; + public static final Addressed FDW_INSTANCE; + public static final Addressed SERVER_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration[] configs = AccessController.doPrivileged( + (PrivilegedAction)() -> new Configuration[] + { + 
configure(OidAdapter.class, Via.INT32ZX), + configure( Int4.class, Via.INT32ZX), + configure( Addressed.class, Via.INT32ZX), + configure( Procedure.class, Via.INT32ZX) + }); + + INSTANCE = new OidAdapter<>(configs[0], null); + + INT4_INSTANCE = new Int4(configs[1]); + + REGCLASS_INSTANCE = new Addressed<>(configs[2], + RegClass.CLASSID, RegClass.class, RegType.REGCLASS); + + REGCOLLATION_INSTANCE = new Addressed<>(configs[2], + RegCollation.CLASSID, RegCollation.class, RegType.REGCOLLATION); + + REGCONFIG_INSTANCE = new Addressed<>(configs[2], + RegConfig.CLASSID, RegConfig.class, RegType.REGCONFIG); + + REGDICTIONARY_INSTANCE = new Addressed<>(configs[2], + RegDictionary.CLASSID, RegDictionary.class, RegType.REGDICTIONARY); + + REGNAMESPACE_INSTANCE = new Addressed<>(configs[2], + RegNamespace.CLASSID, RegNamespace.class, RegType.REGNAMESPACE); + + REGOPERATOR_INSTANCE = new Addressed<>(configs[2], + RegOperator.CLASSID, RegOperator.class, + RegType.REGOPER, RegType.REGOPERATOR); + + REGPROCEDURE_INSTANCE = new Procedure(configs[3]); + + REGROLE_INSTANCE = new Addressed<>(configs[2], + RegRole.CLASSID, RegRole.class, RegType.REGROLE); + + REGTYPE_INSTANCE = new Addressed<>(configs[2], + RegType.CLASSID, RegType.class, RegType.REGTYPE); + + CONSTRAINT_INSTANCE = new Addressed<>(configs[2], + Constraint.CLASSID, Constraint.class); + + DATABASE_INSTANCE = new Addressed<>(configs[2], + Database.CLASSID, Database.class); + + EXTENSION_INSTANCE = new Addressed<>(configs[2], + Extension.CLASSID, Extension.class); + + PLANG_INSTANCE = new Addressed<>(configs[2], + ProceduralLanguage.CLASSID, ProceduralLanguage.class); + + TRANSFORM_INSTANCE = new Addressed<>(configs[2], + Transform.CLASSID, Transform.class); + + AM_INSTANCE = new Addressed<>(configs[2], + AccessMethod.CLASSID, AccessMethod.class); + + TABLESPACE_INSTANCE = new Addressed<>(configs[2], + Tablespace.CLASSID, Tablespace.class); + + FDW_INSTANCE = new Addressed<>(configs[2], + ForeignDataWrapper.CLASSID, 
ForeignDataWrapper.class); + + SERVER_INSTANCE = new Addressed<>(configs[2], + ForeignServer.CLASSID, ForeignServer.class); + } + + /** + * Types for which the non-specific {@code OidAdapter} or {@code Int4} will + * allow itself to be applied. + *

    + * Some halfhearted effort is put into ordering this with less commonly + * sought entries later. + */ + private static final RegType[] s_oidTypes = + { + RegType.OID, RegType.REGPROC, RegType.REGPROCEDURE, RegType.REGTYPE, + RegType.REGNAMESPACE, RegType.REGOPER, RegType.REGOPERATOR, + RegType.REGROLE, RegType.REGCLASS, RegType.REGCOLLATION, + RegType.REGCONFIG, RegType.REGDICTIONARY + }; + + private OidAdapter(Configuration c, Class witness) + { + super(c, null, witness); + } + + @Override + public boolean canFetch(RegType pgType) + { + for ( RegType t : s_oidTypes ) + if ( t == pgType ) + return true; + return false; + } + + public CatalogObject fetch(Attribute a, int in) + { + return of(in); + } + + /** + * Adapter for the {@code oid} type, returned as a primitive {@code int}. + */ + public static class Int4 extends Adapter.AsInt.Unsigned + { + private Int4(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + for ( RegType t : s_oidTypes ) + if ( t == pgType ) + return true; + return false; + } + + public int fetch(Attribute a, int in) + { + return in; + } + } + + /** + * Adapter for the {@code oid} type, able to return most of the + * {@link CatalogObject.Addressed CatalogObject.Addressed} subinterfaces. + */ + public static class Addressed> + extends OidAdapter + { + private final RegClass.Known m_classId; + private final RegType[] m_specificTypes; + + private Addressed( + Configuration c, RegClass.Known classId, Class witness, + RegType... 
specificTypes) + { + super(c, witness); + m_classId = classId; + m_specificTypes = stream(specificTypes) + .filter(RegType::isValid).toArray(RegType[]::new); + } + + @Override + public boolean canFetch(RegType pgType) + { + for ( RegType t : m_specificTypes ) + if ( t == pgType ) + return true; + return RegType.OID == pgType; + } + + public T fetch(Attribute a, int in) + { + return of(m_classId, in); + } + } + + /** + * A distinct adapter class is needed here because the parameterized + * {@code RegProcedure} type can't be indicated with a class literal + * argument to {@code Addressed}. + */ + public static class Procedure + extends OidAdapter> + { + private Procedure(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + if ( RegType.REGPROC == pgType || RegType.REGPROCEDURE == pgType ) + return true; + return RegType.OID == pgType; + } + + public RegProcedure fetch(Attribute a, int in) + { + return of(RegProcedure.CLASSID, in); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/Primitives.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/Primitives.java new file mode 100644 index 000000000..35d70fd56 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/Primitives.java @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegType; + +/** + * PostgreSQL primitive numeric and boolean, as the corresponding Java + * primitive types. + */ +public abstract class Primitives extends Adapter.Container +{ + private Primitives() // no instances + { + } + + public static final Int8 INT8_INSTANCE; + public static final Int4 INT4_INSTANCE; + public static final Int2 INT2_INSTANCE; + /** + * The PostgreSQL type {@code "char"} (with the quotes, to distinguish it + * from the different, standard SQL type), an 8-bit signed value with no + * associated character encoding (though often used in PostgreSQL catalogs + * with ASCII letters as values). 
+ */ + public static final Int1 INT1_INSTANCE; + public static final Float8 FLOAT8_INSTANCE; + public static final Float4 FLOAT4_INSTANCE; + public static final Boolean BOOLEAN_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration[] configs = AccessController.doPrivileged( + (PrivilegedAction)() -> new Configuration[] + { + configure( Int8.class, Via.INT64SX), + configure( Int4.class, Via.INT32SX), + configure( Int2.class, Via.SHORT), + configure( Int1.class, Via.BYTE), + configure( Float8.class, Via.DOUBLE), + configure( Float4.class, Via.FLOAT), + configure(Boolean.class, Via.BOOLEAN) + }); + + INT8_INSTANCE = new Int8(configs[0]); + INT4_INSTANCE = new Int4(configs[1]); + INT2_INSTANCE = new Int2(configs[2]); + INT1_INSTANCE = new Int1(configs[3]); + FLOAT8_INSTANCE = new Float8(configs[4]); + FLOAT4_INSTANCE = new Float4(configs[5]); + BOOLEAN_INSTANCE = new Boolean(configs[6]); + } + + /** + * Adapter for the {@code int8} type. + */ + public static class Int8 extends Adapter.AsLong.Signed + { + private Int8(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.INT8 == pgType; + } + + public long fetch(Attribute a, long in) + { + return in; + } + } + + /** + * Adapter for the {@code int4} type. + */ + public static class Int4 extends Adapter.AsInt.Signed + { + private Int4(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.INT4 == pgType; + } + + public int fetch(Attribute a, int in) + { + return in; + } + } + + /** + * Adapter for the {@code int2} type. + */ + public static class Int2 extends Adapter.AsShort.Signed + { + private Int2(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.INT2 == pgType; + } + + public short fetch(Attribute a, short in) + { + return in; + } + } + + /** + * Adapter for the {@code "char"} type. 
+ */ + public static class Int1 extends Adapter.AsByte.Signed + { + private Int1(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.CHAR == pgType; + } + + public byte fetch(Attribute a, byte in) + { + return in; + } + } + + /** + * Adapter for the {@code float8} type. + */ + public static class Float8 extends Adapter.AsDouble + { + private Float8(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.FLOAT8 == pgType; + } + + public double fetch(Attribute a, double in) + { + return in; + } + } + + /** + * Adapter for the {@code float4} type. + */ + public static class Float4 extends Adapter.AsFloat + { + private Float4(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.FLOAT4 == pgType; + } + + public float fetch(Attribute a, float in) + { + return in; + } + } + + /** + * Adapter for the {@code boolean} type. + */ + public static class Boolean extends Adapter.AsBoolean + { + private Boolean(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.BOOL == pgType; + } + + public boolean fetch(Attribute a, boolean in) + { + return in; + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/Service.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/Service.java new file mode 100644 index 000000000..02f7682a0 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/Service.java @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2023-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.lang.reflect.Type; + +import java.security.Permission; + +import java.sql.SQLException; +import java.sql.SQLDataException; + +import static java.util.Arrays.copyOf; +import static java.util.Objects.requireNonNull; + +import java.util.function.Consumer; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.Adapter.Array; +import org.postgresql.pljava.Adapter.ArrayBuilder; +import org.postgresql.pljava.Adapter.As; +import org.postgresql.pljava.Adapter.AsBoolean; +import org.postgresql.pljava.Adapter.AsByte; +import org.postgresql.pljava.Adapter.AsChar; +import org.postgresql.pljava.Adapter.AsDouble; +import org.postgresql.pljava.Adapter.AsFloat; +import org.postgresql.pljava.Adapter.AsInt; +import org.postgresql.pljava.Adapter.AsLong; +import org.postgresql.pljava.Adapter.AsShort; +import org.postgresql.pljava.Adapter.TypeWrapper; + +import org.postgresql.pljava.adt.spi.AbstractType.MultiArray; +import org.postgresql.pljava.adt.spi.AbstractType.MultiArray.Sized.Allocated; + +import org.postgresql.pljava.internal.Backend; + +import org.postgresql.pljava.model.RegType; +import org.postgresql.pljava.model.TupleTableSlot.Indexed; + +/** + * Implementation of a service defined by {@link Adapter} for data types. + *

    + * Handles operations such as creating a properly-typed {@link ArrayAdapter} + * with dimensions and types computed from an adapter for the component type. + */ +public final class Service extends Adapter.Service +{ + @Override + protected Array + buildArrayAdapterImpl(ArrayBuilder builder, TypeWrapper w) + { + return staticBuildArrayAdapter( + builder, adapter(builder), multiArray(builder), requireNonNull(w)); + } + + @Override + protected Consumer permissionChecker() + { + return Backend.CHECKER; + } + + @Override + protected Array elementTypeAdapter() + { + return ArrayAdapter.TYPE_OBTAINING_INSTANCE; + } + + /** + * Functional interface representing the initial logic of multiarray + * creation, verifying that the dimensions match, and allocating the Java + * array using the sizes from the PostgreSQL array datum. + */ + @FunctionalInterface + private interface MultiArrayBuilder + { + Allocated + build(int nDims, int[] dimsAndBounds) throws SQLException; + } + + /** + * Instantiate an array adapter, given the builder, and the component + * adapter and the {@link MultiArray} representing the desired array shape, + * both extracted from the builder in the protected caller above. + * + * A {@link TypeWrapper} has been supplied, to be populated here with the + * computed type, and passed as the 'witness' to the appropriate + * {@code ArrayAdapter} constructor. + */ + private static Array staticBuildArrayAdapter( + ArrayBuilder builder, + Adapter componentAdapter, + MultiArray shape, + TypeWrapper w) + { + w.setWrappedType(shape.arrayType()); + + /* + * Build an 'init' lambda that closes over 'shape'. 
+ */ + final MultiArrayBuilder init = (nDims, dimsAndBounds) -> + { + if ( shape.dimensions != nDims ) + throw new SQLDataException( + shape.dimensions + "-dimension array adapter " + + "applied to " + nDims + "-dimension value", "2202E"); + + return shape.size(copyOf(dimsAndBounds, nDims)).allocate(); + }; + + /* + * A lambda implementing the rest of the array contract (closed over + * the 'init' created above) has to be specialized to the component type + * (reference or one of the primitives) that its inner loop will have to + * contend with. That can be determined from the subclass of Adapter. + */ + if ( componentAdapter instanceof AsLong ) + { + return new ArrayAdapter( + (AsLong)componentAdapter, w, + (int nDims, int[] dimsAndBounds, AsLong adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( long[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + else if ( componentAdapter instanceof AsDouble ) + { + return new ArrayAdapter( + (AsDouble)componentAdapter, w, + (int nDims, int[] dimsAndBounds, AsDouble adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( double[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + else if ( componentAdapter instanceof AsInt ) + { + return new ArrayAdapter( + (AsInt)componentAdapter, w, + (int nDims, int[] dimsAndBounds, AsInt adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( int[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, 
adapter); + assert i == n; + return multi.array(); + } + ); + } + else if ( componentAdapter instanceof AsFloat ) + { + return new ArrayAdapter( + (AsFloat)componentAdapter, w, + (int nDims, int[] dimsAndBounds, AsFloat adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( float[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + else if ( componentAdapter instanceof AsShort ) + { + return new ArrayAdapter( + (AsShort)componentAdapter, w, + (int nDims, int[] dimsAndBounds, AsShort adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( short[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + else if ( componentAdapter instanceof AsChar ) + { + return new ArrayAdapter( + (AsChar)componentAdapter, w, + (int nDims, int[] dimsAndBounds, AsChar adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( char[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + else if ( componentAdapter instanceof AsByte ) + { + return new ArrayAdapter( + (AsByte)componentAdapter, w, + (int nDims, int[] dimsAndBounds, AsByte adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( byte[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } 
+ ); + } + else if ( componentAdapter instanceof AsBoolean ) + { + return new ArrayAdapter( + (AsBoolean)componentAdapter, w, + (int nDims, int[] dimsAndBounds, AsBoolean adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( boolean[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + else if ( componentAdapter instanceof As ) + { + @SuppressWarnings("unchecked") + As erasedComponent = (As)componentAdapter; + + return new ArrayAdapter( + erasedComponent, w, + (int nDims, int[] dimsAndBounds, As adapter, + Indexed slot) -> + { + @SuppressWarnings("unchecked") + Allocated multi = (Allocated) + init.build(nDims, dimsAndBounds); + + int n = slot.elements(); + int i = 0; + + for ( Object[] a : multi ) + for ( int j = 0; j < a.length; ++ j ) + a[j] = slot.get(i++, adapter); + assert i == n; + return multi.array(); + } + ); + } + throw new AssertionError("unhandled type building array adapter"); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/TextAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/TextAdapter.java new file mode 100644 index 000000000..089657e66 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/TextAdapter.java @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.model.Attribute; +import static org.postgresql.pljava.model.CharsetEncoding.SERVER_ENCODING; +import org.postgresql.pljava.model.RegType; + +/** + * PostgreSQL {@code text}, {@code varchar}, and similar types represented as + * Java {@code String}. + */ +public class TextAdapter extends Adapter.As +{ + public static final TextAdapter INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration config = AccessController.doPrivileged( + (PrivilegedAction)() -> + configure(TextAdapter.class, Via.DATUM)); + + INSTANCE = new TextAdapter(config); + } + + private TextAdapter(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + if ( RegType.TEXT == pgType || RegType.CSTRING == pgType ) + return true; + + pgType = pgType.withoutModifier(); + + return RegType.VARCHAR == pgType + || RegType.BPCHAR == pgType; + + /* [comment re: typmod copied from upstream utils/adt/varchar.c:] + * For largely historical reasons, the typmod is VARHDRSZ plus the number + * of characters; there is enough client-side code that knows about that + * that we'd better not change it. 
+ */ + } + + public String fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + return SERVER_ENCODING.decode(in, /* close */ true).toString(); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/UUIDAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/UUIDAdapter.java new file mode 100644 index 000000000..d1d47ddad --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/UUIDAdapter.java @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.nio.ByteBuffer; +import static java.nio.ByteOrder.BIG_ENDIAN; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import java.util.UUID; + +import org.postgresql.pljava.Adapter; + +import org.postgresql.pljava.adt.spi.Datum; + +import org.postgresql.pljava.model.Attribute; +import static org.postgresql.pljava.model.RegNamespace.PG_CATALOG; +import org.postgresql.pljava.model.RegType; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * PostgreSQL {@code uuid} type represented + * as {@code java.util.UUID}. 
+ */ +public class UUIDAdapter extends Adapter.As +{ + public static final UUIDAdapter INSTANCE; + + private static final Simple s_name_UUID = Simple.fromJava("uuid"); + + private static RegType s_uuidType; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration config = AccessController.doPrivileged( + (PrivilegedAction)() -> + configure(UUIDAdapter.class, Via.DATUM)); + + INSTANCE = new UUIDAdapter(config); + } + + UUIDAdapter(Configuration c) + { + super(c, null, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + /* + * Compare by name and namespace rather than requiring RegType to have + * a static field for the UUID type; more popular ones, sure, but a line + * has to be drawn somewhere. + */ + RegType uuidType = s_uuidType; + if ( null != uuidType ) // have we matched it before and cached it? + return uuidType == pgType; + + if ( ! s_name_UUID.equals(pgType.name()) + || PG_CATALOG != pgType.namespace() ) + return false; + + /* + * Hang onto this matching RegType for faster future checks. + * Because RegTypes are singletons, and reference writes can't + * be torn, this isn't evil as data races go. + */ + s_uuidType = pgType; + return true; + } + + public UUID fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + try + { + in.pin(); + ByteBuffer bb = in.buffer(); + /* + * The storage is laid out byte by byte in the order PostgreSQL + * prints them (irrespective of architecture). Java's UUID type + * prints the MSB first. 
+ */ + bb.order(BIG_ENDIAN); + long high64 = bb.getLong(); + long low64 = bb.getLong(); + return new UUID(high64, low64); + } + finally + { + in.unpin(); + in.close(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/XMLAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/XMLAdapter.java new file mode 100644 index 000000000..7899f00e5 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/XMLAdapter.java @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2022-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.InputStream; +import java.io.IOException; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; +import java.sql.SQLXML; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.spi.Datum; + +import org.postgresql.pljava.jdbc.SQLXMLImpl; +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegType; + +/** + * PostgreSQL {@code xml} type represented as {@code java.sql.SQLXML}. 
+ */ +public class XMLAdapter extends Adapter.As +{ + public static final XMLAdapter INSTANCE; + public static final XMLAdapter SYNTHETIC_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration[] configs = AccessController.doPrivileged( + (PrivilegedAction)() -> new Configuration[] + { + configure(XMLAdapter.class, Via.DATUM), + configure(Synthetic.class, Via.DATUM) + }); + + INSTANCE = new XMLAdapter(configs[0]); + SYNTHETIC_INSTANCE = new Synthetic(configs[1]); + } + + XMLAdapter(Configuration c) + { + super(c, null, null); + } + + /* + * This preserves the convention, since SQLXML came to PL/Java 1.5.1, that + * you can use the SQLXML API over text values (such as in a database built + * without the XML type, though who would do that nowadays?). + */ + @Override + public boolean canFetch(RegType pgType) + { + return RegType.XML == pgType + || RegType.TEXT == pgType; + } + + public + SQLXML fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + return SQLXMLImpl.newReadable(in, a.type(), false); + } + + /** + * Adapter for use when the PostgreSQL type is not actually XML, but + * to be synthetically rendered as XML (such as {@code pg_node_tree}). + *

    + * This is, for now, a very thin wrapper over + * {@code SQLXMLImpl.newReadable}, which (so far) is still where the + * type-specific rendering logic gets chosen, but that can be refactored + * eventually. + */ + public static class Synthetic extends XMLAdapter + { + Synthetic(Configuration c) + { + super(c); + } + + @Override + public boolean canFetch(RegType pgType) + { + return RegType.PG_NODE_TREE == pgType; + } + + @Override + public + SQLXML fetch(Attribute a, Datum.Input in) + throws SQLException, IOException + { + return SQLXMLImpl.newReadable(in, a.type(), true); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/XidAdapter.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/XidAdapter.java new file mode 100644 index 000000000..204067a4d --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/XidAdapter.java @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import java.io.IOException; + +import java.nio.ByteBuffer; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.sql.SQLException; + +import org.postgresql.pljava.Adapter; +import org.postgresql.pljava.adt.Internal; +import org.postgresql.pljava.adt.spi.Datum; +import org.postgresql.pljava.model.Attribute; +import org.postgresql.pljava.model.RegType; +import static org.postgresql.pljava.model.RegNamespace.PG_CATALOG; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * PostgreSQL {@code cid}, {@code tid}, {@code xid}, and {@code xid8} types. 
+ */ +public abstract class XidAdapter extends Adapter.Container +{ + private XidAdapter() // no instances + { + } + + private static final Configuration s_tid_config; + + public static final CidXid CID_INSTANCE; + public static final CidXid XID_INSTANCE; + public static final Xid8 XID8_INSTANCE; + + static + { + @SuppressWarnings("removal") // JEP 411 + Configuration[] configs = AccessController.doPrivileged( + (PrivilegedAction)() -> new Configuration[] + { + configure( CidXid.class, Via.INT32ZX), + configure( Xid8.class, Via.INT64ZX), + configure( Tid.class, Via.DATUM ) + }); + + CID_INSTANCE = new CidXid(configs[0], "cid"); + XID_INSTANCE = new CidXid(configs[0], "xid"); + XID8_INSTANCE = new Xid8(configs[1]); + + s_tid_config = configs[2]; + } + + /** + * Adapter for the {@code cid} or {@code xid} type, returned as + * a primitive {@code int}. + */ + public static class CidXid extends Adapter.AsInt.Unsigned + { + private final Simple m_typeName; + private RegType m_type; + + private CidXid(Configuration c, String typeName) + { + super(c, null); + m_typeName = Simple.fromJava(typeName); + } + + @Override + public boolean canFetch(RegType pgType) + { + RegType myType = m_type; + if ( null != myType ) + return myType == pgType; + if ( ! m_typeName.equals(pgType.name()) + || PG_CATALOG != pgType.namespace() ) + return false; + /* + * Reference writes are atomic and RegTypes are singletons, + * so this race isn't evil. + */ + m_type = pgType; + return true; + } + + public int fetch(Attribute a, int in) + { + return in; + } + } + + /** + * Adapter for the {@code xid8} type, returned as a primitive {@code long}. 
+ */ + public static class Xid8 extends Adapter.AsLong.Unsigned + { + private static final Simple s_typeName = Simple.fromJava("xid8"); + private static RegType s_type; + + private Xid8(Configuration c) + { + super(c, null); + } + + @Override + public boolean canFetch(RegType pgType) + { + RegType myType = s_type; + if ( null != myType ) + return myType == pgType; + if ( ! s_typeName.equals(pgType.name()) + || PG_CATALOG != pgType.namespace() ) + return false; + /* + * Reference writes are atomic and RegTypes are singletons, + * so this race isn't evil. + */ + s_type = pgType; + return true; + } + + public long fetch(Attribute a, long in) + { + return in; + } + } + + /** + * Adapter for the {@code tid} type using the functional interface + * {@link Internal.Tid Internal.Tid}. + */ + public static class Tid extends Adapter.As + { + private static final Simple s_typeName = Simple.fromJava("tid"); + private static RegType s_type; + private Internal.Tid m_ctor; + + public Tid(Configuration c, Internal.Tid ctor) + { + super(ctor, null, c); + m_ctor = ctor; + } + + @Override + public boolean canFetch(RegType pgType) + { + RegType myType = s_type; + if ( null != myType ) + return myType == pgType; + if ( ! s_typeName.equals(pgType.name()) + || PG_CATALOG != pgType.namespace() ) + return false; + /* + * Reference writes are atomic and RegTypes are singletons, + * so this race isn't evil. + */ + s_type = pgType; + return true; + } + + public T fetch(Attribute a, Datum.Input in) + throws IOException, SQLException + { + try + { + in.pin(); + ByteBuffer bb = in.buffer(); + /* + * The following read could be unaligned; the C code declares + * BlockIdData trickily to allow it to be short-aligned. + * Java ByteBuffers will break up unaligned accesses as needed. 
+ */ + int blockId = bb.getInt(); + short offsetNumber = bb.getShort(); + return m_ctor.construct(blockId, offsetNumber); + } + finally + { + in.unpin(); + in.close(); + } + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/adt/package-info.java b/pljava/src/main/java/org/postgresql/pljava/pg/adt/package-info.java new file mode 100644 index 000000000..02e2c7cdf --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/adt/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +/** + * Built-in implementations of {@link Adapter Adapter} for common PostgreSQL + * data types. + * + * @author Chapman Flack + */ +package org.postgresql.pljava.pg.adt; + +import org.postgresql.pljava.Adapter; diff --git a/pljava/src/main/java/org/postgresql/pljava/pg/package-info.java b/pljava/src/main/java/org/postgresql/pljava/pg/package-info.java new file mode 100644 index 000000000..0b6109730 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/pg/package-info.java @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +/** + * Package that provides the running-directly-in-PG-backend implementations + * for the API in {@link org.postgresql.pljava.model}. 
+ * + * @author Chapman Flack + */ +package org.postgresql.pljava.pg; diff --git a/pljava/src/main/java/org/postgresql/pljava/sqlj/Loader.java b/pljava/src/main/java/org/postgresql/pljava/sqlj/Loader.java index 0301bd970..ba21a7153 100644 --- a/pljava/src/main/java/org/postgresql/pljava/sqlj/Loader.java +++ b/pljava/src/main/java/org/postgresql/pljava/sqlj/Loader.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2021 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -20,6 +20,8 @@ import static java.lang.invoke.MethodType.methodType; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.security.CodeSigner; @@ -130,7 +132,7 @@ public URL nextElement() return entryURL(m_entryIds[m_top++]); } } - private static final Identifier.Simple PUBLIC_SCHEMA = + public static final Identifier.Simple PUBLIC_SCHEMA = Identifier.Simple.fromCatalog("public"); private static final Map @@ -229,7 +231,7 @@ public static ClassLoader getSchemaLoader(Identifier.Simple schema) { while(rs.next()) { - URL jarUrl = new URL("sqlj:" + rs.getString(2)); + URL jarUrl = new URI("sqlj", rs.getString(2), null).toURL(); CodeSource cs = new CodeSource(jarUrl, (CodeSigner[])null); inner.setInt(1, rs.getInt(1)); @@ -255,7 +257,7 @@ public static ClassLoader getSchemaLoader(Identifier.Simple schema) } } } - catch ( MalformedURLException e ) + catch ( URISyntaxException | MalformedURLException e ) { throw unchecked(e); } @@ -353,12 +355,14 @@ private static URL entryURL(int entryId) { try { - return doPrivileged(() -> new URL( + @SuppressWarnings("deprecation") // Java >= 20: URL.of(uri,handler) + URL u = doPrivileged(() -> new URL( "dbf", "localhost", -1, "/" + entryId, EntryStreamHandler.getInstance())); + return u; 
} catch(MalformedURLException e) { diff --git a/pom.xml b/pom.xml index 0749b4acf..f2fb5d0ed 100644 --- a/pom.xml +++ b/pom.xml @@ -39,7 +39,7 @@ https://www.postgresql.org/list/pljava-dev/ https://web.archive.org/web/*/http://lists.pgfoundry.org/pipermail/pljava-dev/ - gmane.comp.db.postgresql.pljava on news.gmane.io + news://news.gmane.io/gmane.comp.db.postgresql.pljava @@ -70,45 +70,20 @@ - nashorngone + nashornmod [15,) - - org.codehaus.mojo - properties-maven-plugin - 1.0.0 - - - initialize - - set-system-properties - - - - - true - - - - - - org.apache.maven.plugins maven-antrun-plugin - org.graalvm.js - js - 20.1.0 - - - org.graalvm.js - js-scriptengine - 20.1.0 + org.openjdk.nashorn + nashorn-core + 15.4 @@ -132,17 +107,17 @@ org.apache.maven.plugins maven-install-plugin - 2.5.2 + 3.1.0 org.apache.maven.plugins maven-resources-plugin - 3.0.1 + 3.3.0 org.apache.maven.plugins maven-compiler-plugin - 3.8.1 + 3.10.1 @@ -152,7 +127,7 @@ maven-compiler-plugin ${project.build.sourceEncoding} - 9 + 9 true true @@ -160,24 +135,17 @@ org.apache.maven.plugins maven-jar-plugin - 3.0.2 + 3.3.0 org.apache.maven.plugins maven-surefire-plugin - 3.0.0-M4 + 3.0.0-M7 org.apache.maven.plugins maven-site-plugin - 3.9.1 - - - net.trajano.wagon - wagon-git - 2.0.4 - - + 3.12.1 false @@ -197,7 +165,7 @@ org.apache.maven.plugins maven-project-info-reports-plugin - 3.1.0 + 3.4.5 diff --git a/src/site/markdown/build/build.md b/src/site/markdown/build/build.md index d48103354..9c0dee7ac 100644 --- a/src/site/markdown/build/build.md +++ b/src/site/markdown/build/build.md @@ -60,7 +60,13 @@ There is a "troubleshooting the build" section at the end of this page. mvn --version - succeeds. + succeeds. It reports not only the version of Maven, but the version of Java + that Maven has found and is using, which must be a Java version supported + for building PL/Java (see more on [version compatibility](versions.html)). 
+ If Maven is not finding and using the intended Java version, the environment + variable `JAVA_HOME` can be set to point to the desired Java installation, + and `mvn --version` should then confirm that the Java being found is the + one intended. If you have more than one version installed of PostgreSQL, Java, or the compile/link tools, make sure the ones found on your search path are the @@ -222,30 +228,6 @@ build issues that are commonly asked about.* [btwp]: https://github.com/tada/pljava/wiki/Build-tips -#### Not all `[ERROR]`s are errors - -In the part of the build that compiles the native code, you may see lines of -output starting with `[ERROR]`, but the build completes and shows success for -all subprojects. - -Maven is capturing output from the C compiler and adding a tag at the front of -each line. If the line from the C compiler contains the string `warning:` then -Maven adds a `[WARNING]` tag at the front of the line; otherwise it adds -`[ERROR]`. That is how Maven can turn a multiple-line warning, like - -``` -type/String.c: In function 'String_createJavaString': -type/String.c:132:43: warning: conversion to 'jlong' from 'Size' may change - the sign of the result [-Wsign-conversion] - bytebuf = JNI_newDirectByteBuffer(utf8, srcLen); - ^ -``` - -(where only the second line contains `warning:`) into what looks like one -`[WARNING]` and several `[ERROR]`s. - -If the compiler reports any actual errors, the build will fail. - #### Capture the output of `mvn -X` The `-X` option will add a lot of information on the details of Maven's @@ -259,3 +241,11 @@ On the first run, Maven will produce a lot of output while downloading all of the dependencies needed to complete the build. It is better, if the build fails, to simply run Maven again and capture the output of that run, which will not include all of the downloading activity. 
+ +As an alternative, the flood of messages reflecting successful dependency +downloads in a first run can be suppressed by adding this option on the `mvn` +command line: + +``` +-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn +``` diff --git a/src/site/markdown/build/buildmsvc.md b/src/site/markdown/build/buildmsvc.md index d9af41837..715a05426 100644 --- a/src/site/markdown/build/buildmsvc.md +++ b/src/site/markdown/build/buildmsvc.md @@ -21,9 +21,6 @@ PostgreSQL and PL/Java. Using a *newer* version of Visual Studio (including the Community 2015 version) will generally work, while older versions are more likely to be problematic. -* PostgreSQL 9.1 to 9.3 were built using Visual Studio 2010. -* PostgreSQL 9.4 was built using Visual Studio 2013. - ## Software Prerequisites 0. You will need an appropriate version of [Microsoft Visual Studio][msvc]. When @@ -50,11 +47,6 @@ likely to be problematic. an `INCLUDEDIR-SERVER` line, and list the directory it refers to. There should be a bunch of `*.h` files there. -0. OpenSSL headers: if using an EnterpriseDB PostgreSQL build older than 9.3, - these will be missing. They can be obtained from a 9.3 or later - EDB PostgreSQL build by copying the `include/openssl` directory and - its contents. - 0. You will need to install [Maven][mvn] and add it to your PATH so that mvn --version @@ -219,26 +211,5 @@ dependency when [building your own projects that _use_ PL/Java][jproj]. ### Troubleshooting the build -If something fails, two tricks may be helpful. The C compilation may produce -a lot of nuisance warnings, because the Maven plugin driving it enables many -types of warning that would be impractical to fix. With many warnings it may -be difficult to pick out messages that matter. - -If the link step of the build reports that the symbol `rint` is undefined you -are probably using an older version of Visual Studio (2010) with a newer version -of Postgresql (9.4). 
This symbol is defined in Visual Studio 2013 and later and -the Postgresql 9.4 headers lack the appropriate conditional options for the -older compilers. You will need to use a newer version of Visual Studio. - -On a machine with many cores, messages from several compilation threads may be -intermingled in the output so that related messages are hard to identify. -The option `-Dnar.cores=1` will force the messages into a sequential order -(and has little effect on the speed of a PL/Java build). - -The `-X` option will add a lot of information on the details of Maven's -build activities. - - mvn -X -Dnar.cores=1 clean install - -There is a more comprehensive "troubleshooting the build" section +There is an extensive "troubleshooting the build" section on the [main build page][mbp]. diff --git a/src/site/markdown/build/freebsd.md b/src/site/markdown/build/freebsd.md index 7995f7d19..613650e05 100644 --- a/src/site/markdown/build/freebsd.md +++ b/src/site/markdown/build/freebsd.md @@ -1,23 +1,7 @@ # Building on FreeBSD -At one time, [FreeBSD][]'s threading library would malfunction if it was -dynamically loaded after the start of a program that did not use threads -itself. That was a problem for PL/Java on FreeBSD, because PostgreSQL -itself does not use threads, but Java does. The only known workaround was -to build PostgreSQL itself from source, with the thread library included -in linking. - -The same problem was [reported to affect other PostgreSQL extensions][rep] -such as `plv8` and `imcs` also. - -The [manual page for FreeBSD's libthr][manthr] was edited -[in February 2015][thrdif] to remove the statement of that limitation, -and the updated manual page appears first in [FreeBSD 10.2][rel102], -so in FreeBSD 10.2 or later, PL/Java (and other affected extensions) -may work without the need to build PostgreSQL from source. 
+Building on [FreeBSD][] should proceed just as it does on Linux, +as of late 2023, according to Achilleos Mantzios, who provided the patch +adding the necessary build rules. [FreeBSD]: https://www.freebsd.org/ -[rep]: https://lists.freebsd.org/pipermail/freebsd-hackers/2014-April/044961.html -[manthr]: https://www.freebsd.org/cgi/man.cgi?query=libthr&apropos=0&sektion=3&manpath=FreeBSD+10.2-RELEASE&arch=default&format=html -[thrdif]: https://svnweb.freebsd.org/base/head/lib/libthr/libthr.3?r1=272153&r2=278627 -[rel102]: https://www.freebsd.org/releases/10.2R/announce.html diff --git a/src/site/markdown/build/package.md b/src/site/markdown/build/package.md index e61c80d95..346791829 100644 --- a/src/site/markdown/build/package.md +++ b/src/site/markdown/build/package.md @@ -25,13 +25,6 @@ When building a package, you are encouraged to set the default `pljava.libjvm_location` to the library of a JRE version that is expected to be present on your platform. -**Note:** when building on Windows, the `-Dpljava.libjvmdefault` option is -likely to produce a failed build or the wrong stored value for the library -path. A fix for this option on Windows is unlikely (see [issue 190][bug190]); -if preparing a package for Windows, it will be simplest to use a patch that -changes the definition of `PLJAVA_LIBJVMDEFAULT` in -`pljava-so/src/main/c/Backend.c`. - [locatejvm]: ../install/locatejvm.html [bug190]: https://github.com/tada/pljava/issues/190 @@ -78,11 +71,13 @@ shown in that link to disable the repacking of jars. The one part of PL/Java that could, if desired, be handled in the manner of Java libraries is `pljava-api`. This single jar file is needed on the classpath when compiling Java code that will be loaded into PL/Java in the database. -It is _not_ needed at the time that code will _run_. That means it could be -appropriate to treat `pljava-api` as a separate `-devel` package, if your -packaging guidelines encourage such a distinction. 
In that case, you would -exclude the `pljava-api` jar file from the main package, and produce a `-devel` -package that provides it. +That means it could be +appropriate to provide `pljava-api` in a separate `-devel` package, if your +packaging guidelines encourage such a distinction, where it would be installed +in the expected place for a conventional Java library. (The API jar must still +be included in the main package also, installed in the location where PostgreSQL +expects it. There may be no need, therefore, for the main package to depend on +the `-devel` package.) A `-devel` package providing `pljava-api` might appropriately follow java library packaging guidelines to ensure it appears on a developer's @@ -108,7 +103,7 @@ is the useful one to have in an installation target host's repository.) The PL/Java build does not automatically build javadocs. Those that go with `pljava-api` can be easily generated by running -`mvn --projects pljava-api javadoc:javadoc` to build them, then collecting +`mvn --projects pljava-api site` to build them, then collecting the `apidocs` subtree from `target/site`. They can be included in the same package as `pljava-api` or in a separate javadoc package, as your guidelines may require. @@ -126,7 +121,7 @@ been built first and installed into the build host's local Maven repository. Note that many of the examples do double duty as tests, as described in _confirming the build_ below. -When building for (and with) Java 8 or later and PostgreSQL 8.4 or later, +Unless they are not wanted, the XML examples based on the Saxon library should also be built, by adding `-Psaxon-examples` to the `mvn` command line. @@ -137,7 +132,7 @@ the package. 
`-Dpljava.libjvmdefault=`_path/to/jvm-shared-object_ : As suggested earlier, please use this option to build a useful default -into PL/Java for the `pljava.libjvm_location` PostgreSQL variable, users +into PL/Java for the `pljava.libjvm_location` PostgreSQL variable, so users of your package will not need to set that variable before `CREATE EXTENSION pljava` works. @@ -170,6 +165,14 @@ Some tests involving Unicode are skipped if the `server_encoding` is not `utf-8`, so it is best to run them in a server instance created with that encoding. +To simplify automated testing, the jar file that is the end product of a full +PL/Java source build contains a class that can serve as a PostgreSQL test +harness from Java's `jshell` script engine. It is documented [here][node], +and the continuous-integration scripts in PL/Java's own source-control +repository can be consulted as examples of its use. + +[node]: ../develop/node.html + ## Packaging the built items The end product of a full PL/Java source build is a jar file that functions as @@ -179,8 +182,11 @@ those needed to support `ALTER EXTENSION UPGRADE`. It also contains the `pljava-api` jar, needed for developing Java code to use in a database with PL/Java, and the `pljava-examples` jar. As discussed above, -these may be omitted from a base package and supplied separately, if packaging -guidelines require. +the examples jar may be omitted from a base package and supplied separately, +if packaging guidelines require, and the API jar may be included also in a +`-devel` package that installs it in a standard Java-library location. (However, +the API jar cannot be omitted from the base package; it is needed at runtime, in +the `SHAREDIR/pljava` location where the extension expects it.) The self-extracting jar consults `pg_config` at the time of extraction to determine where the files should be installed. @@ -216,9 +222,10 @@ will have on the target system. 
In addition to the files named in the self-extractor's output, additional files could be included in the package (if guidelines require the README -or COPYRIGHT, for example). As discussed above, the `pljava-api` jar could -be filtered from the list if it will be delivered in a separate `-devel` -package, and the same could be done for `pljava-examples`. +or COPYRIGHT, for example). As discussed above, the `pljava-examples` jar could +be filtered from the list if it will be delivered in a separate +package, and the `pljava-api` jar could be additionally delivered in a separate +`-devel` package (but must not be excluded from the base package). [install]: ../install/install.html diff --git a/src/site/markdown/build/versions.md b/src/site/markdown/build/versions.md index d5d36fb9e..f8df0aea8 100644 --- a/src/site/markdown/build/versions.md +++ b/src/site/markdown/build/versions.md @@ -1,13 +1,22 @@ # Versions of external packages needed to build and use PL/Java -As of mid-2020, the following version constraints are known. +As of spring 2025, the following version constraints are known. ## Java No version of Java before 9 is supported. The PL/Java code makes use of Java features first appearing in Java 9. -As for later versions of Java, backward compatibility in the language is +PL/Java's [security policy enforcement][policy] is available only when the Java +version at run time is 9 through 23. On Java 24 or later runtime, PL/Java 1.6.x +can only run [with no policy enforcement][nopolicy]. This is independent of +the Java version used at build time, and so the availability of enforcement +can be changed at any time after building, by changing the +`pljava.libjvm_location` [configuration variable][jvml] to point to a Java +shared object of a different version. + +Other than the loss of policy enforcement in Java 24, backward compatibility +in the language is
Before Java 8, most likely problem areas with a new Java version tended to be additions to the JDBC API that PL/Java had not yet implemented. Since Java 8, even JDBC additions have not caused problems for @@ -24,6 +33,11 @@ itself was built with, as long as that later JRE version is used at run time. That also allows PL/Java to take advantage of recent Java implementation advances such as [class data sharing][cds]. +Some builds of Java 20 are affected by a bug, [JDK-8309515][]. PL/Java will +report an error if it detects it is affected by that bug, and the solution can +be to use a Java version earlier than 20, or one recent enough to have the bug +fixed. The bug was fixed in Java 21. + PL/Java has been successfully used with [Oracle Java][orj] and with [OpenJDK][], which is available with [either the Hotspot or the OpenJ9 JVM][hsj9]. It can also be built and used @@ -38,6 +52,7 @@ the `mvn` command line. [OpenJDK]: https://adoptopenjdk.net/ [hsj9]: https://www.eclipse.org/openj9/oj9_faq.html [GraalVM]: https://www.graalvm.org/ +[JDK-8309515]: https://bugs.openjdk.org/browse/JDK-8309515 ## Maven @@ -56,13 +71,11 @@ versions 4.3.0 or later are recommended in order to avoid a ## PostgreSQL -PL/Java 1.6.0 does not commit to support PostgreSQL earlier than 9.5. -(Support for 9.4 or even 9.3 might be feasible to add if there is a pressing -need.) +The PL/Java 1.6 series does not support PostgreSQL earlier than 9.5. More current PostgreSQL versions, naturally, are the focus of development and receive more attention in testing. -PL/Java 1.6.0 has been successfully built and run on at least one platform -with PostgreSQL versions from 13 to 9.5, the latest maintenance +PL/Java 1.6.9 has been successfully built and run on at least one platform +with PostgreSQL versions from 17 to 9.5, the latest maintenance release for each. 
diff --git a/src/site/markdown/develop/contextloader.md b/src/site/markdown/develop/contextloader.md index fda8b120d..49f7da5e5 100644 --- a/src/site/markdown/develop/contextloader.md +++ b/src/site/markdown/develop/contextloader.md @@ -24,11 +24,14 @@ below. It is also possible for an application or library to create subclasses of `Thread` that override the behavior of `getContextClassLoader` so that -the value set by PL/Java will have no effect. PL/Java does not detect or work -around such a case. A clear sign of code that does subclass `Thread` -in this way is that it will need the `enableContextClassLoaderOverride` -[`RuntimePermission`][runtimeperm] to be granted in -the [policy](../use/policy.html). +the value set by PL/Java will have no effect. PL/Java does not detect such +a case to work around it. + +When PL/Java is used [with policy enforcement][policy], a clear sign of code +that does subclass `Thread` in this way is that it will need the +`enableContextClassLoaderOverride` [`RuntimePermission`][runtimeperm] to be +granted in the [policy][]. When PL/Java is used [without enforcement][nopolicy], +there will be no such clear sign, making a problem of this kind harder to trace. ## Effects on application code @@ -97,3 +100,5 @@ in the `pljava.vmoptions` [setting](../use/variables.html). 
[tfndi]: https://docs.oracle.com/javase/9/docs/api/javax/xml/transform/TransformerFactory.html#newDefaultInstance-- [runtimeperm]: https://docs.oracle.com/en/java/javase/14/docs/api/java.base/java/lang/RuntimePermission.html [baseudt]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/BaseUDT.html +[policy]: ../use/policy.html +[nopolicy]: ../use/unenforced.html diff --git a/src/site/markdown/develop/node.md b/src/site/markdown/develop/node.md index 1011182d2..9cb80ba52 100644 --- a/src/site/markdown/develop/node.md +++ b/src/site/markdown/develop/node.md @@ -20,12 +20,18 @@ project's repository, as a way to keep as much as possible of the testing code common across platforms. The overall flavor, and even some of the method names, follow the `PostgresNode` -Perl module that has been part of PostgreSQL's "PGXS" extension-building tools -since 2015, so a quick review of that follows. +Perl module that became part of PostgreSQL's "PGXS" extension-building tools +in 2015, so a quick review of that follows. -## Similarities to PostgreSQL's `PostgresNode` Perl module +For PostgreSQL 15, the module distributed with PostgreSQL was renamed from +`PostgresNode` to `PostgreSQL::Test::Cluster`, with no essential change in +functionality (though `get_new_node` did become, simply, `new`). To avoid +needless churn, this Java class still has the historical name and methods. 
-When used from a testing script written in Perl, the methods of `PostgresNode` +## Similarities to the upstream `PostgreSQL::Test::Cluster` Perl module + +When used from a testing script written in Perl, the methods of +`PostgreSQL::Test::Cluster` make it easy to spin up and tear down one or more PostgreSQL instances, running in temporary directories, listening on temporary ports, non-interfering with each other or with production instances using the standard locations and ports, @@ -41,7 +47,7 @@ $n1->stop(); # stop the server $n1->clean_node(); # recursively delete the temporary location ``` -`PostgresNode.pm` illustrates the immense utility of making just a few +`PostgreSQL::Test::Cluster` illustrates the immense utility of making just a few well-chosen methods available, when there is already an expressive scripting language at hand (Perl) for putting those methods to use. @@ -51,7 +57,7 @@ installer jar on its classpath, and you have an interactive, scriptable version of Java, with the methods of `Node.class` available in it. The ones that correspond to the Perl example above have the same names, for -consistency (right down to the Perlish spelling with underscores rather than +familiarity (right down to the Perlish spelling with underscores rather than Javaish camelCase): ```java @@ -65,9 +71,11 @@ n1.clean_node() ``` `jshell` has to be run with a rather lengthy command line to get to this point; -more on that later. But once started, it presents a familiar `PostgresNode`-like +more on that later. But once started, it presents a familiar +`PostgreSQL::Test::Cluster`-like environment. As the example shows, `jshell` is lenient about statement-ending -semicolons. +semicolons. (Using them is still advisable, though; that leniency has fiddly +exceptions, such as not applying to pasted text.) 
## `Node.class` in detail @@ -102,19 +110,16 @@ The full set of `Node` methods available can be seen Running `initdb` and starting a server are all well and good, but sooner or later a test may need to connect to it. That requires a JDBC driver to be on the -classpath also: specifically `pgjdbc-ng` (at least, that is the one that's been -tested and whose URL syntax is built in to `Node`). The older `pgjdbc` punts on -the correct handling of warning/notice responses from the server, which seems -rather disqualifying for a testing environment. In `pgjdbc-ng`, notices and -warnings (any PostgreSQL severity less than `ERROR` and at or above the -`client_min_messages` setting) are chained together as `SQLWarning` instances, -as JDBC provides. - -A new profile has been added to PL/Java's Maven build, and can be activated with -`-Ppgjdbc-ng` on the `mvn` command line. It has no effect but to declare an -extra dependency on the `pgjdbc-ng` dependencies-included jar. It is not used in -the build, but Maven will have downloaded it to the local repository, and that -location can be added to `jshell`'s classpath to make the driver available. +classpath also; either `PGJDBC` or `pgjdbc-ng` will work, with a few minor +differences. + +New profiles have been added to PL/Java's Maven build, and can be activated with +`-Ppgjdbc` or `-Ppgjdbc-ng` on the `mvn` command line. They have no effect but +to declare an extra dependency on the corresponding dependencies-included driver +jar. It is not used in the build, but Maven will have downloaded it to the local +repository, and that location can be added to `jshell`'s classpath to make the +driver available. (In the case of `PGJDBC`, adding the jar to the module path +also works.) That addition leads to the final long unwieldy command line needed to start `jshell`, which can be seen in all its glory toward the end of this page. 
@@ -122,12 +127,12 @@ Once that is copied and pasted into a terminal and any local paths changed, the rest is easy: ```java -import org.postgresql.pljava.packaging.Node -Node n1 = Node.get_new_node("TestNode1") -n1.init() -n1.start() -import java.sql.Connection -Connection c1 = n1.connect() +import org.postgresql.pljava.packaging.Node; +Node n1 = Node.get_new_node("TestNode1"); +n1.init(); +n1.start(); +import java.sql.Connection; +Connection c1 = n1.connect(); ``` Once you have an open connection (or several), the convenience methods `Node` @@ -138,11 +143,11 @@ parameter. ```java import static org.postgresql.pljava.packaging.Node.qp; // query-print -qp(c1, "create table foo (bar int, baz text)") -qp(c1, "insert into foo (values (1, 'Howdy!'))") -qp(c1, "select 1/0") -qp(c1, "select pg_sleep(1.5)") -qp(c1, "select * from foo") +qp(c1, "CREATE TABLE foo (bar int, baz text)"); +qp(c1, "INSERT INTO foo (VALUES (1, 'Howdy!'))"); +qp(c1, "SELECT 1/0"); +qp(c1, "SELECT pg_sleep(1.5)"); +qp(c1, "SELECT * FROM foo"); ``` This example shows `qp` used several different ways: with a DDL statement that @@ -152,18 +157,18 @@ produces a one-row result with one column typed `void` and always null), and one that returns a general query result. What it prints: ``` -jshell> qp(c1, "create table foo (bar int, baz text)") +jshell> qp(c1, "CREATE TABLE foo (bar int, baz text)"); -jshell> qp(c1, "insert into foo (values (1, 'Howdy!'))") +jshell> qp(c1, "INSERT INTO foo (VALUES (1, 'Howdy!'))"); -jshell> qp(c1, "select 1/0") +jshell> qp(c1, "SELECT 1/0"); -jshell> qp(c1, "select pg_sleep(1.5)") +jshell> qp(c1, "SELECT pg_sleep(1.5)"); -jshell> qp(c1, "select * from foo") +jshell> qp(c1, "SELECT * FROM foo"); ... text @@ -192,6 +197,16 @@ the output format consistent. The `void` output is special treatment for the common case of a result set with only the `void` column type, to spare the effort of generating a whole `WebRowSet` XML that only shows nothing is there. 
+The results shown above were obtained with `pgjdbc-ng`. If using `PGJDBC`, you +will notice these minor differences: + +* For the first case shown, DDL with no result, `PGJDBC` will present a zero-row + success result, the same as for DML that did not affect any rows. This has + to be taken into account if writing a state machine to check results, as + discussed further below. +* The `message` attribute produced for an error will have a prefix of `ERROR: ` + (or the corresponding word in the PostgreSQL server's configured language). + #### `qp` dissected `qp` is for interactive, exploratory use, generating printed output. For @@ -206,7 +221,8 @@ update count), or throwables (caught exceptions, or `SQLWarning` instances). The JDBC `Statement` is polled for new `SQLWarning`s before checking for each next result (`ResultSet` or update count). An error or exception that is thrown and caught will be placed on the stream when caught (and will be the last thing on -the stream). +the stream, though it may carry chains of cause, suppressed, or next exceptions +that may follow it if `flattenDiagnostics` is used on the stream). All 'notices' from PostgreSQL (severity below `ERROR` but at or above `client_min_messages`) are turned into `SQLWarning` instances by `pgjdbc-ng`, @@ -215,6 +231,16 @@ of the details other than the message and SQLState code. `Node` classifies them as `info` if the SQLState 'class' (leftmost two positions) is `00`, otherwise as `warning`. Exceptions of any other kind are classified as `error`. +`PGJDBC` also turns notices below `ERROR` into `SQLWarning` instances, but +provides access to the severity tag from the server, so `Node` uses that to +classify them as `warning` or `info`, instead of the class `00` rule. If the +severity is `WARNING` (or happens to be null for some reason), `Node` will +classify the notice as `warning`; any other severity will be classified as +`info`. 
(Note, however, that this scheme will not work if the server is +configured for a language that uses a different word for `WARNING`. To make it +work in that case, you can call `Node.set_WARNING_localized` in advance, passing +the word that PostgreSQL uses for `WARNING` in your language.) + As it happens, there is also an overload of `qp` with just one `Stream` parameter. If you have already run a query with `q` and have the result stream, and decide you just want to print that, just pass it to `qp`. There are other @@ -240,15 +266,15 @@ will install it from there, given a path to the repository root and the desired version of Saxon-HE. Not to be outdone, `installSaxonAndExamplesAndPath` combines the steps in correct order to install the Saxon jar, place it on the classpath, install and deploy the examples jar, and set a final classpath that -installs both. +includes both. ```java -import static java.nio.file.Paths.get -import java.sql.Connection -import org.postgresql.pljava.packaging.Node -import static org.postgresql.pljava.packaging.Node.qp +import static java.nio.file.Paths.get; +import java.sql.Connection; +import org.postgresql.pljava.packaging.Node; +import static org.postgresql.pljava.packaging.Node.qp; -Node n1 = Node.get_new_node("TestNode1") +Node n1 = Node.get_new_node("TestNode1"); try ( AutoCloseable t1 = n1.initialized_cluster(); @@ -273,8 +299,8 @@ try ( { qp(Node.installSaxonAndExamplesAndPath(c, get(System.getProperty("user.home"), ".m2", "repository").toString(), - "10.2", - true)) + "10.9", + true)); } } /exit @@ -290,6 +316,8 @@ would expect of a test. Worked-out examples that do the rest of that can be seen in the project repository in the configuration files for the CI testing services. +#### `stateMachine` for checking results + One last `Node` method most useful for checking returned results programmatically is `stateMachine` (full description in [the javadocs][nodeapi]). 
For example, the `installSaxonAndExamplesAndPath` call @@ -353,6 +381,36 @@ The `isDiagnostic` method shown above isn't part of the `Node` class; in the actual test configurations in the repository, it is trivially defined in `jshell` a few lines earlier. Not everything needs to be built in. +The difference in treatment of no-result DDL statements between drivers (where +`pgjdbc-ng` really has no result, and `PGJDBC` has a zero-row update count as +it would for a DML statement) can complicate writing a state machine that works +with both drivers. `Node` predefines a state function +`NOTHING_OR_PGJDBC_ZERO_COUNT` that consults the `s_urlForm` static field to +determine which driver is in use, and then moves to the numerically next state, +after consuming + +* nothing, if the driver is `pgjdbc-ng`, or +* a single zero row count (rejecting any other input), if the driver + is `PGJDBC`. + +```java +succeeding &= stateMachine( + "descriptive string for this state machine", + null, + + q(c, "CREATE TABLE foo (bar int, baz text)") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), // so they also appear in the log + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + + Node.NOTHING_OR_PGJDBC_ZERO_COUNT, + + (o,p,q) -> null == o +); + +``` + ## Invoking `jshell` to use `Node.class` As hinted above, the command needed to get `jshell` started so all the foregoing @@ -369,7 +427,8 @@ jshell \ ``` where _$packageJar_ is a PL/Java self-installer jar, _$jdbcJar_ should point -to a `pgjdbc-ng` "fat jar" (`pgjdbc-ng-all`), and _$pgConfig_ should point to +to a "fat jar" for the JDBC driver of choice (`postgresql-`_version_`.jar` or +`pgjdbc-ng-all-`_version_`.jar`), and _$pgConfig_ should point to the `pg_config` executable for the PostgreSQL installation that should be used. (If there is only one PostgreSQL installation or the right `pg_config` will be found on the search path, it doesn't have to be specified.) 
@@ -379,13 +438,19 @@ The `-J--add-modules` is needed because even though `jshell` treats (because of `--execution local`) wouldn't know that without being told. The path given to `jshell` itself (`--class-path` without the `-J`) does not -need to mention the `pgjdbc-ng` jar, because it can be a provider of the +need to mention _$jdbcJar_, because that can be a provider of the `java.sql.Driver` service without having to be visible. If the script will -want to use `pgjdbc-ng`-specific classes, then the jar does have to be +want to use driver-specific extension classes, then the jar does have to be on `jshell`'s class path too. -The `noUnsafe` setting silences a complaint from the `netty` library -about Java (correctly!) denying it access to private internals. +When the driver is `PGJDBC`, it can be placed on the class path or on the module +path (in which case it becomes the named module `org.postgresql.jdbc`). Again, +it does not need to be on `jshell`'s module path also---the one without +`-J`---unless the script will be referring to driver-specific classes. + +The `noUnsafe` setting, needed only when the driver is `pgjdbc-ng`, silences +a complaint from the `netty` library about Java (correctly!) denying it access +to private internals. [jshell]: https://docs.oracle.com/en/java/javase/15/jshell/introduction-jshell.html -[nodeapi]: ../pljava-packaging/apidocs/org/postgresql/pljava/packaging/Node.html#method.summary +[nodeapi]: ../pljava-packaging/apidocs/org/postgresql/pljava/packaging/Node.html#method-summary diff --git a/src/site/markdown/examples/saxon.md b/src/site/markdown/examples/saxon.md index 213ef9195..de4f72495 100644 --- a/src/site/markdown/examples/saxon.md +++ b/src/site/markdown/examples/saxon.md @@ -451,18 +451,6 @@ string (`\A\z` in Java syntax): That workaround would also cause the replacement to happen if the input string is completely empty to start with, which might not be what's wanted. 
-#### Syntax in older PostgreSQL versions - -The desugared syntax shown above can be used in PostgreSQL versions as old -as 9.5. In 9.4 and 9.3, the same syntax, but with `=>` replaced by `:=` for -the named parameters, can be used. The functions remain usable in still -earlier PostgreSQL versions, but with increasingly convoluted SQL syntax -needed to call them; before 9.3, for example, there was no `LATERAL` in a -`SELECT`, and a function could not refer to earlier `FROM` items. Before 9.0, -named-parameter notation can't be used in function calls. Before 8.4, the -functions would have to be declared without their `DEFAULT` clauses and the -`IntervalStyle` settings, and would not work with PostgreSQL interval values. - ### Minimizing startup time Saxon is a large library, and benefits greatly from precompilation into a diff --git a/src/site/markdown/index.md b/src/site/markdown/index.md index 983395818..27dfaf763 100644 --- a/src/site/markdown/index.md +++ b/src/site/markdown/index.md @@ -49,7 +49,7 @@ use. 
[JDBC]: https://docs.oracle.com/javase/tutorial/jdbc/ [pljapi]: pljava-api/apidocs/org.postgresql.pljava/module-summary.html [annotations]: https://docs.oracle.com/javase/tutorial/java/annotations/ -[oppa]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/package-summary.html#package_description +[oppa]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/package-summary.html#package-description [trgann]: https://github.com/tada/pljava/blob/master/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java [depdesc]: https://github.com/tada/pljava/wiki/Sql-deployment-descriptor [jar]: https://docs.oracle.com/javase/tutorial/deployment/jar/index.html diff --git a/src/site/markdown/install/install.md.vm b/src/site/markdown/install/install.md.vm index b34979fd2..a8fa4e509 100644 --- a/src/site/markdown/install/install.md.vm +++ b/src/site/markdown/install/install.md.vm @@ -39,10 +39,9 @@ see the jar file there. __Upgrade installations__ below*. -*Not running PostgreSQL 9.1 or higher? Use +*Avoiding `CREATE EXTENSION` for some reason? Use `LOAD 'libpljava-so-${project.version}';` instead of the `CREATE EXTENSION` -command. (It works in later versions too, if you prefer it to -`CREATE EXTENSION`.) Using a Mac? Be sure to add `.bundle` at the end of the file name +command. Using a Mac? Be sure to add `.bundle` at the end of the file name in the `LOAD` command. Windows? Remove `lib` from the front. Something else? Keep reading.* @@ -98,8 +97,8 @@ you will have to become patient, and read the rest of this page. **You will most probably have to set `pljava.libjvm_location`.** See the next section. -**It is useful to consider `pljava.vmoptions`.** See the -[VM options page][vmop]. +**It is useful to consider `pljava.vmoptions`. For Java 18 or later it is +necessary.** See the [VM options page][vmop]. 
[vmop]: vmoptions.html @@ -134,11 +133,27 @@ things right on the first try, you might set them after, too.) For example: Then set this variable to the full pathname, including the filename and extension. + The version of Java this variable points to will determine whether PL/Java + can operate [with security policy enforcement][policy] or must be used + [with no policy enforcement][unenforced]. + +`pljava.allow_unenforced` +: When using PL/Java with no policy enforcement, this variable must be set + as described on the [PL/Java with no policy enforcement][unenforced] page. + +`pljava.allow_unenforced_udt` +: When using PL/Java with no policy enforcement, if PL/Java + [mapped user-defined types][mappedudt] are to be used, this variable must + be set as described on the + [PL/Java with no policy enforcement][unenforced] page. + `pljava.vmoptions` -: While it should not be necessary to set these before seeing the first signs - of life from PL/Java, there are useful options to consider setting here - before calling the installation complete. Some are described on the - [VM options page][vmop]. +: JVM options can be set here, a number of which are described on the + [VM options page][vmop]. For the most part, they are not essential to + seeing the first signs of life from PL/Java and can be left for tuning + later. However, on Java 18 and later, it is necessary to choose + a `-Djava.security.manager=...` setting before PL/Java will run at all. + Details are on the [VM options page][vmop]. `pljava.module_path` : There is probably no need to set this variable unless installation locations @@ -187,7 +202,7 @@ Another approach is to save them to the server's configuration file. If you wish PL/Java to be available for all databases in a cluster, it may be more convenient to put the settings in the file than to issue `ALTER DATABASE` for several databases, but `pg_ctl reload` will be needed -to make changed settings effective. 
Starting with PostgreSQL 9.4, +to make changed settings effective. `ALTER SYSTEM` may be used as an alternative to editing the file. If you have several databases in the cluster and you favor the @@ -197,9 +212,6 @@ sure that `CREATE EXTENSION` just works, in any database where PL/Java is wanted. Different per-database settings can still be made if one database needs them. -For PostgreSQL releases [earlier than 9.2][pre92], the configuration file is -the _only_ way to make your settings persistent. - $h2 Upgrade installations PL/Java performs an upgrade installation if there is already an `sqlj` schema @@ -215,13 +227,17 @@ $h2 Usage permission Installation of PL/Java creates two "languages", `java` and `javau`. Functions that specify `LANGUAGE javau` can be created only by superusers, -and are subject to very few restrictions at runtime. Functions that specify +and PL/Java's default policy grants them some filesystem access. Functions that +specify `LANGUAGE java` can be created by any user or role that has been granted -`USAGE` permission `ON LANGUAGE java`. They run under a security manager that -denies access to the host filesystem. +`USAGE` permission `ON LANGUAGE java`. The default policy grants them no extra +permissions. The exact permissions granted in either case can be customized +in [`pljava.policy`][policy]. -__Note: For implications when running on Java 17 or later, -please see [JEP 411][jep411]__. +__Important: The above description applies when PL/Java is run +[with policy enforcement][policy], available on Java 23 and older. +On stock Java 24 and later, PL/Java can only be run with no policy enforcement, +and the implications should be reviewed carefully [here][unenforced].__ PostgreSQL, by default, would grant `USAGE` to `PUBLIC` on the `java` language, but PL/Java takes a more conservative approach on a new installation. @@ -233,16 +249,19 @@ if a site prefers that traditional policy. 
In a repeat or upgrade installation (the language `java` already exists), no change will be made to the access permissions granted on it. +When running [with no policy enforcement][unenforced], PL/Java allows only +database superusers to create functions even in the `java` language, +disregarding any `USAGE` grants. + $h2 Special topics Be sure to read these additional sections if: -* You are installing to [a PostgreSQL release earlier than 9.2][pre92] +* You intend to use [Java 24 or later][unenforced] * You are installing on [a system using SELinux][selinux] * You are installing on [Mac OS X][osx] * You are installing on [Ubuntu][ubu] and the self-extracting jar won't work -[pre92]: prepg92.html [selinux]: selinux.html [osx]: ../build/macosx.html [ubu]: ../build/ubuntu.html @@ -254,9 +273,10 @@ $h3 Puzzling error message from `CREATE EXTENSION` ERROR: relation "see doc: do CREATE EXTENSION PLJAVA in new session" already exists -For PL/Java, `CREATE EXTENSION` (which works in PostgreSQL 9.1 and later) is a -wrapper around installation via `LOAD` (which works in all versions PL/Java -supports). A quirk of this arrangement is that PostgreSQL treats `LOAD` as a +For PL/Java, `CREATE EXTENSION` is a wrapper around installation via `LOAD` +(which was needed for PostgreSQL versions now of only historical interest, +and remains supported for cases where `CREATE EXTENSION` is too inflexible). +A quirk of this arrangement is that PostgreSQL treats `LOAD` as a no-op for the remainder of a session once the library has been loaded, so `CREATE EXTENSION pljava` works in a *fresh* session, but not in one where PL/Java's native code is already in place. 
@@ -283,7 +303,7 @@ Because PL/Java, by design, runs entirely in the backend process created for each connection to PostgreSQL, to configure it does not require any cluster-wide actions such as stopping or restarting the server, or editing the configuration file; any necessary settings can be made in SQL over -an ordinary connection (in PostgreSQL 9.2 and later, anyway). +an ordinary connection. _Caution: if you are installing a new, little-tested PL/Java build, be aware that in the unexpected case of a crash, the `postmaster` will kick other @@ -398,4 +418,6 @@ In this case, simply place the files in any location where you can make them readable by the user running `postgres`, and set the `pljava.*` variables accordingly. -[jep411]: https://github.com/tada/pljava/wiki/JEP-411 +[policy]: ../use/policy.html +[unenforced]: ../use/unenforced.html +[mappedudt]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/MappedUDT.html diff --git a/src/site/markdown/install/locate.md.vm b/src/site/markdown/install/locate.md.vm index 1564bafc0..2d94bc5b1 100644 --- a/src/site/markdown/install/locate.md.vm +++ b/src/site/markdown/install/locate.md.vm @@ -31,11 +31,9 @@ work with a `.jar` file no matter what. Relative to the root of the build tree, the jar file is found at -`pljava-packaging/target/pljava-${pgversion}-${naraol}.jar` +`pljava-packaging/target/pljava-${pgversion}.jar` -where `${pgversion}` resembles `pg9.4` and `${naraol}` is an -*architecture-os-linker* triple, for example `amd64-Linux-gpp` -or `amd64-Windows-msvc`. It contains these things: +where `${pgversion}` resembles `pg16`. The jar contains these things: `pljava/pkglibdir/libpljava-\${project.version}.so` (or `.dll`, etc.) : The architecture-dependent, native library portion of the PL/Java @@ -62,13 +60,26 @@ or `amd64-Windows-msvc`. It contains these things: : Various files scripting what `CREATE EXTENSION` or `ALTER EXTENSION ... UPDATE` really do. 
+`pljava/sysconfdir/pljava.policy` +: Policy file defining the Java permissions granted to the languages `java` + and `javaU`, to any custom language aliases, or to specific jars, as + described [here][policy]. Unused if PL/Java is run + [without policy enforcement][nopolicy]. + It could happen that future versions add more files in the jar before updating this page. Also, every jar file has a `MANIFEST.MF`, and this file also contains a `JarX.class` to make it self-extracting; these are not otherwise important to PL/Java. See the [installation page][inst] for how to control the self-extraction. +Another file, `Node.class`, present in this jar is also unimportant for +normal installation, but provides some facilities for automated testing, +as described [here][node]. + [examples]: ../examples/examples.html +[node]: ../develop/node.html +[policy]: ../use/policy.html +[nopolicy]: ../use/unenforced.html Extract the needed files from this archive and place them in appropriate locations, then complete the [installation][inst]. @@ -112,11 +123,8 @@ $h3 The architecture-dependent PL/Java native library This is built by the `pljava-so` subproject. Its filename extension can depend on the operating system: `.so` on many systems, `.dll` on Windows, `.bundle` on Mac OS X / Darwin. Relative to the source root where the build was performed, it -is at the end of a long and redundant path that contains the project version -(twice), an "architecture-OS-linker" string (twice), and a build type -("plugin"), also twice. +is found in the `pljava-so/pljava-pgxs` directory. 
-An example, for version `${project.version}` and arch-os-linker of -`amd64-Linux-gpp` is (very deep breath): +An example for version `${project.version}` is: -`pljava-so/target/nar/pljava-so-${project.version}-amd64-Linux-gpp-plugin/lib/amd64-Linux-gpp/plugin/libpljava-so-${project.version}.so` +`pljava-so/pljava-pgxs/libpljava-so-${project.version}.so` diff --git a/src/site/markdown/install/locatejvm.md b/src/site/markdown/install/locatejvm.md index 1d6fab37f..24c4b6f80 100644 --- a/src/site/markdown/install/locatejvm.md +++ b/src/site/markdown/install/locatejvm.md @@ -27,6 +27,18 @@ by a process, this works: strace -e open java 2>&1 | grep libjvm ``` +## Version of the Java library selected + +The library pointed to be `pljava.libjvm_location` must be a Java 9 or later +JVM for the PL/Java 1.6 series. The actual version of the library will determine +what Java language features are available for PL/Java functions to use. + +The Java version also influences whether PL/Java can operate +[with security policy enforcement][policy] or +[with no policy enforcement][unenforced]. For stock Java 24 or later, it is only +possible to operate with no enforcement, and the implications detailed for +[PL/Java with no policy enforcement][unenforced] should be carefully reviewed. + ## Using a less-specific path The methods above may find the `libjvm` object on a very specific path @@ -47,3 +59,7 @@ generic one like `jre`, linked to whichever Java version is considered current. Using an alias that is too generic could possibly invite headaches if the default Java version is ever changed to one your PL/Java modules were not written for (or PL/Java itself was not built for). 
+ + +[policy]: ../use/policy.html +[unenforced]: ../use/unenforced.html diff --git a/src/site/markdown/install/prepg92.md b/src/site/markdown/install/prepg92.md deleted file mode 100644 index b1fb8fbff..000000000 --- a/src/site/markdown/install/prepg92.md +++ /dev/null @@ -1,63 +0,0 @@ -# Installation on PostgreSQL releases earlier than 9.2 - -In PostgreSQL releases 9.2 and later, PL/Java can be installed entirely -without disturbing the `postgresql.conf` file or reloading/restarting the -server: the configuration variables can be set interactively in a session -until PL/Java loads sucessfully, then saved with a simple -`ALTER DATABASE` _dbname_ `SET` _var_ `FROM CURRENT` for each setting -that had to be changed. - -Releases earlier than 9.2 are slightly less convenient. It is still possible -to work out the right settings in an interactive session, but once found, -the settings must be added to `postgresql.conf` to be persistent, and the -postmaster signalled (with `pg_ctl reload`) to pick up the new settings. - -Releases before 9.2 also require setting `custom_variable_classes` in -`postgresql.conf` to include the prefix `pljava`, and that assignment must -be earlier in the file than any settings of `pljava.*` variables. - -## Trying settings interactively - -It is still possible to do an exploratory session to find the variable settings -that work before touching `postgresql.conf` at all, but -the details are slightly different. - -In later PostgreSQL versions, you would typically use some `SET` commands -followed by a `LOAD` (followed, perhaps, by more `SET` commands unless you -always get things right on the first try). - -Before release 9.2, however, the order has to be `LOAD` first, which typically -will lead to an incompletely-started warning because the configuration settings -have not been made yet. 
_Then_, because the module has been loaded, -`pljava.*` variables will be recognized and can be set and changed until -PL/Java successfully loads, just as in the newer versions of PostgreSQL. - -Once working settings are found, edit `postgresql.conf`, make sure that -`custom_variable_classes` includes `pljava`, copy in the variable settings -that worked, and use `pg_ctl reload` to make the new settings effective. - -## But what if I want the load to fail and it doesn't? - -The procedure above relies on the way loading stops when the settings are not -right, giving you a chance to adjust them interactively. That turns out to be -a problem if there are previously-saved settings, or the original defaults, -that happen to *work* even if they are not the settings you want. In that case, -the `LOAD` command starts PL/Java right up, leaving you no chance in the -interactive session to change anything. - -To escape that behavior, there is one more very simple configuration variable, -`pljava.enable`. If it is `off`, `LOAD`ing PL/Java will always stop early and -allow you to set other variables before setting `pljava.enable` to `on`. - -To answer the hen-and-egg question of how to set `pljava.enable` to `off` -before loading the module, it _defaults_ to `off` on PostgreSQL releases -earlier than 9.2, so you will always have the chance to test your settings -interactively (and you will always have to set it explicitly `on` when -you are ready). - -If it is already `on` because of an earlier configuration saved in -`postgresql.conf`, it will be recognized in your interactive session and you -can set it `off` as needed. - -On later PostgreSQL releases with no such complications, it defaults to `on` -and can be ignored. 
diff --git a/src/site/markdown/install/smproperty.md b/src/site/markdown/install/smproperty.md new file mode 100644 index 000000000..e58d2dd52 --- /dev/null +++ b/src/site/markdown/install/smproperty.md @@ -0,0 +1,49 @@ +# Available policy-enforcement settings by Java version + +In the PostgreSQL [configuration variable][variables] `pljava.vmoptions`, +whether and how to set the `java.security.manager` property depends on +the Java version in use (that is, on the version of the Java library that +the `pljava.libjvm_location` configuration variable points to). + +There are two ways of setting the `java.security.manager` property that may be +allowed or required depending on the Java version in use. + +`-Djava.security.manager=allow` +: PL/Java's familiar operating mode in which + security policy is enforced. More on that mode can be found in + [Configuring permissions in PL/Java][policy]. + +`-Djava.security.manager=disallow` +: A mode required on Java 24 and later, in which there is no enforcement of + policy. Before setting up PL/Java in this mode, the implications in + [PL/Java with no policy enforcement][unenforced] should be carefully + reviewed. + +This table lays out the requirements by specific version of Java. + +|Java version|Available settings| +|---------|:---| +|9–11|There must be no appearance of `-Djava.security.manager` in `pljava.vmoptions`. Mode will be policy-enforcing.| +|12–17|Either `-Djava.security.manager=allow` or `-Djava.security.manager=disallow` may appear in `pljava.vmoptions`. Default is policy-enforcing (same as `allow`) if neither appears.| +|18–23|One of `-Djava.security.manager=allow` or `-Djava.security.manager=disallow` must appear in `pljava.vmoptions`, or PL/Java will fail to start. 
There is no default.| +|24–|`-Djava.security.manager=disallow` must appear in `pljava.vmoptions`, or PL/Java will fail to start.| +[Allowed `java.security.manager` settings by Java version] + +When `pljava.libjvm_location` points to a Java 17 or earlier JVM, there is +no special VM option needed, and PL/Java will operate with policy enforcement +by default. However, when `pljava.libjvm_location` points to a Java 18 or later +JVM, `pljava.vmoptions` must contain either `-Djava.security.manager=allow` or +`-Djava.security.manager=disallow`, to select operation with or without policy +enforcement, respectively. No setting other than `allow` or `disallow` will +work. Only `disallow` is available for stock Java 24 or later. + +The behavior with `allow` (and the default before Java 18) is further described +in [Configuring permissions in PL/Java][policy]. + +The behavior with `disallow`, the only mode offered for Java 24 and later, +is detailed in [PL/Java with no policy enforcement][unenforced], which +should be carefully reviewed when PL/Java will be used in this mode. + +[variables]: ../use/variables.html +[policy]: ../use/policy.html +[unenforced]: ../use/unenforced.html diff --git a/src/site/markdown/install/vmoptions.md b/src/site/markdown/install/vmoptions.md index 87ed91fe1..674cb9fa0 100644 --- a/src/site/markdown/install/vmoptions.md +++ b/src/site/markdown/install/vmoptions.md @@ -7,6 +7,61 @@ options are likely to be worth setting. If using [the OpenJ9 JVM][hsj9], be sure to look also at the [VM options specific to OpenJ9][vmoptJ9]. +## Selecting operation with or without security policy enforcement + +PL/Java can operate [with security policy enforcement][policy], its former +default and only mode, or [with no policy enforcement][unenforced], the only +mode available on stock Java 24 and later. 
+ +When `pljava.libjvm_location` points to a Java 17 or earlier JVM, there is +no special VM option needed, and PL/Java will operate with policy enforcement +by default. However, when `pljava.libjvm_location` points to a Java 18 or later +JVM, `pljava.vmoptions` must contain either `-Djava.security.manager=allow` or +`-Djava.security.manager=disallow`, to select operation with or without policy +enforcement, respectively. No setting other than `allow` or `disallow` will +work. Only `disallow` is available for stock Java 24 or later. + +For just how to configure specific Java versions, see +[Available policy-enforcement settings by Java version][smprop]. + +Before operating with `disallow`, the implications detailed in +[PL/Java with no policy enforcement][unenforced] should be carefully reviewed. + +[policy]: ../use/policy.html +[unenforced]: ../use/unenforced.html +[smprop]: smproperty.html + +## Adding to the set of readable modules + +By default, a small set of Java modules (including `java.base`, +`org.postgresql.pljava`, and `java.sql` and its transitive dependencies, +which include `java.xml`) will be readable to any Java code installed with +`install_jar`. + +While those modules may be enough for many uses, other modules are easily added +using `--add-modules` within `pljava.vmoptions`. For example, +`--add-modules=java.net.http,java.rmi` would make the HTTP Client and WebSocket +APIs readable, along with the Remote Method Invocation API. + +For convenience, the module `java.se` simply transitively requires all the +modules that make up the full Java SE API, so `--add-modules=java.se` will make +that full API available to PL/Java code without further thought. The cost, +however, may be that PL/Java uses more memory and starts more slowly than if +only a few needed modules were named. + +For just that reason, there is also a `--limit-modules` option that can be used +to trim the set of readable modules to the minimum genuinely needed. 
More on the +use of that option [here][limitmods]. + +[limitmods]: ../use/jpms.html#Limiting_the_module_graph + +Third-party modular code can be made available by adding the modular jars +to `pljava.module_path` (see [configuration variables](../use/variables.html)) +and naming those modules in `--add-modules`. PL/Java currently treats all jars +loaded with `install_jar` as unnamed-module, legacy classpath code. + +For more, see [PL/Java and the Java Platform Module System](../use/jpms.html). + ## Byte order for PL/Java-implemented user-defined types PL/Java is free of byte-order issues except when using its features for building diff --git a/src/site/markdown/releasenotes-pre1_6.md.vm b/src/site/markdown/releasenotes-pre1_6.md.vm index 405b1a9a1..730a7deb1 100644 --- a/src/site/markdown/releasenotes-pre1_6.md.vm +++ b/src/site/markdown/releasenotes-pre1_6.md.vm @@ -688,7 +688,7 @@ the Saxon-HE XML-processing library) provides a partial implementation of true `XMLQUERY` and `XMLTABLE` functions for PostgreSQL, using the standard-specified XML Query language rather than the XPath 1.0 of the native PostgreSQL functions. -[exxml]: pljava-examples/apidocs/index.html?org/postgresql/pljava/example/annotation/PassXML.html +[exxml]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/PassXML.html [exsaxon]: examples/saxon.html $h4 New Java property exposes the PostgreSQL server character-set encoding @@ -767,7 +767,7 @@ to PostgreSQL, and there has not been a way to suppress the row operation. The `TriggerData` interface now has a [`suppress`][tgsuppress] method that the trigger can invoke to suppress the operation for the row. -[tgsuppress]: pljava-api/apidocs/index.html?org/postgresql/pljava/TriggerData.html#suppress() +[tgsuppress]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/TriggerData.html#suppress() $h4 Constraint triggers @@ -1076,7 +1076,7 @@ functions, triggers, and user-defined types, both base and composite. 
[user]: use/use.html [hello]: use/hello.html [exanno]: $project.scm.url/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation -[apianno]: pljava-api/apidocs/index.html?org/postgresql/pljava/annotation/package-summary.html#package_description +[apianno]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/package-summary.html#package-description The history of this feature in PL/Java is long, with the first related commits appearing in 2005, six years in advance of an enhancement request for it. @@ -1164,7 +1164,7 @@ of major version because the prior API, while deprecated, is still available. and ignore the role should be rare, and should be discussed on the mailing list or opened as issues. -#set($sessapi = 'pljava-api/apidocs/index.html?org/postgresql/pljava/Session.html#') +#set($sessapi = 'pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/Session.html#') [goun]: ${sessapi}getOuterUserName() [eaou]: ${sessapi}executeAsOuterUser(java.sql.Connection,java.lang.String) diff --git a/src/site/markdown/releasenotes.md.vm b/src/site/markdown/releasenotes.md.vm index cb10d1dc2..d58fca2ec 100644 --- a/src/site/markdown/releasenotes.md.vm +++ b/src/site/markdown/releasenotes.md.vm @@ -10,7 +10,460 @@ #set($ghbug = 'https://github.com/tada/pljava/issues/') #set($ghpull = 'https://github.com/tada/pljava/pull/') -$h2 PL/Java 1.6.4 +$h2 PL/Java 1.6.9 + +This is the ninth minor update in the PL/Java 1.6 series. It adds support +for building and running with Java 24 _but only with no security enforcement_, +as explained below. Other than some minor bug fixes, that is the most notable +change. Further information on the changes may be found below. + +$h3 Version compatibility + +PL/Java 1.6.9 can be built against recent PostgreSQL versions including 17, and +older ones back to 9.5, using Java SE 9 or later. The Java version used at +runtime does not have to be the same version used for building. 
PL/Java itself
+can run on any Java version 9 or later, but for security policy enforcement the
+runtime Java version must be 23 or earlier. PL/Java functions can be
+written for, and use features of, whatever Java version will be loaded at run
+time. See [version compatibility][versions] for more detail.
+
+Some builds of Java 20 are affected by a bug, [JDK-8309515][]. PL/Java will
+report an error if it detects it is affected by that bug, and the solution can
+be to use a Java version earlier than 20, or one recent enough to have the bug
+fixed. The bug was fixed in Java 21.
+
+$h3 Security policy enforcement unavailable in Java 24 and later
+
+PL/Java 1.6 has historically enforced a flexible and fine-grained security
+policy allowing it to offer, in PostgreSQL parlance, both a 'trusted' and
+'untrusted' procedural language with configurable limits on the allowed
+behavior for both. That mode of operation, described in
+[Configuring permissions in PL/Java][policy], remains fully supported
+in PL/Java 1.6.9 _as long as the Java version used at runtime is Java 23 or
+earlier_.
+
+The crucial Java language features making that possible have been removed
+by the developers of Java, beginning with Java 24. PL/Java 1.6.9 can be used
+with a Java 24 or later runtime, but only as an 'untrusted' language with no
+policy enforcement, as described in
+[PL/Java with no policy enforcement][nopolicy].
+
+Because enforcement depends only on the Java version at runtime, a simple change
+to the `pljava.libjvm_location` [configuration variable][variables] allows the
+flexibility to host user code using the latest Java 24+ language features (but
+with no policy enforcement) in an application where that is acceptable, or,
+where the newest language features are not needed, to continue to use an older
+supported Java version, including the long-term-support Java 21, with policy
+enforced.
+ +The details in [PL/Java with no policy enforcement][nopolicy] should be +carefully reviewed before using PL/Java in that mode. The section on generic +Java hardening tips can also be a source of good practices for defense-in-depth +even when running with policy enforcement. + +$h3 Changes + +$h4 Supplied examples + +$h5 Softened dependency of examples jar on Saxon library + +The Maven build produces a `pljava-examples` jar from the supplied example code, +and can produce that jar with or without the examples that depend on the Saxon +XML library, based on a build-time profile setting. In past releases, the +examples jar, if built with the Saxon examples included, could not be deployed +(`sqlj.install_jar` with `deploy => true` would fail) in a database where a +Saxon jar had not already been installed and placed on the classpath. For cases +where the Saxon examples are not of interest, that inability to deploy the +examples jar based on a choice made at build time was an inconvenience. + +The examples jar now, if built with the Saxon examples included, simply will +leave those examples undeployed, if Saxon classes cannot be found on +the classpath at the time of deployment. + +$h5 New example function to examine Java's boot module layer + +The discussion of [PL/Java with no policy enforcement][nopolicy] encourages +attention to which Java modules are made available in Java's boot module layer. +A new example function is supplied to return that information. + +$h4 Continuous integration + +Workflows for GitHub Actions, AppVeyor, and Travis have had duplicated code +factored out into a single `jshell` script in the new `CI` directory. + +The script is able to use either the `PGJDBC` or `pgjdbc-ng` driver for +connecting to a test server instance. The CI configuration has been using +`pgjdbc-ng` but now uses `PGJDBC`, to avoid a `pgjdbc-ng` dependency on +a library whose native operations Java 24 now warns about and a future +Java release will forbid. 
+ +The GitHub Actions CI workflow now covers Ubuntu, Mac OS on both Intel and ARM, +and Windows using both MSVC and MinGW-w64. + +$h4 Documentation + +$h5 Java stack traces and debugger control + +The needed settings of `client_min_messages` or `log_min_messages` for Java +exception stacktraces to be shown have been mentioned in passing in too many +places that were not the user documentation, while never clearly stated there. +There is now a [new section](use/use.html#Java_exception_stack_traces) for that, +and also one for [connecting a debugger](use/use.html#Connecting_a_debugger). + +$h5 Links into API docs now assume Java 21 `javadoc` version will be used + +While it should be possible to build the API documentation with the `javadoc` +tool of whatever Java version is used at build time, different versions of the +tool introduce changes in HTML output, such as anchor names, that affect +links into the API documentation from other pages, such as the build/install/use +documentation in Markdown. To build a full set of documentation with working +links, an assumption must be made about the version of `javadoc` that will be +used. Links have been updated on the assumption that the API docs will be built +with the `javadoc` tool of Java 21. + +$h3 Bugs fixed + +* [SQL generator unexpected case-sensitive matching of implementor tags](${ghbug}515) +* [Class path used during jar deploy/undeploy can be bad](${ghbug}516) + +$h3 Credits + +Thanks in release 1.6.9 to ZhangHuiGui for highlighting the need to better +document debugging arrangements and Java stack traces. + +[policy]: use/policy.html +[nopolicy]: use/unenforced.html +[variables]: use/variables.html + +$h2 Earlier releases + +## A nice thing about using Velocity is that each release can be entered at +## birth using h2 as its main heading, h3 and below within ... and then, when +## it is moved under 'earlier releases', just define those variables to be +## one heading level finer. 
Here goes: +#set($h2 = '###') +#set($h3 = '####') +#set($h4 = '#####') +#set($h5 = '######') + +$h2 PL/Java 1.6.8 (19 October 2024) + +This is the eighth minor update in the PL/Java 1.6 series. It adds support +for PostgreSQL 17, confirms compatibility with Java 23, and makes some slight +build-process improvements to simplify troubleshooting reported build problems. +Further information on the changes may be found below. + +$h3 Version compatibility + +PL/Java 1.6.8 can be built against recent PostgreSQL versions including 17, and +older ones back to 9.5, using Java SE 9 or later. The Java version used at +runtime does not have to be the same version used for building. PL/Java itself +can run on any Java version 9 or later. PL/Java functions can be +written for, and use features of, whatever Java version will be loaded at run +time. See [version compatibility][versions] for more detail. + +Some builds of Java 20 are affected by a bug, [JDK-8309515][]. PL/Java will +report an error if it detects it is affected by that bug, and the solution can +be to use a Java version earlier than 20, or one recent enough to have the bug +fixed. The bug was fixed in Java 21. + +$h3 Changes + +$h4 Build system + +While building the PL/Java native code, Maven output will include the full +`PG_VERSION_STR` from the PostgreSQL development files that have been found +to build against. The string includes platform, compiler, and build notes, as +reported by the `version` function in SQL. This information should always be +included when reporting a PL/Java native build issue, so including it in the +Maven build output will make issues easier to report. + +When building with Maven's `-X` / `--debug` option for additional debug output, +the command arguments of the constructed compiling and linking commands will be +included in the output, which can be useful in troubleshooting a build problem. 
+The arguments are shown just as the compiler/linker is meant to ultimately +receive them; on a Unix-like platform, that is the Java `List` exactly as seen +with `ProcessBuilder.command()`. On Windows, that `List` is shown just before +the final application of extra quoting that simply ensures the compiler/linker +receives it correctly. + +When building with a platform or environment that does not satisfy the `probe` +predicate of any of the included platform build rules, a Maven error message +will clearly say so. In earlier versions, an uninformative null pointer +exception resulted instead. The new message includes guidance on how to add a +build rule set for a new platform or environment, and possibly contribute it for +inclusion in PL/Java. + +$h4 Documentation + +The build documentation now prominently notes that `mvn --version` will show the +version of Java that Maven has found to use for the build. There had been build +issues reported that could be traced to Maven finding a different Java +installation than expected, when that version was not usable to build +PL/Java 1.6. + +The documentation has been shorn of many lingering references to PostgreSQL +versions older than 9.5, the oldest that PL/Java 1.6 supports, and other +holdovers from pre-1.6 PL/Java. + +$h3 Enhancement requests addressed + +* [PostgreSQL 17 support](${ghpull}499) + +$h3 Bugs fixed + +* [Unhelpful output when build fails because no platform rules matched](${ghbug}485) + +$h3 Credits + +Thanks in release 1.6.8 to Francisco Miguel Biete Banon for determining the +changes needed for PostgreSQL 17. + +$h2 PL/Java 1.6.7 (3 April 2024) + +This is the seventh minor update in the PL/Java 1.6 series. It adds support +for FreeBSD and for building and running with Java 22, and fixes some bugs, +with few other notable changes. Further information on the changes may be found +below. 
+
+$h3 Version compatibility
+
+PL/Java 1.6.7 can be built against recent PostgreSQL versions including 16, and
+older ones back to 9.5, using Java SE 9 or later. The Java version used at
+runtime does not have to be the same version used for building. PL/Java itself
+can run on any Java version 9 or later. PL/Java functions can be
+written for, and use features of, whatever Java version will be loaded at run
+time. See [version compatibility][versions] for more detail.
+
+Some builds of Java 20 are affected by a bug, [JDK-8309515][]. PL/Java will
+report an error if it detects it is affected by that bug, and the solution can be
+to use a Java version earlier than 20, or one recent enough to have the bug
+fixed. The bug has been fixed in Java 21.
+
+$h3 Changes
+
+$h4 Changes in XML support
+
+$h5 Java 22's new XML property to control DTD processing is supported
+
+Java 22 introduces a new property, `jdk.xml.dtd.support`, which can take values
+`allow`, `deny`, and `ignore`.
+
+The values `allow` and `deny` provide a new way to specify behavior that could
+already be requested by other means, and the `allowDTD(boolean)` method of
+PL/Java's `Adjusting.XML` API now tries this property first, falling back to the
+older means on Java releases that do not support it.
+
+The value `ignore` offers a previously-unavailable behavior where an XML
+document with a DTD can be successfully parsed but with its DTD ignored. A new
+method `ignoreDTD()` is added to the `Adjusting.XML` API to request this
+treatment, and will only succeed on Java 22 or later. The last-invoked of this
+method and `allowDTD(boolean)` will govern.
+
+In Java 22, bug [JDK-8329295][] can cause parsing to fail when `ignoreDTD` is in
+effect, if the document has only a minimal DTD and the SAX or DOM API is used.
+ +$h4 Build system + +The build logic that is implemented in JavaScript is now executed using the +Nashorn engine, either included with Java through release 14, or downloaded +by Maven for Java 15 and later. The build system was formerly downloading the +JavaScript engine from GraalVM to build on Java 15 and later, but a new version +of that engine needed for Java 22 would have complicated version management. + +Versions of some Maven plugins used at build time +[have been updated](${ghpull}468) where critical vulnerabilities were reported. + +$h3 Enhancement requests addressed + +* [Build on FreeBSD](${ghpull}478) +* [Vulnerable Maven plugins used at build time](${ghbug}449) + +$h3 Bugs fixed + +* ["PostgreSQL backend function after an elog(ERROR)" in class loading](${ghbug}471) +* [XML parsing errors reported as XX000 when DOM API is used](${ghbug}481) + +$h3 Credits + +Thanks in release 1.6.7 to Francisco Miguel Biete Banon, Bear Giles, Achilleas +Mantzios, `hunterpayne`, `kamillo`. + +[JDK-8329295]: https://bugs.openjdk.org/browse/JDK-8329295 + +$h2 PL/Java 1.6.6 (19 September 2023) + +This is the sixth minor update in the PL/Java 1.6 series. It adds support +for PostgreSQL 16 and confirms compatibility with Java 21, and fixes some bugs, +with few other notable changes. Further information on the changes may be found +below. + +$h3 Version compatibility + +PL/Java 1.6.6 can be built against recent PostgreSQL versions including 16, and +older ones back to 9.5, using Java SE 9 or later. The Java version used at +runtime does not have to be the same version used for building. PL/Java itself +can run on any Java version 9 or later. PL/Java functions can be +written for, and use features of, whatever Java version will be loaded at run +time. See [version compatibility][versions] for more detail. + +Some builds of Java 20 are affected by a bug, [JDK-8309515][]. 
PL/Java will
+report an error if it detects it is affected by that bug, and the solution can be
+to use a Java version earlier than 20, or one recent enough to have the bug
+fixed. The bug has been fixed in Java 21.
+
+PL/Java 1.6.6 will definitely no longer build on PostgreSQL versions older
+than 9.5. It has made no attempt to support them since 1.6.0, and lingering
+conditional code for older versions has now been removed.
+
+$h3 Changes
+
+$h4 Changes in XML support
+
+$h5 Java 17's standardized XML feature and property names added
+
+Java 17 added standardized, easy-to-remember names for a number of features and
+properties the underlying XML implementations had formerly supported under
+implementation-specific names. PL/Java's `Adjusting.XML` API already needed to
+know those various other names, to attempt using them to configure the desired
+features and properties. Now it tries the new standard names too.
+
+$h5 Better control when a feature or property can't be set as intended
+
+The original documentation for the feature and property setters in the
+`Adjusting.XML` API said "the adjusting methods are best-effort and do not
+provide an indication of whether the requested adjustment was made". (At the
+same time, failures could produce voluminous output to the log.)
+
+The new [`lax(boolean)`][adjlax] method offers more control. If not used,
+adjustment failures are logged (as before, but more compactly in the case of
+multiple failures in one sequence of adjustments). Or, `lax(true)` can be used
+to silently discard any failures up to that point in a sequence of adjustments,
+or `lax(false)` to have the exceptions chained together and thrown.
+
+The addition of the new Java 17 standardized names can complicate
+version-agnostic configuration of other elements in the Java XML APIs, such as
+`Transformer`, that are not directly covered by PL/Java's
+`Adjusting.XML.Parsing` methods.
Client code may find the new `Adjusting.XML` +method [`setFirstSupported`][adjsfs] convenient for that purpose; +[an example][egsfs] illustrates. + +$h4 Packaging / testing + +$h5 Support choice of `PGJDBC` or `pgjdbc-ng` in `Node` + +The package jar produced as the last step of the build includes a +[test harness](develop/node.html) similar to the `PostgresNode` Perl module. +It formerly worked only with the `pgjdbc-ng` driver. Now it works with either +`PGJDBC` or `pgjdbc-ng`, and provides features for writing test scripts that +do not depend on the driver chosen. + +$h4 Source code + +* Minor changes to support PostgreSQL 16 +* Conditional code supporting PostgreSQL versions older than 9.5 removed +* Old non-HTML5 elements (rejected by Javadoc 17 and later) removed from + doc comments +* The `pureNonVirtualCalled` method removed to quiet warnings from recent + C compilers +* Schema qualification in embedded SQL added to two operators that had been + overlooked in the earlier round of adding such qualification +$h3 Bugs fixed + +* [`NEWLINE` pattern can fail to match](${ghbug}455) + +[adjlax]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/Adjusting.XML.Parsing.html#method-summary +[adjsfs]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/Adjusting.XML.html#method-detail +[egsfs]: https://github.com/tada/pljava/blob/V1_6_6/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java#L528 + +$h2 PL/Java 1.6.5 (13 June 2023) + +This is the fifth minor update in the PL/Java 1.6 series. It adds support +for PostgreSQL 15 and fixes some bugs, with few other notable changes. Further +information on the changes may be found below. + +$h3 Version compatibility + +PL/Java 1.6.5 can be built against recent PostgreSQL versions including 15, and +older ones back to 9.5, using Java SE 9 or later. The Java version used at +runtime does not have to be the same version used for building. 
report an error if it detects it is affected
Yurii Rashkovskii
Further information @@ -77,18 +530,7 @@ $h3 Bugs fixed * [Set-returning function has context classloader set too many times](${ghbug}389) * [`java.time.LocalDate` mismapping within 30 years of +/-infinity](${ghbug}390) -[exoneout]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/ReturnComposite.html#method.summary - -$h2 Earlier releases - -## A nice thing about using Velocity is that each release can be entered at -## birth using h2 as its main heading, h3 and below within ... and then, when -## it is moved under 'earlier releases', just define those variables to be -## one heading level finer. Here goes: -#set($h2 = '###') -#set($h3 = '####') -#set($h4 = '#####') -#set($h5 = '######') +[exoneout]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/ReturnComposite.html#method-summary $h2 PL/Java 1.6.3 (10 October 2021) @@ -258,7 +700,7 @@ $h3 Credits Thanks to Francisco Biete for the report of [#331](${ghbug}331). -[PassXML]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/PassXML.html#method.summary +[PassXML]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/PassXML.html#method-summary $h2 PL/Java 1.6.1 (16 November 2020) @@ -318,8 +760,8 @@ $h3 Bugs fixed * [1.6.0: opening a ResourceBundle (or a resource) fails](${ghbug}322) * [Better workaround needed for javac 10 and 11 --release bug](${ghbug}328) -[outprm]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Function.html#annotation.type.element.detail -[outprmeg]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/ReturnComposite.html#method.detail +[outprm]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Function.html#annotation-interface-element-detail +[outprmeg]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/ReturnComposite.html#method-detail [agganno]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Aggregate.html [castanno]: 
pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Cast.html [opranno]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Operator.html @@ -520,7 +962,7 @@ continuous integration was supported by Google Summer of Code. [linkage]: examples/examples.html#Exception_resolving_class_or_method_.28message_when_installing_examples.29 [udtd32f84e]: https://github.com/jcflack/pljava-udt-type-extension/commit/d32f84e [udt0066a1e]: https://github.com/jcflack/pljava-udt-type-extension/commit/0066a1e -[variadic]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/Variadic.html#method.detail +[variadic]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/Variadic.html#method-detail [charsets]: use/charsets.html [jpms]: use/jpms.html diff --git a/src/site/markdown/use/catch.md b/src/site/markdown/use/catch.md new file mode 100644 index 000000000..b4754c1a3 --- /dev/null +++ b/src/site/markdown/use/catch.md @@ -0,0 +1,132 @@ +# Catching PostgreSQL exceptions in Java + +When your Java code calls into PostgreSQL to do database operations, +a PostgreSQL error may result. It gets converted into a special subclass +of `SQLException` that (internally to PL/Java) retains all the elements +of the PostgreSQL error report. If your Java code does not catch this exception +and it propagates all the way out of your function, it gets turned back into +the original error report and is handled by PostgreSQL in the usual way. + +Your Java code can also catch this exception in any `catch` block that +covers `SQLException`. After catching one, there are two legitimate things +your Java code can do with it: + +0. Perform some cleanup as needed and rethrow it, or construct some other, + more-descriptive or higher-level exception and throw that, so that the + exception continues to propagate and your code returns exceptionally + to PostgreSQL. + +0. 
Roll back to a previously-established `Savepoint`, perform any other + recovery actions needed, and continue processing, without throwing or + rethrowing anything. + +If your code catches a PostgreSQL exception, and continues without rethrowing +it or throwing a new one, and also without rolling back to a prior `Savepoint`, +that is a bug. Without rolling back, the current PostgreSQL transaction is +spoiled and any later calls your Java function tries to make into PostgreSQL +will throw their own exceptions because of that. Historically, such bugs have +been challenging to track down, as you may end up only seeing a later exception +having nothing at all to do with the one that was originally mishandled, +which you never see. + +## Tips for debugging mishandled exceptions + +Some features arriving in PL/Java 1.6.10 simplify debugging code that catches +but mishandles exceptions. + +### More-informative in-failed-transaction exception + +First, the exception that results when a call into PostgreSQL fails because of +an earlier mishandled exception has been made more informative. It has an +`SQLState` of `25P02` (PostgreSQL's "in failed SQL transaction" code), and its +`getCause` method actually returns the unrelated earlier exception that was +mishandled (and so, in that sense, really is the original 'cause'). Java code +that catches this exception can use `getStackTrace` to examine its stack +trace, or call `getCause` and examine the stack trace of the earlier exception. +The stack trace of the failed-transaction exception shows the context of the +later call that failed because of the earlier mishandling, and the stack trace +of the 'cause' shows the context of the original mishandled problem. + +Note, however, that while your code may mishandle an exception, the next call +into PostgreSQL that is going to fail as a result might not be made from your +code at all. 
It could, for example, happen in PL/Java's class loader and appear +to your code as an unexplained `ClassNotFoundException`. The failed-transaction +`SQLException` and its cause should often be retrievable from the `cause` chain +of whatever exception you get, but could require following multiple `cause` +links. + +### Additional logging + +Additionally, there is logging that can assist with debugging when it isn't +practical to add to your Java code or run with a debugger to catch and examine +exceptions. + +When your Java function returns to PostgreSQL, normally or exceptionally, +PL/Java checks whether there was any PostgreSQL error raised during your +function's execution but not resolved by rolling back to a savepoint. + +If there was, the logging depends on whether your function is returning normally +or exceptionally. + +#### If your function has returned normally + +If a PostgreSQL error was raised, and was not resolved by rolling back to +a savepoint, and your function is making a normal non-exception return, then, +technically, your function has mishandled that exception. The mishandling may be +more benign (your function made no later attempts to call into PostgreSQL that +failed because of it) or less benign (if one or more later calls did get made +and failed). In either case, an exception stack trace will be logged, but the +log level will differ. + +_Note: "More benign" still does not mean "benign". Such code may be the cause +of puzzling PostgreSQL warnings about active snapshots or unclosed resources, +or it may produce no visible symptoms, but it is buggy and should be found and +fixed._ + +In the more-benign case, it is possible that your code has long been mishandling +that exception without a problem being noticed, and it might not be desirable +for new logging added in PL/Java 1.6.10 to create a lot of new log traffic about +it. Therefore, the stack trace will be logged at `DEBUG1` level. 
You can use +`SET log_min_messages TO DEBUG1` to see any such stack traces. + +In the less-benign case, the mishandling is likely to be causing some problem, +and the stack trace will be logged at `WARNING` level, and so will appear in the +log unless you have configured warnings not to be logged. The first +in-failed-transaction exception is the one whose stack trace will be logged, and +that stack trace will include `Caused by:` and the original mishandled exception +with its own stack trace. + +#### If your function has returned exceptionally + +If a PostgreSQL error was raised and your function is returning +exceptionally, then there may have been no mishandling at all. The exception +emerging from your function may be the original PostgreSQL exception, +or a higher-level one your code constructed around it. That would be normal, +non-buggy behavior. + +It is also possible, though, that your code could have caught a PostgreSQL +exception, mishandled it, and later returned exceptionally on account of some +other, even unrelated, exception. PL/Java has no way to tell the difference, +so it will log the PostgreSQL exception in this case too, but only at `DEBUG2` +level. + +PL/Java's already existing pre-1.6.10 practice is to log an exception stack +trace at `DEBUG1` level any time your function returns exceptionally. Simply +by setting `log_level` to `DEBUG1`, then, you can see the stack trace of +whatever exception caused the exceptional return of your function. If that +exception was a direct result of the original PostgreSQL exception or of a later +in-failed-transaction exception, then the `cause` chain in its stack trace +should have all the information you need. + +If, on the other hand, the exception causing your function's exceptional return +is unrelated and its `cause` chain does not include that information, then by +bumping the log level to `DEBUG2` you can ensure the mishandled exception's +stack trace also is logged. 
mishandling and what is visible
Such code will, + therefore, continue to work as it did before Java 9, provided the needed + modules are _readable_, as explained below. (Even a jar containing Java 9+ + modular code will be treated this way, if found on the class path rather + than the module path.) * A jar file can be placed on the module path even if it does not contain an explicit named module. In that case, it becomes an "automatic" module, @@ -55,6 +56,75 @@ that does not include Java module system concepts. Its `sqlj.set_classpath` function manipulates an internal class path, not a module path, and a jar installed with `sqlj.install_jar` behaves as legacy code in an unnamed module. +## Readable versus observable modules + +Using Java's terminology, modules that can be found on the module path are +_observable_. Not all of those are automatically _readable_; the _readable_ +ones in a JVM instance are initially those encountered, at JVM start-up, in +the "recursive enumeration" step of [module resolution][resolution]. + +Recursive enumeration begins with some root modules, and proceeds until all of +the modules on which they (transitively) depend have been added to the readable +set. When PL/Java is launched in a session, PL/Java's own module is a root, +and so the readable modules will include those PL/Java itself depends on, +such as `java.base`, `java.sql`, and the other modules `java.sql` names with +`requires transitive` directives. + +Those modules may be enough for many uses of PL/Java. However, if code for use +in PL/Java will refer to other modules, +[`--add-modules` in `pljava.vmoptions`][addm] can be used to add more roots. +Because of recursive enumeration, it is enough to add just one module, or a few +modules, whose dependencies recursively cover whatever modules will be needed. + +At one extreme for convenience, Java provides a module, `java.se`, that simply +declares dependencies on the other modules that make up the full Java SE API. 
+Therefore, `--add-modules=java.se` will ensure that any PL/Java code is able to +refer to any of the Java SE API. However, PL/Java instances may use less memory +and start up more quickly if an effort is made to add only modules actually +needed. + +### Limiting the module graph + +Less conveniently perhaps, but advantageously for memory footprint and quick +startup, the [`--limit-modules`][limitmods] option can be used. As of this +writing in early 2025, starting up a simple PL/Java installation on Java 24 +with no `--add-modules` option results in 48 modules resolved, and the 48 +include some unlikely choices for PL/Java purposes, such as `java.desktop`, +`jdk.unsupported.desktop`, `jdk.javadoc`, and others. + +With the option `--limit-modules=org.postgresql.pljava.internal` added, only +nine modules are resolved---the transitive closure of those required by PL/Java +itself---and all of PL/Java's supplied examples successfully run. + +The `--add-modules` option can then be used to make any other actually-needed +modules available again. Those named with `--add-modules` are implicitly added +to those named with `--limit-modules`, so there is no need to change the +`--limit-modules` setting when adding another module. For example, + +``` +--limit-modules=org.postgresql.pljava.internal --add-modules=java.net.http +``` + +will allow use of `java.net.http` in addition to the nine modules resolved for +PL/Java itself. + +Limiting the module graph can be especially advisable when running PL/Java with +no security policy enforcement, as required on stock Java 24 and later. The page +[PL/Java with no policy enforcement][unenforced] should be carefully reviewed +for other implications of running PL/Java that way. + +The supplied [examples jar][examples] provides a function, [java_modules][], +that can be used to see what modules have been resolved into Java's boot module +layer. 
+ +For more detail on why the boot layer includes the modules it does, +`-Djdk.module.showModuleResolution=true` can be added temporarily in +`pljava.vmoptions`, and a log of module requirements and bindings will be sent +to the standard output of the backend process when PL/Java starts. PostgreSQL, +however, may normally start backend processes with standard output going +nowhere, so the logged information may be invisible unless running PostgreSQL +in [a test harness][node]. + ## Configuring the launch-time module path The configuration variable `pljava.module_path` controls the @@ -84,4 +154,11 @@ the usual way. It can be set by adding a `-Djava.class.path=...` in the is simply the jar file pathnames, separated by the platform's path separator character. -[jpms]: http://cr.openjdk.java.net/~mr/jigsaw/spec/ +[jpms]: https://cr.openjdk.java.net/~mr/jigsaw/spec/ +[resolution]: https://docs.oracle.com/javase/9/docs/api/java/lang/module/package-summary.html#resolution +[addm]: ../install/vmoptions.html#Adding_to_the_set_of_readable_modules +[limitmods]: https://openjdk.org/jeps/261#Limiting-the-observable-modules +[unenforced]: unenforced.html +[examples]: ../examples/examples.html +[java_modules]: ../pljava-examples/apidocs/org/postgresql/pljava/example/annotation/Modules.html#method-detail +[node]: ../develop/node.html diff --git a/src/site/markdown/use/parallel.md b/src/site/markdown/use/parallel.md index 861a621b4..11524d899 100644 --- a/src/site/markdown/use/parallel.md +++ b/src/site/markdown/use/parallel.md @@ -1,9 +1,7 @@ # PL/Java in parallel query or background worker With some restrictions, PL/Java can be used in [parallel queries][parq], from -PostgreSQL 9.6, and in some [background worker processes][bgworker] (as -introduced in PostgreSQL 9.3, though 9.5 or later is needed for support -in PL/Java). +PostgreSQL 9.6, and in some [background worker processes][bgworker]. 
[bgworker]: https://www.postgresql.org/docs/current/static/bgworker.html [parq]: https://www.postgresql.org/docs/current/static/parallel-query.html diff --git a/src/site/markdown/use/policy.md b/src/site/markdown/use/policy.md index 566743c49..e71c46408 100644 --- a/src/site/markdown/use/policy.md +++ b/src/site/markdown/use/policy.md @@ -1,5 +1,17 @@ # Configuring permissions in PL/Java +This page describes how PL/Java operates when enforcing security policy, +available when using Java 23 or earlier. + +When using PL/Java with stock Java 24 or later, please see instead the +[PL/Java without policy enforcement][unenforced] page. + +To operate with policy enforcement as described here, no special configuration +is needed on Java 17 and earlier, while on Java 18 through 23, an entry +`-Djava.security.manager=allow` in [`pljava.vmoptions`][confvar] must be present +for PL/Java to start. For just how to configure specific Java versions, see +[Available policy-enforcement settings by Java version][smprop]. + ## `TRUSTED` (and untrusted) procedural languages PostgreSQL allows a procedural language to be installed with or without @@ -8,12 +20,11 @@ can be created in that language by any user (PostgreSQL role) with `USAGE` permission on that language, as configured with the SQL commands `GRANT USAGE ON LANGUAGE ...` and `REVOKE USAGE ON LANGUAGE ...`. For a language that is _not_ designated `TRUSTED`, only a database superuser -may create functions that use it, no matter who has been granted `USAGE` -on it. +may create functions that use it. No `USAGE` permission can be granted on it. In either case, once any function has been created, that function may be executed by any user/role granted `EXECUTE` permission on the function -itself; a language's `USAGE` privilege (plus superuser status, if the language +itself; a language's `USAGE` privilege (or superuser status, if the language is not `TRUSTED`) is only needed to create a function that uses the language. 
Because PL functions execute in the database server, a general-purpose @@ -121,7 +132,7 @@ installed with PL/Java. The `pljava.policy` file, by default, is used _instead of_ any `.java.policy` file in the OS user's home directory that Java would normally load. There probably is no such file in the `postgres` user's home directory, and if -for any reason there is one, it probably is not tailored to PL/Java. +for any reason there is one, it probably was not put there with PL/Java in mind. The [configuration variable][confvar] `pljava.policy_urls` can be used to name different, or additional, policy files. @@ -371,12 +382,17 @@ That should be regarded as an implementation detail; it may change in a future release, so relying on it is not recommended. The developers of Java have elected to phase out important language features -used by PL/Java to enforce policy. The changes will come in releases after -Java 17. For migration planning, Java versions up to and including 17 -remain fully usable with this version of PL/Java, and Java 17 -is positioned as a long-term support release. For details on -how PL/Java will adapt, please bookmark [the JEP 411 topic][jep411] -on the PL/Java wiki. +used by PL/Java to enforce policy. The functionality has been removed in +Java 24. For migration planning, this version of PL/Java can still enable +policy enforcement in Java versions up to and including 23, and Java 17 and 21 +are positioned as long-term support releases. (There is a likelihood, +increasing with later Java versions, even before policy stops being enforceable, +that some internal privileged operations by Java itself, or other libraries, +will cease to work transparently, and may have to be manually added to a site's +PL/Java policy.) + +For details on how PL/Java will adapt, please bookmark +[the JEP 411 topic][jep411] on the PL/Java wiki. 
[pfsyn]: https://docs.oracle.com/en/java/javase/14/security/permissions-jdk1.html#GUID-7942E6F8-8AAB-4404-9FE9-E08DD6FFCFFA @@ -386,4 +402,6 @@ on the PL/Java wiki. [sqljajl]: ../pljava/apidocs/org.postgresql.pljava.internal/org/postgresql/pljava/management/Commands.html#alias_java_language [tssec]: https://docs.oracle.com/en/java/javase/14/security/troubleshooting-security.html [trial]: trial.html +[unenforced]: unenforced.html [jep411]: https://github.com/tada/pljava/wiki/JEP-411 +[smprop]: ../install/smproperty.html diff --git a/src/site/markdown/use/sqlxml.md b/src/site/markdown/use/sqlxml.md index c766cfd6d..1c4f5de34 100644 --- a/src/site/markdown/use/sqlxml.md +++ b/src/site/markdown/use/sqlxml.md @@ -419,6 +419,12 @@ only existing methods for setting features/properties, as described `setFirstSupportedFeature` and `setFirstSupportedProperty` methods in PL/Java's `Adjusting` API. +When running on Java 22 or later, there is also a fallback catalog that can +satisfy requests for a small number of DTDs that are defined by the Java +platform. The behavior when this fallback resolver cannot satisfy a request +can be configured by setting the `jdk.xml.jdkcatalog.resolve` property, for +which, again, the `setFirstSupportedProperty` method can be used. + ### Extended API to set the content of a PL/Java `SQLXML` instance When a `SQLXML` instance is returned from a PL/Java function, or passed in to diff --git a/src/site/markdown/use/unenforced.md b/src/site/markdown/use/unenforced.md new file mode 100644 index 000000000..82636be18 --- /dev/null +++ b/src/site/markdown/use/unenforced.md @@ -0,0 +1,246 @@ +# PL/Java with no policy enforcement + +This page describes how PL/Java operates when it is not enforcing any security +policy, as when running on stock Java 24 or later. 
+ +When the newest Java language features are not needed, it may be preferable to +use a Java 23 or earlier JVM to retain PL/Java's historically fine-grained and +configurable limits on what the Java code can do. For that case, please see +instead the [configuring permissions in PL/Java][policy] page. + +## History: policy enforcement pre-Java 24 + +PL/Java has historically been able to enforce configurable limits on the +behavior of Java code, and to offer more than one "procedural language" with +distinct names, such as `java` and `javau`, for declaring functions with +different limits on what they can do. In PostgreSQL parlance, the language named +without 'u' would be described as 'trusted', meaning any functions created in +that language would run with strict limits. Such functions could be created by +any PostgreSQL user granted `USAGE` permission on that language. The language +named with 'u' would be described as 'untrusted' and impose fewer limits on what +functions can do; accordingly, only PostgreSQL superusers would be allowed to +create functions in such a language. + +PL/Java, going further than many PLs, allowed tailoring of the exact policies +imposed for both `java` and `javau`, and also allowed creation of additional +language aliases beyond those two, with different tailored policies for each. + +Those capabilities remain available when PL/Java is used with Java versions +up through Java 23, and are described more fully in +[configuring permissions in PL/Java][policy]. + +## The present: Java 24 and later, no policy enforcement in PL/Java 1.6 + +The Java language features necessary for policy enforcement in the PL/Java 1.6 +series have been removed from the language as of Java 24. It is possible to +use Java 24 or later with an up-to-date 1.6-series PL/Java, but only by running +with no policy enforcement at all. 
+ +That does not mean only that PL/Java's 'trusted' and 'untrusted' languages are +no longer different: it means that even the 'untrusted' language's more-relaxed +former limits can no longer be enforced. When run with enforcement disabled, +PL/Java is better described as a wholly-'untrusted' PL with nearly no limits on +what the Java code can do. + +The only limits a Java 24 or later runtime can impose on what the Java code can +do are those imposed by the isolation of modules in the +[Java Platform Module System][jpms] and by a small number of VM options, which +will be discussed further below. + +This picture is radically different from the historical one with enforcement. To +run PL/Java in this mode may be a reasonable choice if Java 24 or later language +features are wanted and if all of the Java code to be used is considered well +vetted, thoroughly trusted, and defensively written. + +For news of possible directions for policy enforcement in future PL/Java +versions, please bookmark [this wiki page][jep411]. + +## Opting in to PL/Java with no enforcement + +For PL/Java to run with no policy enforcement (and, therefore, for it to run +at all on Java 24 or later), specific configuration settings must be made to opt +in. + +### In `pljava.vmoptions` + +The string `-Djava.security.manager=disallow` must appear in the setting of +[`pljava.vmoptions`][vmoptions] or PL/Java will be unable to start on Java 24 +or later. + +For details on what `java.security.manager` settings to use on other Java +versions, see [Available policy-enforcement settings by Java version][smprop]. + +### in `pljava.allow_unenforced` + +Typically, a PL extension that provides only 'untrusted' execution will define +only a single, untrusted, PL name: `plpython3u` would be an example. 
+ +PL/Java, however: + +* Has historically offered both a `javau` and a trusted `java` PL +* Still can offer both, when run on a Java 23 or older JVM +* May have been installed in a database with functions already created of both + types, and then switched to running on Java 24 and without enforcement +* Can also be switched back to a Java 23 or older JVM and provide enforcement + again + +Therefore, a PL/Java installation still normally provides two (or more) named +PLs, each being declared to PostgreSQL as either 'trusted' or not. + +When running with no enforcement, however: + +* Only PostgreSQL superusers can create functions, even using PL names shown as + 'trusted', and without regard to any grants of `USAGE` on those PLs. + + There may, however, be functions already defined in 'trusted' PLs that were + created by non-superusers with `USAGE` granted, at some earlier time when + PL/Java was running with enforcement. It may be important to audit those + functions' code before allowing them to run. + +* No PL/Java function at all will be allowed to run unless the name of its PL is + included in the `pljava.allow_unenforced` [configuration variable][vbls]. + +* When there are existing PL/Java functions declared in more than one named PL, + they can be audited in separate batches, with the name of each PL added + to the `pljava.allow_unenforced` setting after the functions declared + in that PL have been approved. Or, individual functions, once approved, can + be redeclared with the PL name changed to one already listed in + `pljava.allow_unenforced`. + +* Creation of a new function, even by a superuser, with a PL name not listed in + `pljava.allow_unenforced` will normally raise an error when PL/Java is + running without enforcement. This will not be detected, however, at times + when `check_function_bodies` is `off`, so is better seen as a reminder than + as a form of security. The more-important check is the one made when + the function executes. 
+
+### In `pljava.allow_unenforced_udt`
+
+Java methods for input and output conversion of PL/Java
+[mapped user-defined types][mappedudt], which are executed directly by PL/Java
+and have no SQL declarations to carry a PL name, are allowed to execute only if
+`pljava.allow_unenforced_udt` is `on`. The table `sqlj.typemap_entry` can be
+queried for a list of mapped UDT Java classes to audit before changing this
+setting to `on`.
+
+## Hardening for PL/Java with no policy enforcement
+
+### External hardening measures
+
+Developers of the Java language, in their rationale for removing the
+Java features needed for policy enforcement, have placed strong emphasis on
+available protections at the OS or container level, external to the process
+running Java. For the case of PL/Java, that would mean typical hardening
+measures such as running PostgreSQL in a container, using [SELinux][selinux],
+perhaps in conjunction with [sepgsql][], and so on.
+
+Those external measures, however, generally confine what the process can do as a
+whole. Because PL/Java executes within a PostgreSQL backend process, which must
+still be allowed to do everything PostgreSQL itself does, it is difficult for an
+external measure to restrict what Java code can do any more narrowly than that.
+
+### Java hardening measures
+
+Java features do remain that can be used to put some outer guardrails on what
+the Java code can do. They include some specific settings that can be made in
+`pljava.vmoptions`, and the module-isolation features of the
+[Java Platform Module System][jpms] generally. These should be conscientiously
+used:
+
+#### `--sun-misc-unsafe-memory-access=deny`
+
+This setting is first available in Java 23. It should be used whenever
+available, and especially in Java 24 or later with no policy enforcement.
+Without this setting, and in the absence of policy enforcement, any Java code
+can access memory in ways that break the Java object model.
+ +The only reason not to set this option would be when knowingly using a Java +library that requires the access, if there is no update or alternative to using +that library. More modern code would use later APIs for which access can be +selectively granted to specific modules. + +#### `--illegal-native-access=deny` + +This setting is first available in Java 24 and should be used whenever +available. Without this setting, in the absence of policy enforcement, +any Java code can execute native code. There is arguably no good reason to +relax this setting, as options already exist to selectively grant such access +to specific modules that need it, if any. + +#### Module system protections + +Java's module system is one of the most important remaining mechanisms for +limiting what Java code may be able to do. Keeping unneeded modules out of the +module graph, advantageous already for startup speed and memory footprint, +also means whatever those modules do won't be available to Java code. + +The supplied [examples jar][examples] provides a function, [java_modules][], +that can be used to see what modules have been resolved into Java's boot module +layer. + +The `--limit-modules` VM option can be effectively used to resolve fewer modules +when PL/Java loads. As of this writing, in early 2025, starting PL/Java with no +`--add-modules` or `--limit-modules` options results in 48 modules in the graph, +while a simple `--limit-modules=org.postgresql.pljava.internal` added to +`pljava.vmoptions` reduces the graph to nine modules---all the transitive +requirements of PL/Java itself---and all of PL/Java's supplied examples +successfully run. Any additional modules needed for user code can be added back +with `--add-modules`. More details at [Limiting the module graph][limiting]. + +The `--sun-misc-unsafe-memory-access=deny` option mentioned above denies access +to certain methods of the `sun.misc.Unsafe` class, which is supplied by +the `jdk.unsupported` module. 
It may be preferable, when there is no other need +for it, to also make sure `jdk.unsupported` is not present in the module graph +at all. + +##### Modularize code needing special access + +It is currently less convenient in PL/Java 1.6 to provide user code in modular +form: the `sqlj.install_jar` and `sqlj.set_classpath` functions manage a class +path, not a module path. Supplying a module requires placing it on the file +system and adding it to `pljava.module_path`. + +The extra inconvenience may be worthwhile in some cases where there is a subset +of code that requires special treatment, such as an exception to the native +access restriction. Placing just that code into a named module on the module +path allows the exception to be made just for that module by name. With the +removal of Java's former fine-grained policy permissions, such module-level +exceptions are the finest-grained controls remaining in stock Java. + +For news of possible directions for policy enforcement in future PL/Java +versions, please bookmark [this wiki page][jep411]. + +### Defensive coding + +#### Java system properties + +It can be laborious to audit a code base for assumptions that a given Java +system property has a value that is reliable. In the case of no policy +enforcement, when any system property can be changed by any code at any time, +best practice is to rely on defensive copies taken early, before arbitrary +user code can have run. + +For example, `PrintWriter.println` uses a copy of the `line.separator` property +taken early in the JVM's own initialization, so code that relies on `println` to +write a newline will be more dependable than code using `line.separator` +directly. + +PL/Java itself takes a defensive copy of all system properties early in its own +startup, immediately after adding the properties that PL/Java sets. 
The +`frozenSystemProperties` method of the `org.postgresql.pljava.Session` object +returns this defensive copy, as a subclass of `java.util.Properties` that is +unmodifiable (throwing `UnsupportedOperationException` from methods where a +modification would otherwise result). + +[policy]: policy.html +[jpms]: jpms.html +[vmoptions]: ../install/vmoptions.html +[vbls]: variables.html +[jep411]: https://github.com/tada/pljava/wiki/JEP-411 +[selinux]: ../install/selinux.html +[sepgsql]: https://www.postgresql.org/docs/17/sepgsql.html +[limiting]: jpms.html#Limiting_the_module_graph +[mappedudt]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/MappedUDT.html +[examples]: ../examples/examples.html +[java_modules]: ../pljava-examples/apidocs/org/postgresql/pljava/example/annotation/Modules.html#method-detail +[smprop]: ../install/smproperty.html diff --git a/src/site/markdown/use/use.md b/src/site/markdown/use/use.md index c25bef799..4fe219488 100644 --- a/src/site/markdown/use/use.md +++ b/src/site/markdown/use/use.md @@ -34,13 +34,26 @@ Several [configuration variables](variables.html) can affect PL/Java's operation, including some common PostgreSQL variables as well as PL/Java's own. +### Enabling additional Java modules + +By default, PL/Java code can see a small set of Java modules, including +`java.base` and `java.sql` and a few others. To include others, use +[`--add-modules` in `pljava.vmoptions`][addm]. + +[addm]: ../install/vmoptions.html#Adding_to_the_set_of_readable_modules + ## Special topics ### Configuring permissions -The permissions in effect for PL/Java functions can be tailored, independently -for functions declared to the `TRUSTED` or untrusted language, as described -[here](policy.html). +When PL/Java is used with Java 23 or earlier, the permissions in effect +for PL/Java functions can be tailored, independently for functions declared to +the `TRUSTED` or untrusted language, as described [here](policy.html). 
+ +When PL/Java is used with stock Java 24 or later, no such tailoring of +permissions is possible, and the +[PL/Java with no policy enforcement](unenforced.html) page should be carefully +reviewed. #### Tailoring permissions for code migrated from PL/Java pre-1.6 @@ -51,6 +64,69 @@ to run with a 'trial' policy initially, allowing code to run but logging permissions that may need to be added in `pljava.policy`. How to do that is described [here](trial.html). +### Catching and handling PostgreSQL exceptions in Java + +If the Java code calls back into PostgreSQL (such as through the internal JDBC +interface), errors reported by PostgreSQL are turned into Java exceptions and +can be caught in Java `catch` clauses, but they need to be properly handled. +More at [Catching PostgreSQL exceptions in Java](catch.html). + +### Debugging PL/Java functions + +#### Java exception stack traces + +PL/Java catches any Java exceptions uncaught by your Java code, and passes them +on as familiar PostgreSQL errors that will be reported to the client, or can be +caught, as with PL/pgSQL's `EXCEPTION` clause. However, the created PostgreSQL +error does not include the stack trace of the original Java exception. + +If either of the PostgreSQL settings `client_min_messages` or `log_min_messages` +is `DEBUG1` or finer, the Java exception stack trace will be printed to +the standard error channel of the backend process, where it will be collected +and saved in the server log if the PostgreSQL setting `logging_collector` is on. +Otherwise, it will go wherever the error channel of the backend process is +directed, possibly nowhere. 
+ +#### Connecting a debugger + +To allow connecting a Java debugger, the PostgreSQL setting `pljava.vmoptions` +can be changed, in a particular session, to contain a string like: + +``` +-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=localhost:0 +``` + +On the first action in that session that uses PL/Java, the debugger transport +will be set up as specified. For the example above, PL/Java will listen for +a connection from a Java debugger at a randomly-chosen port, which will be +identified with this message (where _nnnnn_ is the port number): + +``` +Listening for transport dt_socket at address: nnnnn +``` + +A Java debugger can then be started and attached to the listening address and +port. + +The "Listening" message, however, is written to the standard output channel +of the PostgreSQL backend process. It may be immediately visible if you are +running PostgreSQL in a [test harness](../develop/node.html), but in a +production setting it may go nowhere. In such a setting, you may prefer to set +a specific port number, rather than 0, in the `pljava.vmoptions` setting, to +be sure of the port the debugger should attach to. Choosing a port that is not +already in use is then up to you. + +As an alternative, `server=y` can be changed to `server=n`, and PL/Java will +then attempt to attach to an already-listening debugger process. The +address:port should be adjusted to reflect where the debugger process is +listening. + +With `suspend=n`, PL/Java proceeds normally without waiting for the debugger +connection, but the debugger will be able to set break or watch points, and will +have control when Java exceptions are thrown. With `suspend=y`, PL/Java only +proceeds once the debugger is connected and in control. This setting is more +commonly used for debugging PL/Java itself. 
+ ### The thread context class loader Starting with PL/Java 1.6.3, within an SQL-declared PL/Java function, the @@ -90,8 +166,8 @@ significant advantages to using the ### Parallel query -PostgreSQL 9.3 introduced [background worker processes][bgworker] -(though at least PostgreSQL 9.5 is needed for support in PL/Java), +PL/Java understands [background worker processes][bgworker] +in PostgreSQL 9.5 and later, and PostgreSQL 9.6 introduced [parallel query][parq]. For details on PL/Java in a background worker or parallel query, see @@ -106,7 +182,7 @@ PL/Java will work most seamlessly when the server encoding in PostgreSQL is `UTF8`. For other cases, please see the [character encoding notes][charsets]. [hello]: hello.html -[pljapi]: ../pljava-api/apidocs/index.html?org/postgresql/pljava/package-summary.html#package_description +[pljapi]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/package-summary.html#package-description [uwik]: https://github.com/tada/pljava/wiki/User-guide [examples]: ../examples/examples.html [charsets]: charsets.html diff --git a/src/site/markdown/use/variables.md b/src/site/markdown/use/variables.md index 867b847b3..a26055327 100644 --- a/src/site/markdown/use/variables.md +++ b/src/site/markdown/use/variables.md @@ -30,6 +30,21 @@ These PostgreSQL configuration variables can influence PL/Java's operation: define what any values outside ASCII represent; it is usable, but [subject to limitations][sqlascii]. +`pljava.allow_unenforced` +: Only used when PL/Java is run with no policy enforcement, this setting is + a list of language names (such as `javau` and `java`) in which functions + will be allowed to execute. This setting has an empty default, and should + only be changed after careful review of the + [PL/Java with no policy enforcement][unenforced] page. 
+ +`pljava.allow_unenforced_udt` +: Only used when PL/Java is run with no policy enforcement, this on/off + setting controls whether data conversion functions associated with + PL/Java [mapped user-defined types][mappedudt] + will be allowed to execute. This setting defaults to off, and should + only be changed after careful review of the + [PL/Java with no policy enforcement][unenforced] page. + `pljava.debug` : A boolean variable that, if set `on`, stops the process on first entry to PL/Java before the Java virtual machine is started. The process cannot @@ -40,8 +55,7 @@ These PostgreSQL configuration variables can influence PL/Java's operation: `pljava.enable` : Setting this variable `off` prevents PL/Java startup from completing, until - the variable is later set `on`. It can be useful when - [installing PL/Java on PostgreSQL versions before 9.2][pre92]. + the variable is later set `on`. It can be useful in some debugging settings. `pljava.implementors` : A list of "implementor names" that PL/Java will recognize when processing @@ -51,7 +65,8 @@ These PostgreSQL configuration variables can influence PL/Java's operation: only on a system recognizing that name. By default, this list contains only the entry `postgresql`. A deployment descriptor that contains commands with other implementor names can achieve a rudimentary kind of conditional - execution if earlier commands adjust this list of names. _Commas separate + execution if earlier commands adjust this list of names, as described + [here][condex]. _Commas separate elements of this list. Elements that are not regular identifiers need to be surrounded by double-quotes; prior to PostgreSQL 11, that syntax can be used directly in a `SET` command, while in 11 and after, such a value needs to be @@ -93,6 +108,10 @@ These PostgreSQL configuration variables can influence PL/Java's operation: object (filename typically ending with `.so`, `.dll`, or `.dylib`). 
To determine the proper setting, see [finding the `libjvm` library][fljvm]. + The version of the Java library pointed to by this variable will determine + whether PL/Java can run [with security policy enforcement][policy] or + [with no policy enforcement][unenforced]. + `pljava.module_path` : The module path to be passed to the Java application class loader. The default is computed from the PostgreSQL configuration and is usually correct, unless @@ -101,13 +120,20 @@ These PostgreSQL configuration variables can influence PL/Java's operation: PL/Java API jar file and the PL/Java internals jar file. To determine the proper setting, see [finding the files produced by a PL/Java build](../install/locate.html). + + If additional modular jars are added to the module path, + `--add-modules` in [`pljava.vmoptions`][addm] will make them readable by + PL/Java code. + For more on PL/Java's "module path" and "class path", see [PL/Java and the Java Platform Module System](jpms.html). `pljava.policy_urls` -: A list of URLs to Java security [policy files](policy.html) determining - the permissions available to PL/Java functions. Each URL should be - enclosed in double quotes; any double quote that is literally part of +: Only used when PL/Java is running [with security policy enforcement][policy]. + When running [with no policy enforcement][unenforced], this variable is + ignored. It is a list of URLs to Java security [policy files][policy] + determining the permissions available to PL/Java functions. Each URL should + be enclosed in double quotes; any double quote that is literally part of the URL may be represented as two double quotes (in SQL style) or as `%22` in the URL convention. Between double-quoted URLs, a comma is the list delimiter. @@ -170,7 +196,13 @@ These PostgreSQL configuration variables can influence PL/Java's operation: may be adjusted in a future PL/Java version. Some important settings can be made here, and are described on the - [VM options page][vmop]. 
+  [VM options page][vmop]. For Java 18 and later, this variable must include
+  a `-Djava.security.manager=allow` or `-Djava.security.manager=disallow`
+  setting, determining whether PL/Java will run
+  [with security policy enforcement][policy] or
+  [with no policy enforcement][unenforced], and those pages should be reviewed
+  for the implications of the choice. Details vary by Java version; see
+  [Available policy-enforcement settings by Java version][smprop].
 
 [pre92]: ../install/prepg92.html
 [depdesc]: https://github.com/tada/pljava/wiki/Sql-deployment-descriptor
@@ -181,3 +213,9 @@ These PostgreSQL configuration variables can influence PL/Java's operation:
 [jou]: https://docs.oracle.com/javase/8/docs/technotes/tools/unix/java.html
 [vmop]: ../install/vmoptions.html
 [sqlascii]: charsets.html#Using_PLJava_with_server_encoding_SQL_ASCII
+[addm]: ../install/vmoptions.html#Adding_to_the_set_of_readable_modules
+[condex]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/package-summary.html#conditional-execution-in-the-deployment-descriptor-heading
+[policy]: policy.html
+[unenforced]: unenforced.html
+[mappedudt]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/MappedUDT.html
+[smprop]: ../install/smproperty.html