diff --git a/.github/workflows/ci-packagedpg.yml b/.github/workflows/ci-packagedpg.yml new file mode 100644 index 000000000..a8feac535 --- /dev/null +++ b/.github/workflows/ci-packagedpg.yml @@ -0,0 +1,322 @@ +# This workflow will build and test PL/Java against versions of PostgreSQL +# installed from prebuilt packages. + +name: PL/Java CI with PostgreSQL prebuilt packaged versions + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + build: + if: false + + runs-on: ${{ matrix.oscc.os }} + strategy: + matrix: + oscc: + - os: ubuntu-latest + cc: gcc + - os: macos-latest + cc: clang + - os: windows-latest + cc: msvc + - os: windows-latest + cc: mingw + java: [9, 11, 12, 14, 15-ea] + pgsql: [12, 11, 10, 9.6, 9.5] + + steps: + + - name: Check out PL/Java + uses: actions/checkout@v2 + with: + path: pljava + + - name: Install PostgreSQL + shell: bash + run: echo here a miracle occurs + + - name: Set up JDK + uses: actions/setup-java@v1 + with: + java-version: ${{ matrix.java }} + + - name: Report Java, Maven, and PostgreSQL versions (Linux, macOS) + if: ${{ 'Windows' != runner.os }} + run: | + java -version + mvn --version + pg_config # might need attention to the path + + - name: Report Java, Maven, and PostgreSQL versions (Windows) + if: ${{ 'Windows' == runner.os }} + run: | + java -version + mvn --version + & "$Env:PGBIN\pg_config" # might need attention to the path + + - name: Build PL/Java (Linux, macOS) + if: ${{ 'Windows' != runner.os }} + working-directory: pljava + run: | + pgConfig=pg_config # might need attention to the path + mvn clean install --batch-mode \ + -Dpgsql.pgconfig="$pgConfig" \ + -Psaxon-examples -Ppgjdbc-ng \ + -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn + + - name: Build PL/Java (Windows MinGW-w64) + if: ${{ 'Windows' == runner.os && 'mingw' == matrix.oscc.cc }} + working-directory: pljava + # + # GitHub Actions will allow 'bash' as a shell choice, even on a 
Windows + # runner, in which case it's the bash from Git for Windows. That isn't the + # same as the msys64\usr\bin\bash that we want; what's more, while both + # rely on a cygwin DLL, they don't rely on the same one, and an attempt + # to exec one from the other leads to a "fatal error - cygheap base + # mismatch". So, the bash we want has to be started by something other + # than the bash we've got. In this case, set shell: to a command that + # will use cmd to start the right bash. + # + # Some of the MinGW magic is set up by the bash profile run at "login", so + # bash must be started with -l. That profile ends with a cd $HOME, so to + # avoid changing the current directory, set HOME=. first (credit for that: + # https://superuser.com/a/806371). As set above, . is really the pljava + # working-directory, so the bash script should start by resetting HOME to + # the path of its parent. + # + # The runner is provisioned with a very long PATH that includes separate + # bin directories for pre-provisioned packages. The MinGW profile replaces + # that with a much shorter path, so mvn and pg_config below must be given + # as absolute paths (using M2 and PGBIN supplied in the environment) or + # they won't be found. As long as mvn itself can be found, it is able + # to find java without difficulty, using the JAVA_HOME that is also in + # the environment. + # + # Those existing variables in the environment are all spelled in Windows + # style with drive letters, colons, and backslashes, rather than the MinGW + # unixy style, but the mingw bash doesn't seem to object. + # + # If you use the runner-supplied bash to examine the environment, you will + # see MSYSTEM=MINGW64 already in it, but that apparently is something the + # runner-supplied bash does. It must be set here before invoking the MinGW + # bash directly. + # + env: + HOME: . + MSYSTEM: MINGW64 + shell: 'cmd /C "c:\msys64\usr\bin\bash -l "{0}""' + run: | + HOME=$( (cd .. 
&& pwd) ) + pgConfig="$PGBIN"'\pg_config' # might need attention to the path + "$M2"/mvn clean install --batch-mode \ + -Dpgsql.pgconfig="$pgConfig" \ + -Psaxon-examples -Ppgjdbc-ng \ + -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn + + - name: Install and test PL/Java + if: ${{ '9' != matrix.java || 'Windows' != runner.os }} + working-directory: pljava + shell: bash + run: | + pgConfig=pg_config # might need attention to the path + + packageJar=$(find pljava-packaging -name pljava-pg*.jar -print) + + mavenRepo="$HOME/.m2/repository" + + saxonVer=$( + find "$mavenRepo/net/sf/saxon/Saxon-HE" \ + -name 'Saxon-HE-*.jar' -print | + sort | + tail -n 1 + ) + saxonVer=${saxonVer%/*} + saxonVer=${saxonVer##*/} + + jdbcJar=$( + find "$mavenRepo/com/impossibl/pgjdbc-ng/pgjdbc-ng-all" \ + -name 'pgjdbc-ng-all-*.jar' -print | + sort | + tail -n 1 + ) + + # + # The runner on a Unix-like OS is running as a non-privileged user, but + # has passwordless sudo available (needed to install the PL/Java files + # into the system directories where the supplied PostgreSQL lives). By + # contrast, on Windows the runner has admin privilege, and can install + # the files without any fuss (but later below, pg_ctl will have to be + # used when starting PostgreSQL; pg_ctl has a Windows-specific ability + # to drop admin privs so postgres will not refuse to start). + # + # The Windows runner seems to have an extra pg_config somewhere on the + # path, that reports it was built with MinGW and installed in paths + # containing Strawberry that don't really exist. $PGBIN\pg_config refers + # to a different build made with MSVC, and those directories really + # exist, so specify that one explicitly when running on Windows. + # + # The Git for Windows bash environment includes a find command, and the + # things found have unixy paths returned. 
Make them Windowsy here, with + # a hardcoded assumption they start with /c which should become c: (as + # appears to be the case in the Windows runner currently). + # + if [[ $RUNNER_OS == Windows ]] + then + pathSep=';' + pgConfig="$PGBIN"'\pg_config' + java -Dpgconfig="$pgConfig" -jar "$packageJar" + function toWindowsPath() { + local p + p="c:${1#/c}" + printf "%s" "${p//\//\\}" + } + jdbcJar="$(toWindowsPath "$jdbcJar")" + mavenRepo="$(toWindowsPath "$mavenRepo")" + else + pathSep=':' + sudo "$JAVA_HOME"/bin/java -Dpgconfig="$pgConfig" -jar "$packageJar" + fi + + jshell \ + -execution local \ + "-J--class-path=$packageJar$pathSep$jdbcJar" \ + "--class-path=$packageJar" \ + "-J--add-modules=java.sql,java.sql.rowset" \ + "-J-Dpgconfig=$pgConfig" \ + "-J-Dcom.impossibl.shadow.io.netty.noUnsafe=true" \ + "-J-DmavenRepo=$mavenRepo" \ + "-J-DsaxonVer=$saxonVer" - <<\ENDJSHELL + + boolean succeeding = false; // begin pessimistic + + import static java.nio.file.Paths.get + import java.sql.Connection + import org.postgresql.pljava.packaging.Node + import static org.postgresql.pljava.packaging.Node.q + import static org.postgresql.pljava.packaging.Node.stateMachine + import static org.postgresql.pljava.packaging.Node.isVoidResultSet + import static org.postgresql.pljava.packaging.Node.s_isWindows + + Path javaLibDir = + get(System.getProperty("java.home"), s_isWindows ? "bin" : "lib") + + Path libjvm = ( + "Mac OS X".equals(System.getProperty("os.name")) + ? Stream.of("libjli.dylib", "jli/libjli.dylib") + .map(s -> javaLibDir.resolve(s)) + .filter(Files::exists).findFirst().get() + : javaLibDir.resolve(s_isWindows ? "jvm.dll" : "server/libjvm.so") + ); + + String vmopts = "-enableassertions:org.postgresql.pljava... 
-Xcheck:jni" + + Node n1 = Node.get_new_node("TestNode1") + + if ( s_isWindows ) + n1.use_pg_ctl(true) + + /* + * Keep a tally of the three types of diagnostic notices that may be + * received, and, independently, how many represent no-good test results + * (error always, but also warning if seen from the tests in the + * examples.jar deployment descriptor). + */ + Map results = + Stream.of("info", "warning", "error", "ng").collect( + LinkedHashMap::new, + (m,k) -> m.put(k, 0), (r,s) -> {}) + + boolean isDiagnostic(Object o, Set whatIsNG) + { + if ( ! ( o instanceof Throwable ) ) + return false; + String[] parts = Node.classify((Throwable)o); + String type = parts[0]; + results.compute(type, (k,v) -> 1 + v); + if ( whatIsNG.contains(type) ) + results.compute("ng", (k,v) -> 1 + v); + return true; + } + + try ( + AutoCloseable t1 = n1.initialized_cluster(); + AutoCloseable t2 = n1.started_server(Map.of( + "client_min_messages", "info", + "pljava.vmoptions", vmopts, + "pljava.libjvm_location", libjvm.toString() + )); + ) + { + try ( Connection c = n1.connect() ) + { + succeeding = true; // become optimistic, will be using &= below + + succeeding &= stateMachine( + "create extension no result", + null, + + q(c, "create extension pljava") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + // state 1: consume any diagnostics, or to state 2 with same item + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + + // state 2: must be end of input + (o,p,q) -> null == o + ); + } + + /* + * Get a new connection; 'create extension' always sets a near-silent + * logging level, and PL/Java only checks once at VM start time, so in + * the same session where 'create extension' was done, logging is + * somewhat suppressed. 
+ */ + try ( Connection c = n1.connect() ) + { + succeeding &= stateMachine( + "saxon path examples path", + null, + + Node.installSaxonAndExamplesAndPath(c, + System.getProperty("mavenRepo"), + System.getProperty("saxonVer"), + true) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + // states 1,2: diagnostics* then a void result set (saxon install) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + + // states 3,4: diagnostics* then a void result set (set classpath) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 3 : -4, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 5 : false, + + // states 5,6: diagnostics* then void result set (example install) + (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 5 : -6, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 7 : false, + + // states 7,8: diagnostics* then a void result set (set classpath) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 7 : -8, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 9 : false, + + // state 9: must be end of input + (o,p,q) -> null == o + ); + } + } catch ( Throwable t ) + { + succeeding = false; + throw t; + } + + System.out.println(results); + succeeding &= (0 == results.get("ng")); + System.exit(succeeding ? 0 : 1) + ENDJSHELL diff --git a/.github/workflows/ci-runnerpg.yml b/.github/workflows/ci-runnerpg.yml new file mode 100644 index 000000000..8d95a0d24 --- /dev/null +++ b/.github/workflows/ci-runnerpg.yml @@ -0,0 +1,409 @@ +# This workflow will build and test PL/Java against the version of PostgreSQL +# preinstalled in the GitHub Actions runner environment. Naturally, this one +# does not have a PostgreSQL version in the build matrix. The version that's +# preinstalled is the version you get. 
+ +name: PL/Java CI with PostgreSQL version supplied by the runner + +on: + push: + branches: [ master, REL1_6_STABLE ] + pull_request: + branches: [ master, REL1_6_STABLE ] + +jobs: + build: + if: true + + runs-on: ${{ matrix.oscc.os }} + continue-on-error: true + strategy: + matrix: + oscc: + - os: ubuntu-latest + cc: gcc + - os: macos-latest + cc: clang +# - os: windows-latest +# cc: msvc +# - os: windows-latest +# cc: mingw + java: [9, 11] # , 12, 14, 15] + + steps: + + - name: Check out PL/Java + uses: actions/checkout@v2 + with: + path: pljava + + - name: Set up JDK + uses: actions/setup-java@v1 + with: + java-version: ${{ matrix.java }} + + - name: Report Java, Maven, and PostgreSQL versions (Linux, macOS) + if: ${{ 'Windows' != runner.os }} + run: | + java -version + mvn --version + pg_config + + - name: Report Java, Maven, and PostgreSQL versions (Windows) + if: ${{ 'Windows' == runner.os }} + run: | + java -version + mvn --version + & "$Env:PGBIN\pg_config" + + - name: Obtain PG development files (Ubuntu, PGDG) + if: ${{ 'Linux' == runner.os }} + run: | + sudo apt-get update + sudo apt-get install postgresql-server-dev-13 libkrb5-dev + + - name: Build PL/Java (Linux, macOS) + if: ${{ 'Windows' != runner.os }} + working-directory: pljava + run: | + mvn clean install --batch-mode \ + -Psaxon-examples -Ppgjdbc-ng \ + -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn + + - name: Build PL/Java (Windows MinGW-w64) + if: ${{ 'Windows' == runner.os && 'mingw' == matrix.oscc.cc }} + working-directory: pljava + # + # GitHub Actions will allow 'bash' as a shell choice, even on a Windows + # runner, in which case it's the bash from Git for Windows. That isn't the + # same as the msys64\usr\bin\bash that we want; what's more, while both + # rely on a cygwin DLL, they don't rely on the same one, and an attempt + # to exec one from the other leads to a "fatal error - cygheap base + # mismatch". 
So, the bash we want has to be started by something other + # than the bash we've got. In this case, set shell: to a command that + # will use cmd to start the right bash. + # + # Some of the MinGW magic is set up by the bash profile run at "login", so + # bash must be started with -l. That profile ends with a cd $HOME, so to + # avoid changing the current directory, set HOME=. first (credit for that: + # https://superuser.com/a/806371). As set above, . is really the pljava + # working-directory, so the bash script should start by resetting HOME to + # the path of its parent. + # + # The runner is provisioned with a very long PATH that includes separate + # bin directories for pre-provisioned packages. The MinGW profile replaces + # that with a much shorter path, so mvn and pg_config below must be given + # as absolute paths (using M2 and PGBIN supplied in the environment) or + # they won't be found. As long as mvn itself can be found, it is able + # to find java without difficulty, using the JAVA_HOME that is also in + # the environment. + # + # Those existing variables in the environment are all spelled in Windows + # style with drive letters, colons, and backslashes, rather than the MinGW + # unixy style, but the mingw bash doesn't seem to object. + # + # If you use the runner-supplied bash to examine the environment, you will + # see MSYSTEM=MINGW64 already in it, but that apparently is something the + # runner-supplied bash does. It must be set here before invoking the MinGW + # bash directly. + # + env: + HOME: . + MSYSTEM: MINGW64 + shell: 'cmd /C "c:\msys64\usr\bin\bash -l "{0}""' + run: | + HOME=$( (cd .. 
&& pwd) ) + "$M2"/mvn clean install --batch-mode \ + -Dpgsql.pgconfig="$PGBIN"'\pg_config' \ + -Psaxon-examples -Ppgjdbc-ng \ + -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn + + - name: Install and test PL/Java + if: ${{ '9' != matrix.java || 'Windows' != runner.os }} + working-directory: pljava + shell: bash + run: | + pgConfig=pg_config # runner-supplied, just get it from the PATH + + packageJar=$(find pljava-packaging -name pljava-pg*.jar -print) + + mavenRepo="$HOME/.m2/repository" + + saxonVer=$( + find "$mavenRepo/net/sf/saxon/Saxon-HE" \ + -name 'Saxon-HE-*.jar' -print | + sort | + tail -n 1 + ) + saxonVer=${saxonVer%/*} + saxonVer=${saxonVer##*/} + + jdbcJar=$( + find "$mavenRepo/com/impossibl/pgjdbc-ng/pgjdbc-ng-all" \ + -name 'pgjdbc-ng-all-*.jar' -print | + sort | + tail -n 1 + ) + + # + # The runner on a Unix-like OS is running as a non-privileged user, but + # has passwordless sudo available (needed to install the PL/Java files + # into the system directories where the supplied PostgreSQL lives). By + # contrast, on Windows the runner has admin privilege, and can install + # the files without any fuss (but later below, pg_ctl will have to be + # used when starting PostgreSQL; pg_ctl has a Windows-specific ability + # to drop admin privs so postgres will not refuse to start). + # + # The Windows runner seems to have an extra pg_config somewhere on the + # path, that reports it was built with MinGW and installed in paths + # containing Strawberry that don't really exist. $PGBIN\pg_config refers + # to a different build made with MSVC, and those directories really + # exist, so specify that one explicitly when running on Windows. + # + # The Git for Windows bash environment includes a find command, and the + # things found have unixy paths returned. Make them Windowsy here, with + # a hardcoded assumption they start with /c which should become c: (as + # appears to be the case in the Windows runner currently). 
+ # + if [[ $RUNNER_OS == Windows ]] + then + pathSep=';' + pgConfig="$PGBIN"'\pg_config' + java -Dpgconfig="$pgConfig" -jar "$packageJar" + function toWindowsPath() { + local p + p="c:${1#/c}" + printf "%s" "${p//\//\\}" + } + jdbcJar="$(toWindowsPath "$jdbcJar")" + mavenRepo="$(toWindowsPath "$mavenRepo")" + else + pathSep=':' + sudo "$JAVA_HOME"/bin/java -Dpgconfig="$pgConfig" -jar "$packageJar" + fi + + jshell \ + -execution local \ + "-J--class-path=$packageJar$pathSep$jdbcJar" \ + "--class-path=$packageJar" \ + "-J--add-modules=java.sql.rowset" \ + "-J-Dpgconfig=$pgConfig" \ + "-J-Dcom.impossibl.shadow.io.netty.noUnsafe=true" \ + "-J-DmavenRepo=$mavenRepo" \ + "-J-DsaxonVer=$saxonVer" - <<\ENDJSHELL + + boolean succeeding = false; // begin pessimistic + + import static java.nio.file.Files.createTempFile + import static java.nio.file.Files.write + import java.nio.file.Path + import static java.nio.file.Paths.get + import java.sql.Connection + import java.sql.PreparedStatement + import java.sql.ResultSet + import org.postgresql.pljava.packaging.Node + import static org.postgresql.pljava.packaging.Node.q + import static org.postgresql.pljava.packaging.Node.stateMachine + import static org.postgresql.pljava.packaging.Node.isVoidResultSet + import static org.postgresql.pljava.packaging.Node.s_isWindows + + String javaHome = System.getProperty("java.home"); + + Path javaLibDir = get(javaHome, s_isWindows ? "bin" : "lib") + + Path libjvm = ( + "Mac OS X".equals(System.getProperty("os.name")) + ? Stream.of("libjli.dylib", "jli/libjli.dylib") + .map(s -> javaLibDir.resolve(s)) + .filter(Files::exists).findFirst().get() + : javaLibDir.resolve(s_isWindows ? "jvm.dll" : "server/libjvm.so") + ); + + String vmopts = "-enableassertions:org.postgresql.pljava... 
-Xcheck:jni" + + Node n1 = Node.get_new_node("TestNode1") + + if ( s_isWindows ) + n1.use_pg_ctl(true) + + /* + * Keep a tally of the three types of diagnostic notices that may be + * received, and, independently, how many represent no-good test results + * (error always, but also warning if seen from the tests in the + * examples.jar deployment descriptor). + */ + Map results = + Stream.of("info", "warning", "error", "ng").collect( + LinkedHashMap::new, + (m,k) -> m.put(k, 0), (r,s) -> {}) + + boolean isDiagnostic(Object o, Set whatIsNG) + { + if ( ! ( o instanceof Throwable ) ) + return false; + String[] parts = Node.classify((Throwable)o); + String type = parts[0]; + results.compute(type, (k,v) -> 1 + v); + if ( whatIsNG.contains(type) ) + results.compute("ng", (k,v) -> 1 + v); + return true; + } + + try ( + AutoCloseable t1 = n1.initialized_cluster(); + AutoCloseable t2 = n1.started_server(Map.of( + "client_min_messages", "info", + "pljava.vmoptions", vmopts, + "pljava.libjvm_location", libjvm.toString() + )); + ) + { + try ( Connection c = n1.connect() ) + { + succeeding = true; // become optimistic, will be using &= below + + succeeding &= stateMachine( + "create extension no result", + null, + + q(c, "create extension pljava") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + // state 1: consume any diagnostics, or to state 2 with same item + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + + // state 2: must be end of input + (o,p,q) -> null == o + ); + } + + /* + * Get a new connection; 'create extension' always sets a near-silent + * logging level, and PL/Java only checks once at VM start time, so in + * the same session where 'create extension' was done, logging is + * somewhat suppressed. 
+ */ + try ( Connection c = n1.connect() ) + { + succeeding &= stateMachine( + "saxon path examples path", + null, + + Node.installSaxonAndExamplesAndPath(c, + System.getProperty("mavenRepo"), + System.getProperty("saxonVer"), + true) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + // states 1,2: diagnostics* then a void result set (saxon install) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + + // states 3,4: diagnostics* then a void result set (set classpath) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 3 : -4, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 5 : false, + + // states 5,6: diagnostics* then void result set (example install) + (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 5 : -6, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 7 : false, + + // states 7,8: diagnostics* then a void result set (set classpath) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 7 : -8, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 9 : false, + + // state 9: must be end of input + (o,p,q) -> null == o + ); + + /* + * Exercise TrialPolicy some. Need another connection to change + * vmoptions. Uses some example functions, so insert here before the + * test of undeploying the examples. 
+ */ + try ( Connection c2 = n1.connect() ) + { + Path trialPolicy = + createTempFile(n1.data_dir().getParent(), "trial", "policy"); + + write(trialPolicy, List.of( + "grant {", + " permission", + " org.postgresql.pljava.policy.TrialPolicy$Permission;", + "};" + )); + + PreparedStatement setVmOpts = c2.prepareStatement( + "SELECT null::pg_catalog.void" + + " FROM pg_catalog.set_config('pljava.vmoptions', ?, false)" + ); + + setVmOpts.setString(1, vmopts + + " -Dorg.postgresql.pljava.policy.trial=" + trialPolicy.toUri()); + + succeeding &= stateMachine( + "change pljava.vmoptions", + null, + + q(setVmOpts, setVmOpts::execute) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + (o,p,q) -> null == o + ); + + PreparedStatement tryForbiddenRead = c2.prepareStatement( + "SELECT" + + " CASE WHEN javatest.java_getsystemproperty('java.home')" + + " OPERATOR(pg_catalog.=) ?" + + " THEN javatest.logmessage('INFO', 'trial policy test ok')" + + " ELSE javatest.logmessage('WARNING', 'trial policy test ng')" + + " END" + ); + + tryForbiddenRead.setString(1, javaHome); + + succeeding &= stateMachine( + "try to read a forbidden property", + null, + + q(tryForbiddenRead, tryForbiddenRead::execute) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + (o,p,q) -> null == o + ); + // done with connection c2 + } + + /* + * Also confirm that the generated undeploy actions work. + */ + succeeding &= stateMachine( + "remove jar void result", + null, + + q(c, "SELECT sqlj.remove_jar('examples', true)") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 
3 : false, + (o,p,q) -> null == o + ); + } + } catch ( Throwable t ) + { + succeeding = false; + throw t; + } + + System.out.println(results); + succeeding &= (0 == results.get("ng")); + System.exit(succeeding ? 0 : 1) + ENDJSHELL diff --git a/.github/workflows/ci-sourcepg.yml b/.github/workflows/ci-sourcepg.yml new file mode 100644 index 000000000..a33a5c449 --- /dev/null +++ b/.github/workflows/ci-sourcepg.yml @@ -0,0 +1,329 @@ +# This workflow will build and test PL/Java against versions of PostgreSQL +# built from source. + +name: PL/Java CI with PostgreSQL versions built from source + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + build: + if: false + + runs-on: ${{ matrix.oscc.os }} + strategy: + matrix: + oscc: + - os: ubuntu-latest + cc: gcc + - os: macos-latest + cc: clang + - os: windows-latest + cc: msvc + - os: windows-latest + cc: mingw + java: [9, 11, 12, 14, 15-ea] + pgsql: [REL_12_4, REL_11_9, REL_10_14, REL9_6_19, REL9_5_23] + + steps: + + - name: Check out PL/Java + uses: actions/checkout@v2 + with: + path: pljava + + - name: Check out PostgreSQL + uses: actions/checkout@v2 + with: + path: postgresql + repository: postgres/postgres + ref: ${{ matrix.pgsql }} + + - name: Configure and build PostgreSQL + shell: bash + run: echo here a miracle occurs + + - name: Set up JDK + uses: actions/setup-java@v1 + with: + java-version: ${{ matrix.java }} + + - name: Report Java, Maven, and PostgreSQL versions (Linux, macOS) + if: ${{ 'Windows' != runner.os }} + run: | + java -version + mvn --version + pg_config # might need attention to the path + + - name: Report Java, Maven, and PostgreSQL versions (Windows) + if: ${{ 'Windows' == runner.os }} + run: | + java -version + mvn --version + & "$Env:PGBIN\pg_config" # might need attention to the path + + - name: Build PL/Java (Linux, macOS) + if: ${{ 'Windows' != runner.os }} + working-directory: pljava + run: | + pgConfig=pg_config # might need attention to the path + mvn 
clean install --batch-mode \ + -Dpgsql.pgconfig="$pgConfig" \ + -Psaxon-examples -Ppgjdbc-ng \ + -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn + + - name: Build PL/Java (Windows MinGW-w64) + if: ${{ 'Windows' == runner.os && 'mingw' == matrix.oscc.cc }} + working-directory: pljava + # + # GitHub Actions will allow 'bash' as a shell choice, even on a Windows + # runner, in which case it's the bash from Git for Windows. That isn't the + # same as the msys64\usr\bin\bash that we want; what's more, while both + # rely on a cygwin DLL, they don't rely on the same one, and an attempt + # to exec one from the other leads to a "fatal error - cygheap base + # mismatch". So, the bash we want has to be started by something other + # than the bash we've got. In this case, set shell: to a command that + # will use cmd to start the right bash. + # + # Some of the MinGW magic is set up by the bash profile run at "login", so + # bash must be started with -l. That profile ends with a cd $HOME, so to + # avoid changing the current directory, set HOME=. first (credit for that: + # https://superuser.com/a/806371). As set above, . is really the pljava + # working-directory, so the bash script should start by resetting HOME to + # the path of its parent. + # + # The runner is provisioned with a very long PATH that includes separate + # bin directories for pre-provisioned packages. The MinGW profile replaces + # that with a much shorter path, so mvn and pg_config below must be given + # as absolute paths (using M2 and PGBIN supplied in the environment) or + # they won't be found. As long as mvn itself can be found, it is able + # to find java without difficulty, using the JAVA_HOME that is also in + # the environment. + # + # Those existing variables in the environment are all spelled in Windows + # style with drive letters, colons, and backslashes, rather than the MinGW + # unixy style, but the mingw bash doesn't seem to object. 
# + # If you use the runner-supplied bash to examine the environment, you will + # see MSYSTEM=MINGW64 already in it, but that apparently is something the + # runner-supplied bash does. It must be set here before invoking the MinGW + # bash directly. + # + env: + HOME: . + MSYSTEM: MINGW64 + shell: 'cmd /C "c:\msys64\usr\bin\bash -l "{0}""' + run: | + HOME=$( (cd .. && pwd) ) + pgConfig="$PGBIN"'\pg_config' # might need attention to the path + "$M2"/mvn clean install --batch-mode \ + -Dpgsql.pgconfig="$pgConfig" \ + -Psaxon-examples -Ppgjdbc-ng \ + -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn + + - name: Install and test PL/Java + if: ${{ '9' != matrix.java || 'Windows' != runner.os }} + working-directory: pljava + shell: bash + run: | + pgConfig=pg_config # might need attention to the path + + packageJar=$(find pljava-packaging -name pljava-pg*.jar -print) + + mavenRepo="$HOME/.m2/repository" + + saxonVer=$( + find "$mavenRepo/net/sf/saxon/Saxon-HE" \ + -name 'Saxon-HE-*.jar' -print | + sort | + tail -n 1 + ) + saxonVer=${saxonVer%/*} + saxonVer=${saxonVer##*/} + + jdbcJar=$( + find "$mavenRepo/com/impossibl/pgjdbc-ng/pgjdbc-ng-all" \ + -name 'pgjdbc-ng-all-*.jar' -print | + sort | + tail -n 1 + ) + + # + # The runner on a Unix-like OS is running as a non-privileged user, but + # has passwordless sudo available (needed to install the PL/Java files + # into the system directories where the supplied PostgreSQL lives). By + # contrast, on Windows the runner has admin privilege, and can install + # the files without any fuss (but later below, pg_ctl will have to be + # used when starting PostgreSQL; pg_ctl has a Windows-specific ability + # to drop admin privs so postgres will not refuse to start). + # + # The Windows runner seems to have an extra pg_config somewhere on the + # path, that reports it was built with MinGW and installed in paths + # containing Strawberry that don't really exist. 
$PGBIN\pg_config refers + # to a different build made with MSVC, and those directories really + # exist, so specify that one explicitly when running on Windows. + # + # The Git for Windows bash environment includes a find command, and the + # things found have unixy paths returned. Make them Windowsy here, with + # a hardcoded assumption they start with /c which should become c: (as + # appears to be the case in the Windows runner currently). + # + if [[ $RUNNER_OS == Windows ]] + then + pathSep=';' + pgConfig="$PGBIN"'\pg_config' + java -Dpgconfig="$pgConfig" -jar "$packageJar" + function toWindowsPath() { + local p + p="c:${1#/c}" + printf "%s" "${p//\//\\}" + } + jdbcJar="$(toWindowsPath "$jdbcJar")" + mavenRepo="$(toWindowsPath "$mavenRepo")" + else + pathSep=':' + sudo "$JAVA_HOME"/bin/java -Dpgconfig="$pgConfig" -jar "$packageJar" + fi + + jshell \ + -execution local \ + "-J--class-path=$packageJar$pathSep$jdbcJar" \ + "--class-path=$packageJar" \ + "-J--add-modules=java.sql,java.sql.rowset" \ + "-J-Dpgconfig=$pgConfig" \ + "-J-Dcom.impossibl.shadow.io.netty.noUnsafe=true" \ + "-J-DmavenRepo=$mavenRepo" \ + "-J-DsaxonVer=$saxonVer" - <<\ENDJSHELL + + boolean succeeding = false; // begin pessimistic + + import static java.nio.file.Paths.get + import java.sql.Connection + import org.postgresql.pljava.packaging.Node + import static org.postgresql.pljava.packaging.Node.q + import static org.postgresql.pljava.packaging.Node.stateMachine + import static org.postgresql.pljava.packaging.Node.isVoidResultSet + import static org.postgresql.pljava.packaging.Node.s_isWindows + + Path javaLibDir = + get(System.getProperty("java.home"), s_isWindows ? "bin" : "lib") + + Path libjvm = ( + "Mac OS X".equals(System.getProperty("os.name")) + ? Stream.of("libjli.dylib", "jli/libjli.dylib") + .map(s -> javaLibDir.resolve(s)) + .filter(Files::exists).findFirst().get() + : javaLibDir.resolve(s_isWindows ? 
"jvm.dll" : "server/libjvm.so") + ); + + String vmopts = "-enableassertions:org.postgresql.pljava... -Xcheck:jni" + + Node n1 = Node.get_new_node("TestNode1") + + if ( s_isWindows ) + n1.use_pg_ctl(true) + + /* + * Keep a tally of the three types of diagnostic notices that may be + * received, and, independently, how many represent no-good test results + * (error always, but also warning if seen from the tests in the + * examples.jar deployment descriptor). + */ + Map results = + Stream.of("info", "warning", "error", "ng").collect( + LinkedHashMap::new, + (m,k) -> m.put(k, 0), (r,s) -> {}) + + boolean isDiagnostic(Object o, Set whatIsNG) + { + if ( ! ( o instanceof Throwable ) ) + return false; + String[] parts = Node.classify((Throwable)o); + String type = parts[0]; + results.compute(type, (k,v) -> 1 + v); + if ( whatIsNG.contains(type) ) + results.compute("ng", (k,v) -> 1 + v); + return true; + } + + try ( + AutoCloseable t1 = n1.initialized_cluster(); + AutoCloseable t2 = n1.started_server(Map.of( + "client_min_messages", "info", + "pljava.vmoptions", vmopts, + "pljava.libjvm_location", libjvm.toString() + )); + ) + { + try ( Connection c = n1.connect() ) + { + succeeding = true; // become optimistic, will be using &= below + + succeeding &= stateMachine( + "create extension no result", + null, + + q(c, "create extension pljava") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + // state 1: consume any diagnostics, or to state 2 with same item + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + + // state 2: must be end of input + (o,p,q) -> null == o + ); + } + + /* + * Get a new connection; 'create extension' always sets a near-silent + * logging level, and PL/Java only checks once at VM start time, so in + * the same session where 'create extension' was done, logging is + * somewhat suppressed. 
+ */ + try ( Connection c = n1.connect() ) + { + succeeding &= stateMachine( + "saxon path examples path", + null, + + Node.installSaxonAndExamplesAndPath(c, + System.getProperty("mavenRepo"), + System.getProperty("saxonVer"), + true) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + // states 1,2: diagnostics* then a void result set (saxon install) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + + // states 3,4: diagnostics* then a void result set (set classpath) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 3 : -4, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 5 : false, + + // states 5,6: diagnostics* then void result set (example install) + (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 5 : -6, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 7 : false, + + // states 7,8: diagnostics* then a void result set (set classpath) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 7 : -8, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 9 : false, + + // state 9: must be end of input + (o,p,q) -> null == o + ); + } + } catch ( Throwable t ) + { + succeeding = false; + throw t; + } + + System.out.println(results); + succeeding &= (0 == results.get("ng")); + System.exit(succeeding ? 
0 : 1) + ENDJSHELL diff --git a/.travis.yml b/.travis.yml index ab9964768..68099e57c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,6 +6,10 @@ arch: - ppc64le dist: bionic env: + - POSTGRESQL_VERSION: 13 + JAVA_VERSION: 15 + JVM_IMPL: hotspot + MVN_VERSION: 3.5.2 - POSTGRESQL_VERSION: 12 JAVA_VERSION: 14 JVM_IMPL: hotspot @@ -151,8 +155,13 @@ script: | boolean succeeding = false; // begin pessimistic + import static java.nio.file.Files.createTempFile + import static java.nio.file.Files.write + import java.nio.file.Path import static java.nio.file.Paths.get import java.sql.Connection + import java.sql.PreparedStatement + import java.sql.ResultSet import org.postgresql.pljava.packaging.Node import static org.postgresql.pljava.packaging.Node.q import static org.postgresql.pljava.packaging.Node.stateMachine @@ -250,6 +259,121 @@ script: | // state 9: must be end of input (o,p,q) -> null == o ); + + /* + * Exercise TrialPolicy some. Need another connection to change + * vmoptions. Uses some example functions, so insert here before the + * test of undeploying the examples. + */ + try ( Connection c2 = n1.connect() ) + { + Path trialPolicy = + createTempFile(n1.data_dir().getParent(), "trial", "policy"); + + write(trialPolicy, List.of( + "grant {", + " permission", + " org.postgresql.pljava.policy.TrialPolicy$Permission;", + "};" + )); + + PreparedStatement setVmOpts = c2.prepareStatement( + "SELECT null::pg_catalog.void" + + " FROM pg_catalog.set_config('pljava.vmoptions', ?, false)" + ); + + setVmOpts.setString(1, vmopts + + " -Dorg.postgresql.pljava.policy.trial=" + trialPolicy.toUri()); + + succeeding &= stateMachine( + "change pljava.vmoptions", + null, + + q(setVmOpts, setVmOpts::execute) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 
3 : false, + (o,p,q) -> null == o + ); + + PreparedStatement tryForbiddenRead = c2.prepareStatement( + "SELECT" + + " CASE WHEN javatest.java_getsystemproperty('java.home')" + + " OPERATOR(pg_catalog.=) ?" + + " THEN javatest.logmessage('INFO', 'trial policy test ok')" + + " ELSE javatest.logmessage('WARNING', 'trial policy test ng')" + + " END" + ); + + tryForbiddenRead.setString(1, System.getProperty("java.home")); + + succeeding &= stateMachine( + "try to read a forbidden property", + null, + + q(tryForbiddenRead, tryForbiddenRead::execute) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + (o,p,q) -> null == o + ); + // done with connection c2 + } + + /* + * Also confirm that the generated undeploy actions work. + */ + succeeding &= stateMachine( + "remove jar void result", + null, + + q(c, "SELECT sqlj.remove_jar('examples', true)") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + (o,p,q) -> null == o + ); + } + + /* + * Get another new connection and make sure the extension can be loaded + * in a non-superuser session. + */ + try ( Connection c = n1.connect() ) + { + succeeding &= stateMachine( + "become non-superuser", + null, + + q(c, + "CREATE ROLE alice;" + + "GRANT USAGE ON SCHEMA sqlj TO alice;" + + "SET SESSION AUTHORIZATION alice") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> null == o + ); + + succeeding &= stateMachine( + "load as non-superuser", + null, + + q(c, "SELECT null::pg_catalog.void FROM sqlj.get_classpath('public')") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 
3 : false, + (o,p,q) -> null == o + ); } } catch ( Throwable t ) { diff --git a/appveyor.yml b/appveyor.yml index 8fab11664..849223b7f 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -10,16 +10,22 @@ environment: JDK: 10 PG: 12 - SYS: MINGW - JDK: 14 + JDK: 11 + PG: 12 + - SYS: MINGW + JDK: 12 PG: 12 - SYS: MINGW JDK: 13 PG: 12 - SYS: MINGW - JDK: 12 + JDK: 14 PG: 12 - SYS: MINGW - JDK: 11 + JDK: 15 + PG: 12 + - SYS: MSVC + JDK: 15 PG: 12 - SYS: MSVC JDK: 14 @@ -82,8 +88,13 @@ test_script: @' boolean succeeding = false; // begin pessimistic + import static java.nio.file.Files.createTempFile + import static java.nio.file.Files.write + import java.nio.file.Path import static java.nio.file.Paths.get import java.sql.Connection + import java.sql.PreparedStatement + import java.sql.ResultSet import org.postgresql.pljava.packaging.Node import static org.postgresql.pljava.packaging.Node.q import static org.postgresql.pljava.packaging.Node.stateMachine @@ -188,6 +199,122 @@ test_script: // state 9: must be end of input (o,p,q) -> null == o ); + + /* + * Exercise TrialPolicy some. Need another connection to change + * vmoptions. Uses some example functions, so insert here before the + * test of undeploying the examples. + */ + try ( Connection c2 = n1.connect() ) + { + Path trialPolicy = + createTempFile(n1.data_dir().getParent(), "trial", "policy"); + + write(trialPolicy, List.of( + "grant {", + " permission", + " org.postgresql.pljava.policy.TrialPolicy$Permission;", + "};" + )); + + PreparedStatement setVmOpts = c2.prepareStatement( + "SELECT null::pg_catalog.void" + + " FROM pg_catalog.set_config('pljava.vmoptions', ?, false)" + ); + + setVmOpts.setString(1, vmopts + + " -Dorg.postgresql.pljava.policy.trial=" + trialPolicy.toUri()); + + succeeding &= stateMachine( + "change pljava.vmoptions", + null, + + q(setVmOpts, setVmOpts::execute) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 
1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + (o,p,q) -> null == o + ); + + PreparedStatement tryForbiddenRead = c2.prepareStatement( + "SELECT" + + " CASE WHEN javatest.java_getsystemproperty('java.home')" + + " OPERATOR(pg_catalog.=) ?" + + " THEN javatest.logmessage('INFO', 'trial policy test ok')" + + " ELSE javatest.logmessage('WARNING', 'trial policy test ng')" + + " END" + ); + + tryForbiddenRead.setString(1, System.getProperty("java.home")); + + succeeding &= stateMachine( + "try to read a forbidden property", + null, + + q(tryForbiddenRead, tryForbiddenRead::execute) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + (o,p,q) -> null == o + ); + // done with connection c2 + } + + /* + * Also confirm that the generated undeploy actions work. + */ + succeeding &= stateMachine( + "remove jar void result", + null, + + q(c, "SELECT sqlj.remove_jar('examples', true)") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + (o,p,q) -> null == o + ); + } + + /* + * Get another new connection and make sure the extension can be loaded + * in a non-superuser session. + */ + try ( Connection c = n1.connect() ) + { + succeeding &= stateMachine( + "become non-superuser", + null, + + q(c, + "CREATE ROLE alice;" + + "GRANT USAGE ON SCHEMA sqlj TO alice;" + + "SET SESSION AUTHORIZATION alice") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> null == o + ); + + succeeding &= stateMachine( + "load as non-superuser", + null, + + q(c, + "SELECT null::pg_catalog.void FROM sqlj.get_classpath('public')") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 
1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + (o,p,q) -> null == o + ); } } catch ( Throwable t ) { diff --git a/pljava-ant/pom.xml b/pljava-ant/pom.xml index 01ddaecbc..0f25aeb35 100644 --- a/pljava-ant/pom.xml +++ b/pljava-ant/pom.xml @@ -4,7 +4,7 @@ org.postgresql pljava.app - 1.6.0-SNAPSHOT + 1.6-SNAPSHOT pljava-ant PL/Java Ant tasks diff --git a/pljava-api/pom.xml b/pljava-api/pom.xml index f0da547ae..b655cb6a8 100644 --- a/pljava-api/pom.xml +++ b/pljava-api/pom.xml @@ -4,7 +4,7 @@ org.postgresql pljava.app - 1.6.0-SNAPSHOT + 1.6-SNAPSHOT pljava-api PL/Java API diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Aggregate.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Aggregate.java new file mode 100644 index 000000000..7479c17b5 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Aggregate.java @@ -0,0 +1,442 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.annotation; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Repeatable; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Declares a PostgreSQL aggregate. + *

+ * An aggregate function in PostgreSQL is defined by using + * {@code CREATE AGGREGATE} to specify its name and argument types, along with + * at least one "plan" for evaluating it, where the plan specifies at least: + * a data type to use for the accumulating state, and a function (here called + * "accumulate") called for each row to update the state. If the plan includes + * a function "finish", its return type is the return type of the aggregate; + * with no "finish" function, the state type is also the aggregate's return + * type. + *

+ * Optionally, a plan can include a "combine" function, which is passed two + * instances of the state type and combines them, to allow aggregate evaluation + * to be parallelized. The names "accumulate", "combine", and "finish" are not + * exactly as used in the PostgreSQL command (those are unpronounceable + * abbreviations), but follow the usage in {@code java.util.stream.Collector}, + * which should make them natural to Java programmers. PL/Java will generate the + * SQL with the unpronounceable names. + *

+ * If an aggregate function might be used in a window with a moving frame start, + * it can be declared with a second plan ({@code movingPlan}) that includes a + * "remove" function that may be called, passing values that were earlier + * accumulated into the state, to remove them again as the frame start advances + * past them. (Java's {@code Collector} has no equivalent of a "remove" + * function.) A "remove" function may only be specified (and must be specified) + * in a plan given as {@code movingPlan}. + *

+ * Any function referred to in a plan is specified by its name, optionally + * schema-qualified. Its argument types are not specified; they are implied by + * those declared for the aggregate itself. An "accumulate" function gets one + * argument of the state type, followed by all those given as {@code arguments}. + * The same is true of a "remove" function. A "combine" function is passed + * two arguments of the state type. + *

+ * A "finish" function has a first argument of the state type. If the aggregate + * is declared with any {@code directArguments}, those follow the state type. + * (Declaring {@code directArguments} makes the aggregate an "ordered-set + * aggregate", which could additionally have {@code hypothetical=true} to make + * it a "hypothetical-set aggregate", for which the PostgreSQL documentation + * covers the details.) If {@code polymorphic=true}, the "finish" function's + * argument list will end with {@code arguments.length} additional arguments; + * they will all be passed as {@code NULL} when the finisher is called, but will + * have the right run-time types, which may be necessary to resolve the + * finisher's return type, if polymorphic types are involved. + *

+ * If any of the functions or types mentioned in this declaration are also being + * generated into the same deployment descriptor, the {@code CREATE AGGREGATE} + * generated from this annotation will follow them. Other ordering dependencies, + * if necessary, can be explicitly arranged with {@code provides} and + * {@code requires}. + *

+ * While this annotation can generate {@code CREATE AGGREGATE} deployment + * commands with the features available in PostgreSQL, + * at present there are limits to which aggregate features can be implemented + * purely in PL/Java. In particular, PL/Java functions currently have no access + * to the PostgreSQL data structures needed for an ordered-set or + * hypothetical-set aggregate. Such an aggregate could be implemented by writing + * some of its support functions in another procedural language; this annotation + * could still be used to automatically generate the declaration. + * @author Chapman Flack + */ +@Documented +@Target({ElementType.TYPE,ElementType.METHOD}) +@Repeatable(Aggregate.Container.class) +@Retention(RetentionPolicy.CLASS) +public @interface Aggregate +{ + /** + * Declares the effect of the {@code finish} function in a {@code Plan}. + *

+ * If {@code READ_ONLY}, PostgreSQL can continue updating the same state + * with additional rows, and call the finisher again for updated results. + *

+ * If {@code SHAREABLE}, the state cannot be further updated after + * a finisher call, but finishers for other aggregates that use the same + * state representation (and are also {@code SHAREABLE}) can be called to + * produce the results for those aggregates. An example could be the several + * linear-regression-related aggregates, all of which can work from a state + * that contains the count of values, sum of values, and sum of squares. + *

+ * If {@code READ_WRITE}, no further use can be made of the state after + * the finisher has run. + */ + enum FinishEffect { READ_ONLY, SHAREABLE, READ_WRITE }; + + /** + * Specifies one "plan" for evaluating the aggregate; one must always be + * specified (as {@code plan}), and a second may be specified (as + * {@code movingPlan}). + *

+ * A plan must specify a data type {@code stateType} to hold the + * accumulating state, optionally an estimate of its expected size in bytes, + * and optionally its initial contents. The plan also specifies up to four + * functions {@code accumulate}, {@code combine}, {@code finish}, and + * {@code remove}. Only {@code accumulate} is always required; + * {@code remove} is required in a {@code movingPlan}, and otherwise not + * allowed. + *

+ * Each of the four functions may be specified as a single string "name", + * which will be leniently parsed as an optionally schema-qualified name, + * or as two strings {@code {"schema","local"}} with the schema made + * explicit. The two-string form with {@code ""} as the schema represents + * an explicitly non-schema-qualified name. + */ + @Documented + @Target({}) + @Retention(RetentionPolicy.CLASS) + @interface Plan + { + /** + * The data type to be used to hold the accumulating state. + *

+ * This will be the first argument type for all of the support functions + * except {@code deserialize} (both argument types for {@code combine}) + * and also, if there is no {@code finish} function, the result type + * of the aggregate. + */ + String stateType() default ""; + + /** + * An optional estimate of the size in bytes that the state may grow + * to occupy. + */ + int stateSize() default 0; + + /** + * An optional initial value for the state (which will otherwise be + * initially null). + *

+ * Must be a string the state type's text-input conversion would accept. + *

+ * Omitting the initial value only works if the {@code accumulate} + * function is {@code onNullInput=CALLED}, or if the aggregate's first + * argument type is the same as the state type. + */ + String initialState() default ""; + + /** + * Name of the function that will be called for each row being + * aggregated. + *

+ * The function named here must have an argument list that starts with + * one argument of the state type, followed by all of this aggregate's + * {@code arguments}. It does not receive the {@code directArguments}, + * if any. + */ + String[] accumulate() default {}; + + /** + * Name of an optional function to combine two instances of the state + * type. + *

+ * The function named here should be one that has two arguments, both + * of the state type, and returns the state type. + */ + String[] combine() default {}; + + /** + * Name of an optional function to produce the aggregate's result from + * the final value of the state; without this function, the aggregate's + * result type is the state type, and the result is simply the final + * value of the state. + *

+ * When this function is specified, its result type determines the + * result type of the aggregate. Its argument list signature is a single + * argument of the state type, followed by all the + * {@code directArguments} if any, followed (only if {@code polymorphic} + * is true) by {@code arguments.length} additional arguments for which + * nulls will be passed at runtime but with their resolved runtime + * types. + */ + String[] finish() default {}; + + /** + * Name of an optional function that can reverse the effect on the state + * of a row previously passed to {@code accumulate}. + *

+ * The function named here should have the same argument list signature + * as the {@code accumulate} function. + *

+ * Required in a {@code movingPlan}; not allowed otherwise. + */ + String[] remove() default {}; + + /** + * Whether the argument list for {@code finish} should be extended with + * slots corresponding to the aggregated {@code arguments}, all nulls at + * runtime but with their resolved runtime types. + */ + boolean polymorphic() default false; + + /** + * The effect of the {@code finish} function in this {@code Plan}. + *

+ * If {@code READ_ONLY}, PostgreSQL can continue updating the same + * state with additional rows, and call the finisher again for updated + * results. + *

+ * If {@code SHAREABLE}, the state cannot be further updated after a + * finisher call, but finishers for other aggregates that use the same + * state representation (and are also {@code SHAREABLE}) can be called + * to produce the results for those aggregates. An example could be the + * several linear-regression-related aggregates, all of which can work + * from a state that contains the count of values, sum of values, and + * sum of squares. + *

+ * If {@code READ_WRITE}, no further use can be made of the state after + * the finisher has run. + *

+ * Leaving this to default is not exactly equivalent to specifying the + * default value shown here. If left to default, it will be left + * unspecified in the generated {@code CREATE AGGREGATE}, and PostgreSQL + * will apply its default, which is {@code READ_ONLY} in the case of an + * ordinary aggregate, but {@code READ_WRITE} for an ordered-set or + * hypothetical-set aggregate. + */ + FinishEffect finishEffect() default FinishEffect.READ_ONLY; + + /** + * Name of a serializing function ({@code internal} to {@code bytea}), + * usable only if a {@link #combine() combine} function is specified and + * the aggregate's state type is {@code internal}. + *

+ * Not allowed in a {@code movingPlan}. Not allowed without + * {@code deserialize}. + */ + String[] serialize() default {}; + + /** + * Name of a deserializing function (({@code bytea}, {@code internal}) + * to {@code internal}), usable only if a {@code serialize} function is + * also specified. + *

+ * Not allowed in a {@code movingPlan}. + */ + String[] deserialize() default {}; + } + + /** + * Name for this aggregate. + *

+ * May be specified in explicit {@code {"schema","localname"}} form, or as + * a single string that will be leniently parsed as an optionally + * schema-qualified name. In the explicit form, {@code ""} as the schema + * will make the name explicitly unqualified (in case the local name might + * contain a dot and be misread as a qualified name). + *

+ * When this annotation is not placed on a method, there is no default, and + * a name must be supplied. When the annotation is on a method (which can be + * either the {@code accumulate} or the {@code finish} function for the + * aggregate), the default name will be the same as the SQL name given for + * the function. That is typically possible because the parameter signature + * for the aggregate function will not be the same as either the + * {@code accumulate} or the {@code finish} function. The exception is if + * the annotation is on the {@code finish} function and the aggregate has + * exactly one parameter of the same type as the state; in that case another + * name must be given here. + */ + String[] name() default {}; + + /** + * Names and types of the arguments to be aggregated: the ones passed to the + * {@code accumulate} function for each aggregated row. + *

+ * Each element is a name and a type specification, separated by whitespace. + * An element that begins with whitespace declares a parameter with no + * name, only a type. The name is an ordinary SQL identifier; if it would + * be quoted in SQL, naturally each double-quote must be represented as + * {@code \"} in Java. + *

+ * When this annotation does not appear on a method, there is no default, + * and arguments must be declared here. If the annotation appears on a + * method supplying the {@code accumulate} function, this element can be + * omitted, and the arguments will be those of the function (excepting the + * first one, which corresponds to the state). + */ + String[] arguments() default {}; + + /** + * Names and types of the "direct arguments" to an ordered-set or + * hypothetical-set aggregate (specifying this element is what makes an + * ordered-set aggregate, which will be a hypothetical-set aggregate if + * {@code hypothetical=true} is also supplied). + *

+ * Specified as for {@code arguments}. The direct arguments are not passed + * to the {@code accumulate} function for each aggregated row; they are only + * passed to the {@code finish} function when producing the result. + */ + String[] directArguments() default {}; + + /** + * Specify {@code true} in an ordered-set aggregate (one with + * {@code directArguments} specified) to make it a hypothetical-set + * aggregate. + *

+ * When {@code true}, the {@code directArguments} list must be at least as + * long as {@code arguments}, and its last {@code arguments.length} types + * must match {@code arguments} one-to-one. When the {@code finish} function + * is called, those last direct arguments will carry the caller-supplied + * values for the "hypothetical" row. + */ + boolean hypothetical() default false; + + /** + * Whether the aggregate has a variadic last argument. + *

+ * Specify as a single boolean, {@code variadic=true}, to declare an + * ordinary aggregate variadic. The last type of its declared + * {@code arguments} must then be either an array type, or + * {@code pg_catalog."any"} + *

+ * The form {@code variadic={boolean,boolean}} is for an ordered-set + * aggregate, which has both a list of {@code directArguments} (the first + * boolean) and its aggregated {@code arguments} (the second). For an + * ordered-set aggregate, {@code "any"} is the only allowed type for a + * variadic argument. + *

+ * When also {@code hypothetical} is true, the requirement that the + * {@code directArguments} have a tail matching the {@code arguments} + * implies that the two lists must both or neither be variadic. + */ + boolean[] variadic() default {}; + + /** + * The {@link Plan Plan} normally to be used for evaluating this aggregate, + * except possibly in a moving-window context if {@code movingPlan} is also + * supplied. + *

+ * Though declared as an array, only one plan is allowed here. It may not + * name a {@code remove} function; only a {@code movingPlan} can do that. + * This plan can be omitted only if the {@code @Aggregate} annotation + * appears on a Java method intended as the {@code accumulate} function and + * the rest of the plan is all to be inferred or defaulted. + */ + Plan[] plan() default {}; + + + /** + * An optional {@link Plan Plan} that may be more efficient for evaluating + * this aggregate in a moving-window context. + *

+ * Though declared as an array, only one plan is allowed here. It must + * name a {@code remove} function. + *

+ * A {@code movingPlan} may not have {@code serialize}/{@code deserialize} + * functions; only {@code plan} can have those. + */ + Plan[] movingPlan() default {}; + + /** + * Parallel-safety declaration for this aggregate; PostgreSQL's planner + * will consult this only, not the declarations on the individual supporting + * functions. + *

+ * See {@link Function#parallel() Function.parallel} for the implications. + * In PL/Java, any setting other than {@code UNSAFE} should be considered + * experimental. + */ + Function.Parallel parallel() default Function.Parallel.UNSAFE; + + /** + * Name of an operator (declared as either the less-than or greater-than + * strategy member of a {@code btree} operator class) such that the result + * of this aggregate is the same as the first result from {@code ORDER BY} + * over the aggregated values, using this operator. + *

+ * May be specified in explicit {@code {"schema","localname"}} form, or as + * a single string that will be leniently parsed as an optionally + * schema-qualified name. In the explicit form, {@code ""} as the schema + * will make the name explicitly unqualified. The operator will be assumed + * to have two operands of the same type as the argument to the aggregate + * (which must have exactly one aggregated argument, and no direct + * arguments). The operator's membership in a {@code btree} operator class + * is not (currently) checked at compile time, but if it does not hold at + * run time, the optimization will not be used. + */ + String[] sortOperator() default {}; + + /** + * One or more arbitrary labels that will be considered 'provided' by the + * object carrying this annotation. The deployment descriptor will be + * generated in such an order that other objects that 'require' labels + * 'provided' by this come later in the output for install actions, and + * earlier for remove actions. + */ + String[] provides() default {}; + + /** + * One or more arbitrary labels that will be considered 'required' by the + * object carrying this annotation. The deployment descriptor will be + * generated in such an order that other objects that 'provide' labels + * 'required' by this come earlier in the output for install actions, and + * later for remove actions. + */ + String[] requires() default {}; + + /** + * The {@code } to be used around SQL code generated + * for this aggregate. Defaults to {@code PostgreSQL}. Set explicitly to + * {@code ""} to emit code not wrapped in an {@code }. + */ + String implementor() default ""; + + /** + * A comment to be associated with the aggregate. The default is no comment + * if the annotation does not appear on a method, or the first sentence of + * the method's Javadoc comment, if any, if it does. + */ + String comment() default ""; + + /** + * @hidden container type allowing Cast to be repeatable. 
+ */ + @Documented + @Target(ElementType.TYPE) + @Retention(RetentionPolicy.CLASS) + @interface Container + { + Aggregate[] value(); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/BaseUDT.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/BaseUDT.java index 7f65883ac..87fb5550a 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/BaseUDT.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/BaseUDT.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015- Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -11,6 +11,7 @@ */ package org.postgresql.pljava.annotation; +import java.lang.annotation.Documented; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -64,7 +65,7 @@ * be used in their {@code @Function} annotations and this annotation, to make * the order come out right. */ -@Target(ElementType.TYPE) @Retention(RetentionPolicy.CLASS) +@Target(ElementType.TYPE) @Retention(RetentionPolicy.CLASS) @Documented public @interface BaseUDT { /** diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Cast.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Cast.java new file mode 100644 index 000000000..140993f85 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Cast.java @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.annotation; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Repeatable; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Declares a PostgreSQL {@code CAST}. + *

+ * May annotate a Java method (which should also carry a + * {@link Function @Function} annotation, making it a PostgreSQL function), + * or a class or interface (just to have a place to put it when not directly + * associated with a method). + *

+ * If not applied to a method, must supply {@code path=} specifying + * {@code BINARY} or {@code INOUT}. + *

+ * The source and target types must be specified with {@code from} and + * {@code to}, unless the annotation appears on a method, in which case these + * default to the first parameter and return types of the function, + * respectively. + *

+ * The cast will, by default, have to be applied explicitly, unless + * {@code application=ASSIGNMENT} or {@code application=IMPLICIT} is given. + * + * @author Chapman Flack + */ +@Documented +@Target({ElementType.METHOD, ElementType.TYPE}) +@Repeatable(Cast.Container.class) +@Retention(RetentionPolicy.CLASS) +public @interface Cast +{ + /** + * When this cast can be applied: only in explicit form, when used in + * assignment context, or implicitly whenever needed. + */ + enum Application { EXPLICIT, ASSIGNMENT, IMPLICIT }; + + /** + * A known conversion path when a dedicated function is not supplied: + * {@code BINARY} for two types that are known to have the same internal + * representation, or {@code INOUT} to invoke the first type's text-output + * function followed by the second type's text-input function. + */ + enum Path { BINARY, INOUT }; + + /** + * The source type to be cast. Will default to the first parameter type of + * the associated function, if known. + *

+ * PostgreSQL will allow this type and the function's first parameter type + * to differ, if there is an existing binary cast between them. That cannot + * be checked at compile time, so a cast with a different type given here + * might successfully compile but fail to deploy in PostgreSQL. + */ + String from() default ""; + + /** + * The target type to cast to. Will default to the return type of + * the associated function, if known. + *

+ * PostgreSQL will allow this type and the function's return type + * to differ, if there is an existing binary cast between them. That cannot + * be checked at compile time, so a cast with a different type given here + * might successfully compile but fail to deploy in PostgreSQL. + */ + String to() default ""; + + /** + * A stock conversion path when a dedicated function is not supplied: + * {@code BINARY} for two types that are known to have the same internal + * representation, or {@code INOUT} to invoke the first type's text-output + * function followed by the second type's text-input function. + *

+ * To declare an {@code INOUT} cast, {@code with=INOUT} must appear + * explicitly; the default value is treated as unspecified. + */ + Path path() default Path.INOUT; + + /** + * When this cast can be applied: only in explicit form, when used in + * assignment context, or implicitly whenever needed. + */ + Application application() default Application.EXPLICIT; + + /** + * One or more arbitrary labels that will be considered 'provided' by the + * object carrying this annotation. The deployment descriptor will be + * generated in such an order that other objects that 'require' labels + * 'provided' by this come later in the output for install actions, and + * earlier for remove actions. + */ + String[] provides() default {}; + + /** + * One or more arbitrary labels that will be considered 'required' by the + * object carrying this annotation. The deployment descriptor will be + * generated in such an order that other objects that 'provide' labels + * 'required' by this come earlier in the output for install actions, and + * later for remove actions. + */ + String[] requires() default {}; + + /** + * The {@code } to be used around SQL code generated + * for this cast. Defaults to {@code PostgreSQL}. Set explicitly to + * {@code ""} to emit code not wrapped in an {@code }. + */ + String implementor() default ""; + + /** + * A comment to be associated with the cast. If left to default, and the + * annotated Java construct has a doc comment, its first sentence will be + * used. If an empty string is explicitly given, no comment will be set. + */ + String comment() default ""; + + /** + * @hidden container type allowing Cast to be repeatable. 
+ */ + @Documented + @Target({ElementType.METHOD, ElementType.TYPE}) + @Retention(RetentionPolicy.CLASS) + @interface Container + { + Cast[] value(); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Function.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Function.java index e37e224e5..9e9ad07d3 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Function.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Function.java @@ -78,9 +78,27 @@ enum Parallel { UNSAFE, RESTRICTED, SAFE }; * {@link org.postgresql.pljava.ResultSetProvider ResultSetProvider}, * or can be used to specify the return type of any function if the * compiler hasn't inferred it correctly. + *

+ * Only one of {@code type} or {@code out} may appear. */ String type() default ""; + /** + * The result column names and types of a composite-returning function. + *

+ * This is for a function defining its own one-off composite type + * (declared with {@code OUT} parameters). If the function returns some + * composite type known to the catalog, simply use {@code type} and the name + * of that type. + *

+ * Each element is a name and a type specification, separated by whitespace. + * An element that begins with whitespace declares an output column with no + * name, only a type. The name is an ordinary SQL identifier; if it would + * be quoted in SQL, naturally each double-quote must be represented as + * {@code \"} in Java. + */ + String[] out() default {}; + /** * The name of the function. This is optional. The default is * to use the name of the annotated method. @@ -173,7 +191,8 @@ enum Parallel { UNSAFE, RESTRICTED, SAFE }; String language() default ""; /** - * Whether the function is UNSAFE to use in any parallel query plan at all + * Whether the function is UNSAFE to use in any + * parallel query plan at all * (the default), or avoids all disqualifying operations and so is SAFE to * execute anywhere in a parallel plan, or, by avoiding some such * operations, may appear in parallel plans but RESTRICTED to execute only diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/MappedUDT.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/MappedUDT.java index 69563f849..fcd168a5c 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/MappedUDT.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/MappedUDT.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015- Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -11,6 +11,7 @@ */ package org.postgresql.pljava.annotation; +import java.lang.annotation.Documented; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -42,7 +43,7 @@ * of the class being annotated, and found in {@link #schema schema} if * specified, or by following the search path) to the annotated class. 
*/ -@Target(ElementType.TYPE) @Retention(RetentionPolicy.CLASS) +@Target(ElementType.TYPE) @Retention(RetentionPolicy.CLASS) @Documented public @interface MappedUDT { /** diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Operator.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Operator.java new file mode 100644 index 000000000..cf2b47717 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Operator.java @@ -0,0 +1,337 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.annotation; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Repeatable; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Declares a PostgreSQL {@code OPERATOR}. + *

+ * May annotate a Java method (which should also carry a + * {@link Function @Function} annotation, making it a PostgreSQL function), + * or a class or interface (just to have a place to put it when not directly + * annotating a method). + * + * @author Chapman Flack + */ +@Documented +@Target({ElementType.METHOD, ElementType.TYPE}) +@Repeatable(Operator.Container.class) +@Retention(RetentionPolicy.CLASS) +public @interface Operator +{ + /** + * Distinguished value usable for {@link #commutator commutator=} to + * indicate that an operator is its own commutator without having to + * repeat its schema and name. + *

+ * This value strictly declares that the operator is its own commutator, and + * therefore is only allowed for an operator with two operands of the same + * type. If the types are different, a commutator with the same + * name would in fact be a different operator with the operand + * types exchanged; use {@link #TWIN TWIN} for that. + */ + String SELF = ".self."; + + /** + * Distinguished value usable for {@link #commutator commutator=} to + * indicate that an operator's commutator is another operator with the same + * schema and name, without having to repeat them. + *

+ * This value strictly declares that the commutator is a different + * operator, and therefore is only allowed for an operator with two + * operands of different types. As commutators, this operator and its twin + * will have those operand types in opposite orders, so PostgreSQL + * overloading will allow them to have the same name. + *

+ * This value may also be used with {@link #synthetic synthetic=} to give + * the synthesized function the same schema and name as the one it is based + * on; this also is possible only for a function synthesized by commutation + * where the commuted parameter types differ. + */ + String TWIN = ".twin."; + + /** + * Name for this operator. + *

+ * May be specified in explicit {@code {"schema","operatorname"}} form, or + * as a single string that will be leniently parsed as an optionally + * schema-qualified name. In the explicit form, {@code ""} as the schema + * will make the name explicitly unqualified. + */ + String[] name(); + + /** + * The type of the operator's left operand, if any. + * Will default to the first parameter type of an associated two-parameter + * function, or none for an associated one-parameter function. + */ + String left() default ""; + + /** + * The type of the operator's right operand, if any. + * Will default to the second parameter type of an associated two-parameter + * function, or the parameter type for an associated one-parameter function. + */ + String right() default ""; + + /** + * Name of the function backing the operator; may be omitted if this + * annotation appears on a method. + *

+ * The function named here must take one parameter of the matching type if + * only one of {@code left} or {@code right} is specified, or the + * {@code left} and {@code right} types in that order if both are present. + */ + String[] function() default {}; + + /** + * Name of a function to be synthesized by PL/Java based on the method this + * annotation appears on and this operator's {@code commutator} or + * {@code negator} relationship to another operator declared on the same + * method. + *

+ * Only allowed in an annotation on a Java method, and where + * {@code function} is not specified. + *

+ * The special value {@link #TWIN TWIN} is allowed, to avoid repeating the + * schema and name when the desired name for the synthesized function is the + * same as the one it is derived from (which is only possible if the + * derivation involves commuting the arguments and their types are + * different, so the two functions can be distinguished by overloading). A + * typical case would be the twin of a cross-type function like {@code add} + * that is commutative, so using the same name makes sense. + */ + String[] synthetic() default {}; + + /** + * Name of an operator that is the commutator of this one. + *

+ * Specified in the same ways as {@code name}. The value + * {@link #SELF SELF} can be used to avoid repeating the schema and name + * for the common case of an operator that is its own commutator. The value + * {@link #TWIN TWIN} can likewise declare that the commutator is the + * different operator with the same name and schema but the operand types + * (which must be different) reversed. A typical case would be the twin of a + * cross-type operator like {@code +} that is commutative, so using the same + * name makes sense. + */ + String[] commutator() default {}; + + /** + * Name of an operator that is the negator of this one. + *

+ * Specified in the same ways as {@code name}. + */ + String[] negator() default {}; + + /** + * Whether this operator can be used in computing a hash join. + *

+ * Only sensible for a boolean-valued binary operator, which must have a + * commutator in the same hash index operator family, with the underlying + * functions marked {@link Function.Effects#IMMUTABLE} or + * {@link Function.Effects#STABLE}. + */ + boolean hashes() default false; + + /** + * Whether this operator can be used in computing a merge join. + *

+ * Only sensible for a boolean-valued binary operator, which must have a + * commutator also appearing as an equality member in the same btree index + * operator family, with the underlying functions marked + * {@link Function.Effects#IMMUTABLE} or {@link Function.Effects#STABLE}. + */ + boolean merges() default false; + + /** + * Name of a function that can estimate the selectivity of this operator + * when used in a {@code WHERE} clause. + *

+ * Specified in the same ways as {@code function}. + *

+ * A custom estimator is a complex undertaking (and, at present, requires + * a language other than Java), but several predefined ones can be found in + * {@link SelectivityEstimators}. + */ + String[] restrict() default {}; + + /** + * Name of a function that can estimate the selectivity of this operator + * when used in a join. + *

+ * Specified in the same ways as {@code function}. + *

+ * A custom estimator is a complex undertaking (and, at present, requires + * a language other than Java), but several predefined ones can be found in + * {@link SelectivityEstimators}. + */ + String[] join() default {}; + + /** + * One or more arbitrary labels that will be considered 'provided' by the + * object carrying this annotation. The deployment descriptor will be + * generated in such an order that other objects that 'require' labels + * 'provided' by this come later in the output for install actions, and + * earlier for remove actions. + */ + String[] provides() default {}; + + /** + * One or more arbitrary labels that will be considered 'required' by the + * object carrying this annotation. The deployment descriptor will be + * generated in such an order that other objects that 'provide' labels + * 'required' by this come earlier in the output for install actions, and + * later for remove actions. + */ + String[] requires() default {}; + + /** + * The {@code } to be used around SQL code generated + * for this operator. Defaults to {@code PostgreSQL}. Set explicitly to + * {@code ""} to emit code not wrapped in an {@code }. + */ + String implementor() default ""; + + /** + * A comment to be associated with the operator. If left to default, and the + * annotated Java construct has a doc comment, its first sentence will be + * used. If an empty string is explicitly given, no comment will be set. + */ + String comment() default ""; + + /** + * Names of several functions predefined in PostgreSQL for estimating the + * selectivity of operators in restriction clauses or joins. + */ + interface SelectivityEstimators + { + /** + * A restriction-selectivity estimator suitable for an operator + * with rather high selectivity typical of an operator like {@code =}. + */ + String EQSEL = "pg_catalog.eqsel"; + + /** + * A restriction-selectivity estimator suitable for an operator + * somewhat less strict than a typical {@code =} operator. 
+ */ + String MATCHINGSEL = "pg_catalog.matchingsel"; + + /** + * A restriction-selectivity estimator suitable for an operator + * with rather low selectivity typical of an operator like {@code <>}. + */ + String NEQSEL = "pg_catalog.neqsel"; + + /** + * A restriction-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code <}. + */ + String SCALARLTSEL = "pg_catalog.scalarltsel"; + + /** + * A restriction-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code <=}. + */ + String SCALARLESEL = "pg_catalog.scalarlesel"; + + /** + * A restriction-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code >}. + */ + String SCALARGTSEL = "pg_catalog.scalargtsel"; + + /** + * A restriction-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code >=}. + */ + String SCALARGESEL = "pg_catalog.scalargesel"; + + /** + * A join-selectivity estimator suitable for an operator + * with rather high selectivity typical of an operator like {@code =}. + */ + String EQJOINSEL = "pg_catalog.eqjoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * somewhat less strict than a typical {@code =} operator. + */ + String MATCHINGJOINSEL = "pg_catalog.matchingjoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * with rather low selectivity typical of an operator like {@code <>}. + */ + String NEQJOINSEL = "pg_catalog.neqjoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code <}. + */ + String SCALARLTJOINSEL = "pg_catalog.scalarltjoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code <=}. 
+ */ + String SCALARLEJOINSEL = "pg_catalog.scalarlejoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code >}. + */ + String SCALARGTJOINSEL = "pg_catalog.scalargtjoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code >=}. + */ + String SCALARGEJOINSEL = "pg_catalog.scalargejoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * doing 2-D area-based comparisons. + */ + String AREAJOINSEL = "pg_catalog.areajoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * doing 2-D position-based comparisons. + */ + String POSITIONJOINSEL = "pg_catalog.positionjoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * doing 2-D containment-based comparisons. + */ + String CONTJOINSEL = "pg_catalog.contjoinsel"; + } + + /** + * @hidden container type allowing Operator to be repeatable. + */ + @Documented + @Target({ElementType.METHOD, ElementType.TYPE}) + @Retention(RetentionPolicy.CLASS) + @interface Container + { + Operator[] value(); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLAction.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLAction.java index edbf63feb..face77719 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLAction.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLAction.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -14,6 +14,7 @@ import java.lang.annotation.Documented; import java.lang.annotation.ElementType; +import java.lang.annotation.Repeatable; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; @@ -35,6 +36,7 @@ */ @Documented @Target({ElementType.PACKAGE,ElementType.TYPE}) +@Repeatable(SQLActions.class) @Retention(RetentionPolicy.CLASS) public @interface SQLAction { diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLActions.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLActions.java index bc618b4a8..753e1df25 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLActions.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLActions.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -21,7 +21,11 @@ /** * Container for multiple {@link SQLAction} annotations (in case it is * convenient to hang more than one on a given program element). - * + *

+ * This container annotation is documented for historical reasons (it existed + * in PL/Java versions targeting earlier Java versions than 8). In new code, it + * would be more natural to simply hang more than one {@code SQLAction} + * annotation directly on a program element. * @author Thomas Hallgren - pre-Java6 version * @author Chapman Flack (Purdue Mathematics) - updated to Java6, * added SQLActions diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Trigger.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Trigger.java index ab037ca6b..5c6293893 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Trigger.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Trigger.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -9,9 +9,11 @@ * Contributors: * Tada AB * Purdue University + * Chapman Flack */ package org.postgresql.pljava.annotation; +import java.lang.annotation.Documented; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; @@ -40,7 +42,7 @@ * the complete transition tables on each invocation. 
* @author Thomas Hallgren */ -@Target({}) @Retention(RetentionPolicy.CLASS) +@Target({}) @Retention(RetentionPolicy.CLASS) @Documented public @interface Trigger { /** diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessor.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessor.java index 63c716702..a2f9fcd44 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessor.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessor.java @@ -45,25 +45,35 @@ import java.util.Collections; import static java.util.Collections.unmodifiableSet; import java.util.Comparator; +import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.IdentityHashMap; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; +import java.util.ListIterator; import java.util.Locale; import java.util.Map; import java.util.NoSuchElementException; +import java.util.Objects; import static java.util.Objects.hash; import static java.util.Objects.requireNonNull; import java.util.PriorityQueue; import java.util.Queue; import java.util.Set; +import java.util.function.BiConsumer; import java.util.function.Supplier; +import static java.util.function.UnaryOperator.identity; import java.util.stream.Stream; +import static java.util.stream.Collectors.groupingBy; import static java.util.stream.Collectors.joining; +import static java.util.stream.Collectors.mapping; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toSet; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -79,6 +89,7 @@ import javax.lang.model.element.ElementKind; import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.Modifier; +import javax.lang.model.element.ModuleElement; import javax.lang.model.element.NestingKind; import 
javax.lang.model.element.Name; import javax.lang.model.element.TypeElement; @@ -104,7 +115,10 @@ import org.postgresql.pljava.ResultSetProvider; import org.postgresql.pljava.TriggerData; +import org.postgresql.pljava.annotation.Aggregate; +import org.postgresql.pljava.annotation.Cast; import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.Operator; import org.postgresql.pljava.annotation.SQLAction; import org.postgresql.pljava.annotation.SQLActions; import org.postgresql.pljava.annotation.SQLType; @@ -214,30 +228,42 @@ class DDRProcessorImpl // Our own annotations // final TypeElement AN_FUNCTION; - final TypeElement AN_SQLACTION; - final TypeElement AN_SQLACTIONS; final TypeElement AN_SQLTYPE; final TypeElement AN_TRIGGER; final TypeElement AN_BASEUDT; final TypeElement AN_MAPPEDUDT; + final TypeElement AN_SQLACTION; + final TypeElement AN_SQLACTIONS; + final TypeElement AN_CAST; + final TypeElement AN_CASTS; + final TypeElement AN_AGGREGATE; + final TypeElement AN_AGGREGATES; + final TypeElement AN_OPERATOR; + final TypeElement AN_OPERATORS; // Certain familiar DBTypes (capitalized as this file historically has) // + final DBType DT_BOOLEAN = new DBType.Reserved("boolean"); + final DBType DT_INTEGER = new DBType.Reserved("integer"); final DBType DT_RECORD = new DBType.Named( Identifier.Qualified.nameFromJava("pg_catalog.RECORD")); final DBType DT_TRIGGER = new DBType.Named( Identifier.Qualified.nameFromJava("pg_catalog.trigger")); final DBType DT_VOID = new DBType.Named( Identifier.Qualified.nameFromJava("pg_catalog.void")); + final DBType DT_ANY = new DBType.Named( + Identifier.Qualified.nameFromJava("pg_catalog.\"any\"")); + final DBType DT_BYTEA = new DBType.Named( + Identifier.Qualified.nameFromJava("pg_catalog.bytea")); + final DBType DT_INTERNAL = new DBType.Named( + Identifier.Qualified.nameFromJava("pg_catalog.internal")); // Function signatures for certain known functions // final DBType[] SIG_TYPMODIN = { 
DBType.fromSQLTypeAnnotation("pg_catalog.cstring[]") }; - final DBType[] SIG_TYPMODOUT = - { DBType.fromSQLTypeAnnotation("integer") }; - final DBType[] SIG_ANALYZE = - { DBType.fromSQLTypeAnnotation("pg_catalog.internal") }; + final DBType[] SIG_TYPMODOUT = { DT_INTEGER }; + final DBType[] SIG_ANALYZE = { DT_INTERNAL }; DDRProcessorImpl( ProcessingEnvironment processingEnv) { @@ -286,35 +312,37 @@ class DDRProcessorImpl snippetTiebreaker = reproducible ? new SnippetTiebreaker() : null; - TY_ITERATOR = typu.getDeclaredType( - elmu.getTypeElement( java.util.Iterator.class.getName())); - TY_OBJECT = typu.getDeclaredType( - elmu.getTypeElement( Object.class.getName())); - TY_RESULTSET = typu.getDeclaredType( - elmu.getTypeElement( java.sql.ResultSet.class.getName())); - TY_RESULTSETPROVIDER = typu.getDeclaredType( - elmu.getTypeElement( ResultSetProvider.class.getName())); - TY_RESULTSETHANDLE = typu.getDeclaredType( - elmu.getTypeElement( ResultSetHandle.class.getName())); - TY_SQLDATA = typu.getDeclaredType( - elmu.getTypeElement( SQLData.class.getName())); - TY_SQLINPUT = typu.getDeclaredType( - elmu.getTypeElement( SQLInput.class.getName())); - TY_SQLOUTPUT = typu.getDeclaredType( - elmu.getTypeElement( SQLOutput.class.getName())); - TY_STRING = typu.getDeclaredType( - elmu.getTypeElement( String.class.getName())); - TY_TRIGGERDATA = typu.getDeclaredType( - elmu.getTypeElement( TriggerData.class.getName())); - TY_VOID = typu.getNoType( TypeKind.VOID); + TY_ITERATOR = declaredTypeForClass(java.util.Iterator.class); + TY_OBJECT = declaredTypeForClass(Object.class); + TY_RESULTSET = declaredTypeForClass(java.sql.ResultSet.class); + TY_RESULTSETPROVIDER = declaredTypeForClass(ResultSetProvider.class); + TY_RESULTSETHANDLE = declaredTypeForClass(ResultSetHandle.class); + TY_SQLDATA = declaredTypeForClass(SQLData.class); + TY_SQLINPUT = declaredTypeForClass(SQLInput.class); + TY_SQLOUTPUT = declaredTypeForClass(SQLOutput.class); + TY_STRING = 
declaredTypeForClass(String.class); + TY_TRIGGERDATA = declaredTypeForClass(TriggerData.class); + TY_VOID = typu.getNoType(TypeKind.VOID); AN_FUNCTION = elmu.getTypeElement( Function.class.getName()); - AN_SQLACTION = elmu.getTypeElement( SQLAction.class.getName()); - AN_SQLACTIONS = elmu.getTypeElement( SQLActions.class.getName()); AN_SQLTYPE = elmu.getTypeElement( SQLType.class.getName()); AN_TRIGGER = elmu.getTypeElement( Trigger.class.getName()); AN_BASEUDT = elmu.getTypeElement( BaseUDT.class.getName()); AN_MAPPEDUDT = elmu.getTypeElement( MappedUDT.class.getName()); + + // Repeatable annotations and their containers. + // + AN_SQLACTION = elmu.getTypeElement( SQLAction.class.getName()); + AN_SQLACTIONS = elmu.getTypeElement( SQLActions.class.getName()); + AN_CAST = elmu.getTypeElement( Cast.class.getName()); + AN_CASTS = elmu.getTypeElement( + Cast.Container.class.getCanonicalName()); + AN_AGGREGATE = elmu.getTypeElement( Aggregate.class.getName()); + AN_AGGREGATES = elmu.getTypeElement( + Aggregate.Container.class.getCanonicalName()); + AN_OPERATOR = elmu.getTypeElement( Operator.class.getName()); + AN_OPERATORS = elmu.getTypeElement( + Operator.Container.class.getCanonicalName()); } void msg( Kind kind, String fmt, Object... args) @@ -339,6 +367,52 @@ void msg( Kind kind, Element e, AnnotationMirror a, AnnotationValue v, msgr.printMessage( kind, String.format( fmt, args), e, a, v); } + /** + * Map a {@code Class} to a {@code TypeElement} and from there to a + * {@code DeclaredType}. + *

+ * This needs to work around some weird breakage in javac 10 and 11 when + * given a {@code --release} option naming an earlier release, as described + * in commit c763cee. The version of of {@code getTypeElement} with a module + * parameter is needed then, because the other version will go bonkers and + * think it found the class in every module that transitively requires + * its actual module and then return null because the result wasn't + * unique. That got fixed in Java 12, but because 11 is the LTS release and + * there won't be another for a while yet, it is better to work around the + * issue here. + *

+ * If not supporting Java 10 or 11, this could be simplified to + * {@code typu.getDeclaredType(elmu.getTypeElement(className))}. + */ + private DeclaredType declaredTypeForClass(Class clazz) + { + String className = clazz.getName(); + String moduleName = clazz.getModule().getName(); + + TypeElement e; + + if ( null == moduleName ) + e = elmu.getTypeElement(className); + else + { + ModuleElement m = elmu.getModuleElement(moduleName); + if ( null == m ) + e = elmu.getTypeElement(className); + else + e = elmu.getTypeElement(m, className); + } + + requireNonNull(e, + () -> "unexpected failure to resolve TypeElement " + className); + + DeclaredType t = typu.getDeclaredType(e); + + requireNonNull(t, + () -> "unexpected failure to resolve DeclaredType " + e); + + return t; + } + /** * Key usable in a mapping from (Object, Snippet-subtype) to Snippet. * Because there's no telling in which order a Map implementation will @@ -377,7 +451,13 @@ public int hashCode() * one round), keyed by the object for which each snippet has been * generated. */ - Map snippets = new HashMap<>(); + /* + * This is a LinkedHashMap so that the order of handling annotation types + * in process() below will be preserved in calling their characterize() + * methods at end-of-round, and so, for example, characterize() on a Cast + * can use values set by characterize() on an associated Function. + */ + Map snippets = new LinkedHashMap<>(); S getSnippet(Object o, Class c, Supplier ctor) { @@ -405,10 +485,14 @@ void putSnippet( Object o, Snippet s) /** * Map from each arbitrary provides/requires label to the snippet - * that 'provides' it. Has to be out here as an instance field for the - * same reason {@code snippetVPairs} does. + * that 'provides' it (snippets, in some cases). Has to be out here as an + * instance field for the same reason {@code snippetVPairs} does. + *

+ * Originally limited each tag to have only one provider; that is still + * enforced for implicitly-generated tags, but relaxed for explicit ones + * supplied in annotations, hence the list. */ - Map> provider = new HashMap<>(); + Map>> provider = new HashMap<>(); /** * Find the elements in each round that carry any of the annotations of @@ -419,9 +503,11 @@ boolean process( Set tes, RoundEnvironment re) { boolean functionPresent = false; boolean sqlActionPresent = false; - boolean sqlActionsPresent = false; boolean baseUDTPresent = false; boolean mappedUDTPresent = false; + boolean castPresent = false; + boolean aggregatePresent = false; + boolean operatorPresent = false; boolean willClaim = true; @@ -429,16 +515,20 @@ boolean process( Set tes, RoundEnvironment re) { if ( AN_FUNCTION.equals( te) ) functionPresent = true; - else if ( AN_SQLACTION.equals( te) ) - sqlActionPresent = true; - else if ( AN_SQLACTIONS.equals( te) ) - sqlActionsPresent = true; else if ( AN_BASEUDT.equals( te) ) baseUDTPresent = true; else if ( AN_MAPPEDUDT.equals( te) ) mappedUDTPresent = true; else if ( AN_SQLTYPE.equals( te) ) ; // these are handled within FunctionImpl + else if ( AN_SQLACTION.equals( te) || AN_SQLACTIONS.equals( te) ) + sqlActionPresent = true; + else if ( AN_CAST.equals( te) || AN_CASTS.equals( te) ) + castPresent = true; + else if ( AN_AGGREGATE.equals( te) || AN_AGGREGATES.equals( te) ) + aggregatePresent = true; + else if ( AN_OPERATOR.equals( te) || AN_OPERATORS.equals( te) ) + operatorPresent = true; else { msg( Kind.WARNING, te, @@ -461,14 +551,31 @@ else if ( AN_SQLTYPE.equals( te) ) processFunction( e); if ( sqlActionPresent ) - for ( Element e : re.getElementsAnnotatedWith( AN_SQLACTION) ) - processSQLAction( e); - - if ( sqlActionsPresent ) - for ( Element e : re.getElementsAnnotatedWith( AN_SQLACTIONS) ) - processSQLActions( e); - - tmpr.workAroundJava7Breakage(); // perhaps it will be fixed in Java 9? 
+ for ( Element e + : re.getElementsAnnotatedWithAny( AN_SQLACTION, AN_SQLACTIONS) ) + processRepeatable( + e, AN_SQLACTION, AN_SQLACTIONS, SQLActionImpl.class, null); + + if ( castPresent ) + for ( Element e + : re.getElementsAnnotatedWithAny( AN_CAST, AN_CASTS) ) + processRepeatable( + e, AN_CAST, AN_CASTS, CastImpl.class, null); + + if ( operatorPresent ) + for ( Element e + : re.getElementsAnnotatedWithAny( AN_OPERATOR, AN_OPERATORS) ) + processRepeatable( + e, AN_OPERATOR, AN_OPERATORS, OperatorImpl.class, + this::operatorPreSynthesize); + + if ( aggregatePresent ) + for ( Element e + : re.getElementsAnnotatedWithAny( AN_AGGREGATE, AN_AGGREGATES) ) + processRepeatable( + e, AN_AGGREGATE, AN_AGGREGATES, AggregateImpl.class, null); + + tmpr.workAroundJava7Breakage(); // perhaps to be fixed in Java 9? nope. if ( ! re.processingOver() ) defensiveEarlyCharacterize(); @@ -492,13 +599,24 @@ void defensiveEarlyCharacterize() { for ( Snippet snip : snippets.values() ) { - if ( ! snip.characterize() ) - continue; - VertexPair v = new VertexPair<>( snip); - snippetVPairs.add( v); - for ( DependTag s : snip.provideTags() ) - if ( null != provider.put( s, v) ) - msg( Kind.ERROR, "tag %s has more than one provider", s); + Set ready = snip.characterize(); + for ( Snippet readySnip : ready ) + { + VertexPair v = new VertexPair<>( readySnip); + snippetVPairs.add( v); + for ( DependTag t : readySnip.provideTags() ) + { + List> ps = + provider.computeIfAbsent(t, k -> new ArrayList<>()); + /* + * Explicit tags are allowed more than one provider. + */ + if ( t instanceof DependTag.Explicit || ps.isEmpty() ) + ps.add(v); + else + msg(Kind.ERROR, "tag %s has more than one provider", t); + } + } } snippets.clear(); } @@ -516,7 +634,7 @@ void generateDescriptor() for ( VertexPair v : snippetVPairs ) { - VertexPair p; + List> ps; /* * First handle the implicit requires(implementor()). 
This is unlike @@ -529,23 +647,26 @@ void generateDescriptor() DependTag imp = v.payload().implementorTag(); if ( null != imp ) { - p = provider.get( imp); - if ( null != p ) + ps = provider.get( imp); + if ( null != ps ) { fwdConsumers.add( imp); revConsumers.add( imp); - p.fwd.precede( v.fwd); - p.rev.precede( v.rev); - - /* - * A snippet providing an implementor tag probably has no - * undeployStrings, because its deployStrings should be used - * on both occasions; if so, replace it with a proxy that - * returns deployStrings for undeployStrings. - */ - if ( 0 == p.rev.payload.undeployStrings().length ) - p.rev.payload = new ImpProvider( p.rev.payload); + ps.forEach(p -> + { + p.fwd.precede( v.fwd); + p.rev.precede( v.rev); + + /* + * A snippet providing an implementor tag probably has + * no undeployStrings, because its deployStrings should + * be used on both occasions; if so, replace it with a + * proxy that returns deployStrings for undeployStrings. + */ + if ( 0 == p.rev.payload.undeployStrings().length ) + p.rev.payload = new ImpProvider( p.rev.payload); + }); } else if ( ! defaultImplementor.equals( impName, msgr) ) { @@ -564,13 +685,16 @@ else if ( ! defaultImplementor.equals( impName, msgr) ) } for ( DependTag s : v.payload().requireTags() ) { - p = provider.get( s); - if ( null != p ) + ps = provider.get( s); + if ( null != ps ) { fwdConsumers.add( s); revConsumers.add( s); - p.fwd.precede( v.fwd); - v.rev.precede( p.rev); // these relationships do reverse + ps.forEach(p -> + { + p.fwd.precede( v.fwd); + v.rev.precede( p.rev); // these relationships do reverse + }); } else if ( s instanceof DependTag.Explicit ) { @@ -737,38 +861,67 @@ Snippet[] order( } return snips.toArray(new Snippet[snips.size()]); } - - /** - * Process a single element annotated with @SQLAction. 
- */ - void processSQLAction( Element e) + + void putRepeatableSnippet(Element e, T snip) { - SQLActionImpl sa = - getSnippet( e, SQLActionImpl.class, SQLActionImpl::new); - for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) - { - if ( am.getAnnotationType().asElement().equals( AN_SQLACTION) ) - populateAnnotationImpl( sa, e, am); - } + if ( null != snip ) + putSnippet( snip, (Snippet)snip); } /** - * Process a single element annotated with @SQLActions (which simply takes - * an array of @SQLAction as a way to associate more than one SQLAction with - * a single program element).. + * Process an element carrying a repeatable annotation, the container + * of that repeatable annotation, or both. + *

+ * Snippets corresponding to repeatable annotations might not be entered in the + * {@code snippets} map keyed by the target element, as that might not be + * unique. Each populated snippet is passed to putter along with + * the element it annotates, and putter determines what to do with + * it. If putter is null, the default enters the snippet with a key + * made from its class and itself, as typical repeatable snippets are + * not expected to be looked up, only processed when all of the map entries + * are enumerated. + *

+ * After all snippets of the desired class have been processed for a given + * element, a final call to putter is made passing the element and + * null for the snippet. */ - void processSQLActions( Element e) + void processRepeatable( + Element e, TypeElement annot, TypeElement container, Class clazz, + BiConsumer putter) { + if ( null == putter ) + putter = this::putRepeatableSnippet; + for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) { - if ( am.getAnnotationType().asElement().equals( AN_SQLACTIONS) ) + Element asElement = am.getAnnotationType().asElement(); + if ( asElement.equals( annot) ) + { + T snip; + try + { + snip = clazz.getDeclaredConstructor( DDRProcessorImpl.class, + Element.class, AnnotationMirror.class) + .newInstance( DDRProcessorImpl.this, e, am); + } + catch ( ReflectiveOperationException re ) + { + throw new RuntimeException( + "Incorrect implementation of annotation processor", re); + } + populateAnnotationImpl( snip, e, am); + putter.accept( e, snip); + } + else if ( asElement.equals( container) ) { - SQLActionsImpl sas = new SQLActionsImpl(); - populateAnnotationImpl( sas, e, am); - for ( SQLAction sa : sas.value() ) - putSnippet( sa, (Snippet)sa); + Container c = new Container<>(clazz); + populateAnnotationImpl( c, e, am); + for ( T snip : c.value() ) + putter.accept( e, snip); } } + + putter.accept( e, null); } static enum UDTKind { BASE, MAPPED } @@ -1013,6 +1166,7 @@ public Identifier.Simple implementorName() Identifier.Simple _implementor = defaultImplementor; String _comment; + boolean commentDerived; public void setImplementor( Object o, boolean explicit, Element e) { @@ -1039,7 +1193,18 @@ public void setComment( Object o, boolean explicit, Element e) _comment = null; } else + { _comment = ((Commentable)this).derivedComment( e); + commentDerived = true; + } + } + + protected void replaceCommentIfDerived( String comment) + { + if ( ! 
commentDerived ) + return; + commentDerived = false; + _comment = comment; } public String derivedComment( Element e) @@ -1098,6 +1263,18 @@ public Set requireTags() } } + class Repeatable extends AbstractAnnotationImpl + { + final Element m_targetElement; + final AnnotationMirror m_origin; + + Repeatable(Element e, AnnotationMirror am) + { + m_targetElement = e; + m_origin = am; + } + } + /** * Populate an AbstractAnnotationImpl-derived Annotation implementation * from the element-value pairs in an AnnotationMirror. For each element @@ -1265,30 +1442,56 @@ public void setName( Object o, boolean explicit, Element e) } } - class SQLActionsImpl extends AbstractAnnotationImpl implements SQLActions + class Container + extends AbstractAnnotationImpl { - public SQLAction[] value() { return _value; } + public T[] value() { return _value; } - SQLAction[] _value; + T[] _value; + final Class _clazz; + + Container(Class clazz) + { + _clazz = clazz; + } public void setValue( Object o, boolean explicit, Element e) { AnnotationMirror[] ams = avToArray( o, AnnotationMirror.class); - _value = new SQLAction [ ams.length ]; + + @SuppressWarnings("unchecked") + T[] t = (T[])Array.newInstance( _clazz, ams.length); + _value = t; + int i = 0; for ( AnnotationMirror am : ams ) { - SQLActionImpl a = new SQLActionImpl(); - populateAnnotationImpl( a, e, am); - _value [ i++ ] = a; + try + { + T a = _clazz.getDeclaredConstructor(DDRProcessorImpl.class, + Element.class, AnnotationMirror.class) + .newInstance(DDRProcessorImpl.this, e, am); + populateAnnotationImpl( a, e, am); + _value [ i++ ] = a; + } + catch ( ReflectiveOperationException re ) + { + throw new RuntimeException( + "Incorrect implementation of annotation processor", re); + } } } } class SQLActionImpl - extends AbstractAnnotationImpl + extends Repeatable implements SQLAction, Snippet { + SQLActionImpl(Element e, AnnotationMirror am) + { + super(e, am); + } + public String[] install() { return _install; } public String[] remove() { 
return _remove; } public String[] provides() { return _provides; } @@ -1302,10 +1505,10 @@ class SQLActionImpl public String[] _provides; public String[] _requires; - public boolean characterize() + public Set characterize() { recordExplicitTags(_provides, _requires); - return true; + return Set.of(this); } } @@ -1375,7 +1578,7 @@ public void setConstraint( Object o, boolean explicit, Element e) origin = am; } - public boolean characterize() + public Set characterize() { if ( Scope.ROW.equals( _scope) ) { @@ -1457,7 +1660,7 @@ else if ( Called.INSTEAD_OF.equals( _called) ) if ( "".equals( _name) ) _name = TriggerNamer.synthesizeName( this); - return false; + return Set.of(); } public String[] deployStrings() @@ -1528,7 +1731,7 @@ public String[] deployStrings() if ( ! "".equals( _when) ) sb.append( "\n\tWHEN ").append( _when); sb.append( "\n\tEXECUTE PROCEDURE "); - func.appendNameAndParams( sb, false); + func.appendNameAndParams( sb, true, false, false); sb.setLength( sb.length() - 1); // drop closing ) s = _arguments.length; for ( String a : _arguments ) @@ -1566,6 +1769,7 @@ class FunctionImpl implements Function, Snippet, Commentable { public String type() { return _type; } + public String[] out() { return _out; } public String name() { return _name; } public String schema() { return _schema; } public boolean variadic() { return _variadic; } @@ -1589,6 +1793,7 @@ public String language() ExecutableElement func; public String _type; + public String[] _out; public String _name; public String _schema; public boolean _variadic; @@ -1616,6 +1821,7 @@ public String language() DBType returnType; DBType[] parameterTypes; + List> outParameters; boolean subsumed = false; @@ -1624,6 +1830,18 @@ public String language() func = e; } + public void setType( Object o, boolean explicit, Element e) + { + if ( explicit ) + _type = (String)o; + } + + public void setOut( Object o, boolean explicit, Element e) + { + if ( explicit ) + _out = avToArray( o, String.class); + } + public 
void setTrust( Object o, boolean explicit, Element e) { if ( explicit ) @@ -1664,7 +1882,7 @@ public void setTriggers( Object o, boolean explicit, Element e) } } - public boolean characterize() + public Set characterize() { if ( "".equals( _name) ) _name = func.getSimpleName().toString(); @@ -1682,7 +1900,7 @@ public boolean characterize() { msg( Kind.ERROR, func, "Unable to resolve return type of function"); - return false; + return Set.of(); } ExecutableType et = (ExecutableType)func.asType(); @@ -1690,7 +1908,7 @@ public boolean characterize() List typeArgs; int arity = ptms.size(); - if ( ! "".equals( type()) + if ( ( null != _type || null != _out ) && ret.getKind().equals( TypeKind.BOOLEAN) ) { complexViaInOut = true; @@ -1702,7 +1920,7 @@ public boolean characterize() msg( Kind.ERROR, func.getParameters().get( arity - 1), "Last parameter of complex-type-returning function " + "must be ResultSet"); - return false; + return Set.of(); } } else if ( null != (typeArgs = specialization( ret, TY_ITERATOR)) ) @@ -1712,14 +1930,14 @@ else if ( null != (typeArgs = specialization( ret, TY_ITERATOR)) ) { msg( Kind.ERROR, func, "Need one type argument for Iterator return type"); - return false; + return Set.of(); } setofComponent = typeArgs.get( 0); if ( null == setofComponent ) { msg( Kind.ERROR, func, "Failed to find setof component type"); - return false; + return Set.of(); } } else if ( typu.isAssignable( ret, TY_RESULTSETPROVIDER) @@ -1774,7 +1992,7 @@ else if ( ret.getKind().equals( TypeKind.VOID) && 1 == arity ) for ( Trigger t : triggers() ) ((TriggerImpl)t).characterize(); - return true; + return Set.of(this); } void resolveLanguage() @@ -1862,8 +2080,12 @@ Stream parameterInfo() */ void resolveParameterAndReturnTypes() { - if ( ! 
"".equals( type()) ) - returnType = DBType.fromSQLTypeAnnotation( type()); + if ( null != _type && null != _out ) + msg( Kind.ERROR, func, "A PL/Java function may specify " + + "only one of type, out"); + + if ( null != _type ) + returnType = DBType.fromSQLTypeAnnotation( _type); else if ( null != setofComponent ) returnType = tmpr.getSQLType( setofComponent, func); else if ( setof ) @@ -1874,6 +2096,14 @@ else if ( setof ) parameterTypes = parameterInfo() .map(i -> tmpr.getSQLType(i.tm, i.ve, i.st, true, true)) .toArray(DBType[]::new); + + if ( null != _out ) + { + returnType = DT_RECORD; + outParameters = Arrays.stream(_out) + .map(DBType::fromNameAndType) + .collect(toList()); + } } /** @@ -1901,6 +2131,12 @@ void recordImplicitTags() if ( null != t ) requires.add(t); } + + if ( null != outParameters ) + outParameters.stream() + .map(m -> m.getValue().dependTag()) + .filter(Objects::nonNull) + .forEach(requires::add); } @Override @@ -1916,40 +2152,76 @@ public void subsume() * * @param dflts Whether to include the defaults, if any. */ - void appendNameAndParams( StringBuilder sb, boolean dflts) + void appendNameAndParams( + StringBuilder sb, boolean names, boolean outs, boolean dflts) + { + appendNameAndParams(sb, names, outs, dflts, + qnameFrom(name(), schema()), parameterInfo().collect(toList())); + } + + /** + * Internal version taking name and parameter stream as extra arguments + * so they can be overridded from {@link Transformed}. 
+ */ + void appendNameAndParams( + StringBuilder sb, boolean names, boolean outs, boolean dflts, + Identifier.Qualified qname, + Iterable params) { - sb.append(qnameFrom(name(), schema())).append( '('); - appendParams( sb, dflts); + sb.append(qname).append( '('); + appendParams( sb, names, outs, dflts, params); // TriggerImpl relies on ) being the very last character sb.append( ')'); } - void appendParams( StringBuilder sb, boolean dflts) + /** + * Takes the parameter stream as an extra argument + * so it can be overridded from {@link Transformed}. + */ + void appendParams( + StringBuilder sb, boolean names, boolean outs, boolean dflts, + Iterable params) { - int count = parameterTypes.length; - for ( ParameterInfo i - : (Iterable)parameterInfo()::iterator ) + int lengthOnEntry = sb.length(); + + Iterator iter = params.iterator(); + ParameterInfo i; + while ( iter.hasNext() ) { - -- count; + i = iter.next(); - String name = null == i.st ? null : i.st.name(); - if ( null == name ) - name = i.ve.getSimpleName().toString(); + String name = i.name(); sb.append("\n\t"); - if ( _variadic && 0 == count ) + if ( _variadic && ! iter.hasNext() ) sb.append("VARIADIC "); - sb.append(name).append(' ').append(i.dt.toString(dflts)); + if ( names ) + sb.append(name).append(' '); - if ( 0 < count ) - sb.append(','); + sb.append(i.dt.toString(dflts)); + + sb.append(','); + } + + if ( outs && null != outParameters ) + { + outParameters.forEach(e -> { + sb.append("\n\tOUT "); + if ( null != e.getKey() ) + sb.append(e.getKey()).append(' '); + sb.append(e.getValue().toString(false)).append(','); + }); } + + if ( lengthOnEntry < sb.length() ) + sb.setLength(sb.length() - 1); // that last pesky comma } - void appendAS( StringBuilder sb) + String makeAS() { + StringBuilder sb = new StringBuilder(); if ( ! 
( complexViaInOut || setof || trigger ) ) sb.append( typu.erasure( func.getReturnType())).append( '='); Element e = func.getEnclosingElement(); @@ -1959,14 +2231,29 @@ void appendAS( StringBuilder sb) "than a class"); sb.append( e.toString()).append( '.'); sb.append( trigger ? func.getSimpleName() : func.toString()); + return sb.toString(); } public String[] deployStrings() + { + return deployStrings( + qnameFrom(name(), schema()), parameterInfo().collect(toList()), + makeAS(), comment()); + } + + /** + * Internal version taking the function name, parameter stream, + * AS string, and comment (if any) as extra arguments so they can be + * overridden from {@link Transformed}. + */ + String[] deployStrings( + Identifier.Qualified qname, + Iterable params, String as, String comment) { ArrayList al = new ArrayList<>(); StringBuilder sb = new StringBuilder(); sb.append( "CREATE OR REPLACE FUNCTION "); - appendNameAndParams( sb, true); + appendNameAndParams( sb, true, true, true, qname, params); sb.append( "\n\tRETURNS "); if ( trigger ) sb.append( DT_TRIGGER.toString()); @@ -1994,19 +2281,16 @@ public String[] deployStrings() sb.append( "\tROWS ").append( rows()).append( '\n'); for ( String s : settings() ) sb.append( "\tSET ").append( s).append( '\n'); - sb.append( "\tAS '"); - appendAS( sb); - sb.append( '\''); + sb.append( "\tAS ").append( DDRWriter.eQuote( as)); al.add( sb.toString()); - String comm = comment(); - if ( null != comm ) + if ( null != comment ) { sb.setLength( 0); sb.append( "COMMENT ON FUNCTION "); - appendNameAndParams( sb, false); + appendNameAndParams( sb, true, false, false, qname, params); sb.append( "\nIS "); - sb.append( DDRWriter.eQuote( comm)); + sb.append( DDRWriter.eQuote( comment)); al.add( sb.toString()); } @@ -2017,6 +2301,14 @@ public String[] deployStrings() } public String[] undeployStrings() + { + return undeployStrings( + qnameFrom(name(), schema()), parameterInfo().collect(toList())); + } + + String[] undeployStrings( + 
Identifier.Qualified qname, + Iterable params) { if ( subsumed ) return new String[0]; @@ -2029,7 +2321,7 @@ public String[] undeployStrings() StringBuilder sb = new StringBuilder(); sb.append( "DROP FUNCTION "); - appendNameAndParams( sb, false); + appendNameAndParams( sb, true, false, false, qname, params); rslt [ rslt.length - 1 ] = sb.toString(); return rslt; } @@ -2066,6 +2358,85 @@ List specialization( */ return Collections.emptyList(); } + + class Transformed implements Snippet + { + final Identifier.Qualified m_qname; + final boolean m_commute; + final boolean m_negate; + final String m_comment; + + Transformed( + Identifier.Qualified qname, + boolean commute, boolean negate, String comment) + { + assert commute || negate : "no transformation to apply"; + m_qname = requireNonNull(qname); + m_commute = commute; + m_negate = negate; + m_comment = comment; + } + + List parameterInfo() + { + List params = + FunctionImpl.this.parameterInfo().collect(toList()); + if ( ! m_commute ) + return params; + assert 2 == params.size() : "commute with arity != 2"; + Collections.reverse(params); + return params; + } + + @Override + public Set characterize() + { + return Set.of(); + } + + @Override + public Identifier.Simple implementorName() + { + return FunctionImpl.this.implementorName(); + } + + @Override + public Set requireTags() + { + return FunctionImpl.this.requireTags(); + } + + @Override + public Set provideTags() + { + DBType[] sig = + parameterInfo().stream() + .map(p -> p.dt) + .toArray(DBType[]::new); + return Set.of(new DependTag.Function(m_qname, sig)); + } + + @Override + public String[] deployStrings() + { + String as = Stream.of( + m_commute ? "commute" : (String)null, + m_negate ? 
"negate" : (String)null) + .filter(Objects::nonNull) + .collect(joining(",", "[", "]")) + + FunctionImpl.this.makeAS(); + + return FunctionImpl.this.deployStrings( + m_qname, parameterInfo(), as, m_comment); + } + + @Override + public String[] undeployStrings() + { + return FunctionImpl.this.undeployStrings( + m_qname, parameterInfo()); + } + } } static enum BaseUDTFunctionID @@ -2138,7 +2509,27 @@ class BaseUDTFunctionImpl extends FunctionImpl BaseUDTFunctionID id; @Override - void appendParams( StringBuilder sb, boolean dflts) + public String[] deployStrings() + { + return deployStrings( + qnameFrom(name(), schema()), + null, // parameter iterable unused in appendParams below + "UDT[" + te + "] " + id.name(), + comment()); + } + + @Override + public String[] undeployStrings() + { + return undeployStrings( + qnameFrom(name(), schema()), + null); // parameter iterable unused in appendParams below + } + + @Override + void appendParams( + StringBuilder sb, boolean names, boolean outs, boolean dflts, + Iterable params) { sb.append( Arrays.stream(id.getParam( ui)) @@ -2147,13 +2538,6 @@ void appendParams( StringBuilder sb, boolean dflts) ); } - @Override - void appendAS( StringBuilder sb) - { - sb.append( "UDT[").append( te.toString()).append( "] "); - sb.append( id.name()); - } - StringBuilder appendTypeOp( StringBuilder sb) { sb.append( id.name()).append( " = "); @@ -2161,12 +2545,12 @@ StringBuilder appendTypeOp( StringBuilder sb) } @Override - public boolean characterize() + public Set characterize() { resolveLanguage(); recordImplicitTags(); recordExplicitTags(_provides, _requires); - return true; + return Set.of(this); } public void setType( Object o, boolean explicit, Element e) @@ -2176,6 +2560,13 @@ public void setType( Object o, boolean explicit, Element e) "The type of a UDT function may not be changed"); } + public void setOut( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, + "The type of a UDT function may not be 
changed"); + } + public void setVariadic( Object o, boolean explicit, Element e) { if ( explicit ) @@ -2315,7 +2706,7 @@ public void registerMapping() setQname(); } - public boolean characterize() + public Set characterize() { if ( null != structure() ) { @@ -2324,7 +2715,7 @@ public boolean characterize() provideTags().add(t); } recordExplicitTags(_provides, _requires); - return true; + return Set.of(this); } public String[] deployStrings() @@ -2396,9 +2787,9 @@ public Set requireTags() } @Override - public boolean characterize() + public Set characterize() { - return false; + return Set.of(); } } @@ -2530,7 +2921,7 @@ void registerFunctions() send); } - public boolean characterize() + public Set characterize() { if ( "".equals( typeModifierInput()) && ! "".equals( typeModifierOutput()) ) @@ -2563,7 +2954,7 @@ public boolean characterize() recordImplicitTags(); recordExplicitTags(_provides, _requires); - return true; + return Set.of(this); } void recordImplicitTags() @@ -2700,66 +3091,2120 @@ public Vertex breakCycle(Vertex v, boolean deploy) } } - /** - * Provides the default mappings from Java types to SQL types. 
- */ - class TypeMapper + class CastImpl + extends Repeatable + implements Cast, Snippet, Commentable { - ArrayList> protoMappings; - ArrayList> finalMappings; - - TypeMapper() + CastImpl(Element e, AnnotationMirror am) { - protoMappings = new ArrayList<>(); + super(e, am); + } - // Primitives (these need not, indeed cannot, be schema-qualified) - // - this.addMap(boolean.class, "boolean"); - this.addMap(Boolean.class, "boolean"); - this.addMap(byte.class, "smallint"); - this.addMap(Byte.class, "smallint"); - this.addMap(char.class, "smallint"); - this.addMap(Character.class, "smallint"); - this.addMap(double.class, "double precision"); - this.addMap(Double.class, "double precision"); - this.addMap(float.class, "real"); - this.addMap(Float.class, "real"); - this.addMap(int.class, "integer"); - this.addMap(Integer.class, "integer"); - this.addMap(long.class, "bigint"); - this.addMap(Long.class, "bigint"); - this.addMap(short.class, "smallint"); - this.addMap(Short.class, "smallint"); + public String from() { return _from; } + public String to() { return _to; } + public Cast.Path path() { return _path; } + public Cast.Application application() { return _application; } + public String[] provides() { return _provides; } + public String[] requires() { return _requires; } - // Known common mappings - // - this.addMap(Number.class, "pg_catalog", "numeric"); - this.addMap(String.class, "pg_catalog", "varchar"); - this.addMap(java.util.Date.class, "pg_catalog", "timestamp"); - this.addMap(Timestamp.class, "pg_catalog", "timestamp"); - this.addMap(Time.class, "pg_catalog", "time"); - this.addMap(java.sql.Date.class, "pg_catalog", "date"); - this.addMap(java.sql.SQLXML.class, "pg_catalog", "xml"); - this.addMap(BigInteger.class, "pg_catalog", "numeric"); - this.addMap(BigDecimal.class, "pg_catalog", "numeric"); - this.addMap(ResultSet.class, "pg_catalog", "record"); - this.addMap(Object.class, "pg_catalog", "\"any\""); + public String _from; + public String _to; + public 
Cast.Path _path; + public Cast.Application _application; + public String[] _provides; + public String[] _requires; - this.addMap(byte[].class, "pg_catalog", "bytea"); + FunctionImpl func; + DBType fromType; + DBType toType; - this.addMap(LocalDate.class, "pg_catalog", "date"); - this.addMap(LocalTime.class, "pg_catalog", "time"); - this.addMap(OffsetTime.class, "pg_catalog", "timetz"); - this.addMap(LocalDateTime.class, "pg_catalog", "timestamp"); - this.addMap(OffsetDateTime.class, "pg_catalog", "timestamptz"); + public void setPath( Object o, boolean explicit, Element e) + { + if ( explicit ) + _path = Path.valueOf( + ((VariableElement)o).getSimpleName().toString()); } - private boolean mappingsFrozen() + public Set characterize() { - return null != finalMappings; - } + boolean ok = true; - /* + if ( ElementKind.METHOD.equals(m_targetElement.getKind()) ) + { + func = getSnippet(m_targetElement, FunctionImpl.class, + () -> (FunctionImpl)null); + if ( null == func ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "A method annotated with @Cast must also have @Function" + ); + ok = false; + } + } + + if ( null == func && "".equals(_from) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Cast not annotating a method must specify from=" + ); + ok = false; + } + + if ( null == func && "".equals(_to) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Cast not annotating a method must specify to=" + ); + ok = false; + } + + if ( null == func && null == _path ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Cast not annotating a method, and without path=, " + + "is not yet supported" + ); + ok = false; + } + + if ( ok ) + { + fromType = ("".equals(_from)) + ? func.parameterTypes[0] + : DBType.fromSQLTypeAnnotation(_from); + + toType = ("".equals(_to)) + ? 
func.returnType + : DBType.fromSQLTypeAnnotation(_to); + } + + if ( null != _path ) + { + if ( ok && Path.BINARY == _path && fromType.equals(toType) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "A cast with from and to types the same can only " + + "apply a type modifier; path=BINARY will have " + + "no effect"); + ok = false; + } + } + else if ( null != func ) + { + int nparams = func.parameterTypes.length; + + if ( ok && 2 > nparams && fromType.equals(toType) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "A cast with from and to types the same can only " + + "apply a type modifier, therefore must have at least " + + "two parameters"); + ok = false; + } + + if ( 1 > nparams || nparams > 3 ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "A cast function must have 1, 2, or 3 parameters"); + ok = false; + } + + if (1 < nparams && ! DT_INTEGER.equals(func.parameterTypes[1])) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Parameter 2 of a cast function must have integer type" + ); + ok = false; + } + + if (3 == nparams && ! DT_BOOLEAN.equals(func.parameterTypes[2])) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Parameter 3 of a cast function must have boolean type" + ); + ok = false; + } + } + + if ( ! 
ok ) + return Set.of(); + + recordImplicitTags(); + recordExplicitTags(_provides, _requires); + return Set.of(this); + } + + void recordImplicitTags() + { + Set requires = requireTags(); + + DependTag dt = fromType.dependTag(); + if ( null != dt ) + requires.add(dt); + + dt = toType.dependTag(); + if ( null != dt ) + requires.add(dt); + + if ( null == _path ) + { + dt = func.provideTags().stream() + .filter(DependTag.Function.class::isInstance) + .findAny().get(); + requires.add(dt); + } + } + + public String[] deployStrings() + { + List al = new ArrayList<>(); + + StringBuilder sb = new StringBuilder(); + + sb.append("CREATE CAST (") + .append(fromType).append(" AS ").append(toType).append(")\n\t"); + + if ( Path.BINARY == _path ) + sb.append("WITHOUT FUNCTION"); + else if ( Path.INOUT == _path ) + sb.append("WITH INOUT"); + else + { + sb.append("WITH FUNCTION "); + func.appendNameAndParams(sb, false, false, false); + } + + switch ( _application ) + { + case ASSIGNMENT: sb.append("\n\tAS ASSIGNMENT"); break; + case EXPLICIT: break; + case IMPLICIT: sb.append("\n\tAS IMPLICIT"); + } + + al.add(sb.toString()); + + if ( null != comment() ) + al.add( + "COMMENT ON CAST (" + + fromType + " AS " + toType + ") IS " + + DDRWriter.eQuote(comment())); + + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + return new String[] + { + "DROP CAST (" + fromType + " AS " + toType + ")" + }; + } + } + + /* + * Called by processRepeatable for each @Operator processed. + * This happens before characterize, but after populating, so the + * operator's name and commutator/negator/synthetic elements can be + * inspected. All operators annotating a given element e are processed + * consecutively, and followed by a call with the same e and null snip. 
+ * + * This will accumulate the snippets onto two lists, for non-synthetic and + * synthetic ones and, on the final call, process the lists to find possible + * paths from non-synthetic to synthetic ones via commutation and/or + * negation. The possible paths will be recorded on each synthetic operator. + * They will have to be confirmed during characterize after things like + * operand types and arity have been resolved. + */ + void operatorPreSynthesize( Element e, OperatorImpl snip) + { + if ( ! ElementKind.METHOD.equals(e.getKind()) ) + { + if ( null != snip ) + putSnippet( snip, (Snippet)snip); + return; + } + + if ( null != snip ) + { + if ( snip.selfCommutator || snip.twinCommutator ) + snip.commutator = snip.qname; + + (snip.isSynthetic ? m_synthetic : m_nonSynthetic).add(snip); + return; + } + + /* + * Initially: + * processed: is empty + * ready: contains all non-synthetic snippets + * pending: contains all synthetic snippets + * Step: + * A snippet s is removed from ready and added to processed. + * If s.commutator or s.negator matches a synthetic snippet in pending + * or ready, a corresponding path is recorded on that snippet. If it is + * the first path recorded on that snippet (which must have been found + * on pending), that snippet is moved to ready. 
+ */ + + List processed = + new ArrayList<>(m_nonSynthetic.size() + m_synthetic.size()); + Queue ready = new LinkedList<>(m_nonSynthetic); + LinkedList pending = new LinkedList<>(m_synthetic); + m_nonSynthetic.clear(); + m_synthetic.clear(); + + while ( null != (snip = ready.poll()) ) + { + processed.add(snip); + if ( null != snip.commutator ) + { + for ( OperatorImpl other : ready ) + maybeAddPath(snip, other, + OperatorPath.Transform.COMMUTATION); + ListIterator it = pending.listIterator(); + while ( it.hasNext() ) + { + OperatorImpl other = it.next(); + if ( maybeAddPath(snip, other, + OperatorPath.Transform.COMMUTATION) ) + { + it.remove(); + ready.add(other); + } + } + } + if ( null != snip.negator ) + { + for ( OperatorImpl other : ready ) + maybeAddPath(snip, other, + OperatorPath.Transform.NEGATION); + ListIterator it = pending.listIterator(); + while ( it.hasNext() ) + { + OperatorImpl other = it.next(); + if ( maybeAddPath(snip, other, + OperatorPath.Transform.NEGATION) ) + { + it.remove(); + ready.add(other); + } + } + } + } + + if ( ! pending.isEmpty() ) + msg(Kind.ERROR, e, "Cannot synthesize operator(s) (%s)", + pending.stream() + .map(o -> o.qname.toString()) + .collect(joining(" "))); + + for ( OperatorImpl s : processed ) + putSnippet( s, (Snippet)s); + } + + boolean maybeAddPath( + OperatorImpl from, OperatorImpl to, OperatorPath.Transform how) + { + if ( ! to.isSynthetic ) + return false; // don't add paths to a non-synthetic operator + + switch ( how ) + { + case COMMUTATION: + if ( ! from.commutator.equals(to.qname) ) + return false; // this is not the operator you're looking for + if ( null != to.commutator && ! to.commutator.equals(from.qname) ) + return false; // you're not the one it's looking for + break; + case NEGATION: + if ( ! from.negator.equals(to.qname) ) + return false; // move along + if ( null != to.negator && ! 
to.negator.equals(from.qname) ) + return false; // move along + break; + } + + if ( null == to.paths ) + to.paths = new ArrayList<>(); + + if ( null == from.synthetic ) + to.paths.add(new OperatorPath(from, from, null, EnumSet.of(how))); + else + { + for ( OperatorPath path : from.paths ) + { + to.paths.add(new OperatorPath( + path.base, from, path.fromBase, EnumSet.of(how))); + } + } + + return true; + } + + List m_nonSynthetic = new ArrayList<>(); + List m_synthetic = new ArrayList<>(); + + static class OperatorPath + { + OperatorImpl base; + OperatorImpl proximate; + EnumSet fromBase; + EnumSet fromProximate; + + enum Transform { NEGATION, COMMUTATION } + + OperatorPath( + OperatorImpl base, OperatorImpl proximate, + EnumSet baseToProximate, + EnumSet proximateToNew) + { + this.base = base; + this.proximate = proximate; + fromProximate = proximateToNew.clone(); + + if ( base == proximate ) + fromBase = proximateToNew; + else + { + fromBase = baseToProximate.clone(); + fromBase.removeAll(proximateToNew); + proximateToNew = proximateToNew.clone(); + proximateToNew.removeAll(fromBase); + fromBase.addAll(proximateToNew); + } + } + + public String toString() + { + return + base.commentDropForm() + " " + fromBase + + (base == proximate + ? "" + : " (... 
" + proximate.commentDropForm() + + " " + fromProximate); + } + } + + class OperatorImpl + extends Repeatable + implements Operator, Snippet, Commentable + { + OperatorImpl(Element e, AnnotationMirror am) + { + super(e, am); + } + + public String[] name() { return qstrings(qname); } + public String left() { return operand(0); } + public String right() { return operand(1); } + public String[] function() { return qstrings(funcName); } + public String[] synthetic() { return qstrings(synthetic); } + public String[] commutator() { return qstrings(commutator); } + public String[] negator() { return qstrings(negator); } + public boolean hashes() { return _hashes; } + public boolean merges() { return _merges; } + public String[] restrict() { return qstrings(restrict); } + public String[] join() { return qstrings(join); } + public String[] provides() { return _provides; } + public String[] requires() { return _requires; } + + public String[] _provides; + public String[] _requires; + public boolean _hashes; + public boolean _merges; + + Identifier.Qualified qname; + DBType[] operands = { null, null }; + FunctionImpl func; + Identifier.Qualified funcName; + Identifier.Qualified commutator; + Identifier.Qualified negator; + Identifier.Qualified restrict; + Identifier.Qualified join; + Identifier.Qualified synthetic; + boolean isSynthetic; + boolean selfCommutator; + boolean twinCommutator; + List paths; + + private String operand(int i) + { + return null == operands[i] ? 
null : operands[i].toString(); + } + + public void setName( Object o, boolean explicit, Element e) + { + qname = operatorNameFrom(avToArray( o, String.class)); + } + + public void setLeft( Object o, boolean explicit, Element e) + { + if ( explicit ) + operands[0] = DBType.fromSQLTypeAnnotation((String)o); + } + + public void setRight( Object o, boolean explicit, Element e) + { + if ( explicit ) + operands[1] = DBType.fromSQLTypeAnnotation((String)o); + } + + public void setFunction( Object o, boolean explicit, Element e) + { + if ( explicit ) + funcName = qnameFrom(avToArray( o, String.class)); + } + + public void setSynthetic( Object o, boolean explicit, Element e) + { + if ( ! explicit ) + return; + + /* + * Use isSynthetic to indicate that synthetic= has been used at all. + * Set synthetic to the supplied qname only if it is a qname, and + * not the distinguished value TWIN. + * + * Most of the processing below only needs to look at isSynthetic. + * The TWIN case, recognized by isSynthetic && null == synthetic, + * will be handled late in the game by copying the base function's + * qname. + */ + + isSynthetic = true; + String[] ss = avToArray( o, String.class); + if ( 1 != ss.length || ! TWIN.equals(ss[0]) ) + synthetic = qnameFrom(ss); + } + + public void setCommutator( Object o, boolean explicit, Element e) + { + if ( ! 
explicit ) + return; + + String[] ss = avToArray( o, String.class); + if ( 1 == ss.length ) + { + if ( SELF.equals(ss[0]) ) + { + selfCommutator = true; + return; + } + if ( TWIN.equals(ss[0]) ) + { + twinCommutator = true; + return; + } + } + commutator = operatorNameFrom(ss); + } + + public void setNegator( Object o, boolean explicit, Element e) + { + if ( explicit ) + negator = operatorNameFrom(avToArray( o, String.class)); + } + + public void setRestrict( + Object o, boolean explicit, Element e) + { + if ( explicit ) + restrict = qnameFrom(avToArray( o, String.class)); + } + + public void setJoin( + Object o, boolean explicit, Element e) + { + if ( explicit ) + join = qnameFrom(avToArray( o, String.class)); + } + + public Set characterize() + { + boolean ok = true; + Snippet syntheticFunction = null; + + if ( ElementKind.METHOD.equals(m_targetElement.getKind()) ) + { + func = getSnippet(m_targetElement, FunctionImpl.class, + () -> (FunctionImpl)null); + } + + if ( isSynthetic ) + { + if ( null != funcName ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator may not specify both function= and " + + "synthetic=" + ); + ok = false; + } + funcName = synthetic; // can be null (the TWIN case) + } + + if ( null == func && null == funcName && ! isSynthetic ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator not annotating a method must specify function=" + ); + ok = false; + } + + if ( null == func ) + { + if ( null == operands[0] && null == operands[1] ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator not annotating a method must specify " + + "left= or right= or both" + ); + ok = false; + } + } + else + { + Identifier.Qualified fn = + qnameFrom(func.name(), func.schema()); + + if ( null == funcName ) + funcName = fn; + else if ( ! funcName.equals(fn) && ! 
isSynthetic ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator annotates a method but function= gives a " + + "different name" + ); + ok = false; + } + + long explicit = + Arrays.stream(operands).filter(Objects::nonNull).count(); + + if ( 0 != explicit && isSynthetic ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator with synthetic= must not specify " + + "operand types" + ); + ok = false; + } + + if ( 0 == explicit ) + { + int nparams = func.parameterTypes.length; + if ( 1 > nparams || nparams > 2 ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "method annotated with @Operator must take one " + + "or two parameters" + ); + ok = false; + } + if ( 1 == nparams ) + operands[1] = func.parameterTypes[0]; + else + System.arraycopy(func.parameterTypes,0, operands,0,2); + } + else if ( explicit != func.parameterTypes.length ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator annotates a method but specifies " + + "a different number of operands" + ); + ok = false; + } + else if ( 2 == explicit + && ! Arrays.equals(operands, func.parameterTypes) + || 1 == explicit + && ! Arrays.asList(operands) + .contains(func.parameterTypes[0]) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator annotates a method but specifies " + + "different operand types" + ); + ok = false; + } + } + + /* + * At this point, ok ==> there is a non-null funcName ... UNLESS + * isSynthetic is true, synthetic=TWIN was given, and we are not + * annotating a method (that last condition is currently not + * supported, so we could in fact rely on having a funcName here, + * but that condition may be worth supporting in the future, so + * better to keep the exception in mind). + */ + + if ( ! 
ok ) + return Set.of(); + + long arity = + Arrays.stream(operands).filter(Objects::nonNull).count(); + + if ( 1 == arity && null == operands[1] ) + { + msg(Kind.WARNING, m_targetElement, m_origin, + "Right unary (postfix) operators are deprecated and will " + + "be removed in PostgreSQL version 14." + ); + } + + if ( null != commutator ) + { + if ( 2 != arity ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "unary @Operator cannot have a commutator" + ); + ok = false; + } + else if ( selfCommutator && ! operands[0].equals(operands[1]) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator with different left and right operand " + + "types cannot have commutator=SELF" + ); + ok = false; + } + else if ( twinCommutator && operands[0].equals(operands[1]) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator with matching left and right operand " + + "types cannot have commutator=TWIN" + ); + ok = false; + } + } + + boolean knownNotBoolean = + null != func && ! DT_BOOLEAN.equals(func.returnType); + + if ( null != negator ) + { + if ( knownNotBoolean ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "negator= only belongs on a boolean @Operator" + ); + ok = false; + } + else if ( negator.equals(qname) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator can never be its own negator" + ); + ok = false; + } + } + + boolean knownNotBinaryBoolean = 2 != arity || knownNotBoolean; + boolean knownVolatile = + null != func && Function.Effects.VOLATILE == func.effects(); + boolean operandTypesDiffer = + 2 == arity && ! operands[0].equals(operands[1]); + boolean selfCommutates = + null != commutator && commutator.equals(qname); + + ok &= Stream.of( + _hashes ? "hashes" : null, + _merges ? 
"merges" : null) + .filter(Objects::nonNull) + .map(s -> + { + boolean inner_ok = true; + if ( knownNotBinaryBoolean ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "%s= only belongs on a boolean " + + "binary @Operator", s + ); + inner_ok = false; + } + if ( null == commutator ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "%s= requires that the @Operator " + + "have a commutator", s + ); + inner_ok = false; + } + else if ( ! (operandTypesDiffer || selfCommutates) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "%s= requires the @Operator to be its own" + + "commutator as its operand types are the same", s + ); + inner_ok = false; + } + if ( knownVolatile ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "%s= requires an underlying function " + + "declared IMMUTABLE or STABLE", s + ); + inner_ok = false; + } + return inner_ok; + }) + .allMatch(t -> t); + + if ( null != restrict && knownNotBinaryBoolean ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "restrict= only belongs on a boolean binary @Operator" + ); + ok = false; + } + + if ( null != join && knownNotBinaryBoolean ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "join= only belongs on a boolean binary @Operator" + ); + ok = false; + } + + if ( ! ok ) + return Set.of(); + + if ( isSynthetic ) + { + if ( null == func ) + { + /* + * It could be possible to relax this requirement if there + * is a need, but this way is easier. + */ + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator annotation must appear " + + "on the method to be used as the base"); + ok = false; + } + + if ( paths.isEmpty() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s has no derivation path " + + "involving negation or commutation from another " + + "operator", qnameUnwrapped()); + /* + * If no paths at all, return empty from here; no point in + * further checks. 
+ */ + return Set.of(); + } + + /* + * Check for conditions where deriving by commutation wouldn't + * make sense. Any of these three conditions will trigger the + * test of available paths. The conditions are rechecked but the + * third one is changed, so either of the first two will always + * preclude commutation, but ! operandTypesDiffer only does if + * the synthetic function's name will be the same as the base's. + * (If the types were different, PostgreSQL overloading would + * allow the functions to share a name, but that's not possible + * if the types are the same.) In those cases, any commutation + * paths are filtered out; if no path remains, that's an error. + */ + if ( 2 != arity || selfCommutator || ! operandTypesDiffer ) + { + List filtered = + paths.stream() + .filter( + p -> ! p.fromBase.contains( + OperatorPath.Transform.COMMUTATION)) + .collect(toList()); + if ( 2 != arity || selfCommutator + || null == synthetic || + synthetic.equals(qnameFrom(func.name(), func.schema()))) + { + if ( filtered.isEmpty() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s cannot be another " + + "operator's commutator, but found only " + + "path(s) involving commutation: %s", + qnameUnwrapped(), paths.toString()); + ok = false; + } + else + paths = filtered; + } + } + + ok &= paths.stream().collect( + groupingBy(p -> p.base, + mapping(p -> p.fromBase, toSet()))) + .entrySet().stream() + .filter(e -> 1 < e.getValue().size()) + .map(e -> + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s found paths with " + + "different transforms %s from same base %s", + qnameUnwrapped(), + e.getValue(), e.getKey().qnameUnwrapped()); + return false; + }) + .allMatch(t -> t); + + ok &= paths.stream().collect( + groupingBy(p -> p.proximate, + mapping(p -> p.fromProximate, toSet()))) + .entrySet().stream() + .filter(e -> 1 < e.getValue().size()) + .map(e -> + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s found 
paths with " + + "different transforms %s from %s", + qnameUnwrapped(), + e.getValue(), e.getKey().qnameUnwrapped()); + return false; + }) + .allMatch(t -> t); + + Set> + commutatorCandidates = + paths.stream() + .filter( + p -> p.fromProximate.contains( + OperatorPath.Transform.COMMUTATION)) + .map(p -> p.proximate.qname) + .collect(toSet()); + if ( null == commutator && 0 < commutatorCandidates.size() ) + { + if ( 1 == commutatorCandidates.size() ) + commutator = commutatorCandidates.iterator().next(); + else + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s has muliple commutator " + + "candidates %s", + qnameUnwrapped(), commutatorCandidates); + ok = false; + } + } + + Set> + negatorCandidates = + paths.stream() + .filter( + p -> p.fromProximate.contains( + OperatorPath.Transform.NEGATION)) + .map(p -> p.proximate.qname) + .collect(toSet()); + if ( null == negator && 0 < negatorCandidates.size() ) + { + if ( 1 == negatorCandidates.size() ) + negator = negatorCandidates.iterator().next(); + else + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s has muliple negator " + + "candidates %s", + qnameUnwrapped(), negatorCandidates); + ok = false; + } + } + + /* + * Filter paths to only those based on an operator that is built + * over this method. (That's currently guaranteed by the way + * operatorPreSynthesize generates paths, but may as well check + * here to ensure sanity during future maintenance.) + * + * For synthetic=TWIN (represented here by null==synthetic), + * also filter out paths that don't involve commutation (without + * it, the synthetic function would collide with the base one). 
+ */ + + boolean nonCommutedOK = null != synthetic; + + paths = paths.stream() + .filter( + p -> p.base.func == func + && (nonCommutedOK || p.fromBase.contains( + OperatorPath.Transform.COMMUTATION)) + ).collect(toList()); + + if ( 0 == paths.size() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s has no derivation path " + + "from an operator that is based on this method%s", + qnameUnwrapped(), + nonCommutedOK ? "" : " and involves commutation"); + ok = false; + } + + if ( ! ok ) + return Set.of(); + + /* + * Select a base. Could there be more than one? As the checks + * for transform inconsistencies above found none, we will + * assume any should be ok, and choose one semi-arbitrarily. + */ + + OperatorPath selected = + paths.stream() + .sorted( + Comparator.comparingInt( + p -> p.fromBase.size()) + .thenComparingInt( + p -> p.fromBase.stream() + .mapToInt(Enum::ordinal) + .max().getAsInt()) + .thenComparing(p -> p.base.qnameUnwrapped())) + .findFirst().get(); + + /* + * At last, the possibly null funcName (synthetic=TWIN case) + * can be fixed up. + */ + if ( null == synthetic ) + { + FunctionImpl f = selected.base.func; + funcName = synthetic = qnameFrom(f.name(), f.schema()); + } + + replaceCommentIfDerived("Operator " + qnameUnwrapped() + + " automatically derived by " + + selected.fromBase + " from " + + selected.base.qnameUnwrapped()); + + boolean commute = selected.fromBase + .contains(OperatorPath.Transform.COMMUTATION); + boolean negate = selected.fromBase + .contains(OperatorPath.Transform.NEGATION); + + if ( operandTypesDiffer && commute ) + { + DBType t = operands[0]; + operands[0] = operands[1]; + operands[1] = t; + } + + syntheticFunction = + func.new Transformed(synthetic, commute, negate, comment()); + } + + recordImplicitTags(); + recordExplicitTags(_provides, _requires); + return null == syntheticFunction + ? 
Set.of(this) : Set.of(syntheticFunction, this); + } + + void recordImplicitTags() + { + Set provides = provideTags(); + Set requires = requireTags(); + + provides.add(new DependTag.Operator(qname, operands)); + + /* + * Commutator and negator often involve cycles. PostgreSQL already + * has its own means of breaking them, so it is not necessary here + * even to declare dependencies based on them. + * + * There is also, for now, no point in declaring dependencies on + * selectivity estimators; they can't be written in Java, so they + * won't be products of this compilation. + * + * So, just require the operand types and the function. + */ + + Arrays.stream(operands) + .filter(Objects::nonNull) + .map(DBType::dependTag) + .filter(Objects::nonNull) + .forEach(requires::add); + + if ( null != func && null == synthetic ) + { + func.provideTags().stream() + .filter(DependTag.Function.class::isInstance) + .forEach(requires::add); + } + else + { + requires.add(new DependTag.Function(funcName, + Arrays.stream(operands) + .filter(Objects::nonNull) + .toArray(DBType[]::new))); + } + } + + /** + * Just to keep things interesting, a schema-qualified operator name is + * wrapped in OPERATOR(...) pretty much everywhere, except as the guest + * of honor in a CREATE OPERATOR or DROP OPERATOR, where the unwrapped + * form is needed. + */ + private String qnameUnwrapped() + { + String local = qname.local().toString(); + Identifier.Simple qualifier = qname.qualifier(); + return null == qualifier ? local : qualifier + "." + local; + } + + /** + * An operator is identified this way in a COMMENT or DROP. + */ + private String commentDropForm() + { + return qnameUnwrapped() + " (" + + (null == operands[0] ? "NONE" : operands[0]) + ", " + + (null == operands[1] ? 
"NONE" : operands[1]) + ")"; + } + + public String[] deployStrings() + { + List al = new ArrayList<>(); + + StringBuilder sb = new StringBuilder(); + + sb.append("CREATE OPERATOR ").append(qnameUnwrapped()); + sb.append(" (\n\tPROCEDURE = ").append(funcName); + + if ( null != operands[0] ) + sb.append(",\n\tLEFTARG = ").append(operands[0]); + + if ( null != operands[1] ) + sb.append(",\n\tRIGHTARG = ").append(operands[1]); + + if ( null != commutator ) + sb.append(",\n\tCOMMUTATOR = ").append(commutator); + + if ( null != negator ) + sb.append(",\n\tNEGATOR = ").append(negator); + + if ( null != restrict ) + sb.append(",\n\tRESTRICT = ").append(restrict); + + if ( null != join ) + sb.append(",\n\tJOIN = ").append(join); + + if ( _hashes ) + sb.append(",\n\tHASHES"); + + if ( _merges ) + sb.append(",\n\tMERGES"); + + sb.append(')'); + + al.add(sb.toString()); + if ( null != comment() ) + al.add( + "COMMENT ON OPERATOR " + commentDropForm() + " IS " + + DDRWriter.eQuote(comment())); + + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + return new String[] + { + "DROP OPERATOR " + commentDropForm() + }; + } + } + + class AggregateImpl + extends Repeatable + implements Aggregate, Snippet, Commentable + { + AggregateImpl(Element e, AnnotationMirror am) + { + super(e, am); + } + + public String[] name() { return qstrings(qname); } + public String[] arguments() { return argsOut(aggregateArgs); } + public String[] directArguments() { return argsOut(directArgs); } + public boolean hypothetical() { return _hypothetical; } + public boolean[] variadic() { return _variadic; } + public Plan[] plan() { return new Plan[]{_plan}; } + public Plan[] movingPlan() { return _movingPlan; } + public Function.Parallel parallel() { return _parallel; } + public String[] sortOperator() { return qstrings(sortop); } + public String[] provides() { return _provides; } + public String[] requires() { return _requires; } + + public boolean _hypothetical; + 
public boolean[] _variadic = {false, false}; + public Plan _plan; + public Plan[] _movingPlan; + public Function.Parallel _parallel; + public String[] _provides; + public String[] _requires; + + FunctionImpl func; + Identifier.Qualified qname; + List> aggregateArgs; + List> directArgs; + Identifier.Qualified sortop; + static final int DIRECT_ARGS = 0; // index into _variadic[] + static final int AGG_ARGS = 1; // likewise + boolean directVariadicExplicit; + + private List> + argsIn(String[] names) + { + return Arrays.stream(names) + .map(DBType::fromNameAndType) + .collect(toList()); + } + + private String[] + argsOut(List> names) + { + return names.stream() + .map(e -> e.getKey() + " " + e.getValue()) + .toArray(String[]::new); + } + + @Override + public String derivedComment( Element e) + { + /* + * When this annotation targets a TYPE, just as a + * place to hang it, there's no particular reason to believe a + * doc comment on the type is a good choice for this aggregate. + * When the annotation is on a method, the chances are better. + */ + if ( ElementKind.METHOD.equals(e.getKind()) ) + return super.derivedComment(e); + return null; + } + + public void setName( Object o, boolean explicit, Element e) + { + if ( explicit ) + qname = qnameFrom(avToArray( o, String.class)); + } + + public void setArguments( Object o, boolean explicit, Element e) + { + if ( explicit ) + aggregateArgs = argsIn( avToArray( o, String.class)); + } + + public void setDirectArguments( Object o, boolean explicit, Element e) + { + if ( explicit ) + directArgs = argsIn( avToArray( o, String.class)); + } + + public void setSortOperator( Object o, boolean explicit, Element e) + { + if ( explicit ) + sortop = operatorNameFrom(avToArray( o, String.class)); + } + + public void setVariadic( Object o, boolean explicit, Element e) + { + if ( ! 
explicit ) + return; + + Boolean[] a = avToArray( o, Boolean.class); + + if ( 1 > a.length || a.length > 2 ) + throw new IllegalArgumentException( + "supply only boolean or {boolean,boolean} for variadic"); + + if ( ! Arrays.asList(a).contains(true) ) + throw new IllegalArgumentException( + "supply variadic= only if aggregated arguments, direct " + + "arguments, or both, are variadic"); + + _variadic[AGG_ARGS] = a[a.length - 1]; + if ( 2 == a.length ) + { + directVariadicExplicit = true; + _variadic[DIRECT_ARGS] = a[0]; + } + } + + public void setPlan( Object o, boolean explicit, Element e) + { + _plan = new Plan(); // always a plan, even if members uninitialized + + if ( explicit ) + _plan = planFrom( _plan, o, e, "plan"); + } + + public void setMovingPlan( Object o, boolean explicit, Element e) + { + if ( ! explicit ) + return; + + _movingPlan = new Plan[1]; + _movingPlan [ 0 ] = planFrom( new Moving(), o, e, "movingPlan"); + } + + Plan planFrom( Plan p, Object o, Element e, String which) + { + AnnotationMirror[] ams = avToArray( o, AnnotationMirror.class); + + if ( 1 != ams.length ) + throw new IllegalArgumentException( + which + " must be given exactly one @Plan"); + + populateAnnotationImpl( p, e, ams[0]); + return p; + } + + public Set characterize() + { + boolean ok = true; + boolean orderedSet = null != directArgs; + boolean moving = null != _movingPlan; + boolean checkAccumulatorSig = false; + boolean checkFinisherSig = false; + boolean unary = false; + + if ( ElementKind.METHOD.equals(m_targetElement.getKind()) ) + { + func = getSnippet(m_targetElement, FunctionImpl.class, + () -> (FunctionImpl)null); + if ( null == func ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "A method annotated with @Aggregate must " + + "also have @Function" + ); + ok = false; + } + } + + if ( null != func ) + { + Identifier.Qualified funcName = + qnameFrom(func.name(), func.schema()); + boolean inferAccumulator = + null == _plan.accumulate || null == aggregateArgs; + 
boolean inferFinisher = + null == _plan.finish && ! inferAccumulator; + boolean stateTypeExplicit = false; + + if ( null == qname ) + { + + if ( inferFinisher && 1 == aggregateArgs.size() + && 1 == func.parameterTypes.length + && func.parameterTypes[0] == + aggregateArgs.get(0).getValue() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Default name %s for this aggregate would " + + "collide with finish function; use name= to " + + "specify a name", funcName + ); + ok = false; + } + else + qname = funcName; + } + + if ( 1 > func.parameterTypes.length ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Function with no arguments cannot be @Aggregate " + + "accumulate or finish function" + ); + ok = false; + } + else if ( null == _plan.stateType ) + { + _plan.stateType = func.parameterTypes[0]; + if (null != _movingPlan + && null == _movingPlan[0].stateType) + _movingPlan[0].stateType = func.parameterTypes[0]; + } + else + stateTypeExplicit = true; + + if ( inferAccumulator || inferFinisher ) + { + if ( ok ) + { + if ( inferAccumulator ) + { + if ( null == aggregateArgs ) + { + aggregateArgs = + func.parameterInfo() + .skip(1) // skip the state argument + .map(pi -> + (Map.Entry) + new AbstractMap.SimpleImmutableEntry<>( + Identifier.Simple.fromJava( + pi.name() + ), + pi.dt + ) + ) + .collect(toList()); + } + else + checkAccumulatorSig = true; + _plan.accumulate = funcName; + if ( null != _movingPlan + && null == _movingPlan[0].accumulate ) + _movingPlan[0].accumulate = funcName; + } + else // inferFinisher + { + _plan.finish = funcName; + if ( null != _movingPlan + && null == _movingPlan[0].finish ) + _movingPlan[0].finish = funcName; + } + } + + if ( stateTypeExplicit + && ! 
_plan.stateType.equals(func.parameterTypes[0]) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "First function argument does not match " + + "stateType specified with @Aggregate" + ); + ok = false; + } + } + else if ( funcName.equals(_plan.accumulate) ) + checkAccumulatorSig = true; + else if ( funcName.equals(_plan.finish) ) + checkFinisherSig = true; + else + { + msg(Kind.WARNING, m_targetElement, m_origin, + "@Aggregate annotation on a method not recognized " + + "as either the accumulate or the finish function " + + "for the aggregate"); + } + + // If the method is the accumulator and is RETURNS_NULL, ensure + // there is either an initialState or a first aggregate arg that + // matches the stateType. + if ( ok && ( inferAccumulator || checkAccumulatorSig ) ) + { + if ( Function.OnNullInput.RETURNS_NULL == func.onNullInput() + && ( 0 == aggregateArgs.size() + || ! _plan.stateType.equals( + aggregateArgs.get(0).getValue()) ) + && null == _plan._initialState ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate without initialState= must have " + + "either a first argument matching the stateType " + + "or an accumulate method with onNullInput=CALLED."); + ok = false; + } + } + } + + if ( null == qname ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate missing name="); + ok = false; + } + + if ( null == aggregateArgs ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate missing arguments="); + ok = false; + } + else + unary = 1 == aggregateArgs.size(); + + if ( null == _plan.stateType ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate missing stateType="); + ok = false; + } + + if ( null == _plan.accumulate ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate plan missing accumulate="); + ok = false; + } + + // Could check argument count against FUNC_MAX_ARGS, but that would + // hardcode an assumed value for PostgreSQL's FUNC_MAX_ARGS. 
+ + // Check that, if a stateType is polymorphic, there are compatible + // polymorphic arg types? Not today. + + // If a plan has no initialState, then either the accumulate + // function must NOT be RETURNS NULL ON NULL INPUT, or the first + // aggregated argument type must be the same as the state type. + // The type check is easy, but the returnsNull check on the + // accumulate function would require looking up the function (and + // still we wouldn't know, if it's not seen in this compilation). + // For another day. + + // Allow hypothetical only for ordered-set aggregate. + if ( _hypothetical && ! orderedSet ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "hypothetical=true is only allowed for an ordered-set " + + "aggregate (one with directArguments specified, " + + "even if only {})"); + ok = false; + } + + // Allow two-element variadic= only for ordered-set aggregate. + if ( directVariadicExplicit && ! orderedSet ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Two values for variadic= are only allowed for an " + + "ordered-set aggregate (one with directArguments " + + "specified, even if only {})"); + ok = false; + } + + // Require a movingPlan to have a remove function. + if ( moving && null == _movingPlan[0].remove ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "a movingPlan must include a remove function"); + ok = false; + } + + // Checks if the aggregated argument list is declared variadic. + // The last element must be an array type or "any"; an ordered-set + // aggregate allows only one argument and it must be "any". 
+ if ( _variadic[AGG_ARGS] ) + { + if ( 1 > aggregateArgs.size() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "To declare the aggregated argument list variadic, " + + "there must be at least one argument."); + ok = false; + } + else + { + DBType t = + aggregateArgs.get(aggregateArgs.size() - 1).getValue(); + boolean isAny = // allow omission of pg_catalog namespace + DT_ANY.equals(t) || "\"any\"".equals(t.toString()); + if ( orderedSet && (! isAny || 1 != aggregateArgs.size()) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "If variadic, an ordered-set aggregate's " + + "aggregated argument list must be only one " + + "argument and of type \"any\"."); + ok = false; + } + else if ( ! isAny && ! t.isArray() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "If variadic, the last aggregated argument must " + + "be an array type (or \"any\")."); + ok = false; + } + } + } + + // Checks specific to ordered-set aggregates. + if ( orderedSet ) + { + if ( 0 == aggregateArgs.size() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "An ordered-set aggregate needs at least one " + + "aggregated argument"); + ok = false; + } + + // Checks specific to hypothetical-set aggregates. + // The aggregated argument types must match the trailing direct + // arguments, and the two variadic declarations must match. + if ( _hypothetical ) + { + if ( _variadic[DIRECT_ARGS] != _variadic[AGG_ARGS] ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "For a hypothetical-set aggregate, neither or " + + "both the direct and aggregated argument lists " + + "must be declared variadic."); + ok = false; + } + if ( directArgs.size() < aggregateArgs.size() + || + ! 
directArgs.subList( + directArgs.size() - aggregateArgs.size(), + directArgs.size()) + .equals(aggregateArgs) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "The last direct arguments of a hypothetical-set " + + "aggregate must match the types of the " + + "aggregated arguments"); + ok = false; + } + } + } + + // It is allowed to omit a finisher function, but some things + // make no sense without one. + if ( orderedSet && null == _plan.finish && 0 < directArgs.size() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Direct arguments serve no purpose without a finisher"); + ok = false; + } + + if ( null == _plan.finish && _plan._polymorphic ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "The polymorphic flag is meaningless with no finisher"); + ok = false; + } + + // The same finisher checks for a movingPlan, if present. + if ( moving ) + { + if ( orderedSet + && null == _movingPlan[0].finish + && directArgs.size() > 0 ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Direct arguments serve no purpose without a finisher"); + ok = false; + } + + if ( null == _movingPlan[0].finish + && _movingPlan[0]._polymorphic ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "The polymorphic flag is meaningless with no finisher"); + ok = false; + } + } + + // Checks involving sortOperator + if ( null != sortop ) + { + if ( orderedSet ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "The sortOperator optimization is not available for " + + "an ordered-set aggregate (one with directArguments)"); + ok = false; + } + + if ( ! 
unary || _variadic[AGG_ARGS] ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "The sortOperator optimization is only available for " + + "a one-argument (and non-variadic) aggregate"); + ok = false; + } + } + + // Checks involving serialize / deserialize + if ( null != _plan.serialize || null != _plan.deserialize ) + { + if ( null == _plan.combine ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "An aggregate plan without combine= may not have " + + "serialize= or deserialize="); + ok = false; + } + + if ( null == _plan.serialize || null == _plan.deserialize ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "An aggregate plan must have both " + + "serialize= and deserialize= or neither"); + ok = false; + } + + if ( ! DT_INTERNAL.equals(_plan.stateType) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Only an aggregate plan with stateType " + + "pg_catalog.internal may have serialize=/deserialize="); + ok = false; + } + } + + if ( ! ok ) + return Set.of(); + + Set requires = requireTags(); + + DBType[] accumulatorSig = + Stream.of( + Stream.of(_plan.stateType), + aggregateArgs.stream().map(Map.Entry::getValue)) + .flatMap(identity()).toArray(DBType[]::new); + + DBType[] combinerSig = { _plan.stateType, _plan.stateType }; + + DBType[] finisherSig = + Stream.of( + Stream.of(_plan.stateType), + orderedSet + ? directArgs.stream().map(Map.Entry::getValue) + : Stream.of(), + _plan._polymorphic + ? aggregateArgs.stream().map(Map.Entry::getValue) + : Stream.of() + ) + .flatMap(identity()) + .toArray(DBType[]::new); + + if ( checkAccumulatorSig + && ! Arrays.equals(accumulatorSig, func.parameterTypes) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate annotation on a method that matches the name " + + "but not argument types expected for the aggregate's " + + "accumulate function"); + ok = false; + } + + if ( checkFinisherSig + && ! 
Arrays.equals(finisherSig, func.parameterTypes) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate annotation on a method that matches the name " + + "but not argument types expected for the aggregate's " + + "finish function"); + ok = false; + } + + requires.add( + new DependTag.Function(_plan.accumulate, accumulatorSig)); + + if ( null != _plan.combine ) + { + DBType[] serialSig = { DT_INTERNAL }; + DBType[] deserialSig = { DT_BYTEA, DT_INTERNAL }; + + requires.add( + new DependTag.Function(_plan.combine, combinerSig)); + + if ( null != _plan.serialize ) + { + requires.add( + new DependTag.Function(_plan.serialize, serialSig)); + requires.add( + new DependTag.Function(_plan.deserialize, deserialSig)); + } + } + + if ( null != _plan.finish ) + requires.add( + new DependTag.Function(_plan.finish, finisherSig)); + + if ( moving ) + { + accumulatorSig[0] = _movingPlan[0].stateType; + Arrays.fill(combinerSig, _movingPlan[0].stateType); + finisherSig[0] = _movingPlan[0].stateType; + + requires.add(new DependTag.Function( + _movingPlan[0].accumulate, accumulatorSig)); + + requires.add(new DependTag.Function( + _movingPlan[0].remove, accumulatorSig)); + + if ( null != _movingPlan[0].combine ) + requires.add(new DependTag.Function( + _movingPlan[0].combine, combinerSig)); + + if ( null != _movingPlan[0].finish ) + requires.add(new DependTag.Function( + _movingPlan[0].finish, finisherSig)); + } + + if ( null != sortop ) + { + DBType arg = aggregateArgs.get(0).getValue(); + DBType[] opSig = { arg, arg }; + requires.add(new DependTag.Operator(sortop, opSig)); + } + + /* + * That establishes dependency on the various support functions, + * which should, transitively, depend on all of the types. But it is + * possible we do not have a whole-program view (perhaps some + * support functions are implemented in other languages, and there + * are @SQLActions setting them up?). Therefore also, redundantly as + * it may be, declare dependency on the types. 
+ */ + + Stream.of( + aggregateArgs.stream().map(Map.Entry::getValue), + orderedSet + ? directArgs.stream().map(Map.Entry::getValue) + : Stream.of(), + Stream.of(_plan.stateType), + moving + ? Stream.of(_movingPlan[0].stateType) + : Stream.of() + ) + .flatMap(identity()) + .map(DBType::dependTag) + .filter(Objects::nonNull) + .forEach(requires::add); + + recordExplicitTags(_provides, _requires); + return Set.of(this); + } + + public String[] deployStrings() + { + List al = new ArrayList<>(); + + StringBuilder sb = new StringBuilder("CREATE AGGREGATE "); + appendNameAndArguments(sb); + sb.append(" ("); + + String[] planStrings = _plan.deployStrings(); + int n = planStrings.length; + for ( String s : planStrings ) + { + sb.append("\n\t").append(s); + if ( 0 < -- n ) + sb.append(','); + } + + if ( null != _movingPlan ) + { + planStrings = _movingPlan[0].deployStrings(); + for ( String s : planStrings ) + sb.append(",\n\tM").append(s); + } + + if ( null != sortop ) + sb.append(",\n\tSORTOP = ").append(sortop); + + if ( Function.Parallel.UNSAFE != _parallel ) + sb.append(",\n\tPARALLEL = ").append(_parallel); + + if ( _hypothetical ) + sb.append(",\n\tHYPOTHETICAL"); + + sb.append(')'); + + al.add(sb.toString()); + + if ( null != comment() ) + { + sb = new StringBuilder("COMMENT ON AGGREGATE "); + appendNameAndArguments(sb); + sb.append(" IS ").append(DDRWriter.eQuote(comment())); + al.add(sb.toString()); + } + + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + StringBuilder sb = new StringBuilder("DROP AGGREGATE "); + appendNameAndArguments(sb); + return new String[] { sb.toString() }; + } + + private void appendNameAndArguments(StringBuilder sb) + { + ListIterator> iter; + Map.Entry entry; + + sb.append(qname).append('('); + if ( null != directArgs ) + { + iter = directArgs.listIterator(); + while ( iter.hasNext() ) + { + entry = iter.next(); + sb.append("\n\t"); + if ( _variadic[DIRECT_ARGS] && ! 
iter.hasNext() ) + sb.append("VARIADIC "); + if ( null != entry.getKey() ) + sb.append(entry.getKey()).append(' '); + sb.append(entry.getValue()); + if ( iter.hasNext() ) + sb.append(','); + else + sb.append("\n\t"); + } + sb.append("ORDER BY"); + } + else if ( 0 == aggregateArgs.size() ) + sb.append('*'); + + iter = aggregateArgs.listIterator(); + while ( iter.hasNext() ) + { + entry = iter.next(); + sb.append("\n\t"); + if ( _variadic[AGG_ARGS] && ! iter.hasNext() ) + sb.append("VARIADIC "); + if ( null != entry.getKey() ) + sb.append(entry.getKey()).append(' '); + sb.append(entry.getValue()); + if ( iter.hasNext() ) + sb.append(','); + } + sb.append(')'); + } + + class Plan extends AbstractAnnotationImpl implements Aggregate.Plan + { + public String stateType() { return stateType.toString(); } + public int stateSize() { return _stateSize; } + public String initialState() { return _initialState; } + public String[] accumulate() { return qstrings(accumulate); } + public String[] combine() { return qstrings(combine); } + public String[] finish() { return qstrings(finish); } + public String[] remove() { return qstrings(remove); } + public String[] serialize() { return qstrings(serialize); } + public String[] deserialize() { return qstrings(deserialize); } + public boolean polymorphic() { return _polymorphic; } + public FinishEffect finishEffect() { return _finishEffect; } + + public int _stateSize; + public String _initialState; + public boolean _polymorphic; + public FinishEffect _finishEffect; + + DBType stateType; + Identifier.Qualified accumulate; + Identifier.Qualified combine; + Identifier.Qualified finish; + Identifier.Qualified remove; + Identifier.Qualified serialize; + Identifier.Qualified deserialize; + + public void setStateType(Object o, boolean explicit, Element e) + { + if ( explicit ) + stateType = DBType.fromSQLTypeAnnotation((String)o); + } + + public void setStateSize(Object o, boolean explicit, Element e) + { + _stateSize = (Integer)o; + if ( 
explicit && 0 >= _stateSize ) + throw new IllegalArgumentException( + "An explicit stateSize must be positive"); + } + + public void setInitialState(Object o, boolean explicit, Element e) + { + if ( explicit ) + _initialState = (String)o; + } + + public void setAccumulate(Object o, boolean explicit, Element e) + { + if ( explicit ) + accumulate = qnameFrom(avToArray( o, String.class)); + } + + public void setCombine(Object o, boolean explicit, Element e) + { + if ( explicit ) + combine = qnameFrom(avToArray( o, String.class)); + } + + public void setFinish(Object o, boolean explicit, Element e) + { + if ( explicit ) + finish = qnameFrom(avToArray( o, String.class)); + } + + public void setRemove(Object o, boolean explicit, Element e) + { + if ( explicit ) + throw new IllegalArgumentException( + "Only a movingPlan may have a remove function"); + } + + public void setSerialize(Object o, boolean explicit, Element e) + { + if ( explicit ) + serialize = qnameFrom(avToArray( o, String.class)); + } + + public void setDeserialize(Object o, boolean explicit, Element e) + { + if ( explicit ) + deserialize = qnameFrom(avToArray( o, String.class)); + } + + public void setFinishEffect( Object o, boolean explicit, Element e) + { + if ( explicit ) + _finishEffect = FinishEffect.valueOf( + ((VariableElement)o).getSimpleName().toString()); + } + + public Set characterize() + { + return Set.of(); + } + + /** + * Returns one string per plan element (not per SQL statement). + *

+ * This method has to be here anyway because the class extends + * {@code AbstractAnnotationImpl}, but it will never be processed as + * an actual SQL snippet. This will be called by the containing + * {@code AggregateImpl} and return the individual plan elements + * that it will build into its own deploy strings. + *

+ * When this class represents a moving plan, the caller will prefix + * each of these strings with {@code M}. + */ + public String[] deployStrings() + { + List al = new ArrayList<>(); + + al.add("STYPE = " + stateType); + + if ( 0 != _stateSize ) + al.add("SSPACE = " + _stateSize); + + if ( null != _initialState ) + al.add("INITCOND = " + DDRWriter.eQuote(_initialState)); + + al.add("SFUNC = " + accumulate); + + if ( null != remove ) + al.add("INVFUNC = " + remove); + + if ( null != finish ) + al.add("FINALFUNC = " + finish); + + if ( _polymorphic ) + al.add("FINALFUNC_EXTRA"); + + if ( null != _finishEffect ) + al.add("FINALFUNC_MODIFY = " + _finishEffect); + + if ( null != combine ) + al.add("COMBINEFUNC = " + combine); + + if ( null != serialize ) + al.add("SERIALFUNC = " + serialize); + + if ( null != deserialize ) + al.add("DESERIALFUNC = " + deserialize); + + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + return null; + } + } + + class Moving extends Plan + { + public void setRemove(Object o, boolean explicit, Element e) + { + if ( explicit ) + remove = qnameFrom(avToArray( o, String.class)); + } + + public void setSerialize(Object o, boolean explicit, Element e) + { + if ( explicit ) + throw new IllegalArgumentException( + "Only a (non-moving) plan may have a " + + "serialize function"); + } + + public void setDeserialize(Object o, boolean explicit, Element e) + { + if ( explicit ) + throw new IllegalArgumentException( + "Only a (non-moving) plan may have a " + + "deserialize function"); + } + } + } + + /** + * Provides the default mappings from Java types to SQL types. 
+ */ + class TypeMapper + { + ArrayList> protoMappings; + ArrayList> finalMappings; + + TypeMapper() + { + protoMappings = new ArrayList<>(); + + // Primitives (these need not, indeed cannot, be schema-qualified) + // + this.addMap(boolean.class, DT_BOOLEAN); + this.addMap(Boolean.class, DT_BOOLEAN); + this.addMap(byte.class, "smallint"); + this.addMap(Byte.class, "smallint"); + this.addMap(char.class, "smallint"); + this.addMap(Character.class, "smallint"); + this.addMap(double.class, "double precision"); + this.addMap(Double.class, "double precision"); + this.addMap(float.class, "real"); + this.addMap(Float.class, "real"); + this.addMap(int.class, DT_INTEGER); + this.addMap(Integer.class, DT_INTEGER); + this.addMap(long.class, "bigint"); + this.addMap(Long.class, "bigint"); + this.addMap(short.class, "smallint"); + this.addMap(Short.class, "smallint"); + + // Known common mappings + // + this.addMap(Number.class, "pg_catalog", "numeric"); + this.addMap(String.class, "pg_catalog", "varchar"); + this.addMap(java.util.Date.class, "pg_catalog", "timestamp"); + this.addMap(Timestamp.class, "pg_catalog", "timestamp"); + this.addMap(Time.class, "pg_catalog", "time"); + this.addMap(java.sql.Date.class, "pg_catalog", "date"); + this.addMap(java.sql.SQLXML.class, "pg_catalog", "xml"); + this.addMap(BigInteger.class, "pg_catalog", "numeric"); + this.addMap(BigDecimal.class, "pg_catalog", "numeric"); + this.addMap(ResultSet.class, DT_RECORD); + this.addMap(Object.class, DT_ANY); + + this.addMap(byte[].class, DT_BYTEA); + + this.addMap(LocalDate.class, "pg_catalog", "date"); + this.addMap(LocalTime.class, "pg_catalog", "time"); + this.addMap(OffsetTime.class, "pg_catalog", "timetz"); + this.addMap(LocalDateTime.class, "pg_catalog", "timestamp"); + this.addMap(OffsetDateTime.class, "pg_catalog", "timestamptz"); + } + + private boolean mappingsFrozen() + { + return null != finalMappings; + } + + /* * What worked in Java 6 was to keep a list of Class -> sqltype * mappings, and 
get TypeMirrors from the Classes at the time of trying * to identify types (in the final, after-all-sources-processed round). @@ -2866,13 +5311,7 @@ private TypeMirror typeMirrorFromClass( Class k) return null; } - TypeElement te = elmu.getTypeElement( cname); - if ( null == te ) - { - msg( Kind.WARNING, "Found no TypeElement for %s", cname); - return null; // hope it wasn't one we'll need! - } - return te.asType(); + return declaredTypeForClass(k); } /** @@ -2904,6 +5343,18 @@ void addMap(Class k, String schema, String local) new DBType.Named(qnameFrom(local, schema))); } + /** + * Add a custom mapping from a Java class to an SQL type + * already in the form of a {@code DBType}. + * + * @param k Class representing the Java type + * @param DBType representing the SQL type to be used + */ + void addMap(Class k, DBType type) + { + addMap( typeMirrorFromClass( k), type); + } + /** * Add a custom mapping from a Java class to an SQL type, if a class * with the given name exists. @@ -3174,6 +5625,57 @@ Identifier.Qualified qnameFrom(String name) { return Identifier.Qualified.nameFromJava(name, msgr); } + + /** + * Return an {@code Identifier.Qualified} from an array of Java strings + * representing schema and local name separately if of length two, or as by + * {@link #qnameFrom(String)} if of length one; invalid if of any other + * length. + *

+ * The first of two elements may be explicitly {@code ""} to produce a + * qualified name with null qualifier. + */ + Identifier.Qualified qnameFrom(String[] names) + { + switch ( names.length ) + { + case 2: return qnameFrom(names[1], names[0]); + case 1: return qnameFrom(names[0]); + default: + throw new IllegalArgumentException( + "Only a one- or two-element String array is accepted"); + } + } + + /** + * Like {@link #qnameFrom(String[])} but for an operator name. + */ + Identifier.Qualified operatorNameFrom(String[] names) + { + switch ( names.length ) + { + case 2: + Identifier.Simple qualifier = null; + if ( ! names[0].isEmpty() ) + qualifier = Identifier.Simple.fromJava(names[0], msgr); + return Identifier.Operator.from(names[1], msgr) + .withQualifier(qualifier); + case 1: + return Identifier.Qualified.operatorFromJava(names[0], msgr); + default: + throw new IllegalArgumentException( + "Only a one- or two-element String array is accepted"); + } + } + + String[] qstrings(Identifier.Qualified qname) + { + if ( null == qname ) + return null; + Identifier.Simple q = qname.qualifier(); + String local = qname.local().toString(); + return new String[] { null == q ? null : q.toString(), local }; + } } /** @@ -3241,10 +5743,14 @@ default DependTag implementorTag() * undeployStrings() can be called. May also check for and report semantic * errors that are not easily checked earlier while populating the * element/value pairs. - * @return true if this Snippet is standalone and should be scheduled and - * emitted based on provides/requires; false if something else will emit it. + * @return A set of snippets that are now prepared and should be added to + * the graph to be scheduled and emitted according to provides/requires. + * Typically Set.of(this) if all went well, or Set.of() in case of an error + * or when the snippet will be emitted by something else. In some cases a + * characterize method can return additional snippets that are ready to be + * scheduled. 
*/ - public boolean characterize(); + public Set characterize(); /** * If it is possible to break an ordering cycle at this snippet, return a @@ -3516,7 +6022,7 @@ class ImpProvider implements Snippet @Override public String[] undeployStrings() { return s.deployStrings(); } @Override public Set provideTags() { return s.provideTags(); } @Override public Set requireTags() { return s.requireTags(); } - @Override public boolean characterize() { return s.characterize(); } + @Override public Set characterize() { return s.characterize(); } } /** @@ -3684,6 +6190,30 @@ public final boolean equals(Object o, Messager msgr) ")" ); + /** + * Parse a string, representing an optional parameter/column name followed + * by a type, into an {@code Identifier.Simple}, possibly null, and a + * {@code DBType}. + *

+ * Whitespace (or, strictly, separator; comments would be accepted) must + * separate the name from the type, if the name is not quoted. To omit a + * name and supply only the type, the string must begin with whitespace + * (ahem, separator). + */ + static Map.Entry fromNameAndType(String nandt) + { + Identifier.Simple name = null; + Matcher m = ISO_AND_PG_IDENTIFIER_CAPTURING.matcher(nandt); + if ( m.lookingAt() ) + { + nandt = nandt.substring(m.end()); + name = identifierFrom(m); + } + return + new AbstractMap.SimpleImmutableEntry<>( + name, fromSQLTypeAnnotation(nandt)); + } + /** * Make a {@code DBType} from whatever might appear in an {@code SQLType} * annotation. @@ -3722,6 +6252,7 @@ static DBType fromSQLTypeAnnotation(String value) Matcher m = SEPARATOR.matcher(value); separator(m, false); + int postSeparator = m.regionStart(); if ( m.usePattern(ISO_AND_PG_IDENTIFIER_CAPTURING).lookingAt() ) { @@ -3821,7 +6352,8 @@ else if ( null != qname.qualifier() ) DBType result; if ( reserved ) - result = new DBType.Reserved(value.substring(0, m.regionEnd())); + result = new DBType.Reserved( + value.substring(postSeparator, m.regionEnd())); else { result = new DBType.Named(qname); @@ -4276,6 +6808,54 @@ public boolean equals(Object o, Messager msgr) } return true; } + + @Override + public String toString() + { + return super.toString() + Arrays.toString(m_signature); + } + } + + static final class Operator + extends Named> + { + private DBType[] m_signature; + + Operator( + Identifier.Qualified value, DBType[] signature) + { + super(requireNonNull(value)); + assert 2 == signature.length : "invalid Operator signature length"; + m_signature = signature.clone(); + } + + @Override + public boolean equals(Object o, Messager msgr) + { + if ( ! 
super.equals(o, msgr) ) + return false; + Operator op = (Operator)o; + if ( m_signature.length != op.m_signature.length ) + return false; + for ( int i = 0; i < m_signature.length; ++ i ) + { + if ( null == m_signature[i] || null == op.m_signature[i] ) + { + if ( m_signature[i] != op.m_signature[i] ) + return false; + continue; + } + if ( ! m_signature[i].equals(op.m_signature[i], msgr) ) + return false; + } + return true; + } + + @Override + public String toString() + { + return super.toString() + Arrays.toString(m_signature); + } } } @@ -4290,6 +6870,14 @@ class ParameterInfo final SQLType st; final DBType dt; + String name() + { + String name = null == st ? null : st.name(); + if ( null == name ) + name = ve.getSimpleName().toString(); + return name; + } + ParameterInfo(TypeMirror m, VariableElement e, SQLType t, DBType d) { tm = m; diff --git a/pljava-api/src/test/java/LexicalsTest.java b/pljava-api/src/test/java/LexicalsTest.java index ec58a2a45..e12ca9674 100644 --- a/pljava-api/src/test/java/LexicalsTest.java +++ b/pljava-api/src/test/java/LexicalsTest.java @@ -20,6 +20,7 @@ import static org.junit.Assert.*; import static org.hamcrest.CoreMatchers.*; +import static org.hamcrest.MatcherAssert.assertThat; import static org.postgresql.pljava.sqlgen.Lexicals.ISO_AND_PG_IDENTIFIER_CAPTURING; diff --git a/pljava-examples/pom.xml b/pljava-examples/pom.xml index 9d6f7b468..320788c27 100644 --- a/pljava-examples/pom.xml +++ b/pljava-examples/pom.xml @@ -4,7 +4,7 @@ org.postgresql pljava.app - 1.6.0-SNAPSHOT + 1.6-SNAPSHOT pljava-examples PL/Java examples diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Aggregates.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Aggregates.java new file mode 100644 index 000000000..500d18bd8 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Aggregates.java @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, 
as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import static java.lang.Math.fma; + +import java.sql.ResultSet; +import java.sql.SQLException; + +import org.postgresql.pljava.annotation.Aggregate; +import org.postgresql.pljava.annotation.Function; +import static + org.postgresql.pljava.annotation.Function.OnNullInput.RETURNS_NULL; +import static org.postgresql.pljava.annotation.Function.Effects.IMMUTABLE; +import org.postgresql.pljava.annotation.SQLAction; + +/** + * A class demonstrating several aggregate functions. + *

+ * They are (some of) the same two-variable statistical aggregates already + * offered in core PostgreSQL, just because they make clear examples. For + * numerical reasons, they might not produce results identical to PG's built-in + * ones. These closely follow the "schoolbook" formulas in the HP-11C calculator + * owner's handbook, while the ones built into PostgreSQL use a more clever + * algorithm instead to reduce rounding error in the finishers. + *

+ * All these aggregates can be computed by different finishers that share a + * state that accumulates the count of rows, sum of x, sum of xx, sum of y, sum + * of yy, and sum of xy. That is easy with finishers that don't need to modify + * the state, so the default {@code FinishEffect=READ_ONLY} is appropriate. + *

+ * Everything here takes the y parameter first, then x, like the SQL ones. + */ +@SQLAction(requires = { "avgx", "avgy", "slope", "intercept" }, install = { + "WITH" + + " data (y, x) AS (VALUES" + + " (1.761 ::float8, 5.552::float8)," + + " (1.775, 5.963)," + + " (1.792, 6.135)," + + " (1.884, 6.313)," + + " (1.946, 6.713)" + + " )," + + " expected (avgx, avgy, slope, intercept) AS (" + + " SELECT 6.1352, 1.8316, 0.1718, 0.7773" + + " )," + + " got AS (" + + " SELECT" + + " round( avgx(y,x)::numeric, 4) AS avgx," + + " round( avgy(y,x)::numeric, 4) AS avgy," + + " round( slope(y,x)::numeric, 4) AS slope," + + " round(intercept(y,x)::numeric, 4) AS intercept" + + " FROM" + + " data" + + " )" + + "SELECT" + + " CASE WHEN expected IS NOT DISTINCT FROM got" + + " THEN javatest.logmessage('INFO', 'aggregate examples ok')" + + " ELSE javatest.logmessage('WARNING', 'aggregate examples ng')" + + " END" + + " FROM" + + " expected, got" +}) +@Aggregate(provides = "avgx", + name = { "javatest", "avgx" }, + arguments = { "y double precision", "x double precision" }, + plan = @Aggregate.Plan( + stateType = "double precision[]", + /* + * State size is merely a hint to PostgreSQL's planner and can + * be omitted. Perhaps it is worth hinting, as the state type + * "double precision[]" does not tell PostgreSQL how large the array + * might be. Anyway, this is an example and should show how to do it. + * For this aggregate, the state never grows; the size of the initial + * value is the size forever. 
+ * + * To get a quick sense of the size, one can assign the initial state + * as the default for a table column, then consult the pg_node_tree for + * the attribute default entry: + * + * CREATE TEMPORARY TABLE + * foo (bar DOUBLE PRECISION[] DEFAULT '{0,0,0,0,0,0}'); + * + * SELECT + * xpath('/CONST/member[@name="constvalue"]/@length', + * javatest.pgNodeTreeAsXML(adbin) ) + * FROM pg_attrdef + * WHERE adrelid = 'foo'::regclass; + * + * In this case the 72 that comes back represents 48 bytes for six + * float8s, plus 24 for varlena and array overhead, with no null bitmap + * because no element is null. + */ + stateSize = 72, + initialState = "{0,0,0,0,0,0}", + accumulate = { "javatest", "accumulateXY" }, + finish = { "javatest", "finishAvgX" } + ) +) +@Aggregate(provides = "avgy", + name = { "javatest", "avgy" }, + arguments = { "y double precision", "x double precision" }, + plan = @Aggregate.Plan( + stateType = "double precision[]", + stateSize = 72, + initialState = "{0,0,0,0,0,0}", + accumulate = { "javatest", "accumulateXY" }, + finish = { "javatest", "finishAvgY" } + ) +) +@Aggregate(provides = "slope", + name = { "javatest", "slope" }, + arguments = { "y double precision", "x double precision" }, + plan = @Aggregate.Plan( + stateType = "double precision[]", + stateSize = 72, + initialState = "{0,0,0,0,0,0}", + accumulate = { "javatest", "accumulateXY" }, + finish = { "javatest", "finishSlope" } + ) +) +@Aggregate(provides = "intercept", + name = { "javatest", "intercept" }, + arguments = { "y double precision", "x double precision" }, + plan = @Aggregate.Plan( + stateType = "double precision[]", + stateSize = 72, + initialState = "{0,0,0,0,0,0}", + accumulate = { "javatest", "accumulateXY" }, + finish = { "javatest", "finishIntercept" } + ) +) +@Aggregate( + name = "javatest.regression", + arguments = { "y double precision", "x double precision" }, + plan = @Aggregate.Plan( + stateType = "double precision[]", + stateSize = 72, + initialState = 
"{0,0,0,0,0,0}", + accumulate = { "javatest", "accumulateXY" }, + finish = { "javatest", "finishRegr" } + ), + /* + * There is no special reason for this aggregate and not the others to have + * a movingPlan; one example is enough, that's all. + */ + movingPlan = @Aggregate.Plan( + stateType = "double precision[]", + stateSize = 72, + initialState = "{0,0,0,0,0,0}", + accumulate = { "javatest", "accumulateXY" }, + remove = { "javatest", "removeXY" }, + finish = { "javatest", "finishRegr" } + ) +) +public class Aggregates +{ + private Aggregates() { } // do not instantiate + + private static final int N = 0; + private static final int SX = 1; + private static final int SXX = 2; + private static final int SY = 3; + private static final int SYY = 4; + private static final int SXY = 5; + + /** + * A common accumulator for two-variable statistical aggregates that + * depend on n, Sx, Sxx, Sy, Syy, and Sxy. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static double[] accumulateXY(double[] state, double y, double x) + { + state[N ] += 1.; + state[SX ] += x; + state[SXX] = fma(x, x, state[2]); + state[SY ] += y; + state[SYY] = fma(y, y, state[4]); + state[SXY] = fma(x, y, state[5]); + return state; + } + + /** + * 'Removes' from the state a row previously accumulated, for possible use + * in a window with a moving frame start. + *

+ * This can be a numerically poor idea for exactly the reasons covered in + * the PostgreSQL docs involving loss of significance in long sums, but it + * does demonstrate the idea. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static double[] removeXY(double[] state, double y, double x) + { + state[N ] -= 1.; + state[SX ] -= x; + state[SXX] = fma(x, -x, state[2]); + state[SY ] -= y; + state[SYY] = fma(y, -y, state[4]); + state[SXY] = fma(x, -y, state[5]); + return state; + } + + /** + * Finisher that returns the count of non-null rows accumulated. + *

+ * As an alternative to collecting all {@code @Aggregate} annotations up at + * the top of the class and specifying everything explicitly, an + * {@code @Aggregate} annotation can be placed on a method, either + * the accumulator or the finisher, in which case less needs to be + * specified. The state type can always be determined from the annotated + * method (whether it is the accumulator or the finisher), and its SQL name + * will be the default name for the aggregate also. When the method is the + * accumulator, the aggregate's arguments are also determined. + *

+ * This being a finisher method, the {@code @Aggregate} annotation placed + * here does need to specify the arguments, initial state, and accumulator. + */ + @Aggregate( + arguments = { "y double precision", "x double precision" }, + plan = @Aggregate.Plan( + stateSize = 72, + initialState = "{0,0,0,0,0,0}", + accumulate = { "javatest", "accumulateXY" } + ) + ) + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static long count(double[] state) + { + return (long)state[N]; + } + + /** + * Finisher that returns the mean of the accumulated x values. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static Double finishAvgX(double[] state) + { + if ( 0. == state[N] ) + return null; + return state[SX] / state[N]; + } + + /** + * Finisher that returns the mean of the accumulated y values. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static Double finishAvgY(double[] state) + { + if ( 0. == state[N] ) + return null; + return state[SY] / state[N]; + } + + /** + * Finisher that returns the slope of a regression line. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static Double finishSlope(double[] state) + { + if ( 2. > state[N] ) + return null; + + double numer = fma(state[SX], -state[SY], state[N] * state[SXY]); + double denom = fma(state[SX], -state[SX], state[N] * state[SXX]); + return 0. == denom ? null : numer / denom; + } + + /** + * Finisher that returns the intercept of a regression line. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static Double finishIntercept(double[] state) + { + if ( 2 > state[N] ) + return null; + + double numer = fma(state[SY], state[SXX], -state[SX] * state[SXY]); + double denom = fma(state[SX], -state[SX], state[N] * state[SXX]); + return 0. == denom ? 
null : numer / denom; + } + + /** + * A finisher that returns the slope and intercept together. + *

+ * An aggregate can be built over this finisher and will return a record + * result, but at present (PG 13) access to that record by field doesn't + * work, as its tuple descriptor gets lost along the way. Unclear so far + * whether it might be feasible to fix that. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL, + out = { "slope double precision", "intercept double precision" } + ) + public static boolean finishRegr(double[] state, ResultSet out) + throws SQLException + { + out.updateObject(1, finishSlope(state)); + out.updateObject(2, finishIntercept(state)); + return true; + } + + /** + * An example aggregate that sums its input. + *

+ * The simplest kind of aggregate, having only an accumulate function, + * default initial state, and no finisher (the state value is the return) + * can be declared very concisely by annotating the accumulate method. + */ + @Aggregate + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static double sum(double state, double x) + { + return state + x; + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ComplexScalar.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ComplexScalar.java index 9e643cab9..064bbea62 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ComplexScalar.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ComplexScalar.java @@ -15,13 +15,21 @@ import java.io.IOException; import java.io.StreamTokenizer; import java.io.StringReader; + +import static java.lang.Math.hypot; + import java.sql.SQLData; import java.sql.SQLException; import java.sql.SQLInput; import java.sql.SQLOutput; + import java.util.logging.Logger; +import org.postgresql.pljava.annotation.Aggregate; import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.Operator; +import static org.postgresql.pljava.annotation.Operator.SELF; +import static org.postgresql.pljava.annotation.Operator.TWIN; import org.postgresql.pljava.annotation.SQLAction; import org.postgresql.pljava.annotation.BaseUDT; @@ -31,11 +39,56 @@ /** * Complex (re and im parts are doubles) implemented in Java as a scalar UDT. + *

+ * The {@code SQLAction} here demonstrates a {@code requires} tag + * ("complex relationals"} that has multiple providers, something not allowed + * prior to PL/Java 1.6.1. It is more succinct to require one tag and have each + * of the relational operators 'provide' it than to have to define and require + * several different tags to accomplish the same thing. + *

+ * The operator class created here is not actively used for anything (the + * examples will not break if it is removed), but the {@code minMagnitude} + * example aggregate does specify a {@code sortOperator}, which PostgreSQL will + * not exploit in query optimization without finding it as a member of + * a {@code btree} operator class. + *

+ * Note that {@code CREATE OPERATOR CLASS} implicitly creates an operator family + * as well (unless one is explicitly specified), so the correct {@code remove} + * action to clean everything up is {@code DROP OPERATOR FAMILY} (which takes + * care of dropping the class). */ -@SQLAction(requires = "complex assertHasValues", +@SQLAction(requires = { "complex assertHasValues", "complex relationals" }, install = { + "CREATE OPERATOR CLASS javatest.complex_ops" + + " DEFAULT FOR TYPE javatest.complex USING btree" + + " AS" + + " OPERATOR 1 javatest.< ," + + " OPERATOR 2 javatest.<= ," + + " OPERATOR 3 javatest.= ," + + " OPERATOR 4 javatest.>= ," + + " OPERATOR 5 javatest.> ," + + " FUNCTION 1 javatest.cmpMagnitude(javatest.complex,javatest.complex)", + + "SELECT javatest.assertHasValues(" + + " CAST('(1,2)' AS javatest.complex), 1, 2)", + "SELECT javatest.assertHasValues(" + - " CAST('(1,2)' AS javatest.complex), 1, 2)" + " 2.0 + CAST('(1,2)' AS javatest.complex) + 3.0, 6, 2)", + + "SELECT" + + " CASE WHEN" + + " '(1,2)'::javatest.complex < '(2,2)'::javatest.complex" + + " AND" + + " '(2,2)'::javatest.complex > '(1,2)'::javatest.complex" + + " AND" + + " '(1,2)'::javatest.complex <= '(2,2)'::javatest.complex" + + " THEN javatest.logmessage('INFO', 'ComplexScalar operators ok')" + + " ELSE javatest.logmessage('WARNING', 'ComplexScalar operators ng')" + + " END" + }, + + remove = { + "DROP OPERATOR FAMILY javatest.complex_ops USING btree" } ) @BaseUDT(schema="javatest", name="complex", @@ -45,9 +98,16 @@ public class ComplexScalar implements SQLData { /** * Return the same 'complex' passed in, logging its contents at level INFO. + *

+ * Also create an unnecessary {@code <<} operator for this, with an equally + * unnecessary explicit operand type, simply as a regression test + * of issue #330. * @param cpl any instance of this UDT * @return the same instance passed in */ + @Operator( + name = "javatest.<<", right = "javatest.complex" + ) @Function( schema="javatest", name="logcomplex", effects=IMMUTABLE, onNullInput=RETURNS_NULL) @@ -86,7 +146,7 @@ public static ComplexScalar parse(String input, String typeName) && tz.nextToken() == StreamTokenizer.TT_NUMBER) { double y = tz.nval; if (tz.nextToken() == ')') { - s_logger.info(typeName + " from string"); + s_logger.fine(typeName + " from string"); return new ComplexScalar(x, y, typeName); } } @@ -107,6 +167,122 @@ public static ComplexScalar parse(String input, String typeName) public ComplexScalar() { } + /** + * Add two instances of {@code ComplexScalar}. + */ + @Operator(name = {"javatest","+"}, commutator = SELF) + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static ComplexScalar add(ComplexScalar a, ComplexScalar b) + { + return new ComplexScalar( + a.m_x + b.m_x, a.m_y + b.m_y, a.m_typeName); + } + + /** + * Add a {@code ComplexScalar} and a real (supplied as a {@code double}). + */ + @Operator(name = {"javatest","+"}, commutator = TWIN) + @Operator(name = {"javatest","+"}, synthetic = TWIN) + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static ComplexScalar add(ComplexScalar a, double b) + { + return new ComplexScalar(a.m_x + b, a.m_y, a.m_typeName); + } + + /** + * True if the left argument is smaller than the right in magnitude + * (Euclidean distance from the origin). 
+ */ + @Operator( + name = "javatest.<", + commutator = "javatest.>", negator = "javatest.>=", + provides = "complex relationals" + ) + @Operator( + name = "javatest.<=", synthetic = "javatest.magnitudeLE", + provides = "complex relationals" + ) + @Operator( + name = "javatest.>=", synthetic = "javatest.magnitudeGE", + commutator = "javatest.<=", provides = "complex relationals" + ) + @Operator( + name = "javatest.>", synthetic = "javatest.magnitudeGT", + negator = "javatest.<=", provides = "complex relationals" + ) + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static boolean magnitudeLT(ComplexScalar a, ComplexScalar b) + { + return hypot(a.m_x, a.m_y) < hypot(b.m_x, b.m_y); + } + + /** + * True if the left argument and the right are componentwise equal. + */ + @Operator( + name = "javatest.=", + commutator = SELF, negator = "javatest.<>", + provides = "complex relationals" + ) + @Operator( + name = "javatest.<>", synthetic = "javatest.componentsNE", + commutator = SELF, provides = "complex relationals" + ) + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static boolean componentsEQ(ComplexScalar a, ComplexScalar b) + { + return a.m_x == b.m_x && a.m_y == b.m_y; + } + + /** + * As an ordinary function, returns the lesser in magnitude of two + * arguments; as a simple aggregate, returns the least in magnitude over its + * aggregated arguments. + *

+ * As an aggregate, this is a simple example where this method serves as the + * {@code accumulate} function, the state (a here) has the same + * type as the argument (here b), there is no {@code finish} + * function, and the final value of the state is the result. + *

+ * An optimization is available in case there is an index on the aggregated + * values based on the {@code <} operator above; in that case, the first + * value found in a scan of that index is the aggregate result. That is + * indicated here by naming the {@code <} operator as {@code sortOperator}. + */ + @Aggregate(sortOperator = "javatest.<") + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static ComplexScalar minMagnitude(ComplexScalar a, ComplexScalar b) + { + return magnitudeLT(a, b) ? a : b; + } + + /** + * An integer-returning comparison function by complex magnitude, usable to + * complete an example {@code btree} operator class. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL, + provides = "complex relationals" + ) + public static int cmpMagnitude(ComplexScalar a, ComplexScalar b) + { + if ( magnitudeLT(a, b) ) + return -1; + if ( magnitudeLT(b, a) ) + return 1; + return 0; + } + public ComplexScalar(double x, double y, String typeName) { m_x = x; m_y = y; @@ -121,7 +297,7 @@ public String getSQLTypeName() { @Function(effects=IMMUTABLE, onNullInput=RETURNS_NULL) @Override public void readSQL(SQLInput stream, String typeName) throws SQLException { - s_logger.info(typeName + " from SQLInput"); + s_logger.fine(typeName + " from SQLInput"); m_x = stream.readDouble(); m_y = stream.readDouble(); m_typeName = typeName; @@ -130,7 +306,7 @@ public void readSQL(SQLInput stream, String typeName) throws SQLException { @Function(effects=IMMUTABLE, onNullInput=RETURNS_NULL) @Override public String toString() { - s_logger.info(m_typeName + " toString"); + s_logger.fine(m_typeName + " toString"); StringBuffer sb = new StringBuffer(); sb.append('('); sb.append(m_x); @@ -143,7 +319,7 @@ public String toString() { @Function(effects=IMMUTABLE, onNullInput=RETURNS_NULL) @Override public void writeSQL(SQLOutput stream) throws SQLException { - s_logger.info(m_typeName + " to SQLOutput"); + 
s_logger.fine(m_typeName + " to SQLOutput"); stream.writeDouble(m_x); stream.writeDouble(m_y); } diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ConditionalDDR.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ConditionalDDR.java index ac26d8a95..0140f4372 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ConditionalDDR.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ConditionalDDR.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015- Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -12,7 +12,6 @@ package org.postgresql.pljava.example.annotation; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; /** * Test of a very simple form of conditional execution in the deployment @@ -68,71 +67,69 @@ * several statements setting PostgreSQL-version-based implementor tags that * are relied on by various other examples in this directory. 
*/ -@SQLActions({ - @SQLAction(provides={"LifeIsGood","LifeIsNotGood"}, install= - "SELECT CASE 42 WHEN 42 THEN " + - " set_config('pljava.implementors', 'LifeIsGood,' || " + - " current_setting('pljava.implementors'), true) " + - "ELSE " + - " set_config('pljava.implementors', 'LifeIsNotGood,' || " + - " current_setting('pljava.implementors'), true) " + - "END" - ), +@SQLAction(provides={"LifeIsGood","LifeIsNotGood"}, install= + "SELECT CASE 42 WHEN 42 THEN " + + " set_config('pljava.implementors', 'LifeIsGood,' || " + + " current_setting('pljava.implementors'), true) " + + "ELSE " + + " set_config('pljava.implementors', 'LifeIsNotGood,' || " + + " current_setting('pljava.implementors'), true) " + + "END" +) - @SQLAction(implementor="LifeIsGood", install= - "SELECT javatest.logmessage('INFO', 'Looking good!')" - ), +@SQLAction(implementor="LifeIsGood", install= + "SELECT javatest.logmessage('INFO', 'Looking good!')" +) - @SQLAction(implementor="LifeIsNotGood", install= - "SELECT javatest.logmessage('WARNING', 'This should not be executed')" - ), +@SQLAction(implementor="LifeIsNotGood", install= + "SELECT javatest.logmessage('WARNING', 'This should not be executed')" +) - @SQLAction(provides="postgresql_ge_80300", install= - "SELECT CASE WHEN" + - " 80300 <= CAST(current_setting('server_version_num') AS integer)" + - " THEN set_config('pljava.implementors', 'postgresql_ge_80300,' || " + - " current_setting('pljava.implementors'), true) " + - "END" - ), +@SQLAction(provides="postgresql_ge_80300", install= + "SELECT CASE WHEN" + + " 80300 <= CAST(current_setting('server_version_num') AS integer)" + + " THEN set_config('pljava.implementors', 'postgresql_ge_80300,' || " + + " current_setting('pljava.implementors'), true) " + + "END" +) - @SQLAction(provides="postgresql_ge_80400", install= - "SELECT CASE WHEN" + - " 80400 <= CAST(current_setting('server_version_num') AS integer)" + - " THEN set_config('pljava.implementors', 'postgresql_ge_80400,' || " + - " 
current_setting('pljava.implementors'), true) " + - "END" - ), +@SQLAction(provides="postgresql_ge_80400", install= + "SELECT CASE WHEN" + + " 80400 <= CAST(current_setting('server_version_num') AS integer)" + + " THEN set_config('pljava.implementors', 'postgresql_ge_80400,' || " + + " current_setting('pljava.implementors'), true) " + + "END" +) - @SQLAction(provides="postgresql_ge_90000", install= - "SELECT CASE WHEN" + - " 90000 <= CAST(current_setting('server_version_num') AS integer)" + - " THEN set_config('pljava.implementors', 'postgresql_ge_90000,' || " + - " current_setting('pljava.implementors'), true) " + - "END" - ), +@SQLAction(provides="postgresql_ge_90000", install= + "SELECT CASE WHEN" + + " 90000 <= CAST(current_setting('server_version_num') AS integer)" + + " THEN set_config('pljava.implementors', 'postgresql_ge_90000,' || " + + " current_setting('pljava.implementors'), true) " + + "END" +) - @SQLAction(provides="postgresql_ge_90100", install= - "SELECT CASE WHEN" + - " 90100 <= CAST(current_setting('server_version_num') AS integer)" + - " THEN set_config('pljava.implementors', 'postgresql_ge_90100,' || " + - " current_setting('pljava.implementors'), true) " + - "END" - ), +@SQLAction(provides="postgresql_ge_90100", install= + "SELECT CASE WHEN" + + " 90100 <= CAST(current_setting('server_version_num') AS integer)" + + " THEN set_config('pljava.implementors', 'postgresql_ge_90100,' || " + + " current_setting('pljava.implementors'), true) " + + "END" +) - @SQLAction(provides="postgresql_ge_90300", install= - "SELECT CASE WHEN" + - " 90300 <= CAST(current_setting('server_version_num') AS integer)" + - " THEN set_config('pljava.implementors', 'postgresql_ge_90300,' || " + - " current_setting('pljava.implementors'), true) " + - "END" - ), +@SQLAction(provides="postgresql_ge_90300", install= + "SELECT CASE WHEN" + + " 90300 <= CAST(current_setting('server_version_num') AS integer)" + + " THEN set_config('pljava.implementors', 'postgresql_ge_90300,' || " 
+ + " current_setting('pljava.implementors'), true) " + + "END" +) - @SQLAction(provides="postgresql_ge_100000", install= - "SELECT CASE WHEN" + - " 100000 <= CAST(current_setting('server_version_num') AS integer)" + - " THEN set_config('pljava.implementors', 'postgresql_ge_100000,' || " + - " current_setting('pljava.implementors'), true) " + - "END" - ), -}) +@SQLAction(provides="postgresql_ge_100000", install= + "SELECT CASE WHEN" + + " 100000 <= CAST(current_setting('server_version_num') AS integer)" + + " THEN set_config('pljava.implementors', 'postgresql_ge_100000,' || " + + " current_setting('pljava.implementors'), true) " + + "END" +) public class ConditionalDDR { } diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Enumeration.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Enumeration.java index 298050537..2fcee7df1 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Enumeration.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Enumeration.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015- Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -15,7 +15,6 @@ import java.util.Arrays; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; import org.postgresql.pljava.annotation.SQLType; import org.postgresql.pljava.annotation.Function; @@ -27,21 +26,19 @@ * version, set up in the {@link ConditionalDDR} example. PostgreSQL before 8.3 * did not have enum types. 
*/ -@SQLActions({ - @SQLAction(provides="mood type", implementor="postgresql_ge_80300", - install="CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy')", - remove="DROP TYPE mood" - ), - @SQLAction(implementor="postgresql_ge_80300", - requires={"textToMood", "moodToText", "textsToMoods", "moodsToTexts"}, - install={ - "SELECT textToMood('happy')", - "SELECT moodToText('happy'::mood)", - "SELECT textsToMoods(array['happy','happy','sad','ok'])", - "SELECT moodsToTexts(array['happy','happy','sad','ok']::mood[])" - } - ) -}) +@SQLAction(provides="mood type", implementor="postgresql_ge_80300", + install="CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy')", + remove="DROP TYPE mood" +) +@SQLAction(implementor="postgresql_ge_80300", + requires={"textToMood", "moodToText", "textsToMoods", "moodsToTexts"}, + install={ + "SELECT textToMood('happy')", + "SELECT moodToText('happy'::mood)", + "SELECT textsToMoods(array['happy','happy','sad','ok'])", + "SELECT moodsToTexts(array['happy','happy','sad','ok']::mood[])" + } +) public class Enumeration { @Function(requires="mood type", provides="textToMood", type="mood", diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/IntWithMod.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/IntWithMod.java index c28215616..a31928a62 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/IntWithMod.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/IntWithMod.java @@ -21,6 +21,7 @@ import java.sql.SQLOutput; import java.sql.Statement; +import org.postgresql.pljava.annotation.Cast; import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.SQLAction; import org.postgresql.pljava.annotation.SQLType; @@ -51,32 +52,13 @@ *

* Of course this example more or less duplicates what you could do in two lines * with CREATE DOMAIN. But it is enough to illustrate the process. - *

- * Certainly, it would be less tedious with some more annotation support and - * autogeneration of the ordering dependencies that are now added by hand here. - *

- * Most of this must be suppressed (using conditional implementor tags) if the - * PostgreSQL instance is older than 8.3, because it won't have the cstring[] - * type, so the typeModifierInput function can't be declared, and so neither - * can the type, or functions that accept or return it. See the - * {@link ConditionalDDR} example for where the implementor tag is set up. */ -@SQLAction(requires={"IntWithMod type", "IntWithMod modApply"}, - implementor="postgresql_ge_80300", - remove="DROP CAST (javatest.IntWithMod AS javatest.IntWithMod)", +@SQLAction(requires="IntWithMod modCast", install={ - "CREATE CAST (javatest.IntWithMod AS javatest.IntWithMod)" + - " WITH FUNCTION javatest.intwithmod_typmodapply(" + - " javatest.IntWithMod, integer, boolean)", - - "COMMENT ON CAST (javatest.IntWithMod AS javatest.IntWithMod) IS '" + - "Cast that applies/verifies the type modifier on an IntWithMod.'", - "SELECT CAST('42' AS javatest.IntWithMod(even))" } ) @BaseUDT(schema="javatest", provides="IntWithMod type", - implementor="postgresql_ge_80300", typeModifierInput="javatest.intwithmod_typmodin", typeModifierOutput="javatest.intwithmod_typmodout", like="pg_catalog.int4") @@ -146,7 +128,6 @@ public void writeSQL(SQLOutput stream) throws SQLException { * "even" or "odd". The modifier value is 0 for even or 1 for odd. */ @Function(schema="javatest", name="intwithmod_typmodin", - implementor="postgresql_ge_80300", effects=IMMUTABLE, onNullInput=RETURNS_NULL) public static int modIn(@SQLType("pg_catalog.cstring[]") String[] toks) throws SQLException { @@ -180,9 +161,10 @@ public static String modOut(int mod) throws SQLException { * Function backing the type-modifier application cast for IntWithMod type. 
*/ @Function(schema="javatest", name="intwithmod_typmodapply", - implementor="postgresql_ge_80300", - provides="IntWithMod modApply", effects=IMMUTABLE, onNullInput=RETURNS_NULL) + @Cast(comment= + "Cast that applies/verifies the type modifier on an IntWithMod.", + provides="IntWithMod modCast") public static IntWithMod modApply(IntWithMod iwm, int mod, boolean explicit) throws SQLException { diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/JDBC42_21.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/JDBC42_21.java index 768726173..212cb13ba 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/JDBC42_21.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/JDBC42_21.java @@ -13,7 +13,6 @@ import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; import org.postgresql.pljava.example.annotation.ConditionalDDR; // for javadoc @@ -27,102 +26,100 @@ * Relies on PostgreSQL-version-specific implementor tags set up in the * {@link ConditionalDDR} example. 
*/ -@SQLActions({ - @SQLAction( - implementor="postgresql_ge_90300",requires="TypeRoundTripper.roundTrip", - install={ - " SELECT" + - " CASE WHEN every(orig = roundtripped)" + - " THEN javatest.logmessage('INFO', 'java.time.LocalDate passes')" + - " ELSE javatest.logmessage('WARNING', 'java.time.LocalDate fails')" + - " END" + - " FROM" + - " (VALUES" + - " (date '2017-08-21')," + - " (date '1970-03-07')," + - " (date '1919-05-29')" + - " ) AS p(orig)," + - " javatest.roundtrip(p, 'java.time.LocalDate')" + - " AS r(roundtripped date)", +@SQLAction( + implementor="postgresql_ge_90300",requires="TypeRoundTripper.roundTrip", + install={ + " SELECT" + + " CASE WHEN every(orig = roundtripped)" + + " THEN javatest.logmessage('INFO', 'java.time.LocalDate passes')" + + " ELSE javatest.logmessage('WARNING', 'java.time.LocalDate fails')" + + " END" + + " FROM" + + " (VALUES" + + " (date '2017-08-21')," + + " (date '1970-03-07')," + + " (date '1919-05-29')" + + " ) AS p(orig)," + + " javatest.roundtrip(p, 'java.time.LocalDate')" + + " AS r(roundtripped date)", - " SELECT" + - " CASE WHEN every(orig = roundtripped)" + - " THEN javatest.logmessage('INFO', 'java.time.LocalTime passes')" + - " ELSE javatest.logmessage('WARNING', 'java.time.LocalTime fails')" + - " END" + - " FROM" + - " (SELECT current_time::time) AS p(orig)," + - " javatest.roundtrip(p, 'java.time.LocalTime')" + - " AS r(roundtripped time)", + " SELECT" + + " CASE WHEN every(orig = roundtripped)" + + " THEN javatest.logmessage('INFO', 'java.time.LocalTime passes')" + + " ELSE javatest.logmessage('WARNING', 'java.time.LocalTime fails')" + + " END" + + " FROM" + + " (SELECT current_time::time) AS p(orig)," + + " javatest.roundtrip(p, 'java.time.LocalTime')" + + " AS r(roundtripped time)", - " SELECT" + - " CASE WHEN every(orig = roundtripped)" + - " THEN javatest.logmessage('INFO', 'java.time.OffsetTime passes')" + - " ELSE javatest.logmessage('WARNING', 'java.time.OffsetTime fails')" + - " END" + - " FROM" + - " 
(SELECT current_time::timetz) AS p(orig)," + - " javatest.roundtrip(p, 'java.time.OffsetTime')" + - " AS r(roundtripped timetz)", + " SELECT" + + " CASE WHEN every(orig = roundtripped)" + + " THEN javatest.logmessage('INFO', 'java.time.OffsetTime passes')" + + " ELSE javatest.logmessage('WARNING', 'java.time.OffsetTime fails')" + + " END" + + " FROM" + + " (SELECT current_time::timetz) AS p(orig)," + + " javatest.roundtrip(p, 'java.time.OffsetTime')" + + " AS r(roundtripped timetz)", - " SELECT" + - " CASE WHEN every(orig = roundtripped)" + - " THEN javatest.logmessage('INFO', 'java.time.LocalDateTime passes')" + - " ELSE javatest.logmessage('WARNING','java.time.LocalDateTime fails')"+ - " END" + - " FROM" + - " (SELECT 'on' = current_setting('integer_datetimes')) AS ck(idt)," + - " LATERAL (" + - " SELECT" + - " value" + - " FROM" + - " (VALUES" + - " (true, timestamp '2017-08-21 18:25:29.900005')," + - " (true, timestamp '1970-03-07 17:37:49.300009')," + - " (true, timestamp '1919-05-29 13:08:33.600001')," + - " (idt, timestamp 'infinity')," + - " (idt, timestamp '-infinity')" + - " ) AS vs(cond, value)" + - " WHERE cond" + - " ) AS p(orig)," + - " javatest.roundtrip(p, 'java.time.LocalDateTime')" + - " AS r(roundtripped timestamp)", + " SELECT" + + " CASE WHEN every(orig = roundtripped)" + + " THEN javatest.logmessage('INFO', 'java.time.LocalDateTime passes')" + + " ELSE javatest.logmessage('WARNING','java.time.LocalDateTime fails')"+ + " END" + + " FROM" + + " (SELECT 'on' = current_setting('integer_datetimes')) AS ck(idt)," + + " LATERAL (" + + " SELECT" + + " value" + + " FROM" + + " (VALUES" + + " (true, timestamp '2017-08-21 18:25:29.900005')," + + " (true, timestamp '1970-03-07 17:37:49.300009')," + + " (true, timestamp '1919-05-29 13:08:33.600001')," + + " (idt, timestamp 'infinity')," + + " (idt, timestamp '-infinity')" + + " ) AS vs(cond, value)" + + " WHERE cond" + + " ) AS p(orig)," + + " javatest.roundtrip(p, 'java.time.LocalDateTime')" + + " AS 
r(roundtripped timestamp)", - " SELECT" + - " CASE WHEN every(orig = roundtripped)" + - " THEN javatest.logmessage('INFO', 'java.time.OffsetDateTime passes')"+ - " ELSE javatest.logmessage(" + - " 'WARNING','java.time.OffsetDateTime fails')"+ - " END" + - " FROM" + - " (SELECT 'on' = current_setting('integer_datetimes')) AS ck(idt)," + - " LATERAL (" + - " SELECT" + - " value" + - " FROM" + - " (VALUES" + - " (true, timestamptz '2017-08-21 18:25:29.900005Z')," + - " (true, timestamptz '1970-03-07 17:37:49.300009Z')," + - " (true, timestamptz '1919-05-29 13:08:33.600001Z')," + - " (idt, timestamptz 'infinity')," + - " (idt, timestamptz '-infinity')" + - " ) AS vs(cond, value)" + - " WHERE cond" + - " ) AS p(orig)," + - " javatest.roundtrip(p, 'java.time.OffsetDateTime')" + - " AS r(roundtripped timestamptz)", + " SELECT" + + " CASE WHEN every(orig = roundtripped)" + + " THEN javatest.logmessage('INFO', 'java.time.OffsetDateTime passes')"+ + " ELSE javatest.logmessage(" + + " 'WARNING','java.time.OffsetDateTime fails')"+ + " END" + + " FROM" + + " (SELECT 'on' = current_setting('integer_datetimes')) AS ck(idt)," + + " LATERAL (" + + " SELECT" + + " value" + + " FROM" + + " (VALUES" + + " (true, timestamptz '2017-08-21 18:25:29.900005Z')," + + " (true, timestamptz '1970-03-07 17:37:49.300009Z')," + + " (true, timestamptz '1919-05-29 13:08:33.600001Z')," + + " (idt, timestamptz 'infinity')," + + " (idt, timestamptz '-infinity')" + + " ) AS vs(cond, value)" + + " WHERE cond" + + " ) AS p(orig)," + + " javatest.roundtrip(p, 'java.time.OffsetDateTime')" + + " AS r(roundtripped timestamptz)", - " SELECT" + - " CASE WHEN every(orig = roundtripped)" + - " THEN javatest.logmessage('INFO', 'OffsetTime as stmt param passes')"+ - " ELSE javatest.logmessage(" + - " 'WARNING','java.time.OffsetTime as stmt param fails')"+ - " END" + - " FROM" + - " (SELECT current_time::timetz) AS p(orig)," + - " javatest.roundtrip(p, 'java.time.OffsetTime', true)" + - " AS r(roundtripped timetz)" 
- }) + " SELECT" + + " CASE WHEN every(orig = roundtripped)" + + " THEN javatest.logmessage('INFO', 'OffsetTime as stmt param passes')"+ + " ELSE javatest.logmessage(" + + " 'WARNING','java.time.OffsetTime as stmt param fails')"+ + " END" + + " FROM" + + " (SELECT current_time::timetz) AS p(orig)," + + " javatest.roundtrip(p, 'java.time.OffsetTime', true)" + + " AS r(roundtripped timetz)" }) public class JDBC42_21 { diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java index e5614933b..1e61893de 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java @@ -67,7 +67,6 @@ import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.MappedUDT; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; import org.postgresql.pljava.annotation.SQLType; import static org.postgresql.pljava.example.LoggerTest.logMessage; @@ -105,152 +104,180 @@ * Everything mentioning the type XML here needs a conditional implementor tag * in case of being loaded into a PostgreSQL instance built without that type. 
*/ -@SQLActions({ - @SQLAction(provides="postgresql_xml", install= - "SELECT CASE (SELECT 1 FROM pg_type WHERE typname = 'xml') WHEN 1" + - " THEN set_config('pljava.implementors', 'postgresql_xml,' || " + - " current_setting('pljava.implementors'), true) " + - "END" - ), - - @SQLAction(implementor="postgresql_ge_80400", - provides="postgresql_xml_ge84", - install= - "SELECT CASE (SELECT 1 FROM pg_type WHERE typname = 'xml') WHEN 1" + - " THEN set_config('pljava.implementors', 'postgresql_xml_ge84,' || " + - " current_setting('pljava.implementors'), true) " + - "END" - ), - - @SQLAction(implementor="postgresql_xml_ge84", requires="echoXMLParameter", - install= - "WITH" + - " s(how) AS (SELECT generate_series(1, 7))," + - " t(x) AS (" + - " SELECT table_to_xml('pg_catalog.pg_operator', true, false, '')" + - " )," + - " r(howin, howout, isdoc) AS (" + - " SELECT" + - " i.how, o.how," + - " javatest.echoxmlparameter(x, i.how, o.how) IS DOCUMENT" + - " FROM" + - " t, s AS i, s AS o" + - " WHERE" + - " NOT (i.how = 6 and o.how = 7)" + // 6->7 unreliable in some JREs - " ) " + +@SQLAction(provides="postgresql_xml", install= + "SELECT CASE (SELECT 1 FROM pg_type WHERE typname = 'xml') WHEN 1" + + " THEN set_config('pljava.implementors', 'postgresql_xml,' || " + + " current_setting('pljava.implementors'), true) " + + "END" +) + +@SQLAction(implementor="postgresql_ge_80400", + provides="postgresql_xml_ge84", + install= + "SELECT CASE (SELECT 1 FROM pg_type WHERE typname = 'xml') WHEN 1" + + " THEN set_config('pljava.implementors', 'postgresql_xml_ge84,' || " + + " current_setting('pljava.implementors'), true) " + + "END" +) + +@SQLAction(implementor="postgresql_xml_ge84", requires="echoXMLParameter", + install= + "WITH" + + " s(how) AS (SELECT generate_series(1, 7))," + + " t(x) AS (" + + " SELECT table_to_xml('pg_catalog.pg_operator', true, false, '')" + + " )," + + " r(howin, howout, isdoc) AS (" + + " SELECT" + + " i.how, o.how," + + " javatest.echoxmlparameter(x, i.how, 
o.how) IS DOCUMENT" + + " FROM" + + " t, s AS i, s AS o" + + " WHERE" + + " NOT (i.how = 6 and o.how = 7)" + // 6->7 unreliable in some JREs + " ) " + + "SELECT" + + " CASE WHEN every(isdoc)" + + " THEN javatest.logmessage('INFO', 'SQLXML echos succeeded')" + + " ELSE javatest.logmessage('WARNING', 'SQLXML echos had problems')" + + " END " + + "FROM" + + " r" +) + +@SQLAction(implementor="postgresql_xml_ge84", requires="proxiedXMLEcho", + install= + "WITH" + + " s(how) AS (SELECT unnest('{1,2,4,5,6,7}'::int[]))," + + " t(x) AS (" + + " SELECT table_to_xml('pg_catalog.pg_operator', true, false, '')" + + " )," + + " r(how, isdoc) AS (" + + " SELECT" + + " how," + + " javatest.proxiedxmlecho(x, how) IS DOCUMENT" + + " FROM" + + " t, s" + + " )" + + "SELECT" + + " CASE WHEN every(isdoc)" + + " THEN javatest.logmessage('INFO', 'proxied SQLXML echos succeeded')" + + " ELSE javatest.logmessage('WARNING'," + + " 'proxied SQLXML echos had problems')" + + " END " + + "FROM" + + " r" +) + +@SQLAction(implementor="postgresql_xml_ge84", requires="lowLevelXMLEcho", + install={ + "SELECT" + + " preparexmlschema('schematest', $$" + + "" + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + "" + + "$$, 'http://www.w3.org/2001/XMLSchema', 5)", + + "WITH" + + " s(how) AS (SELECT unnest('{4,5,7}'::int[]))," + + " r(isdoc) AS (" + + " SELECT" + + " javatest.lowlevelxmlecho(" + + " query_to_xml(" + + " 'SELECT ''hi'' AS textcol, 1 AS intcol', true, true, 'urn:testme'"+ + " ), how, params) IS DOCUMENT" + + " FROM" + + " s," + + " (SELECT 'schematest' AS schema) AS params" + + " )" + + "SELECT" + + " CASE WHEN every(isdoc)" + + " THEN javatest.logmessage('INFO', 'XML Schema tests succeeded')" + + " ELSE javatest.logmessage('WARNING'," + + " 'XML Schema tests had problems')" + + " END " + + "FROM" + + " r" + } +) + +@SQLAction(implementor="postgresql_xml", + requires={"prepareXMLTransform", "transformXML"}, + install={ + "REVOKE EXECUTE ON FUNCTION 
javatest.prepareXMLTransformWithJava" + + " (pg_catalog.varchar, pg_catalog.xml, integer, boolean," + + " pg_catalog.RECORD)" + + " FROM PUBLIC", + "SELECT" + - " CASE WHEN every(isdoc)" + - " THEN javatest.logmessage('INFO', 'SQLXML echos succeeded')" + - " ELSE javatest.logmessage('WARNING', 'SQLXML echos had problems')" + - " END " + - "FROM" + - " r" - ), - - @SQLAction(implementor="postgresql_xml_ge84", requires="proxiedXMLEcho", - install= - "WITH" + - " s(how) AS (SELECT unnest('{1,2,4,5,6,7}'::int[]))," + - " t(x) AS (" + - " SELECT table_to_xml('pg_catalog.pg_operator', true, false, '')" + - " )," + - " r(how, isdoc) AS (" + - " SELECT" + - " how," + - " javatest.proxiedxmlecho(x, how) IS DOCUMENT" + - " FROM" + - " t, s" + - " )" + + " javatest.prepareXMLTransform('distinctElementNames'," + + "'" + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + "', 5, true)", + "SELECT" + - " CASE WHEN every(isdoc)" + - " THEN javatest.logmessage('INFO', 'proxied SQLXML echos succeeded')" + - " ELSE javatest.logmessage('WARNING'," + - " 'proxied SQLXML echos had problems')" + - " END " + - "FROM" + - " r" - ), - - @SQLAction(implementor="postgresql_xml_ge84", requires="lowLevelXMLEcho", - install={ + " javatest.prepareXMLTransformWithJava('getPLJavaVersion'," + + "'" + + " " + + " " + + " " + + "', enableExtensionFunctions => true)", + "SELECT" + - " preparexmlschema('schematest', $$" + - "" + - " " + - " " + - " " + - " " + - " " + - " " + - " " + - " " + - "" + - "$$, 'http://www.w3.org/2001/XMLSchema', 5)", - - "WITH" + - " s(how) AS (SELECT unnest('{4,5,7}'::int[]))," + - " r(isdoc) AS (" + - " SELECT" + - " javatest.lowlevelxmlecho(" + - " query_to_xml(" + - " 'SELECT ''hi'' AS textcol, 1 AS intcol', true, true, 'urn:testme'"+ - " ), how, params) IS DOCUMENT" + - " FROM" + - " s," + - " (SELECT 'schematest' AS schema) AS params" + - " )" + + " CASE WHEN" + + " javatest.transformXML('distinctElementNames'," + + " '', 
5, 5)::text" + + " =" + + " 'abcde'"+ + " THEN javatest.logmessage('INFO', 'XSLT 1.0 test succeeded')" + + " ELSE javatest.logmessage('WARNING', 'XSLT 1.0 test failed')" + + " END", + "SELECT" + - " CASE WHEN every(isdoc)" + - " THEN javatest.logmessage('INFO', 'XML Schema tests succeeded')" + - " ELSE javatest.logmessage('WARNING'," + - " 'XML Schema tests had problems')" + - " END " + - "FROM" + - " r" - } - ), - - @SQLAction(implementor="postgresql_xml", - requires={"prepareXMLTransform", "transformXML"}, - install={ - "SELECT" + - " javatest.prepareXMLTransform('distinctElementNames'," + - "'" + - " " + - " " + - " " + - " " + - " " + - " " + - " " + - " " + - " " + - " " + - " " + - " " + - "', 5, true)", - - "SELECT" + - " CASE WHEN" + - " javatest.transformXML('distinctElementNames'," + - " '', 5, 5)::text" + - " =" + - " 'abcde'"+ - " THEN javatest.logmessage('INFO', 'XSLT 1.0 test succeeded')" + - " ELSE javatest.logmessage('WARNING', 'XSLT 1.0 test failed')" + - " END" - } - ) -}) + " CASE WHEN" + + " javatest.transformXML('getPLJavaVersion', '')::text" + + " OPERATOR(pg_catalog.=) extversion" + + " THEN javatest.logmessage('INFO', 'XSLT 1.0 with Java succeeded')" + + " ELSE javatest.logmessage('WARNING', 'XSLT 1.0 with Java failed')" + + " END" + + " FROM pg_catalog.pg_extension" + + " WHERE extname = 'pljava'" + } +) @MappedUDT(schema="javatest", name="onexml", structure="c1 xml", implementor="postgresql_xml", comment="A composite type mapped by the PassXML example class") @@ -407,48 +434,135 @@ public static SQLXML castTextXML(@SQLType("text") SQLXML sx) */ @Function(schema="javatest", implementor="postgresql_xml", provides="prepareXMLTransform") - public static void prepareXMLTransform(String name, SQLXML source, int how, - @SQLType(defaultValue="false") boolean enableExtensionFunctions) + public static void prepareXMLTransform(String name, SQLXML source, + @SQLType(defaultValue="0") int how, + @SQLType(defaultValue="false") boolean 
enableExtensionFunctions, + @SQLType(defaultValue={}) ResultSet adjust) + throws SQLException + { + prepareXMLTransform( + name, source, how, enableExtensionFunctions, adjust, false); + } + + /** + * Precompile an XSL transform {@code source} and save it (for the + * current session) as {@code name}, where the transform may call Java + * methods. + *

+ * Otherwise identical to {@code prepareXMLTransform}, this version sets the + * {@code TransformerFactory}'s {@code extensionClassLoader} (to the same + * loader that loads this class), so the transform will be able to use + * xalan's Java call syntax to call any public Java methods that would be + * accessible to this class. (That can make a big difference in usefulness + * for the otherwise rather limited XSLT 1.0.) + *

+ * This example function will be installed with {@code EXECUTE} permission + * revoked from {@code PUBLIC}, as it essentially confers the ability to + * create arbitrary new Java functions, so should only be granted to roles + * you would be willing to grant {@code USAGE ON LANGUAGE java}. + *

+ * Because this function only prepares the transform, and + * {@link #transformXML transformXML} applies it, there is some division of + * labor in determining what limits apply to its behavior. The use of this + * method instead of {@code prepareXMLTransform} determines whether the + * transform is allowed to see external Java methods at all; it will be + * the policy permissions granted to {@code transformXML} that control what + * those methods can do when the transform is applied. For now, that method + * is defined in the trusted/sandboxed {@code java} language, so this + * function could reasonably be granted to any role with {@code USAGE} on + * {@code java}. If, by contrast, {@code transformXML} were declared in the + * 'untrusted' {@code javaU}, it would be prudent to allow only superusers + * access to this function, just as only they can {@code CREATE FUNCTION} in + * an untrusted language. + */ + @Function(schema="javatest", implementor="postgresql_xml", + provides="prepareXMLTransform") + public static void prepareXMLTransformWithJava(String name, SQLXML source, + @SQLType(defaultValue="0") int how, + @SQLType(defaultValue="false") boolean enableExtensionFunctions, + @SQLType(defaultValue={}) ResultSet adjust) + throws SQLException + { + prepareXMLTransform( + name, source, how, enableExtensionFunctions, adjust, true); + } + + private static void prepareXMLTransform(String name, SQLXML source, int how, + boolean enableExtensionFunctions, ResultSet adjust, boolean withJava) throws SQLException { TransformerFactory tf = TransformerFactory.newInstance(); String exf = "http://www.oracle.com/xml/jaxp/properties/enableExtensionFunctions"; + String ecl = "jdk.xml.transform.extensionClassLoader"; + Source src = sxToSource(source, how, adjust); try { tf.setFeature(exf, enableExtensionFunctions); - s_tpls.put(name, tf.newTemplates(sxToSource(source, how))); + if ( withJava ) + tf.setAttribute(ecl, PassXML.class.getClassLoader()); + s_tpls.put(name, 
tf.newTemplates(src)); } catch ( TransformerException te ) { - throw new SQLException("XML transformation failed", te); + throw new SQLException( + "Preparing XML transformation: " + te.getMessage(), te); } } /** * Transform some XML according to a named transform prepared with * {@code prepareXMLTransform}. + *

+ * Pass null for {@code transformName} to get a plain identity transform + * (not such an interesting thing to do, unless you also specify indenting). */ @Function(schema="javatest", implementor="postgresql_xml", provides="transformXML") public static SQLXML transformXML( - String transformName, SQLXML source, int howin, int howout) + String transformName, SQLXML source, + @SQLType(defaultValue="0") int howin, + @SQLType(defaultValue="0") int howout, + @SQLType(defaultValue={}) ResultSet adjust, + @SQLType(defaultValue="false") boolean indent, + @SQLType(defaultValue="4") int indentWidth) throws SQLException { - Templates tpl = s_tpls.get(transformName); - Source src = sxToSource(source, howin); + Templates tpl = null == transformName? null: s_tpls.get(transformName); + Source src = sxToSource(source, howin, adjust); + + if ( indent && 0 == howout ) + howout = 4; // transformer only indents if writing a StreamResult + Connection c = DriverManager.getConnection("jdbc:default:connection"); SQLXML result = c.createSQLXML(); - Result rlt = sxToResult(result, howout); + Result rlt = sxToResult(result, howout, adjust); try { - Transformer t = tpl.newTransformer(); + Transformer t = + null == tpl ? s_tf.newTransformer() : tpl.newTransformer(); + /* + * For the non-SAX/StAX/DOM flavors of output, you're responsible + * for setting the Transformer to use the server encoding. + */ + if ( rlt instanceof StreamResult ) + t.setOutputProperty(ENCODING, + System.getProperty("org.postgresql.server.encoding")); + else if ( indent ) + logMessage("WARNING", + "indent requested, but howout specifies a non-stream " + + "Result type; no indenting will happen"); + + t.setOutputProperty("indent", indent ? 
"yes" : "no"); + t.setOutputProperty( + "{http://xml.apache.org/xalan}indent-amount", "" + indentWidth); + t.transform(src, rlt); } catch ( TransformerException te ) { - throw new SQLException("XML transformation failed", te); + throw new SQLException("Transforming XML: " + te.getMessage(), te); } return ensureClosed(rlt, result, howout); @@ -1044,6 +1158,17 @@ public static void unclosedSQLXML(int howmany, int how) throws SQLException } } + + /** + * Return some instance of {@code Source} for reading an {@code SQLXML} + * object, depending on the parameter {@code how}. + *

+ * Note that this method always returns a {@code Source}, even for cases + * 1 and 2 (obtaining readable streams directly from the {@code SQLXML} + * object; this method wraps them in {@code Source}), and case 3 + * ({@code getString}; this method creates a {@code StringReader} and + * returns it wrapped in a {@code Source}. + */ private static Source sxToSource(SQLXML sx, int how) throws SQLException { switch ( how ) @@ -1090,6 +1215,70 @@ private static Result sxToResult(SQLXML sx, int how) throws SQLException } } + /** + * Return some instance of {@code Source} for reading an {@code SQLXML} + * object, depending on the parameter {@code how}, applying any adjustments + * in {@code adjust}. + *

+ * Allows {@code how} to be zero, meaning to let the implementation choose + * what kind of {@code Source} to present. Otherwise identical to the other + * {@code sxToSource}. + */ + private static Source sxToSource(SQLXML sx, int how, ResultSet adjust) + throws SQLException + { + Source s; + switch ( how ) + { + case 0: s = sx.getSource(Adjusting.XML.Source.class); break; + case 1: + case 2: + case 3: + case 4: + return sxToSource(sx, how); // no adjustments on a StreamSource + case 5: s = sx.getSource(Adjusting.XML.SAXSource.class); break; + case 6: s = sx.getSource(Adjusting.XML.StAXSource.class); break; + case 7: s = sx.getSource(Adjusting.XML.DOMSource.class); break; + default: throw new SQLDataException("how should be 0-7", "22003"); + } + + if ( s instanceof Adjusting.XML.Source ) + return applyAdjustments(adjust, (Adjusting.XML.Source)s).get(); + return s; + } + + /** + * Return some instance of {@code Result} for writing an {@code SQLXML} + * object, depending on the parameter {@code how} applying any adjustments + * in {@code adjust}. + *

+ * Allows {@code how} to be zero, meaning to let the implementation choose + * what kind of {@code Result} to present. Otherwise identical to the other + * {@code sxToResult}. + */ + private static Result sxToResult(SQLXML sx, int how, ResultSet adjust) + throws SQLException + { + Result r; + switch ( how ) + { + case 1: // you might wish you could adjust a raw BinaryStream + case 2: // or CharacterStream + case 3: // or String, but you can't. Ask for a StreamResult. + case 5: // SAXResult needs no adjustment + case 6: // StAXResult needs no adjustment + case 7: // DOMResult needs no adjustment + return sxToResult(sx, how); + case 4: r = sx.setResult(Adjusting.XML.StreamResult.class); break; + case 0: r = sx.setResult(Adjusting.XML.Result.class); break; + default: throw new SQLDataException("how should be 0-7", "22003"); + } + + if ( r instanceof Adjusting.XML.Result ) + return applyAdjustments(adjust, (Adjusting.XML.Result)r).get(); + return r; + } + /** * Ensure the closing of whatever method was used to add content to * an {@code SQLXML} object. diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PreJSR310.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PreJSR310.java index b19f5ce43..c0409c0d7 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PreJSR310.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PreJSR310.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018- Tada AB and other contributors, as listed below. + * Copyright (c) 2018-2020 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -24,7 +24,6 @@ import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; /** * Some tests of pre-JSR 310 date/time/timestamp conversions. @@ -34,17 +33,15 @@ * This example relies on {@code implementor} tags reflecting the PostgreSQL * version, set up in the {@link ConditionalDDR} example. */ -@SQLActions({ - @SQLAction(provides="language java_tzset", install={ - "SELECT sqlj.alias_java_language('java_tzset', true)" - }, remove={ - "DROP LANGUAGE java_tzset" - }), +@SQLAction(provides="language java_tzset", install={ + "SELECT sqlj.alias_java_language('java_tzset', true)" +}, remove={ + "DROP LANGUAGE java_tzset" +}) - @SQLAction(implementor="postgresql_ge_90300", // needs LATERAL - requires="issue199", install={ - "SELECT javatest.issue199()" - }) +@SQLAction(implementor="postgresql_ge_90300", // needs LATERAL + requires="issue199", install={ + "SELECT javatest.issue199()" }) public class PreJSR310 { diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/RecordParameterDefaults.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/RecordParameterDefaults.java index d4f240cee..09d3dbbe8 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/RecordParameterDefaults.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/RecordParameterDefaults.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018- Tada AB and other contributors, as listed below. + * Copyright (c) 2018-2020 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -20,7 +20,6 @@ import org.postgresql.pljava.ResultSetProvider; import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; import org.postgresql.pljava.annotation.SQLType; /** @@ -33,19 +32,17 @@ * This example relies on {@code implementor} tags reflecting the PostgreSQL * version, set up in the {@link ConditionalDDR} example. */ -@SQLActions({ - @SQLAction( - provides = "paramtypeinfo type", // created in Triggers.java - install = { - "CREATE TYPE javatest.paramtypeinfo AS (" + - " name text, pgtypename text, javaclass text, tostring text" + - ")" - }, - remove = { - "DROP TYPE javatest.paramtypeinfo" - } - ) -}) +@SQLAction( + provides = "paramtypeinfo type", // created in Triggers.java + install = { + "CREATE TYPE javatest.paramtypeinfo AS (" + + " name text, pgtypename text, javaclass text, tostring text" + + ")" + }, + remove = { + "DROP TYPE javatest.paramtypeinfo" + } +) public class RecordParameterDefaults implements ResultSetProvider { /** diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ReturnComposite.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ReturnComposite.java new file mode 100644 index 000000000..9a5be5edc --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ReturnComposite.java @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.List; + +import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLAction; + +/** + * Demonstrates {@code @Function(out={...})} for a function that returns a + * non-predeclared composite type. + */ +@SQLAction(requires = { "helloOutParams", "helloTable" }, install = { + "SELECT" + + " CASE WHEN want IS NOT DISTINCT FROM helloOutParams()" + + " THEN javatest.logmessage('INFO', 'composite return passes')" + + " ELSE javatest.logmessage('WARNING', 'composite return fails')" + + " END" + + " FROM" + + " (SELECT 'Hello' ::text, 'world' ::text) AS want", + + "WITH" + + " expected AS (VALUES" + + " ('Hello' ::text, 'twelve' ::text)," + + " ('Hello', 'thirteen')," + + " ('Hello', 'love')" + + " )" + + "SELECT" + + " CASE WHEN every(want IS NOT DISTINCT FROM got)" + + " THEN javatest.logmessage('INFO', 'set of composite return passes')" + + " ELSE javatest.logmessage('WARNING', 'set of composite return fails')" + + " END" + + " FROM" + + " (SELECT row_number() OVER (), * FROM expected) AS want" + + " LEFT JOIN (SELECT row_number() OVER (), * FROM hellotable()) AS got" + + " USING (row_number)" +}) +public class ReturnComposite implements ResultSetProvider.Large +{ + /** + * Returns a two-column composite result that does not have to be + * a predeclared composite type, or require the calling SQL query to + * follow the function call with a result column definition list, as is + * needed for a bare {@code RECORD} return type. 
+ */ + @Function( + schema = "javatest", out = { "greeting text", "addressee text" }, + provides = "helloOutParams" + ) + public static boolean helloOutParams(ResultSet out) throws SQLException + { + out.updateString(1, "Hello"); + out.updateString(2, "world"); + return true; + } + + /** + * Returns a two-column table result that does not have to be + * a predeclared composite type, or require the calling SQL query to + * follow the function call with a result column definition list, as is + * needed for a bare {@code RECORD} return type. + */ + @Function( + schema = "javatest", out = { "greeting text", "addressee text" }, + provides = "helloTable" + ) + public static ResultSetProvider helloTable() + throws SQLException + { + return new ReturnComposite(); + } + + Iterator addressees = + List.of("twelve", "thirteen", "love").iterator(); + + @Override + public boolean assignRowValues(ResultSet out, long currentRow) + throws SQLException + { + if ( ! addressees.hasNext() ) + return false; + + out.updateString(1, "Hello"); + out.updateString(2, addressees.next()); + return true; + } + + @Override + public void close() + { + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SPIActions.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SPIActions.java index f2606c912..dca34e5c7 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SPIActions.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SPIActions.java @@ -31,37 +31,34 @@ import org.postgresql.pljava.annotation.Function; import static org.postgresql.pljava.annotation.Function.Effects.*; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; /** * Some methods used for testing the SPI JDBC driver. 
* * @author Thomas Hallgren */ -@SQLActions({ - @SQLAction(provides = "employees tables", install = { - "CREATE TABLE javatest.employees1" + - " (" + - " id int PRIMARY KEY," + - " name varchar(200)," + - " salary int" + - " )", - - "CREATE TABLE javatest.employees2" + - " (" + - " id int PRIMARY KEY," + - " name varchar(200)," + - " salary int," + - " transferDay date," + - " transferTime time" + - " )" - }, remove = { - "DROP TABLE javatest.employees2", - "DROP TABLE javatest.employees1" - } - ), - @SQLAction(requires = "issue228", install = "SELECT javatest.issue228()") -}) +@SQLAction(provides = "employees tables", install = { + "CREATE TABLE javatest.employees1" + + " (" + + " id int PRIMARY KEY," + + " name varchar(200)," + + " salary int" + + " )", + + "CREATE TABLE javatest.employees2" + + " (" + + " id int PRIMARY KEY," + + " name varchar(200)," + + " salary int," + + " transferDay date," + + " transferTime time" + + " )" + }, remove = { + "DROP TABLE javatest.employees2", + "DROP TABLE javatest.employees1" +} +) +@SQLAction(requires = "issue228", install = "SELECT javatest.issue228()") public class SPIActions { private static final String SP_CHECKSTATE = "sp.checkState"; diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java index f950a602c..804ef9d83 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -9,6 +9,7 @@ * Contributors: * Tada AB * Purdue University + * Chapman Flack */ package org.postgresql.pljava.example.annotation; @@ -22,7 +23,6 @@ import org.postgresql.pljava.TriggerData; import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; import org.postgresql.pljava.annotation.Trigger; import static org.postgresql.pljava.annotation.Trigger.Called.*; import static org.postgresql.pljava.annotation.Trigger.Constraint.*; @@ -41,41 +41,39 @@ * version, set up in the {@link ConditionalDDR} example. Constraint triggers * appear in PG 9.1, transition tables in PG 10. */ -@SQLActions({ - @SQLAction( - provides = "foobar tables", - install = { - "CREATE TABLE javatest.foobar_1 ( username text, stuff text )", - "CREATE TABLE javatest.foobar_2 ( username text, value numeric )" - }, - remove = { - "DROP TABLE javatest.foobar_2", - "DROP TABLE javatest.foobar_1" - } - ), - @SQLAction( - requires = "constraint triggers", - install = "INSERT INTO javatest.foobar_2(value) VALUES (45)" - ), - @SQLAction( - requires = "foobar triggers", - provides = "foobar2_42", - install = "INSERT INTO javatest.foobar_2(value) VALUES (42)" - ), - @SQLAction( - requires = { "transition triggers", "foobar2_42" }, - install = "UPDATE javatest.foobar_2 SET value = 43 WHERE value = 42" - ) - /* - * Note for another day: this would seem an excellent place to add a - * regression test for github issue #134 (make sure invocations of a - * trigger do not fail with SPI_ERROR_UNCONNECTED). However, any test - * here that runs from the deployment descriptor will be running when - * SPI is already connected, so a regression would not be caught. - * A proper test for it will have to wait for a proper testing harness - * invoking tests from outside PL/Java itself. 
- */ -}) +@SQLAction( + provides = "foobar tables", + install = { + "CREATE TABLE javatest.foobar_1 ( username text, stuff text )", + "CREATE TABLE javatest.foobar_2 ( username text, value numeric )" + }, + remove = { + "DROP TABLE javatest.foobar_2", + "DROP TABLE javatest.foobar_1" + } +) +@SQLAction( + requires = "constraint triggers", + install = "INSERT INTO javatest.foobar_2(value) VALUES (45)" +) +@SQLAction( + requires = "foobar triggers", + provides = "foobar2_42", + install = "INSERT INTO javatest.foobar_2(value) VALUES (42)" +) +@SQLAction( + requires = { "transition triggers", "foobar2_42" }, + install = "UPDATE javatest.foobar_2 SET value = 43 WHERE value = 42" +) +/* + * Note for another day: this would seem an excellent place to add a + * regression test for github issue #134 (make sure invocations of a + * trigger do not fail with SPI_ERROR_UNCONNECTED). However, any test + * here that runs from the deployment descriptor will be running when + * SPI is already connected, so a regression would not be caught. + * A proper test for it will have to wait for a proper testing harness + * invoking tests from outside PL/Java itself. + */ public class Triggers { /** diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UnicodeRoundTripTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UnicodeRoundTripTest.java index 568d64017..4f2c0ec47 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UnicodeRoundTripTest.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UnicodeRoundTripTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2020 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -15,7 +15,6 @@ import java.sql.SQLException; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; import org.postgresql.pljava.annotation.Function; /** @@ -39,18 +38,17 @@ * This example relies on {@code implementor} tags reflecting the PostgreSQL * version, set up in the {@link ConditionalDDR} example, and also sets its own. */ -@SQLActions({ - @SQLAction(provides="postgresql_unicodetest", - implementor="postgresql_ge_90000", install= - "SELECT CASE" + - " WHEN 'UTF8' = current_setting('server_encoding')" + - " THEN set_config('pljava.implementors', 'postgresql_unicodetest,' ||" + - " current_setting('pljava.implementors'), true) " + - "END" - ), - @SQLAction(requires="unicodetest fn", - implementor="postgresql_unicodetest", - install= +@SQLAction(provides="postgresql_unicodetest", + implementor="postgresql_ge_90000", install= + "SELECT CASE" + + " WHEN 'UTF8' = current_setting('server_encoding')" + + " THEN set_config('pljava.implementors', 'postgresql_unicodetest,' ||" + + " current_setting('pljava.implementors'), true) " + + "END" +) +@SQLAction(requires="unicodetest fn", +implementor="postgresql_unicodetest", +install= " with " + " usable_codepoints ( cp ) as ( " + " select generate_series(1,x'd7ff'::int) " + @@ -88,15 +86,14 @@ " 'all Unicode codepoint ranges roundtripped successfully.') " + " end " + " from test_summary" - ), - @SQLAction( - install= - "CREATE TYPE unicodetestrow AS " + - "(matched boolean, cparray integer[], s text)", - remove="DROP TYPE unicodetestrow", - provides="unicodetestrow type" - ) -}) +) +@SQLAction( + install= + "CREATE TYPE unicodetestrow AS " + + "(matched boolean, cparray integer[], s text)", + remove="DROP TYPE unicodetestrow", + provides="unicodetestrow type" +) public class UnicodeRoundTripTest { /** * This function takes a string and an array of ints 
constructed in PG, diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UsingProperties.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UsingProperties.java index bcd3f2b90..417853db7 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UsingProperties.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UsingProperties.java @@ -23,9 +23,11 @@ import java.util.Iterator; import java.util.Map; import java.util.Properties; +import java.util.ResourceBundle; import java.util.logging.Logger; import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.annotation.SQLAction; /** * An example that retrieves a {@code Properties} resource, and returns @@ -33,6 +35,35 @@ * interface. * @author Thomas Hallgren */ +@SQLAction(requires = {"propertyExampleAnno", "propertyExampleRB"}, install = { + "WITH" + + " expected AS (VALUES" + + " ('adjective' ::varchar(200), 'avaricious' ::varchar(200))," + + " ('noun', 'platypus')" + + " )" + + "SELECT" + + " CASE WHEN" + + " 2 = count(prop) AND every(prop IN (SELECT expected FROM expected))" + + " THEN javatest.logmessage('INFO', 'get resource passes')" + + " ELSE javatest.logmessage('WARNING', 'get resource fails')" + + " END" + + " FROM" + + " propertyExampleAnno() AS prop", + + "WITH" + + " expected AS (VALUES" + + " ('adjective' ::varchar(200), 'avaricious' ::varchar(200))," + + " ('noun', 'platypus')" + + " )" + + "SELECT" + + " CASE WHEN" + + " 2 = count(prop) AND every(prop IN (SELECT expected FROM expected))" + + " THEN javatest.logmessage('INFO', 'get ResourceBundle passes')" + + " ELSE javatest.logmessage('WARNING', 'get ResourceBundle fails')" + + " END" + + " FROM" + + " propertyExampleRB() AS prop" +}) public class UsingProperties implements ResultSetProvider.Large { private static Logger s_logger = Logger.getAnonymousLogger(); @@ -42,7 +73,9 @@ public UsingProperties() throws IOException { 
Properties v = new Properties(); - InputStream propStream = this.getClass().getResourceAsStream("example.properties"); + InputStream propStream = + this.getClass().getResourceAsStream("example.properties"); + if(propStream == null) { s_logger.fine("example.properties was null"); @@ -57,6 +90,33 @@ public UsingProperties() } } + /** + * This constructor (distinguished by signature) reads the same property + * file, but using the {@code ResourceBundle} machinery instead of + * {@code Properties}. + */ + private UsingProperties(Void usingResourceBundle) + { + ResourceBundle b = + ResourceBundle.getBundle(getClass().getPackageName() + ".example"); + + Iterator keys = b.getKeys().asIterator(); + + m_propertyIterator = new Iterator>() + { + public boolean hasNext() + { + return keys.hasNext(); + } + + public Map.Entry next() + { + String k = keys.next(); + return Map.entry(k, b.getString(k)); + } + }; + } + public boolean assignRowValues(ResultSet receiver, long currentRow) throws SQLException { @@ -76,7 +136,7 @@ public boolean assignRowValues(ResultSet receiver, long currentRow) * Return the contents of the {@code example.properties} resource, * one (key,value) row per entry. */ - @Function( type = "javatest._properties") + @Function(type = "javatest._properties", provides = "propertyExampleAnno") public static ResultSetProvider propertyExampleAnno() throws SQLException { @@ -90,6 +150,17 @@ public static ResultSetProvider propertyExampleAnno() } } + /** + * Return the contents of the {@code example.properties} resource, + * one (key,value) row per entry, using {@code ResourceBundle} to load it. 
+ */ + @Function(type = "javatest._properties", provides = "propertyExampleRB") + public static ResultSetProvider propertyExampleRB() + throws SQLException + { + return new UsingProperties(null); + } + public void close() { } diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/XMLRenderedTypes.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/XMLRenderedTypes.java index ca7f501d9..871e96445 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/XMLRenderedTypes.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/XMLRenderedTypes.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Tada AB and other contributors, as listed below. + * Copyright (c) 2019-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -17,7 +17,6 @@ import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; import org.postgresql.pljava.annotation.SQLType; import static org.postgresql.pljava.example.LoggerTest.logMessage; @@ -30,16 +29,14 @@ * in case of being loaded into a PostgreSQL instance built without that type. * The {@code pg_node_tree} type appears in 9.1. 
*/ -@SQLActions({ - @SQLAction(implementor="postgresql_ge_90100", - provides="postgresql_xml_ge91", - install= - "SELECT CASE (SELECT 1 FROM pg_type WHERE typname = 'xml') WHEN 1" + - " THEN set_config('pljava.implementors', 'postgresql_xml_ge91,' || " + - " current_setting('pljava.implementors'), true) " + - "END" - ) -}) +@SQLAction(implementor="postgresql_ge_90100", + provides="postgresql_xml_ge91", + install= + "SELECT CASE (SELECT 1 FROM pg_type WHERE typname = 'xml') WHEN 1" + + " THEN set_config('pljava.implementors', 'postgresql_xml_ge91,' || " + + " current_setting('pljava.implementors'), true) " + + "END" +) public class XMLRenderedTypes { @Function(schema="javatest", implementor="postgresql_xml_ge91") diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/saxon/S9.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/saxon/S9.java index 8f5301935..fc3fb5210 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/saxon/S9.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/saxon/S9.java @@ -915,7 +915,8 @@ public void close() } /** - * Produce and return one row of the {@code XMLTABLE} result table per call. + * Produce and return one row of + * the {@code XMLTABLE} result table per call. *

* The row expression has already been compiled and its evaluation begun, * producing a sequence iterator. The column XQuery expressions have all @@ -2941,7 +2942,8 @@ public void onGroupEnd(int groupNumber) } /** - * Function form of the ISO SQL {@code }. + * Function form of the ISO SQL + * {@code }. *

* Rewrite the standard form *

@@ -2995,7 +2997,8 @@ public static boolean like_regex(
 	}
 
 	/**
-	 * Syntax-sugar-free form of the ISO SQL {@code OCCURRENCES_REGEX} function:
+	 * Syntax-sugar-free form of the ISO SQL
+	 * {@code OCCURRENCES_REGEX} function:
 	 * how many times does a pattern occur in a string?
 	 *

* Rewrite the standard form @@ -3056,7 +3059,8 @@ public static int occurrences_regex( } /** - * Syntax-sugar-free form of the ISO SQL {@code POSITION_REGEX} function: + * Syntax-sugar-free form of the ISO SQL + * {@code POSITION_REGEX} function: * where does a pattern, or part of it, occur in a string? *

* Rewrite the standard forms @@ -3143,7 +3147,8 @@ public static int position_regex( } /** - * Syntax-sugar-free form of the ISO SQL {@code SUBSTRING_REGEX} function: + * Syntax-sugar-free form of the ISO SQL + * {@code SUBSTRING_REGEX} function: * return a substring specified by a pattern match in a string. *

* Rewrite the standard form @@ -3229,7 +3234,8 @@ public static String substring_regex( } /** - * Syntax-sugar-free form of the ISO SQL {@code TRANSLATE_REGEX} function: + * Syntax-sugar-free form of the ISO SQL + * {@code TRANSLATE_REGEX} function: * return a string constructed from the input string by replacing one * specified occurrence, or all occurrences, of a matching pattern. *

diff --git a/pljava-packaging/build.xml b/pljava-packaging/build.xml index ea612df8a..cc4fed957 100644 --- a/pljava-packaging/build.xml +++ b/pljava-packaging/build.xml @@ -255,6 +255,18 @@ jos.close(); simple update is possible, just repeat the next entry, with the from-version changed. --> + + + org.postgresql pljava.app - 1.6.0-SNAPSHOT + 1.6-SNAPSHOT pljava-packaging PL/Java packaging diff --git a/pljava-packaging/src/main/resources/pljava.policy b/pljava-packaging/src/main/resources/pljava.policy index 7513219e7..c360dcdb5 100644 --- a/pljava-packaging/src/main/resources/pljava.policy +++ b/pljava-packaging/src/main/resources/pljava.policy @@ -42,6 +42,11 @@ grant { // permission java.util.PropertyPermission "jdk.lang.ref.disableClearBeforeEnqueue", "read"; + + // Something similar happened in Java 14 (not yet fixed in 15). + // + permission java.util.PropertyPermission + "java.util.concurrent.ForkJoinPool.common.maximumSpares", "read"; }; @@ -58,6 +63,10 @@ grant codebase "${org.postgresql.pljava.codesource}" { "charsetProvider"; permission java.lang.RuntimePermission "createClassLoader"; + permission java.lang.RuntimePermission + "getProtectionDomain"; + permission java.net.NetPermission + "specifyStreamHandler"; permission java.util.logging.LoggingPermission "control"; permission java.security.SecurityPermission diff --git a/pljava-pgxs/pom.xml b/pljava-pgxs/pom.xml index 7b929397b..79660f0de 100644 --- a/pljava-pgxs/pom.xml +++ b/pljava-pgxs/pom.xml @@ -5,7 +5,7 @@ org.postgresql pljava.app - 1.6.0-SNAPSHOT + 1.6-SNAPSHOT pljava-pgxs diff --git a/pljava-so/pom.xml b/pljava-so/pom.xml index ae33321fb..e3555e788 100644 --- a/pljava-so/pom.xml +++ b/pljava-so/pom.xml @@ -4,7 +4,7 @@ org.postgresql pljava.app - 1.6.0-SNAPSHOT + 1.6-SNAPSHOT pljava-so PL/Java backend native code diff --git a/pljava-so/src/main/c/Backend.c b/pljava-so/src/main/c/Backend.c index 89d84125e..073709be2 100644 --- a/pljava-so/src/main/c/Backend.c +++ b/pljava-so/src/main/c/Backend.c 
@@ -208,6 +208,7 @@ static bool seenVisualVMName; static bool seenModuleMain; static char const visualVMprefix[] = "-Dvisualvm.display.name="; static char const moduleMainPrefix[] = "-Djdk.module.main="; +static char const policyUrlsGUC[] = "pljava.policy_urls"; /* * In a background worker, _PG_init may be called very early, before much of @@ -517,6 +518,7 @@ static void initsequencer(enum initstage is, bool tolerant) initstage = IS_GUCS_REGISTERED; if ( deferInit ) return; + /*FALLTHROUGH*/ case IS_GUCS_REGISTERED: if ( NULL == libjvmlocation ) @@ -529,6 +531,7 @@ static void initsequencer(enum initstage is, bool tolerant) goto check_tolerant; } initstage = IS_CAND_JVMLOCATION; + /*FALLTHROUGH*/ case IS_CAND_JVMLOCATION: if ( NULL == policy_urls ) @@ -541,6 +544,7 @@ static void initsequencer(enum initstage is, bool tolerant) goto check_tolerant; } initstage = IS_CAND_POLICYURLS; + /*FALLTHROUGH*/ case IS_CAND_POLICYURLS: if ( ! pljavaEnabled ) @@ -555,6 +559,7 @@ static void initsequencer(enum initstage is, bool tolerant) goto check_tolerant; } initstage = IS_PLJAVA_ENABLED; + /*FALLTHROUGH*/ case IS_PLJAVA_ENABLED: libjvm_handle = pg_dlopen(libjvmlocation); @@ -568,6 +573,7 @@ static void initsequencer(enum initstage is, bool tolerant) goto check_tolerant; } initstage = IS_CAND_JVMOPENED; + /*FALLTHROUGH*/ case IS_CAND_JVMOPENED: pljava_createvm = @@ -591,6 +597,7 @@ static void initsequencer(enum initstage is, bool tolerant) goto check_tolerant; } initstage = IS_CREATEVM_SYM_FOUND; + /*FALLTHROUGH*/ case IS_CREATEVM_SYM_FOUND: s_javaLogLevel = INFO; @@ -604,6 +611,7 @@ static void initsequencer(enum initstage is, bool tolerant) pljavaDebug = 1; #endif initstage = IS_MISC_ONCE_DONE; + /*FALLTHROUGH*/ case IS_MISC_ONCE_DONE: JVMOptList_init(&optList); /* uses CurrentMemoryContext */ @@ -624,6 +632,7 @@ static void initsequencer(enum initstage is, bool tolerant) JVMOptList_add(&optList, effectiveModulePath, 0, true); } initstage = IS_JAVAVM_OPTLIST; + 
/*FALLTHROUGH*/ case IS_JAVAVM_OPTLIST: JNIresult = initializeJavaVM(&optList); /* frees the optList */ @@ -647,6 +656,7 @@ static void initsequencer(enum initstage is, bool tolerant) jvmStartedAtLeastOnce = true; elog(DEBUG2, "successfully created Java virtual machine"); initstage = IS_JAVAVM_STARTED; + /*FALLTHROUGH*/ case IS_JAVAVM_STARTED: #ifdef USE_PLJAVA_SIGHANDLERS @@ -658,6 +668,7 @@ static void initsequencer(enum initstage is, bool tolerant) */ on_proc_exit(_destroyJavaVM, 0); initstage = IS_SIGHANDLERS; + /*FALLTHROUGH*/ case IS_SIGHANDLERS: Invocation_pushBootContext(&ctx); @@ -708,6 +719,7 @@ static void initsequencer(enum initstage is, bool tolerant) _destroyJavaVM(0, 0); goto check_tolerant; } + /*FALLTHROUGH*/ case IS_PLJAVA_FOUND: greeting = InstallHelper_hello(); @@ -716,11 +728,13 @@ static void initsequencer(enum initstage is, bool tolerant) errdetail("versions:\n%s", greeting))); pfree(greeting); initstage = IS_PLJAVA_INSTALLING; + /*FALLTHROUGH*/ case IS_PLJAVA_INSTALLING: if ( NULL != pljavaLoadPath ) InstallHelper_groundwork(); /* sqlj schema, language handlers, ...*/ initstage = IS_COMPLETE; + /*FALLTHROUGH*/ case IS_COMPLETE: pljavaLoadingAsExtension = false; @@ -1648,7 +1662,7 @@ static void registerGUCOptions(void) NULL); /* show hook */ STRING_GUC( - "pljava.policy_urls", + policyUrlsGUC, "URLs to Java security policy file(s) for PL/Java's use", "Quote each URL and separate with commas. 
Any URL may begin (inside " "the quotes) with n= where n is the index of the Java " @@ -1923,7 +1937,11 @@ JNICALL Java_org_postgresql_pljava_internal_Backend__1getConfigOption(JNIEnv* en { PG_TRY(); { - const char *value = PG_GETCONFIGOPTION(key); + const char *value; + if ( 0 == strcmp(policyUrlsGUC, key) ) + value = policy_urls; + else + value = PG_GETCONFIGOPTION(key); pfree(key); if(value != 0) result = String_createJavaStringFromNTS(value); diff --git a/pljava-so/src/main/c/type/Type.c b/pljava-so/src/main/c/type/Type.c index ef9a8849d..5cd700cf7 100644 --- a/pljava-so/src/main/c/type/Type.c +++ b/pljava-so/src/main/c/type/Type.c @@ -243,6 +243,7 @@ static Type _getCoerce(Type self, Type other, Oid fromOid, Oid toOid, case COERCION_PATH_NONE: elog(ERROR, "no conversion function from (regtype) %d to %d", fromOid, toOid); + pg_unreachable(); /*elog(ERROR is already so marked; what's with gcc?*/ case COERCION_PATH_RELABELTYPE: /* * Binary compatible type. No need for a special coercer. 
@@ -255,9 +256,11 @@ static Type _getCoerce(Type self, Type other, Oid fromOid, Oid toOid, case COERCION_PATH_COERCEVIAIO: elog(ERROR, "COERCEVIAIO not implemented from (regtype) %d to %d", fromOid, toOid); + pg_unreachable(); case COERCION_PATH_ARRAYCOERCE: elog(ERROR, "ARRAYCOERCE not implemented from (regtype) %d to %d", fromOid, toOid); + pg_unreachable(); case COERCION_PATH_FUNC: break; } diff --git a/pljava/pom.xml b/pljava/pom.xml index bf9ab6111..51eb86f51 100644 --- a/pljava/pom.xml +++ b/pljava/pom.xml @@ -4,7 +4,7 @@ org.postgresql pljava.app - 1.6.0-SNAPSHOT + 1.6-SNAPSHOT pljava PL/Java backend Java code diff --git a/pljava/src/main/java/module-info.java b/pljava/src/main/java/module-info.java index 610f58612..68923bbe4 100644 --- a/pljava/src/main/java/module-info.java +++ b/pljava/src/main/java/module-info.java @@ -23,6 +23,8 @@ exports org.postgresql.pljava.elog to java.logging; + exports org.postgresql.pljava.policy to java.base; // has custom Permission + provides java.net.spi.URLStreamHandlerProvider with org.postgresql.pljava.sqlj.Handler; diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Function.java b/pljava/src/main/java/org/postgresql/pljava/internal/Function.java index 31cc1a3bd..3a0274a8a 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/Function.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Function.java @@ -17,9 +17,11 @@ import static java.lang.invoke.MethodHandles.arrayElementGetter; import static java.lang.invoke.MethodHandles.arrayElementSetter; import static java.lang.invoke.MethodHandles.collectArguments; +import static java.lang.invoke.MethodHandles.constant; import static java.lang.invoke.MethodHandles.dropArguments; import static java.lang.invoke.MethodHandles.empty; import static java.lang.invoke.MethodHandles.exactInvoker; +import static java.lang.invoke.MethodHandles.explicitCastArguments; import static java.lang.invoke.MethodHandles.filterArguments; import static 
java.lang.invoke.MethodHandles.filterReturnValue; import static java.lang.invoke.MethodHandles.foldArguments; @@ -171,6 +173,7 @@ public static Class getClassIfUDT( */ private static MethodType buildSignature( ClassLoader schemaLoader, String[] jTypes, boolean forValidator, + boolean commute, boolean retTypeIsOutParameter, boolean isMultiCall, boolean altForm) throws SQLException { @@ -199,6 +202,13 @@ private static MethodType buildSignature( for ( int i = 0 ; i < rtIdx ; ++ i ) pTypes[i] = loadClass(schemaLoader, jTypes[i], forValidator); + if ( commute ) + { + Class t = pTypes[0]; + pTypes[0] = pTypes[1]; + pTypes[1] = t; + } + Class returnType = getReturnSignature(schemaLoader, retJType, forValidator, retTypeIsOutParameter, isMultiCall, altForm); @@ -273,12 +283,12 @@ private static Lookup lookupFor(Class clazz) */ private static MethodHandle getMethodHandle( ClassLoader schemaLoader, Class clazz, String methodName, - boolean forValidator, + boolean forValidator, boolean commute, String[] jTypes, boolean retTypeIsOutParameter, boolean isMultiCall) throws SQLException { MethodType mt = - buildSignature(schemaLoader, jTypes, forValidator, + buildSignature(schemaLoader, jTypes, forValidator, commute, retTypeIsOutParameter, isMultiCall, false); // try altForm false ReflectiveOperationException ex1 = null; @@ -320,7 +330,7 @@ private static MethodHandle getMethodHandle( if ( null != altType ) { jTypes[jTypes.length - 1] = altType.getCanonicalName(); - mt = buildSignature(schemaLoader, jTypes, forValidator, + mt = buildSignature(schemaLoader, jTypes, forValidator, commute, retTypeIsOutParameter, isMultiCall, true); // retry altForm true try { @@ -578,6 +588,8 @@ else if ( rt.isPrimitive() ) private static final MethodHandle s_paramCountsAre; private static final MethodHandle s_countsZeroer; private static final MethodHandle s_nonNull; + private static final MethodHandle s_not; + private static final MethodHandle s_boxedNot; /* * Handles used to retrieve rows using 
SFRM_ValuePerCall protocol, from a @@ -840,6 +852,18 @@ private static void pop() s_nonNull = l.findStatic(Objects.class, "nonNull", methodType(boolean.class, Object.class)); + s_not = guardWithTest(identity(boolean.class), + dropArguments(constant(boolean.class, false), 0, boolean.class), + dropArguments(constant(boolean.class, true), 0, boolean.class)); + + s_boxedNot = + guardWithTest( + explicitCastArguments(s_nonNull, + methodType(boolean.class, Boolean.class)), + explicitCastArguments(s_not, + methodType(Boolean.class, Boolean.class)), + identity(Boolean.class)); + /* * Build a bit of MethodHandle tree for invoking a set-returning * user function that will implement the ValuePerCall protocol. @@ -1304,6 +1328,8 @@ private static Invocable init( String[] resolvedTypes; boolean isMultiCall = false; boolean retTypeIsOutParameter = false; + boolean commute = (null != info.group("com")); + boolean negate = (null != info.group("neg")); if ( forValidator ) calledAsTrigger = isTrigger(procTup); @@ -1319,7 +1345,7 @@ private static Invocable init( boolean[] multi = new boolean[] { isMultiCall }; boolean[] rtiop = new boolean[] { retTypeIsOutParameter }; resolvedTypes = setupFunctionParams(wrappedPtr, info, procTup, - schemaLoader, clazz, readOnly, typeMap, multi, rtiop); + schemaLoader, clazz, readOnly, typeMap, multi, rtiop, commute); isMultiCall = multi [ 0 ]; retTypeIsOutParameter = rtiop [ 0 ]; } @@ -1327,11 +1353,39 @@ private static Invocable init( String methodName = info.group("meth"); MethodHandle handle = - adaptHandle( - getMethodHandle(schemaLoader, clazz, methodName, forValidator, - resolvedTypes, retTypeIsOutParameter, isMultiCall) - .asFixedArity() - ); + getMethodHandle(schemaLoader, clazz, methodName, forValidator, + commute, resolvedTypes, retTypeIsOutParameter, isMultiCall) + .asFixedArity(); + MethodType mt = handle.type(); + + if ( commute ) + { + Class[] types = mt.parameterArray(); + mt = mt + .changeParameterType(0, types[1]) + 
.changeParameterType(1, types[0]); + handle = retTypeIsOutParameter + ? permuteArguments(handle, mt, 1, 0, 2) + : permuteArguments(handle, mt, 1, 0); + } + + if ( negate ) + { + MethodHandle inverter = null; + Class rt = mt.returnType(); + if ( boolean.class == rt ) + inverter = s_not; + else if ( Boolean.class == rt ) + inverter = s_boxedNot; + + if ( null == inverter || retTypeIsOutParameter ) + throw new SQLSyntaxErrorException( + "wrong return type for transformation [negate]", "42P13"); + + handle = filterReturnValue(handle, inverter); + } + + handle = adaptHandle(handle); if ( isMultiCall ) handle = ( @@ -1470,7 +1524,7 @@ private static String[] setupFunctionParams( long wrappedPtr, Matcher info, ResultSet procTup, ClassLoader schemaLoader, Class clazz, boolean readOnly, Map> typeMap, - boolean[] multi, boolean[] returnTypeIsOP) + boolean[] multi, boolean[] returnTypeIsOP, boolean commute) throws SQLException { int numParams = procTup.getInt("pronargs"); @@ -1500,7 +1554,7 @@ private static String[] setupFunctionParams( * resolvedTypes that the mapping from SQL types suggested above. */ parseParameters( wrappedPtr, resolvedTypes, explicitSignature, - isMultiCall, returnTypeIsOutputParameter); + isMultiCall, returnTypeIsOutputParameter, commute); } /* As in the original C setupFunctionParams, if an explicit Java return @@ -1546,7 +1600,8 @@ private static String[] setupFunctionParams( */ private static void parseParameters( long wrappedPtr, String[] resolvedTypes, String explicitSignature, - boolean isMultiCall, boolean returnTypeIsOutputParameter) + boolean isMultiCall, boolean returnTypeIsOutputParameter, + boolean commute) throws SQLException { boolean lastIsOut = ( ! isMultiCall ) && returnTypeIsOutputParameter; @@ -1560,6 +1615,17 @@ private static void parseParameters( "AS (Java): expected %1$d parameter types, found %2$d", expect, explicitTypes.length), "42601"); + if ( commute ) + { + if ( explicitTypes.length != (lastIsOut ? 
3 : 2) ) + throw new SQLSyntaxErrorException( + "wrong number of parameters for transformation [commute]", + "42P13"); + String t = explicitTypes[0]; + explicitTypes[0] = explicitTypes[1]; + explicitTypes[1] = t; + } + doInPG(() -> { for ( int i = 0 ; i < resolvedTypes.length - 1 ; ++ i ) @@ -1714,6 +1780,11 @@ private static String getAS(ResultSet procTup) throws SQLException /* or the non-UDT form (which can't begin, insensitively, with UDT) */ "|(?!(?i:udt\\[))" + + /* allow a prefix like [commute] or [negate] or [commute,negate] */ + "(?:\\[(?:" + + "(?:(?:(?commute)|(?negate))(?:(?=\\])|,(?!\\])))" + + ")++\\])?+" + + /* and the long-standing method spec syntax */ "(?:(?%2$s)=)?+(?%1$s)\\.(?%3$s)" + "(?:\\((?(?:(?:%2$s,)*+%2$s)?+)\\))?+", javaTypeName, diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/InstallHelper.java b/pljava/src/main/java/org/postgresql/pljava/internal/InstallHelper.java index f89d00159..42ed012b4 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/InstallHelper.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/InstallHelper.java @@ -17,6 +17,8 @@ import java.net.URL; import java.net.MalformedURLException; import java.nio.charset.Charset; +import java.security.NoSuchAlgorithmException; +import java.security.Policy; import java.security.Security; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -36,6 +38,7 @@ import org.postgresql.pljava.jdbc.SQLUtils; import org.postgresql.pljava.management.SQLDeploymentDescriptor; +import org.postgresql.pljava.policy.TrialPolicy; import static org.postgresql.pljava.annotation.processing.DDRWriter.eQuote; import static org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; @@ -161,6 +164,8 @@ public static String hello( e); } + setTrialPolicyIfSpecified(); + System.setSecurityManager( new SecurityManager()); StringBuilder sb = new StringBuilder(); @@ -255,6 +260,24 @@ private static void setPolicyURLs() } } + private static void 
setTrialPolicyIfSpecified() throws SQLException + { + String trialURI = System.getProperty( + "org.postgresql.pljava.policy.trial"); + + if ( null == trialURI ) + return; + + try + { + Policy.setPolicy( new TrialPolicy( trialURI)); + } + catch ( NoSuchAlgorithmException e ) + { + throw new SQLException(e.getMessage(), e); + } + } + public static void groundwork( String module_pathname, String loadpath_tbl, String loadpath_tbl_quoted, boolean asExtension, boolean exNihilo) @@ -668,8 +691,10 @@ void migrateFrom( SchemaVariant sv, Connection c, Statement s) UNREL20040120 ("5e4131738cd095b7ff6367d64f809f6cec6a7ba7"), EMPTY (null); + static final SchemaVariant REL_1_6_1 = REL_1_5_0; static final SchemaVariant REL_1_6_0 = REL_1_5_0; + static final SchemaVariant REL_1_5_7 = REL_1_5_0; static final SchemaVariant REL_1_5_6 = REL_1_5_0; static final SchemaVariant REL_1_5_5 = REL_1_5_0; static final SchemaVariant REL_1_5_4 = REL_1_5_0; diff --git a/pljava/src/main/java/org/postgresql/pljava/management/Commands.java b/pljava/src/main/java/org/postgresql/pljava/management/Commands.java index d6fec76fa..c7b659710 100644 --- a/pljava/src/main/java/org/postgresql/pljava/management/Commands.java +++ b/pljava/src/main/java/org/postgresql/pljava/management/Commands.java @@ -69,13 +69,14 @@ import static org.postgresql.pljava.annotation.Function.Security.DEFINER; /** - * This methods of this class are implementations of SQLJ commands. - *

SQLJ functions

+ * This methods of this class are implementations of SQLJ procedures (and some + * related ones beyond what ISO 9075-13 specifies). + *

SQLJ procedures

*

install_jar

- * The install_jar command loads a jar file from a location appointed by an URL - * or a binary image that constitutes the contents of a jar file into the SQLJ - * jar repository. It is an error if a jar with the given name already exists in - * the repository. + * The install_jar procedure loads a jar file from a location appointed by an + * URL or a binary image that constitutes the contents of a jar file into the + * SQLJ jar repository. It is an error if a jar with the given name already + * exists in the repository. *

Usage 1

*
SELECT sqlj.install_jar(<jar_url>, <jar_name>, <deploy>); *
@@ -121,8 +122,9 @@ * * *

replace_jar

- * The replace_jar will replace a loaded jar with another jar. Use this command - * to update already loaded files. It's an error if the jar is not found. + * The replace_jar procedure will replace a loaded jar with another jar. Use + * this command to update already loaded files. It's an error if the jar is not + * found. *

Usage 1

*
SELECT sqlj.replace_jar(<jar_url>, <jar_name>, <redeploy>); *
@@ -168,9 +170,9 @@ * * *

remove_jar

- * The remove_jar will drop the jar from the jar repository. Any classpath that - * references this jar will be updated accordingly. It's an error if the jar is - * not found. + * The remove_jar procedure will drop the jar from the jar repository. Any + * classpath that references this jar will be updated accordingly. It's an error + * if no such jar is installed. *

Usage

*
SELECT sqlj.remove_jar(<jar_name>, <undeploy>); *
@@ -188,9 +190,9 @@ * * *

get_classpath

- * The get_classpath will return the classpath that has been defined for the - * given schema or NULL if the schema has no classpath. It's an error if the - * given schema does not exist. + * The get_classpath procedure will return the classpath that has been defined + * for the given schema or NULL if the schema has no classpath. It's an error if + * the given schema does not exist. *

Usage

*
SELECT sqlj.get_classpath(<schema>); *
@@ -201,11 +203,11 @@ * The name of the schema * * - *

set_classpath

- * The set_classpath will define a classpath for the given schema. A classpath - * consists of a colon separated list of jar names. It's an error if the given - * schema does not exist or if one or more jar names references non existent - * jars. + *

set_classpath

+ * The set_classpath procedure will define a classpath for the given schema. A + * classpath consists of a colon separated list of jar names. It's an error if + * the given schema does not exist or if one or more jar names reference + * non-existent jars. *

Usage

*
SELECT sqlj.set_classpath(<schema>, <classpath>); *
@@ -220,9 +222,9 @@ * The colon separated list of jar names * * - *

add_type_mapping

- * The add_type_mapping defines the mapping between an SQL type and a Java - * class. + *

add_type_mapping

+ * The add_type_mapping procedure defines the mapping between an SQL type and a + * Java class. *

Usage

*
SELECT sqlj.add_type_mapping(<sqlTypeName>, <className>); *
@@ -241,8 +243,8 @@ * * *

drop_type_mapping

- * The drop_type_mapping removes the mapping between an SQL type and a Java - * class. + * The drop_type_mapping procedure removes the mapping between an SQL type and a + * Java class. *

Usage

*
SELECT sqlj.drop_type_mapping(<sqlTypeName>); *
@@ -256,7 +258,7 @@ * * *

alias_java_language

- * The {@link #aliasJavaLanguage alias_java_language command} issues + * The {@link #aliasJavaLanguage alias_java_language procedure} issues * a PostgreSQL {@code CREATE LANGUAGE} command to define a named "language" * that is an alias for PL/Java. The name can appear in the * Java security policy to grant diff --git a/pljava/src/main/java/org/postgresql/pljava/policy/TrialPolicy.java b/pljava/src/main/java/org/postgresql/pljava/policy/TrialPolicy.java new file mode 100644 index 000000000..320a66c48 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/policy/TrialPolicy.java @@ -0,0 +1,439 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.policy; + +import java.lang.reflect.ReflectPermission; + +import java.net.URI; + +import java.security.CodeSource; +import java.security.NoSuchAlgorithmException; +import java.security.Permission; +import java.security.PermissionCollection; +import java.security.Policy; +import java.security.ProtectionDomain; +import java.security.SecurityPermission; +import java.security.URIParameter; + +import java.util.ArrayList; +import java.util.Arrays; +import static java.util.Collections.emptyEnumeration; +import static java.util.Collections.enumeration; +import java.util.Enumeration; +import java.util.Iterator; +import java.util.List; + +import static org.postgresql.pljava.elog.ELogHandler.LOG_LOG; +import static org.postgresql.pljava.internal.Backend.log; +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; +import static org.postgresql.pljava.internal.Privilege.doPrivileged; + +/** + * An implementation of {@link Policy} intended for temporary use while + * 
identifying needed permission grants for existing code. + *

+ * This policy is meant to operate as a fallback in conjunction with the normal + * PL/Java policy specified with the {@code pljava.policy_urls} configuration + * setting. This policy is activated by specifying an additional policy file + * URL with {@code -Dorg.postgresql.pljava.policy.trial=}url in the + * {@code pljava.vmoptions} setting. + *

+ * Permission checks that are allowed by the normal policy in + * {@code pljava.policy_urls} are allowed with no further checking. Permissions + * denied by that policy are checked in this one. If denied in this policy, that + * is the end of the matter. A permission check that is denied by the normal + * policy but allowed by this one is allowed, with a message to the server log. + *

+ * The log message begins with {@code POLICY DENIES/TRIAL POLICY ALLOWS:} + * and the requested permission, followed by an abbreviated stack trace. + * To minimize log volume, the stack trace includes a frame above and below + * each crossing of a module or protection domain boundary; a single {@code ...} + * replaces intermediate frames within the same module and domain. + * At the position in the trace of the protection domain that failed the policy + * check, a line is inserted with the domain's code source and principals, + * such as {@code >> sqlj:examples [PLPrincipal.Sandboxed: java] <<}. This + * abbreviated trace should be well suited to the purpose of determining where + * any additional permission grants ought to be made. + *

+ * Because each check that is logged is then allowed, it can be possible to see + * multiple log entries for the same permission check, one for each domain in + * the call stack that is not granted the permission in the normal policy. + *

About false positives

+ * It is not uncommon to have software that checks in normal operation for + * certain permissions, catches exceptions, and proceeds to function normally. + * Use of this policy, if it is configured to grant the permissions being + * checked, will produce log entries for those 'hidden' checks and may create + * the appearance that permissions need to be granted when, in fact, the + * software would show no functional impairment without them. It is difficult + * to distinguish such false positives from other log entries for permissions + * that do need to be granted for the software to properly function. + *

+ * One approach would be to try to determine, from the log entries, which + * functions of the software led to the permission checks that were logged, and + * specifically test those functions in a database session that has been set up + * with a different policy file that does not grant those permissions. If the + * software then functions without incident, it may be concluded that those + * log entries were false positives. + */ +public class TrialPolicy extends Policy +{ + private static final String TYPE = "JavaPolicy"; + private static final RuntimePermission GET_PROTECTION_DOMAIN = + new RuntimePermission("getProtectionDomain"); + private final Policy realPolicy; + private final Policy limitPolicy; + private final StackWalker walker = + StackWalker.getInstance(StackWalker.Option.RETAIN_CLASS_REFERENCE); + + public TrialPolicy(String limitURI) throws NoSuchAlgorithmException + { + URIParameter lim = new URIParameter(URI.create(limitURI)); + realPolicy = Policy.getInstance(TYPE, null); + limitPolicy = Policy.getInstance(TYPE, lim); + } + + @Override + public PermissionCollection getPermissions(CodeSource codesource) + { + return realPolicy.getPermissions(codesource); + } + + @Override + public PermissionCollection getPermissions(ProtectionDomain domain) + { + return realPolicy.getPermissions(domain); + } + + @Override + public boolean implies( + ProtectionDomain domain, java.security.Permission permission) + { + if ( realPolicy.implies(domain, permission) ) + return true; + + if ( ! limitPolicy.implies(domain, permission) ) + { + /* + * The TrialPolicy.Permission below is an unusual one: like Java's + * own AllPermission, its implies() can be true for permissions of + * other classes than its own. Java's AllPermission is handled + * magically, and this one must be also, because deep down, the + * built-in Policy implementation keeps its PermissionCollections + * segregated by permission class. 
It would not notice on its own + * that 'permission' might be implied by a permission that is held + * but is of some other class. + */ + if ( ! limitPolicy.implies(domain, Permission.INSTANCE) + || ! Permission.INSTANCE.implies(permission) ) + return false; + } + + /* + * Construct a (with any luck, useful) abbreviated stack trace, using + * the first frame encountered at each change of protection domain while + * walking up the stack, saving the index of the first entry for the + * domain being checked. + */ + List stack = new ArrayList<>(); + int matchingDomainIndex = doPrivileged(() -> walker.walk(s -> + { + ProtectionDomain lastDomain = null; + StackWalker.StackFrame lastFrame = null; + Module lastModule = null; + Module thisModule = getClass().getModule(); + int matchIndex = -1; + int walkIndex = 0; + int newDomainIndex = 0; // walkIndex of first frame in a new domain + for ( StackWalker.StackFrame f : + (Iterable)s.skip(5)::iterator ) + { + ++ walkIndex; + Class frameClass = f.getDeclaringClass(); + Module frameModule = frameClass.getModule(); + ProtectionDomain frameDomain = frameClass.getProtectionDomain(); + if ( ! equals(lastDomain, frameDomain) + || null != lastModule && ! lastModule.equals(frameModule) ) + { + if ( null != lastFrame && walkIndex > 1 + newDomainIndex ) + { + if ( walkIndex > 2 + newDomainIndex ) + stack.add(null); // will be rendered as ... + stack.add(lastFrame.toStackTraceElement()); + } + if ( -1 == matchIndex && equals(domain, frameDomain) ) + matchIndex = stack.size(); + stack.add(f.toStackTraceElement()); + lastModule = frameModule; + lastDomain = frameDomain; + newDomainIndex = walkIndex; + } + + /* + * Exit the walk early, skip boring EntryPoints. 
+ */ + if ( frameModule.equals(thisModule) + && "org.postgresql.pljava.internal.EntryPoints" + .equals(frameClass.getName()) ) + { + if ( newDomainIndex == walkIndex ) + stack.remove(stack.size() - 1); + -- walkIndex; + break; + } + + lastFrame = f; + } + + if ( null != lastFrame && walkIndex > 1 + newDomainIndex ) + stack.add(lastFrame.toStackTraceElement()); + + if ( -1 == matchIndex ) + matchIndex = stack.size(); + return matchIndex; + }), null, GET_PROTECTION_DOMAIN); + + /* + * Construct a string representation of the trace. + */ + StringBuilder sb = new StringBuilder( + "POLICY DENIES/TRIAL POLICY ALLOWS: " + permission + '\n'); + Iterator it = stack.iterator(); + int i = 0; + for ( ;; ) + { + if ( matchingDomainIndex == i ++ ) + sb.append(">> ") + .append(domain.getCodeSource().getLocation()) + .append(' ') + .append(Arrays.toString(domain.getPrincipals())) + .append(" <<\n"); + if ( ! it.hasNext() ) + break; + StackTraceElement e = it.next(); + sb.append(null == e ? "..." : e.toString()); + if ( it.hasNext() || matchingDomainIndex == i ) + sb.append('\n'); + } + + /* + * This is not the best way to avoid blocking on log(); in some flavors + * of pljava.java_thread_pg_entry, threadMayEnterPG can return false + * simply because it's not /known/ that PG could be entered right now, + * and this could send the message off to System.err at times even if + * log() would have completed with no blocking. But the always accurate + * "could I enter PG right now without blocking?" method isn't provided + * yet. + */ + if ( threadMayEnterPG() ) + log(LOG_LOG, sb.toString()); + else + System.err.println(sb); + + return true; + } + + @Override + public void refresh() + { + realPolicy.refresh(); + limitPolicy.refresh(); + } + + /* + * Compare two protection domains, only by their code source for now. + * It appears that StackWalker doesn't invoke domain combiners, so the + * frames seen in the walk won't match the principals of the argument + * to implies(). 
+ */ + private boolean equals(ProtectionDomain a, ProtectionDomain b) + { + if ( null == a || null == b) + return a == b; + + CodeSource csa = a.getCodeSource(); + CodeSource csb = b.getCodeSource(); + + if ( null == csa || null == csb ) + return csa == csb; + + return csa.equals(csb); + } + + /** + * A permission like {@code java.security.AllPermission}, but without + * any {@code FilePermission} (the real policy's sandboxed/unsandboxed + * grants should handle those), nor a couple dozen varieties of + * {@code RuntimePermission}, {@code SecurityPermission}, and + * {@code ReflectPermission} that would typically not be granted without + * clear intent. + *

+ * This permission can be granted in a {@code TrialPolicy} while identifying + * any straggling permissions needed by some existing code, without quite + * the excitement of granting {@code AllPermission}. Any of the permissions + * excluded from this one can also be granted in the {@code TrialPolicy}, + * of course, if there is reason to believe the code might need them. + *

+ * The proper spelling in a policy file is + * {@code org.postgresql.pljava.policy.TrialPolicy$Permission}. + *

+ * This permission will probably only work right in a {@code TrialPolicy}. + * Any permission whose {@code implies} method can return true for + * permissions of other classes than its own may be ineffective in a stock + * Java policy, where permission collections are kept segregated by the + * class of the permission to be checked. Java's {@code AllPermission} gets + * special-case treatment in the stock implementation, and this permission + * likewise has to be treated specially in {@code TrialPolicy}. The only + * kind of custom permission that can genuinely drop in and work is one + * whose {@code implies} method only imposes semantics on the names/actions + * of different instances of that permission class. + *

+ * A permission that does not live on the boot classpath is initially read + * from a policy file as an instance of {@code UnresolvedPermission}, and + * only gets resolved when a permission check is made, checking for an + * instance of its actual class. That is another complication when + * implementing a permission that may imply permissions of other classes. + *

+ * A permission implemented in a different named module must be in a package + * that is exported to {@code java.base}. + */ + public static final class Permission extends java.security.Permission + { + private static final long serialVersionUID = 6401893677037633706L; + + /** + * An instance of this permission (not a singleton, merely one among + * possible others). + */ + static final Permission INSTANCE = new Permission(); + + public Permission() + { + super(""); + } + + public Permission(String name, String actions) + { + super(""); + } + + @Override + public boolean equals(Object other) + { + return other instanceof Permission; + } + + @Override + public int hashCode() + { + return 131113; + } + + @Override + public String getActions() + { + return null; + } + + @Override + public PermissionCollection newPermissionCollection() + { + return new Collection(); + } + + @Override + public boolean implies(java.security.Permission p) + { + if ( p instanceof Permission ) + return true; + + if ( p instanceof java.io.FilePermission ) + return false; + + if ( Holder.EXCLUDERHS.stream().anyMatch(r -> p.implies(r)) ) + return false; + + if ( Holder.EXCLUDELHS.stream().anyMatch(l -> l.implies(p)) ) + return false; + + return true; + } + + static class Collection extends PermissionCollection + { + private static final long serialVersionUID = 917249873714843122L; + + Permission the_permission = null; + + @Override + public void add(java.security.Permission p) + { + if ( isReadOnly() ) + throw new SecurityException( + "attempt to add a Permission to a readonly " + + "PermissionCollection"); + + if ( ! 
(p instanceof Permission) ) + throw new IllegalArgumentException( + "invalid in homogeneous PermissionCollection: " + p); + + if ( null == the_permission ) + the_permission = (Permission) p; + } + + @Override + public boolean implies(java.security.Permission p) + { + if ( null == the_permission ) + return false; + return the_permission.implies(p); + } + + @Override + public Enumeration elements() + { + if ( null == the_permission ) + return emptyEnumeration(); + return enumeration(List.of(the_permission)); + } + } + + static class Holder + { + static final List EXCLUDERHS = List.of( + new RuntimePermission("createClassLoader"), + new RuntimePermission("getClassLoader"), + new RuntimePermission("setContextClassLoader"), + new RuntimePermission("enableContextClassLoaderOverride"), + new RuntimePermission("setSecurityManager"), + new RuntimePermission("createSecurityManager"), + new RuntimePermission("shutdownHooks"), + new RuntimePermission("exitVM"), + new RuntimePermission("setFactory"), + new RuntimePermission("setIO"), + new RuntimePermission("getStackWalkerWithClassReference"), + new RuntimePermission("setDefaultUncaughtExceptionHandler"), + new RuntimePermission("manageProcess"), + new ReflectPermission("suppressAccessChecks"), + new SecurityPermission("createAccessControlContext"), + new SecurityPermission("setPolicy"), + new SecurityPermission("createPolicy.JavaPolicy") + ); + + static final List EXCLUDELHS = List.of( + new RuntimePermission("exitVM.*"), + new RuntimePermission("defineClassInPackage.*"), + new ReflectPermission("newProxyInPackage.*"), + new SecurityPermission("setProperty.*") + ); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/policy/package-info.java b/pljava/src/main/java/org/postgresql/pljava/policy/package-info.java new file mode 100644 index 000000000..dc411b58b --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/policy/package-info.java @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2020 Tada AB and other 
contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Purdue University + */ +/** + * Package implementing custom Java security policy useful while migrating + * existing code to policy-based PL/Java; allows permission checks denied by the + * main policy to succeed, while logging them so any needed permission grants + * can be identified and added to the main policy. + *

+ * This package is exported to {@code java.base} to provide a custom + * {@code Permission} that can be granted in policy. + */ +package org.postgresql.pljava.policy; diff --git a/pljava/src/main/java/org/postgresql/pljava/sqlj/Handler.java b/pljava/src/main/java/org/postgresql/pljava/sqlj/Handler.java index a0bd626b5..268c6be03 100644 --- a/pljava/src/main/java/org/postgresql/pljava/sqlj/Handler.java +++ b/pljava/src/main/java/org/postgresql/pljava/sqlj/Handler.java @@ -29,7 +29,7 @@ public class Handler extends URLStreamHandlerProvider { private static final Handler INSTANCE = new Handler(); - public URLStreamHandlerProvider provider() + public static URLStreamHandlerProvider provider() { return INSTANCE; } diff --git a/pljava/src/main/java/org/postgresql/pljava/sqlj/Loader.java b/pljava/src/main/java/org/postgresql/pljava/sqlj/Loader.java index 032ce8f73..7dbf84b29 100644 --- a/pljava/src/main/java/org/postgresql/pljava/sqlj/Loader.java +++ b/pljava/src/main/java/org/postgresql/pljava/sqlj/Loader.java @@ -342,12 +342,12 @@ private static URL entryURL(int entryId) { try { - return new URL( + return doPrivileged(() -> new URL( "dbf", "localhost", -1, "/" + entryId, - EntryStreamHandler.getInstance()); + EntryStreamHandler.getInstance())); } catch(MalformedURLException e) { diff --git a/pom.xml b/pom.xml index 2da8943d1..9a7e0f40c 100644 --- a/pom.xml +++ b/pom.xml @@ -3,7 +3,7 @@ 4.0.0 org.postgresql pljava.app - 1.6.0-SNAPSHOT + 1.6-SNAPSHOT pom PostgreSQL PL/Java https://tada.github.io/pljava/ @@ -69,42 +69,6 @@ - - release10 - - [10,11) - - - - - org.apache.maven.plugins - maven-compiler-plugin - - 10 - - - - - - - - release11 - - [11,12) - - - - - org.apache.maven.plugins - maven-compiler-plugin - - 11 - - - - - - nashorngone diff --git a/src/site/markdown/build/versions.md b/src/site/markdown/build/versions.md index ccf79f7b0..d5d36fb9e 100644 --- a/src/site/markdown/build/versions.md +++ b/src/site/markdown/build/versions.md @@ -17,16 +17,7 @@ feature 
introduced in that release. In the PL/Java 1.6.x series, the build can be done with Java 9 or newer. Once built, PL/Java is able to use another Java 9 or later JVM at run time, simply by setting -[the `pljava.libjvm_location` variable][jvml] to the desired version's library -(but see the exceptions described next). - -### Exceptions to build-version / runtime-version compatibility - -Because of compiler bugs in Java 10 and 11, builds done with those versions -will not run on earlier Java releases. A build on 10 requires 10 or later at -run time; a build on 11 requires 11 or later at run time. To ensure that the -built extension can use any Java 9 or later at run time, it must be built on -Java 9, or on Java 12 or later. +[the `pljava.libjvm_location` variable][jvml] to the desired version's library. PL/Java can run application code written for a later Java version than PL/Java itself was built with, as long as that later JRE version is used at run time. diff --git a/src/site/markdown/develop/coercion.md b/src/site/markdown/develop/coercion.md index 295d4f468..edd86bf3f 100644 --- a/src/site/markdown/develop/coercion.md +++ b/src/site/markdown/develop/coercion.md @@ -20,13 +20,13 @@ begin. The standard also provides an `SQLJ.ALTER_JAVA_PATH` function that gives complete control, based on the jar where a search begins, of which other jars should be searched for dependencies. -By contrast, PL/Java (through and including 1.5) *does not* include the +By contrast, PL/Java (through and including 1.6) *does not* include the jar name in `AS` clauses, and provides an [`SQLJ.SET_CLASSPATH`][scp] function that can set a distinct class path for any schema in the database. The schema `public` can also have a class path, which becomes the fallback for any search that is not resolved on another schema's class path. 
-[scp]: ../pljava/apidocs/index.html?org/postgresql/pljava/management/Commands.html#setClassPath(java.lang.String,%20java.lang.String) +[scp]: ../pljava/apidocs/org.postgresql.pljava.internal/org/postgresql/pljava/management/Commands.html#set_classpath The class named in an SQL function declaration's `AS` clause is looked up on the *class path for the schema in which the function is declared*, with @@ -41,8 +41,8 @@ in PL/Java with the [@BaseUDT annotation][baseudt]), which is completely integrated into PostgreSQL's type system and is usable from in or out of Java just like any other PostgreSQL type. -[basetype]: http://www.postgresql.org/docs/current/static/sql-createtype.html#AEN80283 -[baseudt]: ../pljava-api/apidocs/index.html?org/postgresql/pljava/annotation/BaseUDT.html +[basetype]: http://www.postgresql.org/docs/9.5/static/sql-createtype.html#AEN81321 +[baseudt]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/BaseUDT.html For the other flavors of user-defined type (described below), [`SQLJ.ADD_TYPE_MAPPING`][atm] (a PL/Java function, not in the standard) must @@ -50,8 +50,8 @@ be called to record the connection between the new type's SQL name and the Java class that implements it. The [@MappedUDT annotation][mappedudt] generates a call to this function along with any other SQL commands declaring the type. -[atm]: ../pljava/apidocs/index.html?org/postgresql/pljava/management/Commands.html#addTypeMapping(java.lang.String,%20java.lang.String) -[mappedudt]: ../pljava-api/apidocs/index.html?org/postgresql/pljava/annotation/MappedUDT.html +[atm]: ../pljava/apidocs/org.postgresql.pljava.internal/org/postgresql/pljava/management/Commands.html#add_type_mapping +[mappedudt]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/MappedUDT.html What it records is simply the SQL type name as a string, and the Java class name as a string, and these mappings apply database-wide. 
But internally, diff --git a/src/site/markdown/examples/saxon.md b/src/site/markdown/examples/saxon.md index 83ac7afc7..213ef9195 100644 --- a/src/site/markdown/examples/saxon.md +++ b/src/site/markdown/examples/saxon.md @@ -499,16 +499,16 @@ the encumbrance. [j9cds]: ../install/oj9vmopt.html#How_to_set_up_class_sharing_in_OpenJ9 [Saxon-HE]: http://www.saxonica.com/html/products/products.html [ptwp]: https://github.com/tada/pljava/wiki/Performance-tuning -[assignrowvalues]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#assignRowValues-java.sql.ResultSet-int- +[assignrowvalues]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#assignRowValues [xqre]: https://www.w3.org/TR/xpath-functions-31/#regex-syntax [xsre]: https://www.w3.org/TR/xmlschema-2/#regexs [xqflags]: https://www.w3.org/TR/xpath-functions-31/#flags [uts18rl16]: http://www.unicode.org/reports/tr18/#RL1.6 -[lrx]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#like_regex-java.lang.String-java.lang.String-java.lang.String-boolean- -[orx]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#occurrences_regex-java.lang.String-java.lang.String-java.lang.String-int-boolean-boolean- -[prx]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#position_regex-java.lang.String-java.lang.String-java.lang.String-int-boolean-boolean-int-int-boolean- -[srx]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#substring_regex-java.lang.String-java.lang.String-java.lang.String-int-boolean-int-int-boolean- -[trx]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#translate_regex-java.lang.String-java.lang.String-java.lang.String-java.lang.String-int-boolean-int-boolean- +[lrx]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#like_regex +[orx]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#occurrences_regex +[prx]: 
../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#position_regex +[srx]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#substring_regex +[trx]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#translate_regex [saxmatrix]: https://www.saxonica.com/html/products/feature-matrix-9-9.html [xqexpr]: https://www.w3.org/TR/xquery-31/#id-expressions [xqmainmod]: https://www.w3.org/TR/xquery-31/#dt-main-module diff --git a/src/site/markdown/releasenotes-pre1_6.md.vm b/src/site/markdown/releasenotes-pre1_6.md.vm new file mode 100644 index 000000000..09b5f0a45 --- /dev/null +++ b/src/site/markdown/releasenotes-pre1_6.md.vm @@ -0,0 +1,1607 @@ +# Release notes, releases prior to PL/Java 1.6 + +#set($h2 = '##') +#set($h3 = '###') +#set($h4 = '####') +#set($h5 = '#####') +#set($gborgbug = 'http://web.archive.org/web/20061208113236/http://gborg.postgresql.org/project/pljava/bugs/bugupdate.php?') +#set($pgfbug = 'https://web.archive.org/web/*/http://pgfoundry.org/tracker/?func=detail&atid=334&group_id=1000038&aid=') +#set($pgffeat = 'https://web.archive.org/web/*/http://pgfoundry.org/tracker/?func=detail&atid=337&group_id=1000038&aid=') +#set($ghbug = 'https://github.com/tada/pljava/issues/') +#set($ghpull = 'https://github.com/tada/pljava/pull/') + +## A nice thing about using Velocity is that each release can be entered at +## birth using h2 as its main heading, h3 and below within ... and then, when +## it is moved under 'earlier releases', just define those variables to be +## one heading level finer. Here goes: +#set($h2 = '###') +#set($h3 = '####') +#set($h4 = '#####') +#set($h5 = '######') + +$h2 PL/Java 1.5.7 + +1.5.7 is a bug-fix release, with a single issue backpatched from the 1.6 +branch, correcting a problem in XML Schema validation in some non-`en_US` +locales. 
+ +$h3 Bugs fixed + +* [XML Schema regression-test failure in de_DE locale](${ghbug}312) + +$h3 Credits + +Thanks to Christoph Berg for the report. + +$h2 PL/Java 1.5.6 (4 October 2020) + +This release adds support for PostgreSQL 13. + +It includes improvements to the JDBC 4.0 `java.sql.SQLXML` API that first became +available in 1.5.1, an update of the ISO SQL/XML examples based on the Saxon +product to Saxon 10 (which now includes support for XML Query higher-order +functions in the freely-licensed Saxon-HE), some improvements to internals, +and a number of bug fixes. + +$h3 Version compatibility + +PL/Java 1.5.6 can be built against recent PostgreSQL versions including 13, +and older ones back to 8.2, using Java SE 8 or later. The source code avoids +features newer than Java 6, so building with Java 7 or 6 should also be +possible, but is no longer routinely tested. The Java version used at runtime +does not have to be the same version used for building. PL/Java itself can run +on any Java version 6 or later if built with Java 11 or earlier; it can run +on Java 7 or later if built with Java 12 or later. PL/Java functions can be +written for, and use features of, whatever Java version will be loaded at run +time. See [version compatibility][versions] for more detail. + +PL/Java 1.5.6 cannot be built with Java 15 or later, as the Nashorn JavaScript +engine used in the build process no longer ships with Java 15. It can be built +with [GraalVM][], if `-Dpolyglot.js.nashorn-compat` is added to the `mvn` +command line. It will run on Java 15 if built with an earlier JDK or with Graal. + +When used with GraalVM as the runtime VM, PL/Java functions can use Graal's +"polyglot" capabilities to execute code in any other language available on +GraalVM. In this release, it is not yet possible to directly declare a function +in a language other than Java. 
+ +$h3 Changes + +$h4 Improvements to the `java.sql.SQLXML` type + +Additions to the `Adjusting.XML` API support +[limiting resource usage][xmlreslim] in XML processing, controlling +[resolution][xmlresolv] of external documents and resources, +[validation against a schema][xmlschema], and integration of an +[XML catalog][xmlcatalog] to locally satisfy requests for external documents. + +Corrections and new documentation of [whitespace handling][xmlws] in XML values +of `CONTENT` form, and implementation [limitations][xmlimpl]. + +$h4 Improvements to the Saxon-based ISO SQL/XML example functions + +Updated the dependency for these optional examples to Saxon 10. Probably the +most significant of the [Saxon 10 changes][saxon10], for PostgreSQL's purposes, +will be that the XQuery [higher-order function feature][xqhof] is now included +in the freely-licensed Saxon-HE, so that it is now possible without cost to +integrate a modern XQuery 3.1 implementation that is lacking only the +[schema-aware feature][xqsaf] and the [typed data feature][xqtdf] (for those, +the paid Saxon-EE product is needed), and the [static typing feature][xqstf] +(which is not in any Saxon edition). + +To compensate for delivering the higher-order function support in -HE, +Saxonica moved certain optimizations to -EE. This seems a justifiable trade, as +it is better for development purposes to have the more complete implementation +of the language, leaving better optimization to be bought if and when needed. + +Thanks to a tip from Saxon's developer, the returning of results to SQL is now +done in a way that may incur less copying in some cases. + +$h4 Internals + +* Many sources of warnings reported by the Java VM's `-Xcheck:jni` option have + been tracked down, making it practical to use `-Xcheck:jni` in testing. +* Reduced pressure on the garbage collector in management of references to + PostgreSQL native state. 
+ +$h3 Enhancement requests addressed + +* Work around PostgreSQL [API breakage in EnterpriseDB 11](${ghbug}260) + +$h3 Bugs fixed + +* [Support of arrays in composite types](${ghbug}300) +* [Order-dependent behavior caching array types](${ghbug}310) +* [Date conversion errors possible with PostgreSQL 10 on Windows/MSVC](${ghbug}297) +* [Build issue with Windows/MinGW-w64](${ghbug}282) +* ["xmltable" with XML output column or parameter](${ghbug}280) +* [Google Summer of Code catches 15-year-old PL/Java bug](${ghbug}274) +* [Several bugs in SQLXML handling](${ghbug}272) +* Work around an exception from `Reference.clear` on OpenJ9 JVM +* Bugs in SQL generator when supplying a function parameter name, or the + `category`, `delimiter`, or `storage` attribute of a user-defined type. + +$h3 Updated PostgreSQL APIs tracked + +* Removal of `CREATE EXTENSION ... FROM unpackaged` +* `numvals` in `SPITupleTable` +* `detoast.h` +* `detoast_external_attr` + +$h3 Credits + +There is a PL/Java 1.5.6 thanks in part to +Christoph Berg, +Chapman Flack, +Kartik Ohri, +original creator Thomas Hallgren, +and the many contributors to earlier versions. + +The work of Kartik Ohri in summer 2020 was supported by Google Summer of Code. 
+ +[xmlreslim]: use/sqlxml.html#Additional_adjustments_in_recent_Java_versions +[xmlresolv]: use/sqlxml.html#Supplying_a_SAX_or_DOM_EntityResolver_or_Schema +[xmlschema]: use/sqlxml.html#Validation_against_a_schema +[xmlcatalog]: use/sqlxml.html#Using_XML_Catalogs_when_running_on_Java_9_or_later +[xmlws]: use/sqlxml.html#Effect_on_parsing_of_whitespace +[xmlimpl]: use/sqlxml.html#Known_limitations +[saxon10]: https://www.saxonica.com/html/documentation/changes/v10/installation.html +[xqhof]: https://www.w3.org/TR/xquery-31/#id-higher-order-function-feature +[xqsaf]: https://www.w3.org/TR/xquery-31/#id-schema-aware-feature +[xqtdf]: https://www.w3.org/TR/xquery-31/#id-typed-data-feature +[xqstf]: https://www.w3.org/TR/xquery-31/#id-static-typing-feature + +$h2 PL/Java 1.5.5 (4 November 2019) + +This bug-fix release fixes runtime issues reported in 32-bit `i386` builds, some +of which would not affect a more common 64-bit architecture, but some of which +could under the wrong circumstances, so this release should be used in +preference to 1.5.4 or 1.5.3 on any architecture. + +It is featurewise identical to 1.5.4, so those release notes, below, should be +consulted for the details of user-visible changes. + +Thanks to Christoph Berg for the `i386` testing that exposed these issues. + +$h3 Bugs fixed + +* [32bit i386 segfault](${ghbug}246) + +$h2 PL/Java 1.5.4 (29 October 2019) + +This minor release fixes a build issue reported with Java 11, and adds +support for building with Java 13. Issues with building the javadocs in later +Java versions are resolved. A work-in-progress feature that can +[apply the SQLXML API to other tree-structured data types](use/xmlview.html) +is introduced. + +Documentation updates include coverage of +[changes to Application Class Data Sharing](install/appcds.html) in recent +Hotspot versions, and ahead-of-time compilation using +[jaotc](install/vmoptions.html#a-XX:AOTLibrary). 
+
+Otherwise, the release notes for 1.5.3, below, should be
+consulted for the details of recent user-visible changes.
+
+$h3 Bugs fixed
+
+* [Build failure with Java 11 and --release](${ghbug}235)
+* [Build with Java 13](${ghbug}236)
+* [Javadoc build fails in Java 11+](${ghbug}239)
+* [Javadoc build fails in Java 13](${ghbug}241)
+
+$h2 PL/Java 1.5.3 (4 October 2019)
+
+This release adds support for PostgreSQL 12, and removes the former
+requirement to build with a Java release earlier than 9.
+
+It includes a rework of threading and resource management, improvements to
+the JDBC 4.0 `java.sql.SQLXML` API that first became available in 1.5.1, and
+a substantially usable example providing the functionality of ISO SQL
+`XMLEXISTS`, `XMLQUERY`, `XMLTABLE`, `XMLCAST`, `LIKE_REGEX`,
+`OCCURRENCES_REGEX`, `POSITION_REGEX`, `SUBSTRING_REGEX`, and `TRANSLATE_REGEX`.
+Some bugs are fixed.
+
+$h3 Version compatibility
+
+PL/Java 1.5.3 can be built against recent PostgreSQL versions including 12,
+and older ones back to 8.2, using Java SE 8 or later. The source code avoids
+features newer than Java 6, so building with Java 7 or 6 should also be
+possible, but is no longer routinely tested. The Java version used at runtime
+does not have to be the same version used for building. PL/Java itself can run
+on any Java version 6 or later if built with Java 11 or earlier; it can run
+on Java 7 or later if built with Java 12. PL/Java functions can be written for,
+and use features of, whatever Java version will be loaded at run time. See
+[version compatibility][versions] for more detail.
+
+When used with [GraalVM][] as the runtime VM, PL/Java functions can use its
+"polyglot" capabilities to execute code in any other language available on
+GraalVM. In this release, it is not yet possible to directly declare a function
+in a language other than Java. 
+ +$h3 Changes + +$h4 Threading/synchronization, finalizers, and new configuration variable + +Java is multithreaded while PostgreSQL is not, requiring ways to prevent +Java threads from entering PostgreSQL at the wrong times, while cleaning up +native resources in PostgreSQL when PL/Java references are released, and +_vice versa_. + +PL/Java has historically used an assortment of approaches including Java +object finalizers, which have long been deprecated informally, and formally +since Java 9. Finalizers enter PostgreSQL from a thread of their own, and the +synchronization approach used in PL/Java 1.5.2 and earlier has been associated +with occasional hangs at backend exit when using an OpenJ9 JVM at runtime. + +A redesigned approach using a new `DualState` class was introduced in 1.5.1, +at first only used in implementing the `java.sql.SQLXML` type, a newly-added +feature. In 1.5.3, other approaches used in the rest of PL/Java's code base are +migrated to use `DualState` also, and all uses of the deprecated Java object +finalizers have been retired. With the new techniques, the former occasional +OpenJ9 hangs have not been observed. + +This represents the most invasive change to PL/Java's thread synchronization +in many years, so it may be worthwhile to reserve extra time for +testing applications. + +A new [configuration variable](use/variables.html), +`pljava.java_thread_pg_entry`, allows adjusting the thread policy. The default +setting, `allow`, preserves PL/Java's former behavior, allowing Java threads +entry into PostgreSQL one at a time, only when any thread already in PG code +has entered or returned to Java. + +With object finalizers no longer used, PL/Java itself does not need the `allow` +mode, but there may be application code that does. Application code can be +tested by setting the `error` mode, which will raise an error for any attempted +entry to PG from a thread other than the original thread that launched PL/Java. 
+If an application runs in `error` mode with no errors, it can also be run in +`block` mode, which may be more efficient, as it eliminates many locking +operations that happen in `allow` or `error` mode. However, if `block` mode +is used with an application that has not been fully tested in `error` mode +first, and the application does attempt to enter PostgreSQL from a Java thread +other than the initial one, the result can be blocked threads or a deadlocked +backend that has to be killed. + +A JMX management client like `JConsole` or `jvisualvm` can identify threads that +are blocked, if needed. The new `DualState` class also offers some statistics +that can be viewed in `JConsole`, or `jvisualvm` with the `VisualVM-MBeans` +plugin. + +$h4 Improvements to the `java.sql.SQLXML` type + +Support for this JDBC 4.0 type was added in PL/Java 1.5.1. Release 1.5.3 +includes the following improvements: + +* A new ["Adjusting" API](use/sqlxml.html#Extended_API_to_configure_XML_parsers) + exposes configuration settings for Java XML parsers that may be created + internally during operations on `SQLXML` instances. That allows the default + settings to restrict certain XML parser features as advocated by the + [Open Web Application Security Project][OWASP] when XML content may be + coming from untrusted sources, with a simple API for relaxing those + restrictions when appropriate for XML content from a known source. +* It is now possible for a PL/Java function to return, pass into a + `PreparedStatement`, etc., an `SQLXML` instance that PL/Java did not create. + For example, a PL/Java function could use another database's JDBC driver to + obtain a `SQLXML` value from that database, and use that as its own return + value. Transparently, the content is copied to a PL/Java `SQLXML` instance. + The copy can also be done explicitly, allowing the "Adjusting" API to be + used if the default XML parser restrictions should be relaxed. 
+* Behavior when the server encoding is not UTF-8, or when it is not an + IANA-registered encoding (even if Java has a codec for it), has been + improved. + +$h4 Improvements to the Saxon-based ISO SQL/XML example functions + +Since PL/Java 1.5.1, the supplied examples have included a not-built-by-default +[example supplying ISO SQL/XML features missing from core PostgreSQL][exsaxon]. +It is not built by default because it raises the minimum Java version to 8, and +brings in the Saxon-HE XML-processing library. + +In 1.5.3, the example now provides versions of the ISO SQL `XMLEXISTS`, +`XMLQUERY`, `XMLTABLE`, and `XMLCAST` functions based on the W3C XQuery +language as ISO SQL specifies (while PostgreSQL has an "XMLTABLE" function +since release 10 and "XMLEXISTS" since 9.1, they have +[numerous limitations][appD31] inherited from a library that does not support +XQuery, and additional peculiarities prior to PostgreSQL 12), and the ISO SQL +`LIKE_REGEX`, `OCCURRENCES_REGEX`, `POSITION_REGEX`, `SUBSTRING_REGEX`, and +`TRANSLATE_REGEX` functions that apply XQuery regular expressions. It also +includes the `XMLTEXT` function, which is rather trivial, but also missing from +core PostgreSQL, and supplied here for completeness. + +As plain user-defined functions without special treatment in PostgreSQL's SQL +parser, these functions cannot be used with the exact syntax specified in +ISO SQL, but require simple rewriting into equivalent forms that are valid +ordinary PostgreSQL function calls. The rewritten forms are intended to be easy +to read and correspond closely to the ISO syntax. + +While still presented as examples and not a full implementation, these functions +are now intended to be substantially usable (subject to minor +[documented limits][exsaxon]), and testing and reports of shortcomings are +welcome. 
+ +$h4 ResultSet holdability again + +A `ResultSet` obtained from a query done in PL/Java would return the value +`CLOSE_CURSORS_AT_COMMIT` to a caller of its `getHoldability` method, but in +reality would become unusable as soon as the PL/Java function creating it +returned to PostgreSQL. That was fixed in PL/Java 1.5.1 for a `ResultSet` +obtained from a `Statement`, but not for one obtained from a +`PreparedStatement`. It now correctly remains usable to the end of the +transaction in either case. + +$h4 Savepoint behavior at rollback + +Per JDBC, a `Savepoint` still exists after being used in a rollback, and can be +used again; the rollback only invalidates any `Savepoint` that had been created +after the one being rolled back. That should be familiar behavior, as it is the +same as PostgreSQL's own SQL `SAVEPOINT` behavior. It is also correct in pgJDBC, +which has test coverage to confirm it. PL/Java has been doing it wrong. + +In 1.5.3 it now has the JDBC-specified behavior. For compatibility with existing +application code, the meaning of the `pljava.release_lingering_savepoints` +[configuration variable](use/variables.html) has been adjusted. The setting +tells PL/Java what to do if a `Savepoint` still exists, neither released nor +rolled back, at the time a function exits. If `on`, the savepoint is released +(committed); if `off`, the savepoint is rolled back. A warning is issued in +either case. + +In an existing function that used savepoints and assumed that a rolled-back +savepoint would be no longer live, it will now be normal for such a savepoint +to reach the function exit still alive. To recognize this case, PL/Java tracks +whether any savepoint has been rolled back at least once. 
At function exit, any +savepoint that has been neither released nor ever rolled back is disposed of +according to the `release_lingering_savepoints` setting and with a warning, +as before, but any savepoint that has already been rolled back at least once +is simply released, regardless of the variable setting, and without producing +a warning. + +$h4 Control of function parameter names in generated SQL + +When generating the `CREATE FUNCTION` command in a deployment descriptor +according to an annotated Java function, PL/Java ordinarily gives the function +parameters names that match their Java names, unquoted. Because PostgreSQL +allows named notation when calling a function, the parameter names in its +declaration become part of its signature that cannot later be changed without +dropping and re-creating the function. + +In some cases, explicit control of the SQL parameter names may be wanted, +independently of the Java names: to align with an external standard, perhaps, +or when either the SQL or the Java name would collide with a reserved word. +For that purpose, the (already slightly overloaded) `@SQLType` annotation now +has a `name` attribute that can specify the SQL name of the annotated parameter. + +$h4 Documentation + +The user guide and guide for packagers contained incorrect instructions for +using Maven to build a single subproject of PL/Java (such as `pljava-api` or +`pljava-examples`) instead of the full project. Those have been corrected. 
+ +$h3 Enhancement requests addressed + +* [Allow building with Java releases newer than 8](${ghbug}212) + +$h3 Bugs fixed + +* [ResultSet holdability still wrong when using PreparedStatement](${ghbug}209) +* [Can't return (or set/update PreparedStatement/ResultSet) non-PL/Java SQLXML object](${ghbug}225) +* [JDBC Savepoint behavior](${ghbug}228) +* Writing `SQLXML` via StAX when server encoding is not UTF-8 +* StAX rejecting server encoding if not an IANA-registered encoding +* Error handling when PL/Java startup fails + (may have been [issue 211](${ghbug}211)) +* SPI connection management for certain set-returning functions + +$h3 Updated PostgreSQL APIs tracked + +* Retirement of `dynloader.h` +* Retirement of magical Oids +* Retirement of `nabstime` +* Retirement of `pg_attrdef.adsrc` +* Extensible `TupleTableSlot`s +* `FunctionCallInfoBaseData` + +$h3 Credits + +There is a PL/Java 1.5.3 thanks in part to +Christoph Berg, +Chapman Flack, +`ppKrauss`, +original creator Thomas Hallgren, +and the many contributors to earlier versions. + +[GraalVM]: https://www.graalvm.org/ +[OWASP]: https://www.owasp.org/index.php/About_The_Open_Web_Application_Security_Project +[appD31]: https://www.postgresql.org/docs/12/xml-limits-conformance.html + +$h2 PL/Java 1.5.2 (5 November 2018) + +A pure bug-fix release, correcting a regression in 1.5.1 that was not caught +in pre-release testing, and could leave +[conversions between PostgreSQL `date` and `java.sql.Date`](${ghbug}199) off +by one day in certain timezones and times of the year. + +1.5.1 added support for the newer `java.time` classes from JSR 310 / JDBC 4.2, +which are [recommended as superior alternatives](use/datetime.html) to the +older conversions involving `java.sql.Date` and related classes. The new +versions are superior in part because they do not have hidden timezone +dependencies. + +However, the change to the historical `java.sql.Date` conversion behavior was +inadvertent, and is fixed in this release. 
+
+$h3 Open issues with date/time/timestamp conversions
+
+During preparation of this release, other issues of longer standing were also
+uncovered in the legacy conversions between PG `date`, `time`, and
+`timestamp` types and the `java.sql` classes. They are detailed in
+[issue #200](${ghbug}200). Because they are not regressions but long-established
+behavior, they are left untouched in this release, and will be fixed in
+a future release.
+
+The Java 8 `java.time` conversions are free of these issues as well.
+
+$h2 PL/Java 1.5.1 (17 October 2018)
+
+This release adds support for PostgreSQL 9.6, 10, and 11,
+and plays more nicely with `pg_upgrade`. If a PostgreSQL installation
+is to be upgraded using `pg_upgrade`, and is running a version of
+PL/Java before 1.5.1, the PL/Java version should first be upgraded
+in the running PostgreSQL version, and then the PostgreSQL `pg_upgrade`
+can be done.
+
+The documentation is expanded on the topic of shared-memory precompiled
+class cache features, which can substantially improve JVM startup time
+and memory footprint, and are now available across Oracle Java, OpenJDK
+with Hotspot, and OpenJDK with OpenJ9. When running on OpenJ9, PL/Java
+cooperates with the JVM to include even the application's classes
+(those loaded with `install_jar`) in the shared cache, something not
+yet possible with Hotspot. While the advanced sharing feature in Oracle
+Java is still subject to a commercial licensing encumbrance, the equivalent
+(or superior, with OpenJ9) features in OpenJDK are not encumbered.
+
+Significant new functionality includes new datatype mapping support:
+SQL `date`, `time`, and `timestamp` values can be mapped to the new
+Java classes of the `java.time` package in Java 8 and later (JSR 310 /
+JDBC 4.2), which are much more faithful representations of the values
+in SQL. 
Values of `xml` type can be manipulated efficiently using the +JDBC 4.0 `SQLXML` API, supporting several different APIs for XML +processing in Java. + +For Java code that does not use the new date/time classes in the +`java.time` package, some minor conversion inaccuracies (less than +two seconds) in the older mapping to `java.sql.Timestamp` have been +corrected. + +Queries from PL/Java code now produce `ResultSet`s that are usable to the +end of the containing transaction, as they had already been claiming to be. + +With PostgreSQL 9.6 support comes the ability to declare functions +`PARALLEL { UNSAFE | RESTRICTED | SAFE }`, and with PG 10 support, +transition tables are available to triggers. + +$h3 Security + +$h4 Schema-qualification + +PL/Java now more consistently schema-qualifies objects in queries and DDL +it generates internally, as a measure of defense-in-depth in case the database +it is installed in has not been [protected][prot1058] from [CVE-2018-1058][]. + +_No schema-qualification work has been done on the example code._ If the +examples jar will be installed, it should be in a database that +[the recommended steps have been taken to secure][prot1058]. + +$h4 Some large-object code removed + +1.5.1 removes the code at issue in [CVE-2016-0768][], which pertained to +PostgreSQL large objects, but had never been documented or exposed as API. + +This is not expected to break any existing code at all, based on further +review showing the code in question had also been simply broken, since 2006, +with no reported issues in that time. That discovery would support an argument +for downgrading the severity of the reported vulnerability, but with no need +to keep that code around, it is more satisfying to remove it entirely. + +Developers wishing to manipulate large objects in PL/Java are able to do so +using the SPI JDBC interface and the large-object SQL functions already +available in every PostgreSQL version PL/Java currently supports. 
+ +[CVE-2018-1058]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-1058 +[prot1058]: https://wiki.postgresql.org/wiki/A_Guide_to_CVE-2018-1058:_Protect_Your_Search_Path#Next_Steps:_How_Can_I_Protect_My_Databases.3F + +$h3 Version compatibility + +PL/Java 1.5.1 can be built against recent PostgreSQL versions including 11, +and older ones back to 8.2, using Java SE 8, 7, or 6. It can _run_ using newer +Java versions including Java 11. PL/Java functions can be written for, and use +features of, the Java version loaded at run time. See +[version compatibility][versions] for more detail. + +OpenJDK is supported, and can be downloaded in versions using the Hotspot or the +OpenJ9 JVM. Features of modern Java VMs can drastically reduce memory footprint +and startup time, in particular class-data sharing. Several choices of Java +runtime now offer such features: Oracle Java has a simple class data sharing +feature for the JVM itself, freely usable in all supported versions, and an +"application class data sharing" feature in Java 8 and later that can also share +the internal classes of PL/Java, but is a commercial feature requiring a +license from Oracle. As of Java 10, the same application class sharing feature +is present in OpenJDK/Hotspot, where it is freely usable without an additional +license. OpenJDK/OpenJ9 includes a different, and very sophisticated, class +sharing feature, freely usable from Java 8 onward. More on these features +can be found [in the installation docs][vmopts]. + +$h3 Changes + +$h4 Typing of parameters in prepared statements + +PL/Java currently does not determine the necessary types of `PreparedStatement` +parameters from the results of PostgreSQL's own type analysis of the query +(as a network client would, when using PostgreSQL's "extended query" protocol). +PostgreSQL added the means to do so in SPI only in PostgreSQL 9.0, and a future +PL/Java major release should use it. 
However, this release does make two small +changes to the current behavior. + +Without the query analysis results from PostgreSQL, PL/Java tries to type the +prepared-statement parameters based on the types of values supplied by the +application Java code. It now has two additional ways to do so: + +* If Java code supplies a Java user-defined type (UDT)---that is, an object + implementing the `SQLData` interface---PL/Java will now call the `SQLData` + method `getSQLTypeName` on that object and use the result to pin down + the PostgreSQL type of the parameter. Existing code should already provide + this method, but could, in the past, have returned a bogus result without + detection, as PL/Java did not use it. + +* Java code can use the three-argument form of `setNull` to specify the exact + PostgreSQL type for a parameter, and then another method can be used to + supply a non-null value for it. If the following non-null value has + a default mapping to a different PostgreSQL type, in most cases it will + overwrite the type supplied with `setNull` and re-plan the query. That was + PL/Java's existing behavior, and was not changed for this minor release. + However, the new types introduced in this release---the `java.time` types + and `SQLXML`---behave in the way that should become universal in a future + major release: the already-supplied PostgreSQL type will be respected, and + PL/Java will try to find a usable coercion to it. + +$h4 Inaccuracies converting TIMESTAMP and TIMESTAMPTZ + +When converting between PostgreSQL values of `timestamp` or `timestamptz` type +and the pre-Java 8 original JDBC type `java.sql.Timestamp`, there were cases +where values earlier than 1 January 2000 would produce exceptions rather than +converting successfully. Those have been fixed. + +Also, converting in the other direction, from `java.sql.Timestamp` to a +PostgreSQL timestamp, an error of up to 1.998 seconds (averaging 0.999) +could be introduced. + +That error has been corrected. 
If an application has stored Java `Timestamp`s +and corresponding SQL `timestamp`s generated in the past and requires them +to match, it could be affected by this change. + +$h4 New date/time/timestamp API in Java 8 `java.time` package + +The old, and still default, mappings in JDBC from the SQL `date`, `time`, and +`timestamp` types to `java.sql.Date`, `java.sql.Time`, and `java.sql.Timestamp`, +were never well suited to represent the PostgreSQL data types. The `Time` and +`Timestamp` classes were used to map both the with-timezone and without-timezone +variants of the corresponding SQL types and, clearly, could not represent both +equally well. These Java classes all contain timezone dependencies, requiring +the conversion to involve timezone calculations even when converting non-zoned +SQL types, and making the conversion results for non-zoned types implicitly +depend on the current PostgreSQL session timezone setting. + +Applications are strongly encouraged to adopt Java 8 as a minimum language +version and use the new-in-Java-8 types in the `java.time` package, which +eliminate those problems and map the SQL types much more faithfully. +For PL/Java function parameters and returns, the class in the method declaration +can simply be changed. For retrieving date/time/timestamp values from a +`ResultSet` or `SQLInput` object, use the variants of `getObject` / `readObject` +that take a `Class` parameter. The class to use is: + +| PostgreSQL type | `java.time` class | +|--:|:--| +|`date`|`LocalDate`| +|`time without time zone`|`LocalTime`| +|`time with time zone`|`OffsetTime`| +|`timestamp without time zone`|`LocalDateTime`| +|`timestamp with time zone`|`OffsetDateTime`| +[Correspondence of PostgreSQL date/time types and Java 8 `java.time` classes] + +Details on these mappings are [added to the documentation](use/datetime.html). + +$h4 Newly supported `java.sql.SQLXML` type + +PL/Java has not, until now, supported the JDBC 4.0 `SQLXML` type. 
+PL/Java
+functions have been able to work with PostgreSQL XML values by mapping them
+as Java `String`, but that conversion could introduce character encoding issues
+outside the control of the XML APIs, and also has memory implications if an
+application stores, or generates in queries, large XML values. Even if the
+processing to be done in the application could be structured to run in constant
+bounded memory while streaming through the XML, a conversion to `String`
+requires the whole, uncompressed, character-serialized value to be brought into
+the Java heap at once, and any heap-size tuning has to account for that
+worst-case size. The `java.sql.SQLXML` API solves those problems by allowing
+XML manipulation with any of several Java XML APIs with the data remaining in
+PostgreSQL native memory, never brought fully into the Java heap unless that is
+what the application does. Heap sizing can be based on just the
+application's processing needs.
+
+The `SQLXML` type can take the place of `String` in PL/Java function parameters
+and returns simply by changing their declarations from `String` to `SQLXML`.
+When retrieving XML values from `ResultSet` or `SQLInput` objects, the legacy
+`getObject / readObject` methods will continue to return `String` for existing
+application compatibility, so the specific `getSQLXML / readSQLXML` methods, or
+the forms of `getObject / readObject` with a `Class` parameter and passing
+`SQLXML.class`, must be used. A [documentation page](use/sqlxml.html) has been
+added, and the [PassXML example][exxml] illustrates use of the API.
+
+A [not-built-by-default new example][exsaxon] (because it depends on Java 8 and
+the Saxon-HE XML-processing library) provides a partial implementation of true
+`XMLQUERY` and `XMLTABLE` functions for PostgreSQL, using the standard-specified
+XML Query language rather than the XPath 1.0 of the native PostgreSQL functions. 
+ +[exxml]: pljava-examples/apidocs/index.html?org/postgresql/pljava/example/annotation/PassXML.html +[exsaxon]: examples/saxon.html + +$h4 New Java property exposes the PostgreSQL server character-set encoding + +A Java system property, `org.postgresql.server.encoding`, is set to the +canonical name of a supported Java `Charset` that corresponds to PostgreSQL's +`server_encoding` setting, if one can be found. If the server encoding's name +is not recognized as any known Java `Charset`, this property will be unset, and +some functionality, such as the `SQLXML` API, may be limited. If a Java +`Charset` does exist (or is made available through a `CharsetProvider`) that +does match the PostgreSQL server encoding, but is not automatically selected +because of a naming mismatch, the `org.postgresql.server.encoding` property can +be set (with a `-D` in `pljava.vmoptions`) to select it by name. + +$h4 ResultSet holdability + +A `ResultSet` obtained from a query done in PL/Java would return the value +`CLOSE_CURSORS_AT_COMMIT` to a caller of its `getHoldability` method, but in +reality would become unusable as soon as the PL/Java function creating it +returned to PostgreSQL. It now remains usable to the end of the transaction, +as claimed. + +$h4 PostgreSQL 9.6 and parallel query + +A function in PL/Java can now be [annotated][apianno] +`parallel={UNSAFE | RESTRICTED | SAFE}`, with `UNSAFE` the default. +A new [user guide section][ugparqry] explains the possibilities and +tradeoffs. (Good cases for marking a PL/Java function `SAFE` may be +rare, as pushing such a function into multiple background processes +will require them all to start JVMs. But if a practical application +arises, PostgreSQL's `parallel_setup_cost` can be tuned to help the +planner make good plans.) 
+ +Although `RESTRICTED` and `SAFE` Java functions work in simple tests, +there has been no exhaustive audit of the code to ensure that PL/Java's +internal workings never violate the behavior constraints on such functions. +The support should be considered experimental, and could be a fruitful +area for beta testing. + +[ugparqry]: use/parallel.html + +$h4 Tuple counts widened to 64 bits with PostgreSQL 9.6 + +To accommodate the possibility of more than two billion tuples in a single +operation, the SPI implementation of the JDBC `Statement` interface now +provides the JDK 8-specified `executeLargeBatch` and `getLargeUpdateCount` +methods defined to return `long` counts. The original `executeBatch` and +`getUpdateCount` methods remain but, obviously, cannot return counts that +exceed `INT_MAX`. In case the count is too large, `getUpdateCount` will throw +an `ArithmeticException`; `executeBatch` will store `SUCCESS_NO_INFO` for +any statement in the batch that affected too many tuples to report. + +For now, a `ResultSetProvider` cannot be used to return more than `INT_MAX` +tuples, but will check that condition and throw an error to ensure predictable +behavior. + +$h4 `pg_upgrade` + +PL/Java should be upgraded to 1.5.1 in a database cluster, before that +cluster is binary-upgraded to a newer PostgreSQL version using `pg_upgrade`. +A new [Upgrading][upgrading] installation-guide section centralizes information +on both upgrading PL/Java in a database, and upgrading a database with PL/Java +in it. + +[upgrading]: install/upgrade.html + +$h4 Suppressing row operations from triggers + +In PostgreSQL, a `BEFORE ROW` trigger is able to allow the proposed row +operation, allow it with modified values, or silently suppress the operation +for that row. Way back in PL/Java 1.1.0, the way to produce the 'suppress' +outcome was for the trigger method to throw an exception. 
Since PL/Java 1.2.0, +however, an exception thrown in a trigger method is used to signal an error +to PostgreSQL, and there has not been a way to suppress the row operation. + +The `TriggerData` interface now has a [`suppress`][tgsuppress] method that +the trigger can invoke to suppress the operation for the row. + +[tgsuppress]: pljava-api/apidocs/index.html?org/postgresql/pljava/TriggerData.html#suppress() + +$h4 Constraint triggers + +New attributes in the `@Trigger` annotation allow the SQL generator to +create constraint triggers (a type of trigger that can be created with SQL +since PostgreSQL 9.1). Such triggers will be delivered by the PL/Java runtime +(to indicate that a constraint would be violated, a constraint trigger +method should throw an informative exception). However, the trigger method +will have access, through the `TriggerData` interface, only to the properties +common to ordinary triggers; methods on that interface to retrieve properties +specific to constraint triggers have not been added for this release. + +$h4 PostgreSQL 10 and trigger transition tables + +A trigger [annotation][apianno] can now specify `tableOld="`_name1_`"` or +`tableNew="`_name2_`"`, or both, and the PL/Java function servicing the +trigger can do SPI JDBC queries and see the transition table(s) under the +given name(s). The [triggers example code][extrig] has been extended with +a demonstration. + +[extrig]: $project.scm.url/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java + +$h4 Logging from Java + +The way the Java logging system has historically been plumbed to PostgreSQL's, +as described in [issue 125](${ghbug}125), can be perplexing both because it +is unaffected by later changes to the PostgreSQL settings after PL/Java is +loaded in the session, and because it has honored only `log_min_messages` +and ignored `client_min_messages`. 
The second part is easy to fix, so in +1.5.1 the threshold where Java discards messages on the fast path is +determined by the finer of `log_min_messages` and `client_min_messages`. + +$h4 Conveniences for downstream package maintainers + +The `mvn` command to build PL/Java will now accept an option to provide +a useful default for `pljava.libjvm_location`, when building a package for +a particular software environment where the likely path to Java is known. + +The `mvn` command will also accept an option to specify, by the path to +the `pg_config` executable, the PostgreSQL version to build against, in +case multiple versions exist on the build host. This was already possible +by manipulating `PATH` ahead of running `mvn`, but the option makes it more +explicit. + +A new [packaging section][packaging] in the build guide documents those +and a number of considerations for making a PL/Java package. + +[packaging]: build/package.html + +$h3 Enhancement requests addressed + +$h4 In 1.5.1-BETA3 + +* [Add a ddr.reproducible option to SQL generator](${ghbug}186) + +$h4 In 1.5.1-BETA2 + +* [java 8 date/time api](${ghbug}137) +* [Annotations don't support CREATE CONSTRAINT TRIGGER](${ghbug}138) +* [Let annotations give defaults to row-type parameters](${ghpull}153) +* [Improve DDR generator on the dont-repeat-yourself dimension for UDT type mapping](${ghpull}159) +* [Support the JDBC 4.0 SQLXML type](${ghpull}171) + +$h3 Bugs fixed + +$h4 In 1.5.1-BETA3 + +* [self-install jar ClassCastException (...ConsString to String), some java 6/7 runtimes](${ghbug}179) +* [i386 libjvm_location gets mangled as .../jre/lib/1/server/libjvm.so](${ghbug}176) +* [java.lang.ClassNotFoundException installing examples jar](${ghbug}178) +* [Preprocessor errors building on Windows with MSVC](${ghbug}182) +* [Saxon example does not build since Saxon 9.9 released](${ghbug}185) +* [Segfault in VarlenaWrapper.Input on 32-bit](${ghbug}177) +* [Windows: self-install jar silently fails to replace 
existing files](${ghbug}189) +* [ERROR: java.sql.SQLException: _some Java class name_](${ghbug}192) +* [SetOfRecordTest with timestamp column influenced by environment ](${ghbug}195) + +$h4 In 1.5.1-BETA2 + +* [PostgreSQL 10: SPI_modifytuple failed with SPI_ERROR_UNCONNECTED](${ghbug}134) +* [SPIConnection prepareStatement doesn't recognize all parameters](${ghbug}136) +* [Ordinary (non-constraint) trigger has no way to suppress operation](${ghbug}142) +* [ResultSetHandle and column definition lists](${ghbug}146) +* [PreparedStatement doesn't get parameter types from PostgreSQL](${ghbug}149) + _(partial improvements)_ +* [internal JDBC: inaccuracies converting TIMESTAMP and TIMESTAMPTZ](${ghbug}155) +* [Missing type mapping for Java return `byte[]`](${ghbug}157) +* [The REMOVE section of DDR is in wrong order for conditionals](${ghbug}163) +* [Loading PL/Java reinitializes timeouts in PostgreSQL >= 9.3](${ghbug}166) +* [JDBC ResultSet.CLOSE_CURSORS_AT_COMMIT reported, but usable life shorter](${ghbug}168) + +$h4 In 1.5.1-BETA1 + +* [Add support for PostgreSQL 9.6](${ghbug}108) +* [Clarify documentation of ResultSetProvider](${ghbug}115) +* [`pg_upgrade` (upgrade failure from 9.5 to 9.6)](${ghbug}117) +* [Java logging should honor `client_min_messages` too](${ghbug}125) + +$h3 Updated PostgreSQL APIs tracked + +* `heap_form_tuple` +* 64-bit `SPI_processed` +* 64-bit `Portal->portalPos` +* 64-bit `FuncCallContext.call_cntr` +* 64-bit `SPITupleTable.alloced` and `.free` +* `IsBackgroundWorker` +* `IsBinaryUpgrade` +* `SPI_register_trigger_data` +* `SPI` without `SPI_push`/`SPI_pop` +* `AllocSetContextCreate` +* `DefineCustom...Variable` (no `GUC_LIST_QUOTE` in extensions) + +$h3 Credits + +There is a PL/Java 1.5.1 thanks in part to +Christoph Berg, +Thom Brown, +Luca Ferrari, +Chapman Flack, +Petr Michalek, +Steve Millington, +Kenneth Olson, +Fabian Zeindl, +original creator Thomas Hallgren, +and the many contributors to earlier versions. 
+ +$h2 PL/Java 1.5.0 (29 March 2016) + +This, the first PL/Java numbered release since 1.4.3 in 2011, combines +compatibility with the latest PostgreSQL and Java versions with modernized +build and installation procedures, automatic generation of SQL deployment +code from Java annotations, and many significant fixes. + +$h3 Security + +Several security issues are addressed in this release. Sites already +using PL/Java are encouraged to update to 1.5.0. For several of the +issues below, practical measures are described to mitigate risk until +an update can be completed. + +[CVE-2016-0766][], a privilege escalation requiring an authenticated +PostgreSQL connection, is closed by installing PL/Java 1.5.0 (including +prereleases) or by updating PostgreSQL itself to at least 9.5.1, 9.4.6, +9.3.11, 9.2.15, 9.1.20. Vulnerable systems are only those running both +an older PL/Java and an older PostgreSQL. + +[CVE-2016-0767][], in which an authenticated PostgreSQL user with USAGE +permission on the `public` schema may alter the `public` schema classpath, +is closed by release 1.5.0 (including prereleases). If updating to 1.5.0 +must be delayed, risk can be mitigated by revoking public `EXECUTE` permission +on `sqlj.set_classpath` and granting it selectively to responsible users or +roles. + +This release brings a policy change to a more secure-by-default posture, +where the ability to create functions in `LANGUAGE java` is no longer +automatically granted to `public`, but can be selectively granted to roles +that will have that responsibility. The change reduces exposure to a known +issue present in 1.5.0 and earlier versions, that will be closed in a future +release ([CVE-2016-0768][], see **large objects, access control** below). 
+
+The new policy will be applied in a new installation; permissions will not
+be changed in an upgrade, but any site can move to this policy, even before
+updating to 1.5.0, with `REVOKE USAGE ON LANGUAGE java FROM public;` followed by
+explicit `GRANT` commands for the users/roles expected to create Java
+functions.
+
+[CVE-2016-2192][], in which an authenticated user can alter type mappings
+without owning the types involved, is closed by release 1.5.0. Exploitability
+is limited by other permissions, but if type mapping is a feature being used
+at a site, one can interfere with proper operation of code that relies on it.
+A mitigation is simply to `REVOKE EXECUTE ... FROM PUBLIC` on the
+`sqlj.add_type_mapping` and `sqlj.drop_type_mapping` functions, and grant the
+privilege only to selected users or roles. As of 1.5.0, these functions
+require the invoker to be superuser or own the type being mapped.
+
+[CVE-2016-0766]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-0766
+[CVE-2016-0767]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-0767
+[CVE-2016-0768]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-0768
+[CVE-2016-2192]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-2192
+
+$h3 Version compatibility
+
+PL/Java 1.5.0 can be built against recent PostgreSQL versions including 9.5,
+using Java SE 8, 7, or 6. See [version compatibility][versions] for more
+detail. OpenJDK is well supported. Support for GCJ has been dropped; features
+of modern Java VMs that are useful to minimize footprint and startup time,
+such as class-data sharing, are now more deeply covered
+[in the installation docs][vmopts].
+
+[versions]: build/versions.html
+[vmopts]: install/vmoptions.html
+
+$h3 Build procedures
+
+Since 2013, PL/Java has been hosted [on GitHub][ghpljava] and built
+using [Apache Maven][mvn]. See the new [build instructions][bld] for details. 
+ +Reported build issues for specific platforms have been resolved, +with new platform-specific build documentation +for [OS X][osxbld], [Solaris][solbld], [Ubuntu][ububld], +[Windows MSVC][msvcbld], and [Windows MinGW-w64][mgwbld]. + +The build produces a redistributable installation archive usable with +the version of PostgreSQL built against and the same operating system, +architecture, and linker. The type of archive is `jar` on all platforms, as +all PL/Java installations will have Java available. + +[ghpljava]: https://github.com/tada/pljava +[mvn]: http://maven.apache.org/ +[bld]: build/build.html +[msvcbld]: build/buildmsvc.html +[mgwbld]: build/mingw64.html +[osxbld]: build/macosx.html +[solbld]: build/solaris.html +[ububld]: build/ubuntu.html + +$h3 Installation procedures + +The jar produced by the build is executable and will self-extract, +consulting `pg_config` on the destination system to find the correct +default locations for the extracted files. Any location can be overridden. +(Enhancement requests [6][gh6], [9][gh9]) + +PL/Java now uses a PostgreSQL configuration variable, `pljava.libjvm_location`, +to find the Java runtime to use, eliminating the past need for highly +platform-specific tricks like link-time options or runtime-loader configuration +just so that PL/Java could find Java. PostgreSQL configuration variables are +now the only form of configuration needed for PL/Java, and the `libjvm_location` +should be the only setting needed if file locations have not been overridden. + +In PostgreSQL 9.1 and later, PL/Java can be installed with +`CREATE EXTENSION pljava`. Regardless of PostgreSQL version, installation +has been simplified. Former procedures involving `Deployer` or `install.sql` +are no longer required. Details are in the [new installation instructions][ins]. + +$h4 Schema migration + +The tables used internally by PL/Java have changed. 
If PL/Java 1.5.0 is +loaded in a database with an existing `sqlj` schema populated by an earlier +PL/Java version (1.3.0 or later), the structure will be updated without data +loss (enhancement request [12][gh12]). *Remember that PL/Java runs independently +in each database session where it is in use. Older PL/Java versions active in +other sessions can be disrupted by the schema change.* + +A trial installation of PL/Java 1.5.0 can be done in a transaction, and +rolled back if desired, leaving the schema as it was. Any concurrent sessions +with active older PL/Java versions will not be disrupted by the altered schema +as long as the transaction remains open, *but they may block for the duration, +so such a test transaction should be kept short*. + +[ins]: install/install.html + +$h3 Changes + +$h4 Behavior of `readSQL` and `writeSQL` for base and mirror user-defined types + +In the course of fixing [issue #98][gh98], the actual behavior of +`readSQL` and `writeSQL` with base or mirror types, which had not +previously been documented, [now is](develop/coercion.html), along with +other details of PL/Java's type coercion rules found only in the code. +Because machine byte order was responsible for issue #98, it now (a) is +selectable, and (b) has different, appropriate, defaults for mirror UDTs +(which need to match PostgreSQL's order) and for base UDTs (which must +stay big-endian because of how binary `COPY` is specified). +A [new documentation section](use/byteorder.html) explains in detail. + +$h4 `USAGE` to `PUBLIC` no longer default for `java` language + +Of the two languages installed by PL/Java, functions that declare +`LANGUAGE javau` can be created only by superusers, while those that +declare `LANGUAGE java` can be created by any user or role granted the +`USAGE` privilege on the language. 
+ +In the past, the language `java` has been created with PostgreSQL's +default permission granting `USAGE` to `PUBLIC`, but PL/Java 1.5.0 +leaves the permission to be explicitly granted to those users or roles +expected to create Java functions, in keeping with least-privilege +principles. See **large objects, access control** under **known issues** +for background. + +$h4 SQL generated by Java annotations + +Java code developed for use by PL/Java can carry in-code annotations, +used by the Java compiler to generate the SQL commands to declare the +new functions, types, triggers, etc. in PostgreSQL (enhancement request +[1011112][], though different in implementation). This eliminates the need +to have Java code and the corresponding SQL commands developed in parallel, +and the class of errors possible when both are not updated together. It +also allows compile-time checks that the Java methods or classes being +annotated are suitable (correct access modifiers, signatures, etc.) +for their declared SQL purposes, rather than discovering +such issues only upon loading the code into PostgreSQL and trying to use it. + +The Java compiler writes the generated SQL into a "deployment descriptor" +file (`pljava.ddr` by default), as specified by the SQL/JRT standard. The +file can be included in a `jar` archive with the compiled code, and the +commands will be executed by PL/Java when the `install_jar` function is +used to load the jar. + +SQL generation is covered in the [updated user documentation][user], +and illustrated in the [Hello, World example][hello] and +[several other supplied examples][exanno]. Reference information +is [in the API documentation][apianno]. It is currently usable to declare +functions, triggers, and user-defined types, both base and composite. 
+ +[user]: use/use.html +[hello]: use/hello.html +[exanno]: $project.scm.url/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation +[apianno]: pljava-api/apidocs/index.html?org/postgresql/pljava/annotation/package-summary.html#package_description + +The history of this feature in PL/Java is long, with the first related commits +appearing in 2005, six years in advance of an enhancement request for it. +It became generally usable in 2013 when building with +Java SE 6 or later, using the annotation processing framework Java introduced +in that release. 1.5.0 is the first PL/Java numbered release to feature it. + +$h5 Annotation keyword changes + +If you have been using the SQL generation feature in prerelease `git` builds of +2013 or later, be aware that some annotation keywords have changed in finalizing +the 1.5.0 release. Java code that was compiled using the earlier keywords will +continue to work, but will have to be updated before it can be recompiled. + +* For functions: `effects=(VOLATILE,STABLE,IMMUTABLE)` was formerly `type=` +* For functions: `type=` (_an explicit SQL return type for the function_) + was formerly `complexType=` +* For functions: `trust=(SANDBOXED,UNSANDBOXED)` was formerly + `(RESTRICTED,UNRESTRICTED)` +* For triggers: `called=(BEFORE,AFTER,INSTEAD_OF)` was formerly `when=` + and conflicted with the `WHEN` clause introduced for triggers + in PostgreSQL 9.0. + +$h4 A jar may have more than one deployment descriptor + +PL/Java formerly allowed only one entry in a jar to be a deployment +descriptor (that is, a file of SQL commands to be executed upon loading +or unloading the jar). The SQL/JRT standard allows multiple entries to +be deployment descriptors, executed in the order they are mentioned +_in the jar manifest_, or the reverse of that order when the jar is +being unloaded. PL/Java now conforms to the standard. 
+ +The behavior is useful during transition to annotation-driven deployment +descriptor generation for a project that already has a manually-maintained +deployment descriptor. PL/Java's own `pljava-examples` project is an +illustration, in the midst of such a transition itself. + +Note the significance placed by SQL/JRT on the order of entries in a jar +manifest, whose order is normally _not_ significant according to the Jar File +Specification. Care can be needed when manipulating manifests with automated +tools that may not preserve order. + +$h4 Conditional execution within deployment descriptors + +Deployment descriptors have a primitive conditional-execution provision +defined in the SQL/JRT standard: commands wrapped in a +`BEGIN IMPLEMENTOR ` _identifier_ construct will only be executed if the +_identifier_ is recognized by the SQL/JRT implementation in use. The design +makes possible jars that can be installed on different database systems that +provide SQL/JRT features, with database-specific commands wrapped in +`BEGIN IMPLEMENTOR` blocks with an _identifier_ specific to the system. +By default, PL/Java recognizes the _identifier_ `postgresql` (matched without +regard to case). + +PL/Java extends the standard by allowing the PostgreSQL configuration +variable `pljava.implementors` to contain a list of identifiers that will +be recognized. SQL code in a deployment descriptor can conditionally add +or remove identifiers in this list to influence which subsequent implementor +blocks will be executed, giving a still-primitive but more general control +structure. + +In sufficiently recent PostgreSQL versions, the same effect could be +achieved using `DO` statements and PL/pgSQL control structures, but this +facility in PL/Java does not require either to be available. + +$h4 Interaction with `SET ROLE` corrected + +PL/Java formerly was aware of the user ID associated with the running +session, but not any role ID that user may have acquired with `SET ROLE`. 
+The result would commonly be failed permission checks made by PL/Java when +the session user did not have the needed permission, but had `SET ROLE` to +a role that did. Likewise, within `install_jar`, PL/Java would execute +deployment descriptor commands as the original session user rather than +as the user's current role, with permission failures a likely result. + +Correcting this issue has changed the PL/Java API, but without a bump +of major version because the prior API, while deprecated, is still available. + +* [`getOuterUserName`][goun] and [`executeAsOuterUser`][eaou] are new, and + correctly refer to the session user or current role, when active. +* [`getSessionUserName`][gsun] and [`executeAsSessionUser`][easu] are still + present but deprecated, and _their semantics are changed_. They are now + deprecated aliases for the corresponding new methods, which honor the + set role. Use cases that genuinely need to refer only to the _session_ user + and ignore the role should be rare, and should be discussed on the mailing + list or opened as issues. + +#set($sessapi = 'pljava-api/apidocs/index.html?org/postgresql/pljava/Session.html#') + +[goun]: ${sessapi}getOuterUserName() +[eaou]: ${sessapi}executeAsOuterUser(java.sql.Connection,java.lang.String) +[gsun]: ${sessapi}getSessionUserName() +[easu]: ${sessapi}executeAsSessionUser(java.sql.Connection,java.lang.String) + +$h4 Unicode transparency + +Since the resolution of [bug 21][gh21], PL/Java contains a regression test +to ensure that character strings passed and returned between PostgreSQL and +Java will round-trip without alteration for the full range of Unicode +characters, _when the database encoding is set to `UTF8`_. + +More considerations apply when the database encoding is anything other +than `UTF8`, and especially when it is `SQL_ASCII`. Please see +[character encoding support][charsets] for more. 
+ +[charsets]: use/charsets.html + +$h3 Enhancement requests addressed + +* [Use Annotations instead of DDL Manifest][1011112] +* [Installation of pljava on postgresql servers][gh9] +* [Find an alternative way to install the pljava.so in `/usr/lib`][gh6] +* [Provide database migration][gh12] +* [Support types with type modifiers][1011140] (partial: see [example][typmex]) +* [Build process: accommodate Solaris 10][gh102] + +[1011112]: ${pgffeat}1011112 +[1011140]: ${pgffeat}1011140 +[gh9]: ${ghbug}9 +[gh6]: ${ghbug}6 +[gh12]: ${ghbug}12 +[gh102]: ${ghbug}102 + +[typmex]: $project.scm.url/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/IntWithMod.java + +$h3 Bugs fixed + +$h4 Since 1.5.0-BETA3 + +* [Build process: accept variation in PostgreSQL version string][gh101] +* [Build process: accommodate PostgreSQL built with private libraries][gh103] +* Clarified message when `CREATE EXTENSION` fails because new session needed +* Reduced stack usage in SQL generator + (small-memory build no longer needs `-Xss`) + +$h4 In 1.5.0-BETA3 + +* [Bogus mirror-UDT values on little-endian hardware][gh98] +* [Base UDT not registered if first access isn't in/out/send/recv][gh99] +* `TupleDesc` leak warnings with composite UDTs +* also added regression test from [1010962][] report + +$h4 In 1.5.0-BETA2 + +* [Generate SQL for trigger function with no parameters][gh92] +* [openssl/ssl.h needed on osx el-capitan (latest 10.11.3)/postgres 9.5][gh94] + (documented) +* [Source location missing for some annotation errors][gh95] +* [OS X El Capitan "Java 6" dialog when loading ... 
Java 8][gh96] +* pljava-api jar missing from installation jar + +$h4 In 1.5.0-BETA1 + +* [SPIPreparedStatement.setObject() fails with Types.BIT][1011119] +* [SSLSocketFactory throws IOException on Linux][1011095] +* [PL/Java fails to compile with -Werror=format-security][1011181] +* [PL/Java does not build on POWER 7][1011197] +* [The built in functions do not use the correct error codes][1011206] +* [TupleDesc reference leak][1010962] +* [String conversion to enum fails][gh4] +* [segfault if SETOF RECORD-returning function used without AS at callsite][gh7] +* [pl/java PG9.3 Issue][gh17] +* [No-arg functions unusable: "To many parameters - expected 0"][gh8] +* [Exceptions in static initializers are masked][gh54] +* [UDT in varlena form breaks if length > 32767][gh52] +* [PL/Java kills unicode?][gh21] +* [Type.c expects pre-8.3 find_coercion_pathway behavior][gh65] +* [Support PostgreSQL 9.5][gh48] +* [pl/java getting a build on MacOSX - PostgreSQL 9.3.2][gh22] +* [build pljava on windows for PostgreSQL 9.2][gh23] +* [Error while installing PL/Java with Postgresql 9.3.4 64 bit on Windows 7 64 bit System][gh28] +* [pljava does not compile on mac osx ver 10.11.1 and postgres 9.4][gh63] +* [pljava does not compile on centos 6.5 and postgres 9.4][gh64] +* [Error installing pljava with Windows 7 64 Bit and Postgres 9.4][gh71] +## JNI_getIntArrayRegion instead of JNI_getShortArrayRegion +## Eclipse IDE artifacts +## Site +## Warnings +## Javadoc + +[1011119]: ${pgfbug}1011119 +[1011095]: ${pgfbug}1011095 +[1011181]: ${pgfbug}1011181 +[1011197]: ${pgfbug}1011197 +[1011206]: ${pgfbug}1011206 +[1010962]: ${pgfbug}1010962 +[gh4]: ${ghbug}4 +[gh7]: ${ghbug}7 +[gh8]: ${ghbug}8 +[gh17]: ${ghbug}17 +[gh54]: ${ghbug}54 +[gh52]: ${ghbug}52 +[gh21]: ${ghbug}21 +[gh65]: ${ghbug}65 +[gh48]: ${ghbug}48 +[gh22]: ${ghbug}22 +[gh23]: ${ghbug}23 +[gh28]: ${ghbug}28 +[gh63]: ${ghbug}63 +[gh64]: ${ghbug}64 +[gh71]: ${ghbug}71 +[gh92]: ${ghbug}92 +[gh94]: ${ghbug}94 +[gh95]: ${ghbug}95 
+[gh96]: ${ghbug}96
+[gh98]: ${ghbug}98
+[gh99]: ${ghbug}99
+[gh101]: ${ghbug}101
+[gh103]: ${ghbug}103
+
+$h3 Updated PostgreSQL APIs tracked
+
+Several APIs within PostgreSQL itself have been added or changed;
+PL/Java now uses the current versions of these where appropriate:
+
+* `find_coercion_pathway`
+* `set_stack_base`
+* `GetOuterUserId`
+* `GetUserNameFromId`
+* `GetUserIdAndSecContext`
+* `pg_attribute_*`
+* Large objects: truncate, and 64-bit offsets
+
+$h3 Known issues and areas for future work
+
+$h4 Developments in PostgreSQL not yet covered
+
+Large objects, access control
+: PL/Java does not yet expose PostgreSQL large objects with a documented,
+  stable API, and the support it does contain was developed against pre-9.0
+  PostgreSQL versions, where no access control applied to large objects and
+  any object could be accessed by any database user. PL/Java's behavior is
+  proper for PostgreSQL before 9.0, but improper on 9.0+ where it would be
+  expected to honor access controls on large objects ([CVE-2016-0768][]).
+  This will be corrected in a future release. For this and earlier releases,
+  the recommendation is to selectively grant `USAGE` on the `java` language to
+  specific users or roles responsible for creating Java functions; see
+  "default `USAGE` permission" under Changes.
+
+`INSTEAD OF` triggers, triggers on `TRUNCATE`
+: These are supported by annotations and the SQL generator, and the runtime
+  will deliver them to the specified method, but the `TriggerData` interface
+  has no new methods to recognize these cases (that is, no added
+  methods analogous to `isFiredAfter`, `isFiredByDelete`). For a method
+  expressly coded to be a `TRUNCATE` trigger or an `INSTEAD OF` trigger,
+  that is not a problem, but care should be taken when coding a trigger
+  method to handle more than one type of trigger, or creating triggers of
+  these new types that call a method developed pre-PL/Java-1.5.0. 
Such a + method could be called with a `TriggerData` argument whose existing + `isFired...` methods all return `false`, likely to put the method on an + unexpected code path. + + A later PL/Java version should introduce trigger interfaces that better + support such evolution of PostgreSQL in a type-safe way. + +Constraint triggers +: Constraint trigger syntax is not supported by annotations and the SQL + generator. If declared (using hand-written SQL), they will be delivered + by the runtime, but without any constraint-trigger-specific information + available to the called method. + +Event triggers +: Event triggers are not yet supported by annotations or the SQL generator, + and will not be delivered by the PL/Java runtime. + +Range types +: No predefined mappings for range types are provided. + +`PRE_PREPARE`, `PRE_COMMIT`, `PARALLEL_ABORT`, `PARALLEL_PRE_COMMIT`, and `PARALLEL_COMMIT` transaction callbacks, `PRE_COMMIT` subtransaction callbacks +: Listeners for these events cannot be registered and the events will not + be delivered. + +$h4 Imperfect integration with PostgreSQL dependency tracking + +In a dump/restore, manual intervention can be needed if the users/roles +recorded as owners of jars are missing or have been renamed. A current +[thread on `pgsql-hackers`][ownhack] should yield a better solution for +a future release. + +[ownhack]: http://www.postgresql.org/message-id/56783412.6090005@anastigmatix.net + +$h4 Quirk if deployment descriptor loads classes from same jar + +The `install_jar` function installs a jar, optionally reading deployment +descriptors from the jar and executing the install actions they contain. +It is possible for those actions to load classes from the jar just installed. +(This would be unlikely if the install actions are limited to typical setup, +function/operator/datatype creation, but likely, if the install actions also +include basic function tests, or if the setup requirements are more +interesting.) 
+ +If, for any class in the jar, the first attempt to load that class is made +while resolving a function declared `STABLE` or `IMMUTABLE`, a +`ClassNotFoundException` results. The cause is PostgreSQL's normal treatment of +a `STABLE` or `IMMUTABLE` function, which relies on a snapshot from the start of +the `install_jar` query, when the jar was not yet installed. A workaround is to +ensure that the install actions cause each needed class to be loaded, such as +by calling a `VOLATILE` function it supplies, before calling one that is +`STABLE` or `IMMUTABLE`. (One could even write install actions to declare a +needed function `VOLATILE` before the first call and then redeclare it.) + +This issue should be resolved as part of a broader rework of class loading +in a future PL/Java release. + +$h4 Partial implementation of JDBC 4 and later + +The changes to make PL/Java build under Java SE 6 and later, with version 4.0 +and later of JDBC, involved providing the specified methods so +compilation would succeed, with real implementations for some, but for others +only stub versions that throw `SQLFeatureNotSupportedException` if used. +Regrettably, there is nothing in the documentation indicating which methods +have real implementations and which do not; to create such a list would require +an audit of that code. If a method throws the exception when you call it, it's +one of the unimplemented ones. + +Individual methods may be fleshed out with implementations as use cases arise +that demand them, but for a long-term roadmap, it seems more promising to +reduce the overhead of maintaining another JDBC implementation by sharing +code with `pgjdbc`, as has been [discussed on pljava-dev][jdbcinherit]. + +[jdbcinherit]: http://lists.pgfoundry.org/pipermail/pljava-dev/2015/002370.html + +$h4 Exception handling and logging + +PL/Java does interconvert between PostgreSQL and Java exceptions, but with +some loss of fidelity in the two directions. 
PL/Java code has some access +to most fields of a PostgreSQL error data structure, but only through +internal PL/Java API that is not expected to remain usable, and code written +for PL/Java has never quite had first-class standing in its ability to +_generate_ exceptions as information-rich as those from PostgreSQL itself. + +PL/Java in some cases generates the _categorized `SQLException`s_ introduced +with JDBC 4.0, and in other cases does not. + +This area may see considerable change in a future release. +[Thoughts on logging][tol] is a preview of some of the considerations. + +[tol]: https://github.com/tada/pljava/wiki/Thoughts-on-logging + +$h4 Types with type modifiers and `COPY` + +Although it is possible to create a PL/Java user-defined type that accepts +a type modifier (see the [example][typmex]), such a type will not yet be +handled by SQL `COPY` or any other operation that requires the `input` or +`receive` function to handle the modifier. This is left for a future release. + +$h3 Credits + +PL/Java 1.5.0 owes its being to original creator Thomas Hallgren and +many contributors: + +Daniel Blanch Bataller, +Peter Brewer, +Frank Broda, +Chapman Flack, +Marty Frasier, +Bear Giles, +Christian Hammers, +Hal Hildebrand, +Robert M. Lefkowitz, +Eugenie V. Lyzenko, +Dos Moonen, +Asif Naeem, +Kenneth Olson, +Johann Oskarsson, +Thomas G. Peters, +Srivatsan Ramanujam, +Igal Sapir, +Jeff Shaw, +Rakesh Vidyadharan, +`grunjol`, +`mc-soi`. + +Periods in PL/Java's development have been sponsored by EnterpriseDB. + +In the 1.5.0 release cycle, multiple iterations of testing effort +have been generously contributed by Kilobe Systems and by Pegasystems, Inc. + +## From this point on, the entries were reconstructed from old notes at the +## same time as the 1.5.0 notes were drafted, and they use a finer level of +## heading. So restore the 'real' values of the heading variables from here +## to the end of the file. 
+#set($h2 = '##') +#set($h3 = '###') +#set($h4 = '####') +#set($h5 = '#####') + +$h3 PL/Java 1.4.3 (15 September 2011) + +Notable changes in this release: + +* Works with PostgreSQL 9.1 +* Correctly links against IBM Java. +* Reads microseconds correctly in timestamps. + +Bugs fixed: + +* [Be clear about not building with JDK 1.6][1010660] +* [Does not link with IBM VM][1010970] +* [SPIConnection.getMetaData() is incorrectly documented][1010971] +* [PL/Java 1.4.2 Does not build with x86_64-w64-mingw32][1011025] +* [PL/Java does not build with PostgreSQL 9.1][1011091] + +Feature Requests: + +* [Allow pg_config to be set externally to the Makefile][1011092] +* [Add option to have pljava.so built with the runtime path of libjvm.so][1010955] + +[1010660]: ${pgfbug}1010660 +[1010970]: ${pgfbug}1010970 +[1010971]: ${pgfbug}1010971 +[1011025]: ${pgfbug}1011025 +[1011091]: ${pgfbug}1011091 + +[1011092]: ${pgffeat}1011092 +[1010955]: ${pgffeat}1010955 + +$h3 PL/Java 1.4.2 (11 December 2010) + +Bugfixes: + +* [Function returning complex objects with POD arrays cause a segfault][1010956] +* [Segfault when assigning an array to ResultSet column][1010953] +* [Embedded array support in returned complex objects][1010482] + +[1010956]: ${pgfbug}1010956 +[1010953]: ${pgfbug}1010953 +[1010482]: ${pgfbug}1010482 + +$h3 PL/Java 1.4.1 (9 December 2010) + +Note: Does not compile with Java 6. Use JDK 1.5 or 1.4. + +Compiles with PostgreSQL 8.4 and 9.0. + +Connection.getCatalog() has been implemented. 
+ +Bugfixes: + +* [Compiling error with postgresql 8.4.1][1010759] +* [org.postgresql.pljava.internal.Portal leak][1010712] +* [build java code with debugging if server has debugging enabled][1010189] +* [Connection.getCatalog() returns null][1010653] +* [VM crash in TransactionListener][1010462] +* [Link against wrong library when compiling amd64 code on Solaris][1010954] + +[1010759]: ${pgfbug}1010759 +[1010712]: ${pgfbug}1010712 +[1010189]: ${pgfbug}1010189 +[1010653]: ${pgfbug}1010653 +[1010462]: ${pgfbug}1010462 +[1010954]: ${pgfbug}1010954 + +Other commits: + +For a multi-threaded pljava function we need to adjust stack_base_ptr +before calling into the backend to avoid stack depth limit exceeded +errors. Previously this was done only on query execution, but we need +to do it on iteration of the ResultSet as well. + +When creating a variable length data type, the code was directly +assigning the varlena header length rather than going through an +access macro. The header format changed for the 8.3 release and this +manual coding was not noticed and changed accordingly. Use +SET_VARSIZE to do this correctly. + +Handle passed by value data types by reading and writing directly to +the Datum rather than dereferencing it. + +If the call to a type output function is the first pljava call in a +session, we get a crash. The first pljava call results in a SPI +connection being established and torn down. The type output function +was allocating the result in the SPI memory context which gets +destroyed prior to returning the data to the caller. Allocate the +result in the correct context to survive function exit. + +Clean up a warning about byteasend and bytearecv not having a +prototype when building against 9.0 as those declarations are now in a +new header file. + + +$h3 PL/Java 1.4.0 (1 February 2008) + +Warning! The recent postgresql security releases changed the API of a function +that PL/Java uses. 
The source can be built against either version, but the +binaries will only run against the version they were built against. The PL/Java +binaries for 1.4.0 have all been built against the latest server releases (which +you should be using anyway). If you are using an older you will have to build +from source. The binary releases support: 8.3 - All versions. 8.2 - 8.2.6 and +up. 8.1 - 8.1.11 and up. 8.0 - 8.0.15 and up. + +$h3 PL/Java 1.3.0 (18 June 2006) + +This release is about type mapping and the creation of new types in PL/Java. An +extensive effort has gone into making the PL/Java type system extremely +flexible. Not only can you map arbitrary SQL data types to java classes. You can +also create new scalar types completely in Java. Read about the Changes in +version 1.3. + +$h4 Changes + +* A much improved type mapping system that will allow you to: + + * [Map any SQL type to a Java class][maptype] + * [Create a Scalar UDT in Java][scalarudt] + * [Map array and pseudo types][deftypemap] + +[maptype]: https://github.com/tada/pljava/wiki/Mapping-an-sql-type-to-a-java-class +[scalarudt]: https://github.com/tada/pljava/wiki/Creating-a-scalar-udt-in-java +[deftypemap]: https://github.com/tada/pljava/wiki/Default-type-mapping + +* Get the OID for a given relation ([feature request 1319][1319]) +* Jar manifest included in the SQLJ Jar repository + ([feature request 1525][1525]) + +$h4 Fixed bugs + +* [Reconnect needed for jar manipulation to take effect][1531] +* [Backends hang with test suite][1504] +* [Keeps crashing while making a call to a function][1560] +* [Memory Leak in Statement.executeUpdate][1556] +* [jarowner incorrect after dump and reload][1506] +* [Missing JAR manifest][1525] +* [TZ adjustments for Date are incorrect][1547] +* [Functions returning sets leaks memory][1542] +* [drop lib prefix][1423] +* ["oid" column is not available in trigger's NEW/OLD ResultSet][1317] +* [fails to run with GCJ, too][1480] +* [Compile failure with 8.1.4][1558] +* 
[fails to build with GCJ][1479] +* [Record returning function cannot be called with different structures within one session][1440] +* [Cannot map function with complex return type to method that uses non primitive arguments][1551] +* [Get OID for given relation][1319] + +[1531]: ${gborgbug}1531 +[1504]: ${gborgbug}1504 +[1560]: ${gborgbug}1560 +[1556]: ${gborgbug}1556 +[1506]: ${gborgbug}1506 +[1525]: ${gborgbug}1525 +[1547]: ${gborgbug}1547 +[1542]: ${gborgbug}1542 +[1423]: ${gborgbug}1423 +[1317]: ${gborgbug}1317 +[1480]: ${gborgbug}1480 +[1558]: ${gborgbug}1558 +[1479]: ${gborgbug}1479 +[1440]: ${gborgbug}1440 +[1551]: ${gborgbug}1551 +[1319]: ${gborgbug}1319 + +$h3 PL/Java 1.2.0 (20 Nov 2005) + +The PL/Java 1.2.0 release is primarily targeted at the new PostgreSQL 8.1 but +full support for 8.0.x is maintained. New features include support IN/OUT +parameters, improved meta-data handling, and better memory management. + +$h3 PL/Java 1.1.0 (14 Apr 2005) + +PL/Java 1.1.0 includes a lot of new features such as `DatabaseMetaData`, +`ResultSetMetaData`, language handlers for both trusted and untrusted language, +additional semantics for functions returning `SETOF`, and simple ObjectPooling. + +$h3 PL/Java 1.0.1 (07 Feb 2005) + +This release resolves a couple of important security issues. The most important +one is perhaps that PL/Java now is a trusted language. See [Security][] for more +info. Filip Hrbek, now member of the PL/Java project, contributed what was +needed to make this happen. + +[Security]: https://github.com/tada/pljava/wiki/Security + +$h3 PL/Java 1.0.0 (23 Jan 2005) + +Today, after a long period of fine tuning, PL/Java 1.0.0 was finally released. 
diff --git a/src/site/markdown/releasenotes.md.vm b/src/site/markdown/releasenotes.md.vm index da1da3495..a2a76f77d 100644 --- a/src/site/markdown/releasenotes.md.vm +++ b/src/site/markdown/releasenotes.md.vm @@ -10,7 +10,89 @@ #set($ghbug = 'https://github.com/tada/pljava/issues/') #set($ghpull = 'https://github.com/tada/pljava/pull/') -$h2 PL/Java 1.6.0 +$h2 PL/Java 1.6.1 + +This is the first minor update in the PL/Java 1.6 series, with two bugs fixed. +It also adds functionality in the SQL generator, allowing automated +declaration of new PostgreSQL aggregates, casts, and operators, and functions +with `OUT` parameters. + +$h3 Changes + +$h4 Limitations when built with Java 10 or 11 removed + +PL/Java can now be built with any Java 9 or later (latest tested is 15 at +time of writing), and the built artifact can use any Java 9 or later at +run time (as selected by the `pljava.libjvm_location` configuration variable). + +That was previously true when built with Java 9 or with Java 12 or later, but +not when built with 10 (would not run on 9) or with 11 (would not run on 9 +or 10). Those limits have been removed. + +$h4 Functions with `OUT` parameters + +PL/Java has long been able to declare a function that returns a composite +type (or a set of such), by returning a named composite PostgreSQL type, or +by being declared to return `RECORD`. + +The former approach requires separately declaring a new composite type to +PostgreSQL so it can be named as the function return. The `RECORD` approach +does not require pre-declaring a type, but requires every caller of the +function to supply a column definition list at the call site. + +Declaring the function [with `OUT` parameters][outprm] offers a middle ground, +where the function has a fixed composite return type with known member +names and types, callers do not need to supply a column definition list, +and no separate declaration of the type is needed. 
+ +There is no change to how such a function is coded at the Java source level; +the new annotation element only changes the SQL generated to declare the +function to PostgreSQL. [Examples][outprmeg] are provided. + +$h4 Generation of aggregate, cast, and operator declarations + +The SQL generator now recognizes [`@Aggregate`][agganno], [`@Cast`][castanno], +and [`@Operator`][opranno] annotations, generating the corresponding SQL +deploy/undeploy scripts. Some examples (for [aggregates][aggeg], +[casts][casteg], and [operators][opreg]) are provided. The reduction +in boilerplate needed for a realistically-complete example can be seen +in [this comparison][bg160161] of Bear Giles's `pljava-udt-type-extension` +example; the two branches compared here are (1) using only the annotations +supported in PL/Java 1.6.0 and (2) using also the new support in 1.6.1. + +$h3 Bugs fixed + +* [1.6.0: opening a ResourceBundle (or a resource) fails](${ghbug}322) +* [Better workaround needed for javac 10 and 11 --release bug](${ghbug}328) + +[outprm]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Function.html#annotation.type.element.detail +[outprmeg]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/ReturnComposite.html#method.detail +[agganno]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Aggregate.html +[castanno]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Cast.html +[opranno]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Operator.html +[aggeg]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/Aggregates.html +[casteg]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/IntWithMod.html +[opreg]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/ComplexScalar.html +[bg160161]: https://github.com/beargiles/pljava-udt-type-extension/compare/98f1a6e...jcflack:3e56056 + +$h3 Credits + +Thanks to Bear Giles 
for the `pljava-udt-type-extension` example, which not only +illustrates the SQL generation improvements in this release, but also exposed +both of the bugs fixed here. + +$h2 Earlier releases + +## A nice thing about using Velocity is that each release can be entered at +## birth using h2 as its main heading, h3 and below within ... and then, when +## it is moved under 'earlier releases', just define those variables to be +## one heading level finer. Here goes: +#set($h2 = '###') +#set($h3 = '####') +#set($h4 = '#####') +#set($h5 = '######') + +$h2 PL/Java 1.6.0 (18 October 2020) This is the first release of a significantly refactored PL/Java 1.6 branch with a number of new features and changes. It requires Java 9 or later at @@ -197,1586 +279,4 @@ continuous integration was supported by Google Summer of Code. [charsets]: use/charsets.html [jpms]: use/jpms.html -$h2 Earlier releases - -## A nice thing about using Velocity is that each release can be entered at -## birth using h2 as its main heading, h3 and below within ... and then, when -## it is moved under 'earlier releases', just define those variables to be -## one heading level finer. Here goes: -#set($h2 = '###') -#set($h3 = '####') -#set($h4 = '#####') -#set($h5 = '######') - -$h2 PL/Java 1.5.6 - -This release adds support for PostgreSQL 13. - -It includes improvements to the JDBC 4.0 `java.sql.SQLXML` API that first became -available in 1.5.1, an update of the ISO SQL/XML examples based on the Saxon -product to Saxon 10 (which now includes support for XML Query higher-order -functions in the freely-licensed Saxon-HE), some improvements to internals, -and a number of bug fixes. - -$h3 Version compatibility - -PL/Java 1.5.6 can be built against recent PostgreSQL versions including 13, -and older ones back to 8.2, using Java SE 8 or later. The source code avoids -features newer than Java 6, so building with Java 7 or 6 should also be -possible, but is no longer routinely tested. 
The Java version used at runtime -does not have to be the same version used for building. PL/Java itself can run -on any Java version 6 or later if built with Java 11 or earlier; it can run -on Java 7 or later if built with Java 12 or later. PL/Java functions can be -written for, and use features of, whatever Java version will be loaded at run -time. See [version compatibility][versions] for more detail. - -PL/Java 1.5.6 cannot be built with Java 15 or later, as the Nashorn JavaScript -engine used in the build process no longer ships with Java 15. It can be built -with [GraalVM][], if `-Dpolyglot.js.nashorn-compat` is added to the `mvn` -command line. It will run on Java 15 if built with an earlier JDK or with Graal. - -When used with GraalVM as the runtime VM, PL/Java functions can use Graal's -"polyglot" capabilities to execute code in any other language available on -GraalVM. In this release, it is not yet possible to directly declare a function -in a language other than Java. - -$h3 Changes - -$h4 Improvements to the `java.sql.SQLXML` type - -Additions to the `Adjusting.XML` API support -[limiting resource usage][xmlreslim] in XML processing, controlling -[resolution][xmlresolv] of external documents and resources, -[validation against a schema][xmlschema], and integration of an -[XML catalog][xmlcatalog] to locally satisfy requests for external documents. - -Corrections and new documentation of [whitespace handling][xmlws] in XML values -of `CONTENT` form, and implementation [limitations][xmlimpl]. - -$h4 Improvements to the Saxon-based ISO SQL/XML example functions - -Updated the dependency for these optional examples to Saxon 10. 
Probably the -most significant of the [Saxon 10 changes][saxon10], for PostgreSQL's purposes, -will be that the XQuery [higher-order function feature][xqhof] is now included -in the freely-licensed Saxon-HE, so that it is now possible without cost to -integrate a modern XQuery 3.1 implementation that is lacking only the -[schema-aware feature][xqsaf] and the [typed data feature][xqtdf] (for those, -the paid Saxon-EE product is needed), and the [static typing feature][xqstf] -(which is not in any Saxon edition). - -To compensate for delivering the higher-order function support in -HE, -Saxonica moved certain optimizations to -EE. This seems a justifiable trade, as -it is better for development purposes to have the more complete implementation -of the language, leaving better optimization to be bought if and when needed. - -Thanks to a tip from Saxon's developer, the returning of results to SQL is now -done in a way that may incur less copying in some cases. - -$h4 Internals - -* Many sources of warnings reported by the Java VM's `-Xcheck:jni` option have - been tracked down, making it practical to use `-Xcheck:jni` in testing. -* Reduced pressure on the garbage collector in management of references to - PostgreSQL native state. 
- -$h3 Enhancement requests addressed - -* Work around PostgreSQL [API breakage in EnterpriseDB 11](${ghbug}260) - -$h3 Bugs fixed - -* [Support of arrays in composite types](${ghbug}300) -* [Order-dependent behavior caching array types](${ghbug}310) -* [Date conversion errors possible with PostgreSQL 10 on Windows/MSVC](${ghbug}297) -* [Build issue with Windows/MinGW-w64](${ghbug}282) -* ["xmltable" with XML output column or parameter](${ghbug}280) -* [Google Summer of Code catches 15-year-old PL/Java bug](${ghbug}274) -* [Several bugs in SQLXML handling](${ghbug}272) -* Work around an exception from `Reference.clear` on OpenJ9 JVM -* Bugs in SQL generator when supplying a function parameter name, or the - `category`, `delimiter`, or `storage` attribute of a user-defined type. - -$h3 Updated PostgreSQL APIs tracked - -* Removal of `CREATE EXTENSION ... FROM unpackaged` -* `numvals` in `SPITupleTable` -* `detoast.h` -* `detoast_external_attr` - -$h3 Credits - -There is a PL/Java 1.5.6 thanks in part to -Christoph Berg, -Chapman Flack, -Kartik Ohri, -original creator Thomas Hallgren, -and the many contributors to earlier versions. - -The work of Kartik Ohri in summer 2020 was supported by Google Summer of Code. 
- -[xmlreslim]: use/sqlxml.html#Additional_adjustments_in_recent_Java_versions -[xmlresolv]: use/sqlxml.html#Supplying_a_SAX_or_DOM_EntityResolver_or_Schema -[xmlschema]: use/sqlxml.html#Validation_against_a_schema -[xmlcatalog]: use/sqlxml.html#Using_XML_Catalogs_when_running_on_Java_9_or_later -[xmlws]: use/sqlxml.html#Effect_on_parsing_of_whitespace -[xmlimpl]: use/sqlxml.html#Known_limitations -[saxon10]: https://www.saxonica.com/html/documentation/changes/v10/installation.html -[xqhof]: https://www.w3.org/TR/xquery-31/#id-higher-order-function-feature -[xqsaf]: https://www.w3.org/TR/xquery-31/#id-schema-aware-feature -[xqtdf]: https://www.w3.org/TR/xquery-31/#id-typed-data-feature -[xqstf]: https://www.w3.org/TR/xquery-31/#id-static-typing-feature - -$h2 PL/Java 1.5.5 (4 November 2019) - -This bug-fix release fixes runtime issues reported in 32-bit `i386` builds, some -of which would not affect a more common 64-bit architecture, but some of which -could under the wrong circumstances, so this release should be used in -preference to 1.5.4 or 1.5.3 on any architecture. - -It is featurewise identical to 1.5.4, so those release notes, below, should be -consulted for the details of user-visible changes. - -Thanks to Christoph Berg for the `i386` testing that exposed these issues. - -$h3 Bugs fixed - -* [32bit i386 segfault](${ghbug}246) - -$h2 PL/Java 1.5.4 (29 October 2019) - -This minor release fixes a build issue reported with Java 11, and adds -support for building with Java 13. Issues with building the javadocs in later -Java versions are resolved. A work-in-progress feature that can -[apply the SQLXML API to other tree-structured data types](use/xmlview.html) -is introduced. - -Documentation updates include coverage of -[changes to Application Class Data Sharing](install/appcds.html) in recent -Hotspot versions, and ahead-of-time compilation using -[jaotc](install/vmoptions.html#a-XX:AOTLibrary). 
- -Otherwise, the release notes for 1.5.3, below, should be -consulted for the details of recent user-visible changes. - -$h3 Bugs fixed - -* [Build failure with Java 11 and --release](${ghbug}235) -* [Build with Java 13](${ghbug}236) -* [Javadoc build fails in Java 11+](${ghbug}239) -* [Javadoc build fails in Java 13](${ghbug}241) - -$h2 PL/Java 1.5.3 (4 October 2019) - -This release adds support for PostgreSQL 12, and removes the former -requirement to build with a Java release earlier than 9. - -It includes a rework of of threading and resource management, improvements to -the JDBC 4.0 `java.sql.SQLXML` API that first became available in 1.5.1, and -a substantially usable example providing the functionality of ISO SQL -`XMLEXISTS`, `XMLQUERY`, `XMLTABLE`, `XMLCAST`, `LIKE_REGEX`, -`OCCURRENCES_REGEX`, `POSITION_REGEX`, `SUBSTRING_REGEX`, and `TRANSLATE_REGEX`. -Some bugs are fixed. - -$h3 Version compatibility - -PL/Java 1.5.3 can be built against recent PostgreSQL versions including 12, -and older ones back to 8.2, using Java SE 8 or later. The source code avoids -features newer than Java 6, so building with Java 7 or 6 should also be -possible, but is no longer routinely tested. The Java version used at runtime -does not have to be the same version used for building. PL/Java itself can run -on any Java version 6 or later if built with Java 11 or earlier; it can run -on Java 7 or later if built with Java 12. PL/Java functions can be written for, -and use features of, whatever Java version will be loaded at run time. See -[version compatibility][versions] for more detail. - -When used with [GraalVM][] as the runtime VM, PL/Java functions can use its -"polyglot" capabilities to execute code in any other language available on -GraalVM. In this release, it is not yet possible to directly declare a function -in a language other than Java. 
- -$h3 Changes - -$h4 Threading/synchronization, finalizers, and new configuration variable - -Java is multithreaded while PostgreSQL is not, requiring ways to prevent -Java threads from entering PostgreSQL at the wrong times, while cleaning up -native resources in PostgreSQL when PL/Java references are released, and -_vice versa_. - -PL/Java has historically used an assortment of approaches including Java -object finalizers, which have long been deprecated informally, and formally -since Java 9. Finalizers enter PostgreSQL from a thread of their own, and the -synchronization approach used in PL/Java 1.5.2 and earlier has been associated -with occasional hangs at backend exit when using an OpenJ9 JVM at runtime. - -A redesigned approach using a new `DualState` class was introduced in 1.5.1, -at first only used in implementing the `java.sql.SQLXML` type, a newly-added -feature. In 1.5.3, other approaches used in the rest of PL/Java's code base are -migrated to use `DualState` also, and all uses of the deprecated Java object -finalizers have been retired. With the new techniques, the former occasional -OpenJ9 hangs have not been observed. - -This represents the most invasive change to PL/Java's thread synchronization -in many years, so it may be worthwhile to reserve extra time for -testing applications. - -A new [configuration variable](use/variables.html), -`pljava.java_thread_pg_entry`, allows adjusting the thread policy. The default -setting, `allow`, preserves PL/Java's former behavior, allowing Java threads -entry into PostgreSQL one at a time, only when any thread already in PG code -has entered or returned to Java. - -With object finalizers no longer used, PL/Java itself does not need the `allow` -mode, but there may be application code that does. Application code can be -tested by setting the `error` mode, which will raise an error for any attempted -entry to PG from a thread other than the original thread that launched PL/Java. 
-If an application runs in `error` mode with no errors, it can also be run in -`block` mode, which may be more efficient, as it eliminates many locking -operations that happen in `allow` or `error` mode. However, if `block` mode -is used with an application that has not been fully tested in `error` mode -first, and the application does attempt to enter PostgreSQL from a Java thread -other than the initial one, the result can be blocked threads or a deadlocked -backend that has to be killed. - -A JMX management client like `JConsole` or `jvisualvm` can identify threads that -are blocked, if needed. The new `DualState` class also offers some statistics -that can be viewed in `JConsole`, or `jvisualvm` with the `VisualVM-MBeans` -plugin. - -$h4 Improvements to the `java.sql.SQLXML` type - -Support for this JDBC 4.0 type was added in PL/Java 1.5.1. Release 1.5.3 -includes the following improvements: - -* A new ["Adjusting" API](use/sqlxml.html#Extended_API_to_configure_XML_parsers) - exposes configuration settings for Java XML parsers that may be created - internally during operations on `SQLXML` instances. That allows the default - settings to restrict certain XML parser features as advocated by the - [Open Web Application Security Project][OWASP] when XML content may be - coming from untrusted sources, with a simple API for relaxing those - restrictions when appropriate for XML content from a known source. -* It is now possible for a PL/Java function to return, pass into a - `PreparedStatement`, etc., an `SQLXML` instance that PL/Java did not create. - For example, a PL/Java function could use another database's JDBC driver to - obtain a `SQLXML` value from that database, and use that as its own return - value. Transparently, the content is copied to a PL/Java `SQLXML` instance. - The copy can also be done explicitly, allowing the "Adjusting" API to be - used if the default XML parser restrictions should be relaxed. 
-* Behavior when the server encoding is not UTF-8, or when it is not an - IANA-registered encoding (even if Java has a codec for it), has been - improved. - -$h4 Improvements to the Saxon-based ISO SQL/XML example functions - -Since PL/Java 1.5.1, the supplied examples have included a not-built-by-default -[example supplying ISO SQL/XML features missing from core PostgreSQL][exsaxon]. -It is not built by default because it raises the minimum Java version to 8, and -brings in the Saxon-HE XML-processing library. - -In 1.5.3, the example now provides versions of the ISO SQL `XMLEXISTS`, -`XMLQUERY`, `XMLTABLE`, and `XMLCAST` functions based on the W3C XQuery -language as ISO SQL specifies (while PostgreSQL has an "XMLTABLE" function -since release 10 and "XMLEXISTS" since 9.1, they have -[numerous limitations][appD31] inherited from a library that does not support -XQuery, and additional peculiarities prior to PostgreSQL 12), and the ISO SQL -`LIKE_REGEX`, `OCCURRENCES_REGEX`, `POSITION_REGEX`, `SUBSTRING_REGEX`, and -`TRANSLATE_REGEX` functions that apply XQuery regular expressions. It also -includes the `XMLTEXT` function, which is rather trivial, but also missing from -core PostgreSQL, and supplied here for completeness. - -As plain user-defined functions without special treatment in PostgreSQL's SQL -parser, these functions cannot be used with the exact syntax specified in -ISO SQL, but require simple rewriting into equivalent forms that are valid -ordinary PostgreSQL function calls. The rewritten forms are intended to be easy -to read and correspond closely to the ISO syntax. - -While still presented as examples and not a full implementation, these functions -are now intended to be substantially usable (subject to minor -[documented limits][exsaxon]), and testing and reports of shortcomings are -welcome. 
- -$h4 ResultSet holdability again - -A `ResultSet` obtained from a query done in PL/Java would return the value -`CLOSE_CURSORS_AT_COMMIT` to a caller of its `getHoldability` method, but in -reality would become unusable as soon as the PL/Java function creating it -returned to PostgreSQL. That was fixed in PL/Java 1.5.1 for a `ResultSet` -obtained from a `Statement`, but not for one obtained from a -`PreparedStatement`. It now correctly remains usable to the end of the -transaction in either case. - -$h4 Savepoint behavior at rollback - -Per JDBC, a `Savepoint` still exists after being used in a rollback, and can be -used again; the rollback only invalidates any `Savepoint` that had been created -after the one being rolled back. That should be familiar behavior, as it is the -same as PostgreSQL's own SQL `SAVEPOINT` behavior. It is also correct in pgJDBC, -which has test coverage to confirm it. PL/Java has been doing it wrong. - -In 1.5.3 it now has the JDBC-specified behavior. For compatibility with existing -application code, the meaning of the `pljava.release_lingering_savepoints` -[configuration variable](use/variables.html) has been adjusted. The setting -tells PL/Java what to do if a `Savepoint` still exists, neither released nor -rolled back, at the time a function exits. If `on`, the savepoint is released -(committed); if `off`, the savepoint is rolled back. A warning is issued in -either case. - -In an existing function that used savepoints and assumed that a rolled-back -savepoint would be no longer live, it will now be normal for such a savepoint -to reach the function exit still alive. To recognize this case, PL/Java tracks -whether any savepoint has been rolled back at least once. 
At function exit, any -savepoint that has been neither released nor ever rolled back is disposed of -according to the `release_lingering_savepoints` setting and with a warning, -as before, but any savepoint that has already been rolled back at least once -is simply released, regardless of the variable setting, and without producing -a warning. - -$h4 Control of function parameter names in generated SQL - -When generating the `CREATE FUNCTION` command in a deployment descriptor -according to an annotated Java function, PL/Java ordinarily gives the function -parameters names that match their Java names, unquoted. Because PostgreSQL -allows named notation when calling a function, the parameter names in its -declaration become part of its signature that cannot later be changed without -dropping and re-creating the function. - -In some cases, explicit control of the SQL parameter names may be wanted, -independently of the Java names: to align with an external standard, perhaps, -or when either the SQL or the Java name would collide with a reserved word. -For that purpose, the (already slightly overloaded) `@SQLType` annotation now -has a `name` attribute that can specify the SQL name of the annotated parameter. - -$h4 Documentation - -The user guide and guide for packagers contained incorrect instructions for -using Maven to build a single subproject of PL/Java (such as `pljava-api` or -`pljava-examples`) instead of the full project. Those have been corrected. 
- -$h3 Enhancement requests addressed - -* [Allow building with Java releases newer than 8](${ghbug}212) - -$h3 Bugs fixed - -* [ResultSet holdability still wrong when using PreparedStatement](${ghbug}209) -* [Can't return (or set/update PreparedStatement/ResultSet) non-PL/Java SQLXML object](${ghbug}225) -* [JDBC Savepoint behavior](${ghbug}228) -* Writing `SQLXML` via StAX when server encoding is not UTF-8 -* StAX rejecting server encoding if not an IANA-registered encoding -* Error handling when PL/Java startup fails - (may have been [issue 211](${ghbug}211)) -* SPI connection management for certain set-returning functions - -$h3 Updated PostgreSQL APIs tracked - -* Retirement of `dynloader.h` -* Retirement of magical Oids -* Retirement of `nabstime` -* Retirement of `pg_attrdef.adsrc` -* Extensible `TupleTableSlot`s -* `FunctionCallInfoBaseData` - -$h3 Credits - -There is a PL/Java 1.5.3 thanks in part to -Christoph Berg, -Chapman Flack, -`ppKrauss`, -original creator Thomas Hallgren, -and the many contributors to earlier versions. - -[GraalVM]: https://www.graalvm.org/ -[OWASP]: https://www.owasp.org/index.php/About_The_Open_Web_Application_Security_Project -[appD31]: https://www.postgresql.org/docs/12/xml-limits-conformance.html - -$h2 PL/Java 1.5.2 (5 November 2018) - -A pure bug-fix release, correcting a regression in 1.5.1 that was not caught -in pre-release testing, and could leave -[conversions between PostgreSQL `date` and `java.sql.Date`](${ghbug}199) off -by one day in certain timezones and times of the year. - -1.5.1 added support for the newer `java.time` classes from JSR 310 / JDBC 4.2, -which are [recommended as superior alternatives](use/datetime.html) to the -older conversions involving `java.sql.Date` and related classes. The new -versions are superior in part because they do not have hidden timezone -dependencies. - -However, the change to the historical `java.sql.Date` conversion behavior was -inadvertent, and is fixed in this release. 
- -$h3 Open issues with date/time/timestamp conversions - -During preparation of this release, other issues of longer standing were also -uncovered in the legacy conversions between PG `date`, `time`, and -`timestamp` classes and the `java.sql` types. They are detailed in -[issue #200](${ghbug}200). Because they are not regressions but long-established -behavior, they are left untouched in this release, and will be fixed in -a future release. - -The Java 8 `java.time` conversions are free of these issues as well. - -$h2 PL/Java 1.5.1 (17 October 2018) - -This release adds support for PostgreSQL 9.6, 10, and 11, -and plays more nicely with `pg_upgrade`. If a PostgreSQL installation -is to be upgraded using `pg_upgrade`, and is running a version of -PL/Java before 1.5.1, the PL/Java version should first be upgraded -in the running PostgreSQL version, and then the PostgreSQL `pg_upgrade` -can be done. - -The documentation is expanded on the topic of shared-memory precompiled -class cache features, which can substantially improve JVM startup time -and memory footprint, and are now available across Oracle Java, OpenJDK -with Hotspot, and OpenJDK with OpenJ9. When running on OpenJ9, PL/Java -cooperates with the JVM to include even the application's classes -(those loaded with `install_jar`) in the shared cache, something not -yet possible with Hotspot. While the advanced sharing feature in Oracle -Java is still subject to a commercial licensing encumbrance, the equivalent -(or superior, with OpenJ9) features in OpenJDK are not encumbered. - -Significant new functionality includes new datatype mapping support: -SQL `date`, `time`, and `timestamp` values can be mapped to the new -Java classes of the `java.time` package in Java 8 and later (JSR 310 / -JDBC 4.2), which are much more faithful representations of the values -in SQL. 
Values of `xml` type can be manipulated efficiently using the -JDBC 4.0 `SQLXML` API, supporting several different APIs for XML -processing in Java. - -For Java code that does not use the new date/time classes in the -`java.time` package, some minor conversion inaccuracies (less than -two seconds) in the older mapping to `java.sql.Timestamp` have been -corrected. - -Queries from PL/Java code now produce `ResultSet`s that are usable to the -end of the containing transaction, as they had already been claiming to be. - -With PostgreSQL 9.6 support comes the ability to declare functions -`PARALLEL { UNSAFE | RESTRICTED | SAFE }`, and with PG 10 support, -transition tables are available to triggers. - -$h3 Security - -$h4 Schema-qualification - -PL/Java now more consistently schema-qualifies objects in queries and DDL -it generates internally, as a measure of defense-in-depth in case the database -it is installed in has not been [protected][prot1058] from [CVE-2018-1058][]. - -_No schema-qualification work has been done on the example code._ If the -examples jar will be installed, it should be in a database that -[the recommended steps have been taken to secure][prot1058]. - -$h4 Some large-object code removed - -1.5.1 removes the code at issue in [CVE-2016-0768][], which pertained to -PostgreSQL large objects, but had never been documented or exposed as API. - -This is not expected to break any existing code at all, based on further -review showing the code in question had also been simply broken, since 2006, -with no reported issues in that time. That discovery would support an argument -for downgrading the severity of the reported vulnerability, but with no need -to keep that code around, it is more satisfying to remove it entirely. - -Developers wishing to manipulate large objects in PL/Java are able to do so -using the SPI JDBC interface and the large-object SQL functions already -available in every PostgreSQL version PL/Java currently supports. 
- -[CVE-2018-1058]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-1058 -[prot1058]: https://wiki.postgresql.org/wiki/A_Guide_to_CVE-2018-1058:_Protect_Your_Search_Path#Next_Steps:_How_Can_I_Protect_My_Databases.3F - -$h3 Version compatibility - -PL/Java 1.5.1 can be built against recent PostgreSQL versions including 11, -and older ones back to 8.2, using Java SE 8, 7, or 6. It can _run_ using newer -Java versions including Java 11. PL/Java functions can be written for, and use -features of, the Java version loaded at run time. See -[version compatibility][versions] for more detail. - -OpenJDK is supported, and can be downloaded in versions using the Hotspot or the -OpenJ9 JVM. Features of modern Java VMs can drastically reduce memory footprint -and startup time, in particular class-data sharing. Several choices of Java -runtime now offer such features: Oracle Java has a simple class data sharing -feature for the JVM itself, freely usable in all supported versions, and an -"application class data sharing" feature in Java 8 and later that can also share -the internal classes of PL/Java, but is a commercial feature requiring a -license from Oracle. As of Java 10, the same application class sharing feature -is present in OpenJDK/Hotspot, where it is freely usable without an additional -license. OpenJDK/OpenJ9 includes a different, and very sophisticated, class -sharing feature, freely usable from Java 8 onward. More on these features -can be found [in the installation docs][vmopts]. - -$h3 Changes - -$h4 Typing of parameters in prepared statements - -PL/Java currently does not determine the necessary types of `PreparedStatement` -parameters from the results of PostgreSQL's own type analysis of the query -(as a network client would, when using PostgreSQL's "extended query" protocol). -PostgreSQL added the means to do so in SPI only in PostgreSQL 9.0, and a future -PL/Java major release should use it. 
However, this release does make two small -changes to the current behavior. - -Without the query analysis results from PostgreSQL, PL/Java tries to type the -prepared-statement parameters based on the types of values supplied by the -application Java code. It now has two additional ways to do so: - -* If Java code supplies a Java user-defined type (UDT)---that is, an object - implementing the `SQLData` interface---PL/Java will now call the `SQLData` - method `getSQLTypeName` on that object and use the result to pin down - the PostgreSQL type of the parameter. Existing code should already provide - this method, but could, in the past, have returned a bogus result without - detection, as PL/Java did not use it. - -* Java code can use the three-argument form of `setNull` to specify the exact - PostgreSQL type for a parameter, and then another method can be used to - supply a non-null value for it. If the following non-null value has - a default mapping to a different PostgreSQL type, in most cases it will - overwrite the type supplied with `setNull` and re-plan the query. That was - PL/Java's existing behavior, and was not changed for this minor release. - However, the new types introduced in this release---the `java.time` types - and `SQLXML`---behave in the way that should become universal in a future - major release: the already-supplied PostgreSQL type will be respected, and - PL/Java will try to find a usable coercion to it. - -$h4 Inaccuracies converting TIMESTAMP and TIMESTAMPTZ - -When converting between PostgreSQL values of `timestamp` or `timestamptz` type -and the pre-Java 8 original JDBC type `java.sql.Timestamp`, there were cases -where values earlier than 1 January 2000 would produce exceptions rather than -converting successfully. Those have been fixed. - -Also, converting in the other direction, from `java.sql.Timestamp` to a -PostgreSQL timestamp, an error of up to 1.998 seconds (averaging 0.999) -could be introduced. - -That error has been corrected. 
If an application has stored Java `Timestamp`s -and corresponding SQL `timestamp`s generated in the past and requires them -to match, it could be affected by this change. - -$h4 New date/time/timestamp API in Java 8 `java.time` package - -The old, and still default, mappings in JDBC from the SQL `date`, `time`, and -`timestamp` types to `java.sql.Date`, `java.sql.Time`, and `java.sql.Timestamp`, -were never well suited to represent the PostgreSQL data types. The `Time` and -`Timestamp` classes were used to map both the with-timezone and without-timezone -variants of the corresponding SQL types and, clearly, could not represent both -equally well. These Java classes all contain timezone dependencies, requiring -the conversion to involve timezone calculations even when converting non-zoned -SQL types, and making the conversion results for non-zoned types implicitly -depend on the current PostgreSQL session timezone setting. - -Applications are strongly encouraged to adopt Java 8 as a minimum language -version and use the new-in-Java-8 types in the `java.time` package, which -eliminate those problems and map the SQL types much more faithfully. -For PL/Java function parameters and returns, the class in the method declaration -can simply be changed. For retrieving date/time/timestamp values from a -`ResultSet` or `SQLInput` object, use the variants of `getObject` / `readObject` -that take a `Class` parameter. The class to use is: - -| PostgreSQL type | `java.time` class | -|--:|:--| -|`date`|`LocalDate`| -|`time without time zone`|`LocalTime`| -|`time with time zone`|`OffsetTime`| -|`timestamp without time zone`|`LocalDateTime`| -|`timestamp with time zone`|`OffsetDateTime`| -[Correspondence of PostgreSQL date/time types and Java 8 `java.time` classes] - -Details on these mappings are [added to the documentation](use/datetime.html). - -$h4 Newly supported `java.sql.SQLXML` type - -PL/Java has not, until now, supported the JDBC 4.0 `SQLXML` type. 
PL/Java -functions have been able to work with PostgreSQL XML values by mapping them -as Java `String`, but that conversion could introduce character encoding issues -outside the control of the XML APIs, and also has memory implications if an -application stores, or generates in queries, large XML values. Even if the -processing to be done in the application could be structured to run in constant -bounded memory while streaming through the XML, a conversion to `String` -requires the whole, uncompressed, character-serialized value to be brought into -the Java heap at once, and any heap-size tuning has to account for that -worst-case size. The `java.sql.SQLXML` API solves those problems by allowing -XML manipulation with any of several Java XML APIs with the data remaining in -PostgreSQL native memory, never brought fully into the Java heap unless that is -what the application does. Heap sizing can be based on just the -application's processing needs. - -The `SQLXML` type can take the place of `String` in PL/Java function parameters -and returns simply by changing their declarations from `String` to `SQLXML`. -When retrieving XML values from `ResultSet` or `SQLInput` objects, the legacy -`getObject / readObject` methods will continue to return `String` for existing -application compatibility, so the specific `getSQLXML / readSQLXML` methods, or -the forms of `getObject / readObject` with a `Class` parameter and passing -`SQLXML.class`, must be used. A [documentation page](use/sqlxml.html) has been -added, and the [PassXML example][exxml] illustrates use of the API. - -A [not-built-by-default new example][exsaxon] (because it depends on Java 8 and -the Saxon-HE XML-processing library) provides a partial implementation of true -`XMLQUERY` and `XMLTABLE` functions for PostgreSQL, using the standard-specified -XML Query language rather than the XPath 1.0 of the native PostgreSQL functions. 
- -[exxml]: pljava-examples/apidocs/index.html?org/postgresql/pljava/example/annotation/PassXML.html -[exsaxon]: examples/saxon.html - -$h4 New Java property exposes the PostgreSQL server character-set encoding - -A Java system property, `org.postgresql.server.encoding`, is set to the -canonical name of a supported Java `Charset` that corresponds to PostgreSQL's -`server_encoding` setting, if one can be found. If the server encoding's name -is not recognized as any known Java `Charset`, this property will be unset, and -some functionality, such as the `SQLXML` API, may be limited. If a Java -`Charset` does exist (or is made available through a `CharsetProvider`) that -does match the PostgreSQL server encoding, but is not automatically selected -because of a naming mismatch, the `org.postgresql.server.encoding` property can -be set (with a `-D` in `pljava.vmoptions`) to select it by name. - -$h4 ResultSet holdability - -A `ResultSet` obtained from a query done in PL/Java would return the value -`CLOSE_CURSORS_AT_COMMIT` to a caller of its `getHoldability` method, but in -reality would become unusable as soon as the PL/Java function creating it -returned to PostgreSQL. It now remains usable to the end of the transaction, -as claimed. - -$h4 PostgreSQL 9.6 and parallel query - -A function in PL/Java can now be [annotated][apianno] -`parallel={UNSAFE | RESTRICTED | SAFE}`, with `UNSAFE` the default. -A new [user guide section][ugparqry] explains the possibilities and -tradeoffs. (Good cases for marking a PL/Java function `SAFE` may be -rare, as pushing such a function into multiple background processes -will require them all to start JVMs. But if a practical application -arises, PostgreSQL's `parallel_setup_cost` can be tuned to help the -planner make good plans.) 
- -Although `RESTRICTED` and `SAFE` Java functions work in simple tests, -there has been no exhaustive audit of the code to ensure that PL/Java's -internal workings never violate the behavior constraints on such functions. -The support should be considered experimental, and could be a fruitful -area for beta testing. - -[ugparqry]: use/parallel.html - -$h4 Tuple counts widened to 64 bits with PostgreSQL 9.6 - -To accommodate the possibility of more than two billion tuples in a single -operation, the SPI implementation of the JDBC `Statement` interface now -provides the JDK 8-specified `executeLargeBatch` and `getLargeUpdateCount` -methods defined to return `long` counts. The original `executeBatch` and -`getUpdateCount` methods remain but, obviously, cannot return counts that -exceed `INT_MAX`. In case the count is too large, `getUpdateCount` will throw -an `ArithmeticException`; `executeBatch` will store `SUCCESS_NO_INFO` for -any statement in the batch that affected too many tuples to report. - -For now, a `ResultSetProvider` cannot be used to return more than `INT_MAX` -tuples, but will check that condition and throw an error to ensure predictable -behavior. - -$h4 `pg_upgrade` - -PL/Java should be upgraded to 1.5.1 in a database cluster, before that -cluster is binary-upgraded to a newer PostgreSQL version using `pg_upgrade`. -A new [Upgrading][upgrading] installation-guide section centralizes information -on both upgrading PL/Java in a database, and upgrading a database with PL/Java -in it. - -[upgrading]: install/upgrade.html - -$h4 Suppressing row operations from triggers - -In PostgreSQL, a `BEFORE ROW` trigger is able to allow the proposed row -operation, allow it with modified values, or silently suppress the operation -for that row. Way back in PL/Java 1.1.0, the way to produce the 'suppress' -outcome was for the trigger method to throw an exception. 
Since PL/Java 1.2.0, -however, an exception thrown in a trigger method is used to signal an error -to PostgreSQL, and there has not been a way to suppress the row operation. - -The `TriggerData` interface now has a [`suppress`][tgsuppress] method that -the trigger can invoke to suppress the operation for the row. - -[tgsuppress]: pljava-api/apidocs/index.html?org/postgresql/pljava/TriggerData.html#suppress() - -$h4 Constraint triggers - -New attributes in the `@Trigger` annotation allow the SQL generator to -create constraint triggers (a type of trigger that can be created with SQL -since PostgreSQL 9.1). Such triggers will be delivered by the PL/Java runtime -(to indicate that a constraint would be violated, a constraint trigger -method should throw an informative exception). However, the trigger method -will have access, through the `TriggerData` interface, only to the properties -common to ordinary triggers; methods on that interface to retrieve properties -specific to constraint triggers have not been added for this release. - -$h4 PostgreSQL 10 and trigger transition tables - -A trigger [annotation][apianno] can now specify `tableOld="`_name1_`"` or -`tableNew="`_name2_`"`, or both, and the PL/Java function servicing the -trigger can do SPI JDBC queries and see the transition table(s) under the -given name(s). The [triggers example code][extrig] has been extended with -a demonstration. - -[extrig]: $project.scm.url/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java - -$h4 Logging from Java - -The way the Java logging system has historically been plumbed to PostgreSQL's, -as described in [issue 125](${ghbug}125), can be perplexing both because it -is unaffected by later changes to the PostgreSQL settings after PL/Java is -loaded in the session, and because it has honored only `log_min_messages` -and ignored `client_min_messages`. 
The second part is easy to fix, so in -1.5.1 the threshold where Java discards messages on the fast path is -determined by the finer of `log_min_messages` and `client_min_messages`. - -$h4 Conveniences for downstream package maintainers - -The `mvn` command to build PL/Java will now accept an option to provide -a useful default for `pljava.libjvm_location`, when building a package for -a particular software environment where the likely path to Java is known. - -The `mvn` command will also accept an option to specify, by the path to -the `pg_config` executable, the PostgreSQL version to build against, in -case multiple versions exist on the build host. This was already possible -by manipulating `PATH` ahead of running `mvn`, but the option makes it more -explicit. - -A new [packaging section][packaging] in the build guide documents those -and a number of considerations for making a PL/Java package. - -[packaging]: build/package.html - -$h3 Enhancement requests addressed - -$h4 In 1.5.1-BETA3 - -* [Add a ddr.reproducible option to SQL generator](${ghbug}186) - -$h4 In 1.5.1-BETA2 - -* [java 8 date/time api](${ghbug}137) -* [Annotations don't support CREATE CONSTRAINT TRIGGER](${ghbug}138) -* [Let annotations give defaults to row-type parameters](${ghpull}153) -* [Improve DDR generator on the dont-repeat-yourself dimension for UDT type mapping](${ghpull}159) -* [Support the JDBC 4.0 SQLXML type](${ghpull}171) - -$h3 Bugs fixed - -$h4 In 1.5.1-BETA3 - -* [self-install jar ClassCastException (...ConsString to String), some java 6/7 runtimes](${ghbug}179) -* [i386 libjvm_location gets mangled as .../jre/lib/1/server/libjvm.so](${ghbug}176) -* [java.lang.ClassNotFoundException installing examples jar](${ghbug}178) -* [Preprocessor errors building on Windows with MSVC](${ghbug}182) -* [Saxon example does not build since Saxon 9.9 released](${ghbug}185) -* [Segfault in VarlenaWrapper.Input on 32-bit](${ghbug}177) -* [Windows: self-install jar silently fails to replace 
existing files](${ghbug}189) -* [ERROR: java.sql.SQLException: _some Java class name_](${ghbug}192) -* [SetOfRecordTest with timestamp column influenced by environment ](${ghbug}195) - -$h4 In 1.5.1-BETA2 - -* [PostgreSQL 10: SPI_modifytuple failed with SPI_ERROR_UNCONNECTED](${ghbug}134) -* [SPIConnection prepareStatement doesn't recognize all parameters](${ghbug}136) -* [Ordinary (non-constraint) trigger has no way to suppress operation](${ghbug}142) -* [ResultSetHandle and column definition lists](${ghbug}146) -* [PreparedStatement doesn't get parameter types from PostgreSQL](${ghbug}149) - _(partial improvements)_ -* [internal JDBC: inaccuracies converting TIMESTAMP and TIMESTAMPTZ](${ghbug}155) -* [Missing type mapping for Java return `byte[]`](${ghbug}157) -* [The REMOVE section of DDR is in wrong order for conditionals](${ghbug}163) -* [Loading PL/Java reinitializes timeouts in PostgreSQL >= 9.3](${ghbug}166) -* [JDBC ResultSet.CLOSE_CURSORS_AT_COMMIT reported, but usable life shorter](${ghbug}168) - -$h4 In 1.5.1-BETA1 - -* [Add support for PostgreSQL 9.6](${ghbug}108) -* [Clarify documentation of ResultSetProvider](${ghbug}115) -* [`pg_upgrade` (upgrade failure from 9.5 to 9.6)](${ghbug}117) -* [Java logging should honor `client_min_messages` too](${ghbug}125) - -$h3 Updated PostgreSQL APIs tracked - -* `heap_form_tuple` -* 64-bit `SPI_processed` -* 64-bit `Portal->portalPos` -* 64-bit `FuncCallContext.call_cntr` -* 64-bit `SPITupleTable.alloced` and `.free` -* `IsBackgroundWorker` -* `IsBinaryUpgrade` -* `SPI_register_trigger_data` -* `SPI` without `SPI_push`/`SPI_pop` -* `AllocSetContextCreate` -* `DefineCustom...Variable` (no `GUC_LIST_QUOTE` in extensions) - -$h3 Credits - -There is a PL/Java 1.5.1 thanks in part to -Christoph Berg, -Thom Brown, -Luca Ferrari, -Chapman Flack, -Petr Michalek, -Steve Millington, -Kenneth Olson, -Fabian Zeindl, -original creator Thomas Hallgren, -and the many contributors to earlier versions. 
- -$h2 PL/Java 1.5.0 (29 March 2016) - -This, the first PL/Java numbered release since 1.4.3 in 2011, combines -compatibility with the latest PostgreSQL and Java versions with modernized -build and installation procedures, automatic generation of SQL deployment -code from Java annotations, and many significant fixes. - -$h3 Security - -Several security issues are addressed in this release. Sites already -using PL/Java are encouraged to update to 1.5.0. For several of the -issues below, practical measures are described to mitigate risk until -an update can be completed. - -[CVE-2016-0766][], a privilege escalation requiring an authenticated -PostgreSQL connection, is closed by installing PL/Java 1.5.0 (including -prereleases) or by updating PostgreSQL itself to at least 9.5.1, 9.4.6, -9.3.11, 9.2.15, 9.1.20. Vulnerable systems are only those running both -an older PL/Java and an older PostgreSQL. - -[CVE-2016-0767][], in which an authenticated PostgreSQL user with USAGE -permission on the `public` schema may alter the `public` schema classpath, -is closed by release 1.5.0 (including prereleases). If updating to 1.5.0 -must be delayed, risk can be mitigated by revoking public `EXECUTE` permission -on `sqlj.set_classpath` and granting it selectively to responsible users or -roles. - -This release brings a policy change to a more secure-by-default posture, -where the ability to create functions in `LANGUAGE java` is no longer -automatically granted to `public`, but can be selectively granted to roles -that will have that responsibility. The change reduces exposure to a known -issue present in 1.5.0 and earlier versions, that will be closed in a future -release ([CVE-2016-0768][], see **large objects, access control** below). 
- -The new policy will be applied in a new installation; permissions will not -be changed in an upgrade, but any site can move to this policy, even before -updating to 1.5.0, with `REVOKE USAGE ON LANGUAGE java FROM public;` followed by -explicit `GRANT` commands for the users/roles expected to create Java -functions. - -[CVE-2016-2192][] is an issue in which an authenticated user can alter type mappings -without owning the types involved. Exploitability is limited by other -permissions, but if type mapping is a feature being used at a site, one -can interfere with proper operation of code that relies on it. A mitigation -is simply to `REVOKE EXECUTE ... FROM PUBLIC` on the `sqlj.add_type_mapping` -and `sqlj.drop_type_mapping` functions, and grant the privilege only to -selected users or roles. As of 1.5.0, these functions require the invoker -to be superuser or own the type being mapped. - -[CVE-2016-0766]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-0766 -[CVE-2016-0767]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-0767 -[CVE-2016-0768]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-0768 -[CVE-2016-2192]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-2192 - -$h3 Version compatibility - -PL/Java 1.5.0 can be built against recent PostgreSQL versions including 9.5, -using Java SE 8, 7, or 6. See [version compatibility][versions] for more -detail. OpenJDK is well supported. Support for GCJ has been dropped; features -of modern Java VMs that are useful to minimize footprint and startup time, -such as class-data sharing, are now more deeply covered -[in the installation docs][vmopts]. - -[versions]: build/versions.html -[vmopts]: install/vmoptions.html - -$h3 Build procedures - -Since 2013, PL/Java has been hosted [on GitHub][ghpljava] and built -using [Apache Maven][mvn]. See the new [build instructions][bld] for details. 
- -Reported build issues for specific platforms have been resolved, -with new platform-specific build documentation -for [OS X][osxbld], [Solaris][solbld], [Ubuntu][ububld], -[Windows MSVC][msvcbld], and [Windows MinGW-w64][mgwbld]. - -The build produces a redistributable installation archive usable with -the version of PostgreSQL built against and the same operating system, -architecture, and linker. The type of archive is `jar` on all platforms, as -all PL/Java installations will have Java available. - -[ghpljava]: https://github.com/tada/pljava -[mvn]: http://maven.apache.org/ -[bld]: build/build.html -[msvcbld]: build/buildmsvc.html -[mgwbld]: build/mingw64.html -[osxbld]: build/macosx.html -[solbld]: build/solaris.html -[ububld]: build/ubuntu.html - -$h3 Installation procedures - -The jar produced by the build is executable and will self-extract, -consulting `pg_config` on the destination system to find the correct -default locations for the extracted files. Any location can be overridden. -(Enhancement requests [6][gh6], [9][gh9]) - -PL/Java now uses a PostgreSQL configuration variable, `pljava.libjvm_location`, -to find the Java runtime to use, eliminating the past need for highly -platform-specific tricks like link-time options or runtime-loader configuration -just so that PL/Java could find Java. PostgreSQL configuration variables are -now the only form of configuration needed for PL/Java, and the `libjvm_location` -should be the only setting needed if file locations have not been overridden. - -In PostgreSQL 9.1 and later, PL/Java can be installed with -`CREATE EXTENSION pljava`. Regardless of PostgreSQL version, installation -has been simplified. Former procedures involving `Deployer` or `install.sql` -are no longer required. Details are in the [new installation instructions][ins]. - -$h4 Schema migration - -The tables used internally by PL/Java have changed. 
If PL/Java 1.5.0 is -loaded in a database with an existing `sqlj` schema populated by an earlier -PL/Java version (1.3.0 or later), the structure will be updated without data -loss (enhancement request [12][gh12]). *Remember that PL/Java runs independently -in each database session where it is in use. Older PL/Java versions active in -other sessions can be disrupted by the schema change.* - -A trial installation of PL/Java 1.5.0 can be done in a transaction, and -rolled back if desired, leaving the schema as it was. Any concurrent sessions -with active older PL/Java versions will not be disrupted by the altered schema -as long as the transaction remains open, *but they may block for the duration, -so such a test transaction should be kept short*. - -[ins]: install/install.html - -$h3 Changes - -$h4 Behavior of `readSQL` and `writeSQL` for base and mirror user-defined types - -In the course of fixing [issue #98][gh98], the actual behavior of -`readSQL` and `writeSQL` with base or mirror types, which had not -previously been documented, [now is](develop/coercion.html), along with -other details of PL/Java's type coercion rules found only in the code. -Because machine byte order was responsible for issue #98, it now (a) is -selectable, and (b) has different, appropriate, defaults for mirror UDTs -(which need to match PostgreSQL's order) and for base UDTs (which must -stay big-endian because of how binary `COPY` is specified). -A [new documentation section](use/byteorder.html) explains in detail. - -$h4 `USAGE` to `PUBLIC` no longer default for `java` language - -Of the two languages installed by PL/Java, functions that declare -`LANGUAGE javau` can be created only by superusers, while those that -declare `LANGUAGE java` can be created by any user or role granted the -`USAGE` privilege on the language. 
- -In the past, the language `java` has been created with PostgreSQL's -default permission granting `USAGE` to `PUBLIC`, but PL/Java 1.5.0 -leaves the permission to be explicitly granted to those users or roles -expected to create Java functions, in keeping with least-privilege -principles. See **large objects, access control** under **known issues** -for background. - -$h4 SQL generated by Java annotations - -Java code developed for use by PL/Java can carry in-code annotations, -used by the Java compiler to generate the SQL commands to declare the -new functions, types, triggers, etc. in PostgreSQL (enhancement request -[1011112][], though different in implementation). This eliminates the need -to have Java code and the corresponding SQL commands developed in parallel, -and the class of errors possible when both are not updated together. It -also allows compile-time checks that the Java methods or classes being -annotated are suitable (correct access modifiers, signatures, etc.) -for their declared SQL purposes, rather than discovering -such issues only upon loading the code into PostgreSQL and trying to use it. - -The Java compiler writes the generated SQL into a "deployment descriptor" -file (`pljava.ddr` by default), as specified by the SQL/JRT standard. The -file can be included in a `jar` archive with the compiled code, and the -commands will be executed by PL/Java when the `install_jar` function is -used to load the jar. - -SQL generation is covered in the [updated user documentation][user], -and illustrated in the [Hello, World example][hello] and -[several other supplied examples][exanno]. Reference information -is [in the API documentation][apianno]. It is currently usable to declare -functions, triggers, and user-defined types, both base and composite. 
- -[user]: use/use.html -[hello]: use/hello.html -[exanno]: $project.scm.url/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation -[apianno]: pljava-api/apidocs/index.html?org/postgresql/pljava/annotation/package-summary.html#package_description - -The history of this feature in PL/Java is long, with the first related commits -appearing in 2005, six years in advance of an enhancement request for it. -It became generally usable in 2013 when building with -Java SE 6 or later, using the annotation processing framework Java introduced -in that release. 1.5.0 is the first PL/Java numbered release to feature it. - -$h5 Annotation keyword changes - -If you have been using the SQL generation feature in prerelease `git` builds of -2013 or later, be aware that some annotation keywords have changed in finalizing -the 1.5.0 release. Java code that was compiled using the earlier keywords will -continue to work, but will have to be updated before it can be recompiled. - -* For functions: `effects=(VOLATILE,STABLE,IMMUTABLE)` was formerly `type=` -* For functions: `type=` (_an explicit SQL return type for the function_) - was formerly `complexType=` -* For functions: `trust=(SANDBOXED,UNSANDBOXED)` was formerly - `(RESTRICTED,UNRESTRICTED)` -* For triggers: `called=(BEFORE,AFTER,INSTEAD_OF)` was formerly `when=` - and conflicted with the `WHEN` clause introduced for triggers - in PostgreSQL 9.0. - -$h4 A jar may have more than one deployment descriptor - -PL/Java formerly allowed only one entry in a jar to be a deployment -descriptor (that is, a file of SQL commands to be executed upon loading -or unloading the jar). The SQL/JRT standard allows multiple entries to -be deployment descriptors, executed in the order they are mentioned -_in the jar manifest_, or the reverse of that order when the jar is -being unloaded. PL/Java now conforms to the standard. 
- -The behavior is useful during transition to annotation-driven deployment -descriptor generation for a project that already has a manually-maintained -deployment descriptor. PL/Java's own `pljava-examples` project is an -illustration, in the midst of such a transition itself. - -Note the significance placed by SQL/JRT on the order of entries in a jar -manifest, whose order is normally _not_ significant according to the Jar File -Specification. Care can be needed when manipulating manifests with automated -tools that may not preserve order. - -$h4 Conditional execution within deployment descriptors - -Deployment descriptors have a primitive conditional-execution provision -defined in the SQL/JRT standard: commands wrapped in a -`BEGIN IMPLEMENTOR ` _identifier_ construct will only be executed if the -_identifier_ is recognized by the SQL/JRT implementation in use. The design -makes possible jars that can be installed on different database systems that -provide SQL/JRT features, with database-specific commands wrapped in -`BEGIN IMPLEMENTOR` blocks with an _identifier_ specific to the system. -By default, PL/Java recognizes the _identifier_ `postgresql` (matched without -regard to case). - -PL/Java extends the standard by allowing the PostgreSQL configuration -variable `pljava.implementors` to contain a list of identifiers that will -be recognized. SQL code in a deployment descriptor can conditionally add -or remove identifiers in this list to influence which subsequent implementor -blocks will be executed, giving a still-primitive but more general control -structure. - -In sufficiently recent PostgreSQL versions, the same effect could be -achieved using `DO` statements and PL/pgSQL control structures, but this -facility in PL/Java does not require either to be available. - -$h4 Interaction with `SET ROLE` corrected - -PL/Java formerly was aware of the user ID associated with the running -session, but not any role ID that user may have acquired with `SET ROLE`. 
-The result would commonly be failed permission checks made by PL/Java when -the session user did not have the needed permission, but had `SET ROLE` to -a role that did. Likewise, within `install_jar`, PL/Java would execute -deployment descriptor commands as the original session user rather than -as the user's current role, with permission failures a likely result. - -Correcting this issue has changed the PL/Java API, but without a bump -of major version because the prior API, while deprecated, is still available. - -* [`getOuterUserName`][goun] and [`executeAsOuterUser`][eaou] are new, and - correctly refer to the session user or current role, when active. -* [`getSessionUserName`][gsun] and [`executeAsSessionUser`][easu] are still - present but deprecated, and _their semantics are changed_. They are now - deprecated aliases for the corresponding new methods, which honor the - set role. Use cases that genuinely need to refer only to the _session_ user - and ignore the role should be rare, and should be discussed on the mailing - list or opened as issues. - -#set($sessapi = 'pljava-api/apidocs/index.html?org/postgresql/pljava/Session.html#') - -[goun]: ${sessapi}getOuterUserName() -[eaou]: ${sessapi}executeAsOuterUser(java.sql.Connection,java.lang.String) -[gsun]: ${sessapi}getSessionUserName() -[easu]: ${sessapi}executeAsSessionUser(java.sql.Connection,java.lang.String) - -$h4 Unicode transparency - -Since the resolution of [bug 21][gh21], PL/Java contains a regression test -to ensure that character strings passed and returned between PostgreSQL and -Java will round-trip without alteration for the full range of Unicode -characters, _when the database encoding is set to `UTF8`_. - -More considerations apply when the database encoding is anything other -than `UTF8`, and especially when it is `SQL_ASCII`. Please see -[character encoding support][charsets] for more. 
- -[charsets]: use/charsets.html - -$h3 Enhancement requests addressed - -* [Use Annotations instead of DDL Manifest][1011112] -* [Installation of pljava on postgresql servers][gh9] -* [Find an alternative way to install the pljava.so in `/usr/lib`][gh6] -* [Provide database migration][gh12] -* [Support types with type modifiers][1011140] (partial: see [example][typmex]) -* [Build process: accommodate Solaris 10][gh102] - -[1011112]: ${pgffeat}1011112 -[1011140]: ${pgffeat}1011140 -[gh9]: ${ghbug}9 -[gh6]: ${ghbug}6 -[gh12]: ${ghbug}12 -[gh102]: ${ghbug}102 - -[typmex]: $project.scm.url/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/IntWithMod.java - -$h3 Bugs fixed - -$h4 Since 1.5.0-BETA3 - -* [Build process: accept variation in PostgreSQL version string][gh101] -* [Build process: accommodate PostgreSQL built with private libraries][gh103] -* Clarified message when `CREATE EXTENSION` fails because new session needed -* Reduced stack usage in SQL generator - (small-memory build no longer needs `-Xss`) - -$h4 In 1.5.0-BETA3 - -* [Bogus mirror-UDT values on little-endian hardware][gh98] -* [Base UDT not registered if first access isn't in/out/send/recv][gh99] -* `TupleDesc` leak warnings with composite UDTs -* also added regression test from [1010962][] report - -$h4 In 1.5.0-BETA2 - -* [Generate SQL for trigger function with no parameters][gh92] -* [openssl/ssl.h needed on osx el-capitan (latest 10.11.3)/postgres 9.5][gh94] - (documented) -* [Source location missing for some annotation errors][gh95] -* [OS X El Capitan "Java 6" dialog when loading ... 
Java 8][gh96] -* pljava-api jar missing from installation jar - -$h4 In 1.5.0-BETA1 - -* [SPIPreparedStatement.setObject() fails with Types.BIT][1011119] -* [SSLSocketFactory throws IOException on Linux][1011095] -* [PL/Java fails to compile with -Werror=format-security][1011181] -* [PL/Java does not build on POWER 7][1011197] -* [The built in functions do not use the correct error codes][1011206] -* [TupleDesc reference leak][1010962] -* [String conversion to enum fails][gh4] -* [segfault if SETOF RECORD-returning function used without AS at callsite][gh7] -* [pl/java PG9.3 Issue][gh17] -* [No-arg functions unusable: "To many parameters - expected 0"][gh8] -* [Exceptions in static initializers are masked][gh54] -* [UDT in varlena form breaks if length > 32767][gh52] -* [PL/Java kills unicode?][gh21] -* [Type.c expects pre-8.3 find_coercion_pathway behavior][gh65] -* [Support PostgreSQL 9.5][gh48] -* [pl/java getting a build on MacOSX - PostgreSQL 9.3.2][gh22] -* [build pljava on windows for PostgreSQL 9.2][gh23] -* [Error while installing PL/Java with Postgresql 9.3.4 64 bit on Windows 7 64 bit System][gh28] -* [pljava does not compile on mac osx ver 10.11.1 and postgres 9.4][gh63] -* [pljava does not compile on centos 6.5 and postgres 9.4][gh64] -* [Error installing pljava with Windows 7 64 Bit and Postgres 9.4][gh71] -## JNI_getIntArrayRegion instead of JNI_getShortArrayRegion -## Eclipse IDE artifacts -## Site -## Warnings -## Javadoc - -[1011119]: ${pgfbug}1011119 -[1011095]: ${pgfbug}1011095 -[1011181]: ${pgfbug}1011181 -[1011197]: ${pgfbug}1011197 -[1011206]: ${pgfbug}1011206 -[1010962]: ${pgfbug}1010962 -[gh4]: ${ghbug}4 -[gh7]: ${ghbug}7 -[gh8]: ${ghbug}8 -[gh17]: ${ghbug}17 -[gh54]: ${ghbug}54 -[gh52]: ${ghbug}52 -[gh21]: ${ghbug}21 -[gh65]: ${ghbug}65 -[gh48]: ${ghbug}48 -[gh22]: ${ghbug}22 -[gh23]: ${ghbug}23 -[gh28]: ${ghbug}28 -[gh63]: ${ghbug}63 -[gh64]: ${ghbug}64 -[gh71]: ${ghbug}71 -[gh92]: ${ghbug}92 -[gh94]: ${ghbug}94 -[gh95]: ${ghbug}95 
-[gh96]: ${ghbug}96 -[gh98]: ${ghbug}98 -[gh99]: ${ghbug}99 -[gh101]: ${ghbug}101 -[gh103]: ${ghbug}103 - -$h3 Updated PostgreSQL APIs tracked - -Several APIs within PostgreSQL itself have been added or changed; -PL/Java now uses the current versions of these where appropriate: - -* `find_coercion_pathway` -* `set_stack_base` -* `GetOuterUserId` -* `GetUserNameFromId` -* `GetUserIdAndSecContext` -* `pg_attribute_*` -* Large objects: truncate, and 64-bit offsets - -$h3 Known issues and areas for future work - -$h4 Developments in PostgreSQL not yet covered - -Large objects, access control -: PL/Java does not yet expose PostgreSQL large objects with a documented, - stable API, and the support it does contain was developed against pre-9.0 - PostgreSQL versions, where no access control applied to large objects and - any object could be accessed by any database user. PL/Java's behavior is - proper for PostgreSQL before 9.0, but improper on 9.0+ where it would be - expected to honor access controls on large objects ([CVE-2016-0768][]). - This will be corrected in a future release. For this and earlier releases, - the recommendation is to selectively grant `USAGE` on the `java` language to - specific users or roles responsible for creating Java functions; see - "default `USAGE` permssion" under Changes. - -`INSTEAD OF` triggers, triggers on `TRUNCATE` -: These are supported by annotations and the SQL generator, and the runtime - will deliver them to the specified method, but the `TriggerData` interface - has no new methods to recognize these cases (that is, no added - methods analogous to `isFiredAfter`, `isFiredByDelete`). For a method - expressly coded to be a `TRUNCATE` trigger or an `INSTEAD OF` trigger, - that is not a problem, but care should be taken when coding a trigger - method to handle more than one type of trigger, or creating triggers of - these new types that call a method developed pre-PL/Java-1.5.0. 
Such a - method could be called with a `TriggerData` argument whose existing - `isFired...` methods all return `false`, likely to put the method on an - unexpected code path. - - A later PL/Java version should introduce trigger interfaces that better - support such evolution of PostgreSQL in a type-safe way. - -Constraint triggers -: Constraint trigger syntax is not supported by annotations and the SQL - generator. If declared (using hand-written SQL), they will be delivered - by the runtime, but without any constraint-trigger-specific information - available to the called method. - -Event triggers -: Event triggers are not yet supported by annotations or the SQL generator, - and will not be delivered by the PL/Java runtime. - -Range types -: No predefined mappings for range types are provided. - -`PRE_PREPARE`, `PRE_COMMIT`, `PARALLEL_ABORT`, `PARALLEL_PRE_COMMIT`, and `PARALLEL_COMMIT` transaction callbacks, `PRE_COMMIT` subtransaction callbacks -: Listeners for these events cannot be registered and the events will not - be delivered. - -$h4 Imperfect integration with PostgreSQL dependency tracking - -In a dump/restore, manual intervention can be needed if the users/roles -recorded as owners of jars are missing or have been renamed. A current -[thread on `pgsql-hackers`][ownhack] should yield a better solution for -a future release. - -[ownhack]: http://www.postgresql.org/message-id/56783412.6090005@anastigmatix.net - -$h4 Quirk if deployment descriptor loads classes from same jar - -The `install_jar` function installs a jar, optionally reading deployment -descriptors from the jar and executing the install actions they contain. -It is possible for those actions to load classes from the jar just installed. -(This would be unlikely if the install actions are limited to typical setup, -function/operator/datatype creation, but likely, if the install actions also -include basic function tests, or if the setup requirements are more -interesting.) 
- -If, for any class in the jar, the first attempt to load that class is made -while resolving a function declared `STABLE` or `IMMUTABLE`, a -`ClassNotFoundException` results. The cause is PostgreSQL's normal treatment of -a `STABLE` or `IMMUTABLE` function, which relies on a snapshot from the start of -the `install_jar` query, when the jar was not yet installed. A workaround is to -ensure that the install actions cause each needed class to be loaded, such as -by calling a `VOLATILE` function it supplies, before calling one that is -`STABLE` or `IMMUTABLE`. (One could even write install actions to declare a -needed function `VOLATILE` before the first call and then redeclare it.) - -This issue should be resolved as part of a broader rework of class loading -in a future PL/Java release. - -$h4 Partial implementation of JDBC 4 and later - -The changes to make PL/Java build under Java SE 6 and later, with version 4.0 -and later of JDBC, involved providing the specified methods so -compilation would succeed, with real implementations for some, but for others -only stub versions that throw `SQLFeatureNotSupportedException` if used. -Regrettably, there is nothing in the documentation indicating which methods -have real implementations and which do not; to create such a list would require -an audit of that code. If a method throws the exception when you call it, it's -one of the unimplemented ones. - -Individual methods may be fleshed out with implementations as use cases arise -that demand them, but for a long-term roadmap, it seems more promising to -reduce the overhead of maintaining another JDBC implementation by sharing -code with `pgjdbc`, as has been [discussed on pljava-dev][jdbcinherit]. - -[jdbcinherit]: http://lists.pgfoundry.org/pipermail/pljava-dev/2015/002370.html - -$h4 Exception handling and logging - -PL/Java does interconvert between PostgreSQL and Java exceptions, but with -some loss of fidelity in the two directions. 
PL/Java code has some access -to most fields of a PostgreSQL error data structure, but only through -internal PL/Java API that is not expected to remain usable, and code written -for PL/Java has never quite had first-class standing in its ability to -_generate_ exceptions as information-rich as those from PostgreSQL itself. - -PL/Java in some cases generates the _categorized `SQLException`s_ introduced -with JDBC 4.0, and in other cases does not. - -This area may see considerable change in a future release. -[Thoughts on logging][tol] is a preview of some of the considerations. - -[tol]: https://github.com/tada/pljava/wiki/Thoughts-on-logging - -$h4 Types with type modifiers and `COPY` - -Although it is possible to create a PL/Java user-defined type that accepts -a type modifier (see the [example][typmex]), such a type will not yet be -handled by SQL `COPY` or any other operation that requires the `input` or -`receive` function to handle the modifier. This is left for a future release. - -$h3 Credits - -PL/Java 1.5.0 owes its being to original creator Thomas Hallgren and -many contributors: - -Daniel Blanch Bataller, -Peter Brewer, -Frank Broda, -Chapman Flack, -Marty Frasier, -Bear Giles, -Christian Hammers, -Hal Hildebrand, -Robert M. Lefkowitz, -Eugenie V. Lyzenko, -Dos Moonen, -Asif Naeem, -Kenneth Olson, -Johann Oskarsson, -Thomas G. Peters, -Srivatsan Ramanujam, -Igal Sapir, -Jeff Shaw, -Rakesh Vidyadharan, -`grunjol`, -`mc-soi`. - -Periods in PL/Java's development have been sponsored by EnterpriseDB. - -In the 1.5.0 release cycle, multiple iterations of testing effort -have been generously contributed by Kilobe Systems and by Pegasystems, Inc. - -## From this point on, the entries were reconstructed from old notes at the -## same time as the 1.5.0 notes were drafted, and they use a finer level of -## heading. So restore the 'real' values of the heading variables from here -## to the end of the file. 
-#set($h2 = '##') -#set($h3 = '###') -#set($h4 = '####') -#set($h5 = '#####') - -$h3 PL/Java 1.4.3 (15 September 2011) - -Notable changes in this release: - -* Works with PostgreSQL 9.1 -* Correctly links against IBM Java. -* Reads microseconds correctly in timestamps. - -Bugs fixed: - -* [Be clear about not building with JDK 1.6][1010660] -* [Does not link with IBM VM][1010970] -* [SPIConnection.getMetaData() is incorrectly documented][1010971] -* [PL/Java 1.4.2 Does not build with x86_64-w64-mingw32][1011025] -* [PL/Java does not build with PostgreSQL 9.1][1011091] - -Feature Requests: - -* [Allow pg_config to be set externally to the Makefile][1011092] -* [Add option to have pljava.so built with the runtime path of libjvm.so][1010955] - -[1010660]: ${pgfbug}1010660 -[1010970]: ${pgfbug}1010970 -[1010971]: ${pgfbug}1010971 -[1011025]: ${pgfbug}1011025 -[1011091]: ${pgfbug}1011091 - -[1011092]: ${pgffeat}1011092 -[1010955]: ${pgffeat}1010955 - -$h3 PL/Java 1.4.2 (11 December 2010) - -Bugfixes: - -* [Function returning complex objects with POD arrays cause a segfault][1010956] -* [Segfault when assigning an array to ResultSet column][1010953] -* [Embedded array support in returned complex objects][1010482] - -[1010956]: ${pgfbug}1010956 -[1010953]: ${pgfbug}1010953 -[1010482]: ${pgfbug}1010482 - -$h3 PL/Java 1.4.1 (9 December 2010) - -Note: Does not compile with Java 6. Use JDK 1.5 or 1.4. - -Compiles with PostgreSQL 8.4 and 9.0. - -Connection.getCatalog() has been implemented. 
- -Bugfixes: - -* [Compiling error with postgresql 8.4.1][1010759] -* [org.postgresql.pljava.internal.Portal leak][1010712] -* [build java code with debugging if server has debugging enabled][1010189] -* [Connection.getCatalog() returns null][1010653] -* [VM crash in TransactionListener][1010462] -* [Link against wrong library when compiling amd64 code on Solaris][1010954] - -[1010759]: ${pgfbug}1010759 -[1010712]: ${pgfbug}1010712 -[1010189]: ${pgfbug}1010189 -[1010653]: ${pgfbug}1010653 -[1010462]: ${pgfbug}1010462 -[1010954]: ${pgfbug}1010954 - -Other commits: - -For a multi-threaded pljava function we need to adjust stack_base_ptr -before calling into the backend to avoid stack depth limit exceeded -errors. Previously this was done only on query execution, but we need -to do it on iteration of the ResultSet as well. - -When creating a variable length data type, the code was directly -assigning the varlena header length rather than going through an -access macro. The header format changed for the 8.3 release and this -manual coding was not noticed and changed accordingly. Use -SET_VARSIZE to do this correctly. - -Handle passed by value data types by reading and writing directly to -the Datum rather than dereferencing it. - -If the call to a type output function is the first pljava call in a -session, we get a crash. The first pljava call results in a SPI -connection being established and torn down. The type output function -was allocating the result in the SPI memory context which gets -destroyed prior to returning the data to the caller. Allocate the -result in the correct context to survive function exit. - -Clean up a warning about byteasend and bytearecv not having a -prototype when building against 9.0 as those declarations are now in a -new header file. - - -$h3 PL/Java 1.4.0 (1 February 2008) - -Warning! The recent postgresql security releases changed the API of a function -that PL/Java uses. 
The source can be built against either version, but the -binaries will only run against the version they were built against. The PL/Java -binaries for 1.4.0 have all been built against the latest server releases (which -you should be using anyway). If you are using an older you will have to build -from source. The binary releases support: 8.3 - All versions. 8.2 - 8.2.6 and -up. 8.1 - 8.1.11 and up. 8.0 - 8.0.15 and up. - -$h3 PL/Java 1.3.0 (18 June 2006) - -This release is about type mapping and the creation of new types in PL/Java. An -extensive effort has gone into making the PL/Java type system extremely -flexible. Not only can you map arbitrary SQL data types to java classes. You can -also create new scalar types completely in Java. Read about the Changes in -version 1.3. - -$h4 Changes - -* A much improved type mapping system that will allow you to: - - * [Map any SQL type to a Java class][maptype] - * [Create a Scalar UDT in Java][scalarudt] - * [Map array and pseudo types][deftypemap] - -[maptype]: https://github.com/tada/pljava/wiki/Mapping-an-sql-type-to-a-java-class -[scalarudt]: https://github.com/tada/pljava/wiki/Creating-a-scalar-udt-in-java -[deftypemap]: https://github.com/tada/pljava/wiki/Default-type-mapping - -* Get the OID for a given relation ([feature request 1319][1319]) -* Jar manifest included in the SQLJ Jar repository - ([feature request 1525][1525]) - -$h4 Fixed bugs - -* [Reconnect needed for jar manipulation to take effect][1531] -* [Backends hang with test suite][1504] -* [Keeps crashing while making a call to a function][1560] -* [Memory Leak in Statement.executeUpdate][1556] -* [jarowner incorrect after dump and reload][1506] -* [Missing JAR manifest][1525] -* [TZ adjustments for Date are incorrect][1547] -* [Functions returning sets leaks memory][1542] -* [drop lib prefix][1423] -* ["oid" column is not available in trigger's NEW/OLD ResultSet][1317] -* [fails to run with GCJ, too][1480] -* [Compile failure with 8.1.4][1558] -* 
[fails to build with GCJ][1479] -* [Record returning function cannot be called with different structures within one session][1440] -* [Cannot map function with complex return type to method that uses non primitive arguments][1551] -* [Get OID for given relation][1319] - -[1531]: ${gborgbug}1531 -[1504]: ${gborgbug}1504 -[1560]: ${gborgbug}1560 -[1556]: ${gborgbug}1556 -[1506]: ${gborgbug}1506 -[1525]: ${gborgbug}1525 -[1547]: ${gborgbug}1547 -[1542]: ${gborgbug}1542 -[1423]: ${gborgbug}1423 -[1317]: ${gborgbug}1317 -[1480]: ${gborgbug}1480 -[1558]: ${gborgbug}1558 -[1479]: ${gborgbug}1479 -[1440]: ${gborgbug}1440 -[1551]: ${gborgbug}1551 -[1319]: ${gborgbug}1319 - -$h3 PL/Java 1.2.0 (20 Nov 2005) - -The PL/Java 1.2.0 release is primarily targeted at the new PostgreSQL 8.1 but -full support for 8.0.x is maintained. New features include support IN/OUT -parameters, improved meta-data handling, and better memory management. - -$h3 PL/Java 1.1.0 (14 Apr 2005) - -PL/Java 1.1.0 includes a lot of new features such as `DatabaseMetaData`, -`ResultSetMetaData`, language handlers for both trusted and untrusted language, -additional semantics for functions returning `SETOF`, and simple ObjectPooling. - -$h3 PL/Java 1.0.1 (07 Feb 2005) - -This release resolves a couple of important security issues. The most important -one is perhaps that PL/Java now is a trusted language. See [Security][] for more -info. Filip Hrbek, now member of the PL/Java project, contributed what was -needed to make this happen. - -[Security]: https://github.com/tada/pljava/wiki/Security - -$h3 PL/Java 1.0.0 (23 Jan 2005) - -Today, after a long period of fine tuning, PL/Java 1.0.0 was finally released. 
+$h2 [Releases prior to PL/Java 1.6.0](releasenotes-pre1_6.html) diff --git a/src/site/markdown/use/hello.md.vm b/src/site/markdown/use/hello.md.vm index cc606f2f8..e37ae7b00 100644 --- a/src/site/markdown/use/hello.md.vm +++ b/src/site/markdown/use/hello.md.vm @@ -294,7 +294,7 @@ The [@Function annotation][funcanno] declares that the `hello` function should be available from SQL, so a `pljava.ddr` file will be added to the jar, containing the SQL commands to make that happen. -[funcanno]: ../pljava-api/apidocs/index.html?org/postgresql/pljava/annotation/Function.html +[funcanno]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Function.html One more try with `mvn clean package` and there it is: diff --git a/src/site/markdown/use/parallel.md b/src/site/markdown/use/parallel.md index 083228bb2..861a621b4 100644 --- a/src/site/markdown/use/parallel.md +++ b/src/site/markdown/use/parallel.md @@ -26,7 +26,7 @@ of the query may execute in parallel, but the part that calls the `RESTRICTED` function will be executed only in the lead process. A function labeled `SAFE` may be executed in every process participating in the query. -[paranno]: ../pljava-api/apidocs/index.html?org/postgresql/pljava/annotation/Function.html#parallel() +[paranno]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Function.html#parallel ### Parallel setup cost diff --git a/src/site/markdown/use/policy.md b/src/site/markdown/use/policy.md index 02cdf8b08..8ce129486 100644 --- a/src/site/markdown/use/policy.md +++ b/src/site/markdown/use/policy.md @@ -318,8 +318,21 @@ such as `java.version` or `org.postgresql.pljava.version`._ ## Troubleshooting -When in doubt what permissions are needed to get some existing PL/Java code -working again, it may be helpful to add `-Djava.security.debug=access` in +When in doubt what permissions may need to be granted in `pljava.policy` to run +some existing PL/Java code, these techniques may be helpful. 
+ +### Running PL/Java with a 'trial' policy + +To simplify the job of finding the permissions needed by some existing code, +it is possible to run PL/Java at first with a 'trial' policy, allowing code to +run while logging permissions that `pljava.policy` has not granted. The log +entries have a condensed format meant to be convenient for this use. +Trial policy configuration is described [here][trial]. + +### Using policy debug features provided by Java + +Java itself offers a number of debugging switches to reveal details of +permission decisions. It may be useful to add `-Djava.security.debug=access` in the setting of `pljava.vmoptions`, and observe the messages on the PostgreSQL backend's standard error (which should be included in the log file, if `logging_collector` is `on`). It is not necessary to change the @@ -330,6 +343,9 @@ Other options for `java.security.debug` can be found in [Troubleshooting Security][tssec]. Some can be used to filter the logging down to requests for specific permissions or from a specific codebase. +The log output produced by Java's debug options can be voluminous compared to +the condensed output of PL/Java's trial policy. + ## Forward compatibility The current implementation makes use of the Java classes @@ -344,3 +360,4 @@ release, so relying on it is not recommended. [dopriv]: https://docs.oracle.com/en/java/javase/14/security/java-se-platform-security-architecture.html#GUID-E8898CB5-65BB-4D1A-A574-8F7112FC353F [sqljajl]: ../pljava/apidocs/org.postgresql.pljava.internal/org/postgresql/pljava/management/Commands.html#alias_java_language [tssec]: https://docs.oracle.com/en/java/javase/14/security/troubleshooting-security.html +[trial]: trial.html diff --git a/src/site/markdown/use/sqlxml.md b/src/site/markdown/use/sqlxml.md index acf925f4a..c766cfd6d 100644 --- a/src/site/markdown/use/sqlxml.md +++ b/src/site/markdown/use/sqlxml.md @@ -468,7 +468,7 @@ Java's extensive support for XML. 
[OWASP]: https://www.owasp.org/index.php/About_The_Open_Web_Application_Security_Project [cheat]: https://cheatsheetseries.owasp.org/cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#java -[adjx]: ../pljava-api/apidocs/index.html?org/postgresql/pljava/Adjusting.XML.html +[adjx]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/Adjusting.XML.html [jaxps]: https://docs.oracle.com/en/java/javase/13/security/java-api-xml-processing-jaxp-security-guide.html [catapi]: https://docs.oracle.com/javase/9/core/xml-catalog-api1.htm#JSCOR-GUID-51446739-F878-4B70-A36F-47FBBE12A26A diff --git a/src/site/markdown/use/trial.md b/src/site/markdown/use/trial.md new file mode 100644 index 000000000..79d4bffda --- /dev/null +++ b/src/site/markdown/use/trial.md @@ -0,0 +1,186 @@ +# Migrating to policy-based permissions from an earlier PL/Java release + +When migrating existing code from a PL/Java 1.5 or earlier release to 1.6, +it may be necessary to add permission grants in the new `pljava.policy` file, +which grants few permissions by default. PL/Java's security policy configuration +is described [here][policy]. + +To simplify migration, it is possible to run with a 'trial' policy initially, +allowing code to run but logging permissions that may need to be added in +`pljava.policy`. + +## Configuring a trial policy + +Even when running with a trial policy, the [configuration variable][vbls] +`pljava.policy_urls` should point to the normal policy file(s), as usual. +That is where the ultimate policy for production will be developed. + +The trial policy is configured by creating another policy file somewhere, using +the same policy file syntax, and pointing to it with +`-Dorg.postgresql.pljava.policy.trial=`_url_ added to the configuration variable +`pljava.vmoptions`. + +Anything _this_ policy allows will be allowed, but will be logged if the regular +policy would have denied it. 
So you can make this one more generous than the +regular policy, and use the log entries to identify grants that might belong in +the regular policy. As you add the missing ones to the real policy, they stop +getting logged by this one, and the log gets quieter. You can make this one as +generous as you are comfortable making it during the period of testing and +tuning. + +At the very extreme of generosity it could be this: + +``` +grant { + permission java.security.AllPermission; +}; +``` + +and it would happily allow the code under test to do _anything at all_, while +logging whatever permissions aren't in the regular policy. (A side effect of +this would be to erase any distinction between `java` and `javaU` for as long as +the trial policy is in place.) Such a setting would be difficult to recommend in +general, but it might suffice if the only code being tested has already been in +use for years under PL/Java 1.5 and is well trusted, users of the database have +not been granted permission to install more PL/Java functions, and if +the purpose of testing is only to learn what permissions the code uses that +may need to be granted in the 1.6 policy. + +### Granting `TrialPolicy$Permission` + +When `AllPermission` is too broad, there is the difficulty that Java's +permission model does not have a subtractive mode; it is not simple to say +"grant `AllPermission` except for this list of the ones I'd really rather not." +Therefore, PL/Java offers a custom "meta-permission" with roughly that meaning: + +``` +grant { + permission org.postgresql.pljava.policy.TrialPolicy$Permission; +}; +``` + +`TrialPolicy$Permission` is effectively `AllPermission` but excluding any +`FilePermission` (so that `java`/`javaU` distinction stays meaningful) as well +as a couple dozen other various +`SecurityPermission`/`ReflectPermission`/`RuntimePermission` instances in the +"really rather not" category. 
If its hard-coded exclusion list excludes
+any permissions that some unusual code under test might legitimately need,
+those can be explicitly added to the trial policy too.
+
+Configuring a trial policy can be a bit of a balancing act: if it is very
+generous, that minimizes the chance of breaking the code under test because of
+a denied permission, but increases potential exposure if that code misbehaves.
+A more limited trial policy decreases exposure but increases the risk of
+service interruption if the code under test really does need some permission
+that you weren't comfortable putting in the trial policy. Somewhere near
+the sweet spot is where `TrialPolicy$Permission` is aimed.
+
+All other normal policy features also work in the trial policy. If your
+code is installed in several different jars, you can use `grant codebase`
+separately to put different outer limits around different jars, and completely
+remove the grants for one jar after another as you are satisfied you have added
+the right things for each one in the regular policy. You could also set
+different limits for `java` and `javaU` by granting to the `PLPrincipal`,
+just as you can in the regular policy.
+
+## About false positives
+
+One thing to be aware of is that the trial policy can give false alarms. It is
+not uncommon for software to include configuration-dependent bits that
+tentatively try certain actions, catch exceptions, and then proceed normally,
+having discovered what the configuration allows. The trial policy can log
+permission denials that happen in the course of such checks, even if the denial
+has no functional impact on the code.
+
+There may be no perfect way to tell which denials being logged by the trial
+policy are false alarms. 
One approach would be to collect a sampling of log
+entries, figure out what user-visible functions of the code they were coming
+from, and then start a dedicated session without the
+`-Dorg.postgresql.pljava.policy.trial` setting (or with it pointing to a
+different, more restrictive version of the policy, not granting the permissions
+you're curious about), then exercise those functions of the code and see if
+anything breaks. Other users could still have the more generous trial setting in
+their sessions, so as not to be affected by your experiments.
+
+False positives, of course, are also affected by the choice of how generous to
+make the trial policy. Log entries are only produced for permissions that the
+regular policy denies but the trial policy allows. If the permissions being
+silently checked by benign code are not granted in the trial policy, they will
+be silently denied, just as they would in normal operation, and produce no
+log entries.
+
+## Format of the log entries
+
+To avoid bloating logs too much, `TrialPolicy` emits an abbreviated form of
+stack trace for each entry. The approach is to keep one stack frame above and
+one below each crossing of a module or protection-domain boundary, with `...`
+replacing intermediate frames within the same module/domain, and the code
+source/principals of the denied domain shown wrapped in `>> <<` at
+the appropriate position in the trace. For the purpose of identifying the
+source of a permission request and the appropriate domain(s) to be granted
+the permission, this is probably more usable than the very long full traces
+available with `java.security.debug`.
+
+The messages are sent through the PostgreSQL log if the thread making the
+permission check knows it can do so without blocking; otherwise they just go to
+standard error, which should wind up in the PostgreSQL log anyway, if
+`logging_collector` is on; otherwise it may be system-dependent where they go. 
+
+There isn't really a reliable "can I do so without blocking?" check for every
+setting of the `pljava.java_thread_pg_entry` configuration variable.
+If it is set to `throw` (and that is a workable setting for the code under
+test), the logging behavior will be more predictable; entries from the main
+thread will go through PostgreSQL's log facility always, and those from any
+other thread will go to standard error.
+
+Here is an example of two log entries, generated by the same permission check:
+
+```
+POLICY DENIES/TRIAL POLICY ALLOWS: ("java.net.SocketPermission" "127.0.0.1:5432" "connect,resolve")
+java.base/java.security.ProtectionDomain.implies(ProtectionDomain.java:321)
+...
+java.base/java.net.Socket.<init>(Socket.java:294)
+>> null [PLPrincipal.Sandboxed: java] <<
+jdk.translet/die.verwandlung.GregorSamsa.template$dot$0()
+...
+jdk.translet/die.verwandlung.GregorSamsa.transform()
+java.xml/com.sun.org.apache.xalan.internal.xsltc.runtime.AbstractTranslet.transform(AbstractTranslet.java:624)
+...
+java.xml/com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transform(TransformerImpl.java:383)
+org.postgresql.pljava.example.annotation.PassXML.transformXML(PassXML.java:561)
+
+POLICY DENIES/TRIAL POLICY ALLOWS: ("java.net.SocketPermission" "127.0.0.1:5432" "connect,resolve")
+java.base/java.security.ProtectionDomain.implies(ProtectionDomain.java:321)
+...
+java.base/java.net.Socket.<init>(Socket.java:294)
+jdk.translet/die.verwandlung.GregorSamsa.template$dot$0()
+...
+jdk.translet/die.verwandlung.GregorSamsa.transform()
+java.xml/com.sun.org.apache.xalan.internal.xsltc.runtime.AbstractTranslet.transform(AbstractTranslet.java:624)
+... 
+java.xml/com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transform(TransformerImpl.java:383) +>> sqlj:examples [PLPrincipal.Sandboxed: java] << +org.postgresql.pljava.example.annotation.PassXML.transformXML(PassXML.java:561) +``` + +The example shows the use of an XSLT 1.0 transform that appears to +make use of the Java XSLT ability to call out to arbitrary Java, and is trying +to make a network connection back to PostgreSQL on `localhost`. Java's XSLTC +implementation compiles the transform to a class in `jdk.translet` with null +as its codebase, and the first log entry shows permission is denied at that +level (the protection domain shown as +`>> null [PLPrincipal.Sandboxed: java] <<`). + +A second log entry results because `TrialPolicy` turns the first failure to +success, allowing the permission check to continue, and it next fails at +the PL/Java function being called, in the `sqlj:examples` jar. Under the trial +policy, that also is logged and then allowed to succeed. + +The simplest way to allow this connection in the production policy would be +to grant the needed `java.net.SocketPermission` to `PLPrincipal$Sandboxed`, +as that is present in both denied domains. It would be possible to grant +the permission by codebase to `sqlj:examples` instead, but not to +the nameless codebase of the compiled XSLT transform. + +[policy]: policy.html +[vbls]: variables.html diff --git a/src/site/markdown/use/use.md b/src/site/markdown/use/use.md index 5ed1cec51..27b16e47a 100644 --- a/src/site/markdown/use/use.md +++ b/src/site/markdown/use/use.md @@ -42,6 +42,15 @@ The permissions in effect for PL/Java functions can be tailored, independently for functions declared to the `TRUSTED` or untrusted language, as described [here](policy.html). 
+#### Tailoring permissions for code migrated from PL/Java pre-1.6 + +When migrating existing code from a PL/Java 1.5 or earlier release to 1.6, +it may be necessary to add permission grants in the new `pljava.policy` file, +which grants few permissions by default. To simplify migration, it is possible +to run with a 'trial' policy initially, allowing code to run but logging +permissions that may need to be added in `pljava.policy`. How to do that is +described [here](trial.html). + ### Choices when mapping data types #### Date and time types