diff --git a/.github/workflows/build-ci.yml b/.github/workflows/build-ci.yml index e02b88f6348..3c250fcfc54 100644 --- a/.github/workflows/build-ci.yml +++ b/.github/workflows/build-ci.yml @@ -17,14 +17,14 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' @@ -153,7 +153,7 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' diff --git a/.github/workflows/check-ci.yml b/.github/workflows/check-ci.yml index a80f9153b61..53340a370b9 100644 --- a/.github/workflows/check-ci.yml +++ b/.github/workflows/check-ci.yml @@ -21,14 +21,14 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' diff --git a/.github/workflows/docs-ci-v2.yml b/.github/workflows/docs-ci-v2.yml new file mode 100644 index 00000000000..b6be35dd623 --- /dev/null +++ b/.github/workflows/docs-ci-v2.yml @@ -0,0 +1,398 @@ +name: Docs CI V2 + +on: + pull_request: + branches: [ main ] + push: + branches: [ 'main', 'release/v*' ] + +jobs: + makedirs: + runs-on: ubuntu-22.04 + steps: + - name: Make Directories + run: | + mkdir -p tmp-deephaven-core-v2/${{ github.ref_name }}/ + cd tmp-deephaven-core-v2/${{ github.ref_name }}/ + mkdir -p javadoc pydoc client-api + cd client-api + mkdir -p javascript python cpp-examples cpp r + + - name: Deploy Directories + if: ${{ github.event_name == 'push' }} + uses: burnett01/rsync-deployments@5.2 + with: + switches: -rlptDvz + path: tmp-deephaven-core-v2/ + remote_path: deephaven-core-v2/ + remote_host: ${{ secrets.DOCS_HOST }} + remote_port: ${{ secrets.DOCS_PORT }} + remote_user: ${{ secrets.DOCS_USER }} + remote_key: ${{ secrets.DEEPHAVEN_CORE_SSH_KEY }} + symlink: + if: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/heads/release/v') }} + needs: [javadoc, typedoc, pydoc, cppdoc, rdoc] + runs-on: ubuntu-22.04 + steps: + - name: Make Symlinks + run: | + mkdir -p tmp-deephaven-core-v2/symlinks + cd tmp-deephaven-core-v2/symlinks + ln -s ../${{ github.ref_name }} latest + ln -s ../main next + + - name: Deploy Symlinks + uses: burnett01/rsync-deployments@5.2 + with: + switches: -rlptDvz + path: tmp-deephaven-core-v2/ + remote_path: deephaven-core-v2/ + remote_host: ${{ secrets.DOCS_HOST }} + remote_port: ${{ secrets.DOCS_PORT }} + remote_user: ${{ secrets.DOCS_USER }} + remote_key: ${{ secrets.DEEPHAVEN_CORE_SSH_KEY }} + javadoc: + needs: [makedirs] + runs-on: ubuntu-22.04 + concurrency: + group: javadoc-${{ github.workflow }}-${{ github.ref }} + # We don't want to cancel in-progress jobs against main because that might leave the upload in a bad state. 
+ cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup JDK 11 + id: setup-java-11 + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: '11' + + - name: Setup JDK 17 + id: setup-java-17 + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: '17' + + - name: Set JAVA_HOME + run: echo "JAVA_HOME=${{ steps.setup-java-11.outputs.path }}" >> $GITHUB_ENV + + - name: Setup gradle properties + run: | + .github/scripts/gradle-properties.sh >> gradle.properties + cat gradle.properties + + - name: All Javadoc + uses: burrunan/gradle-cache-action@v1 + with: + job-id: allJavadoc + arguments: --scan outputVersion combined-javadoc:allJavadoc + gradle-version: wrapper + + - name: Get Deephaven Version + id: dhc-version + run: echo "version=$(cat build/version)" >> $GITHUB_OUTPUT + + - name: Upload Javadocs + if: ${{ github.event_name == 'push' }} + uses: actions/upload-artifact@v3 + with: + name: javadocs-${{ steps.dhc-version.outputs.version }} + path: 'combined-javadoc/build/docs/javadoc/' + + - name: Deploy Javadoc + if: ${{ github.event_name == 'push' }} + uses: burnett01/rsync-deployments@5.2 + with: + switches: -rlptDvz --delete + path: combined-javadoc/build/docs/javadoc/ + remote_path: deephaven-core-v2/${{ github.ref_name }}/javadoc/ + remote_host: ${{ secrets.DOCS_HOST }} + remote_port: ${{ secrets.DOCS_PORT }} + remote_user: ${{ secrets.DOCS_USER }} + remote_key: ${{ secrets.DEEPHAVEN_CORE_SSH_KEY }} + + typedoc: + needs: [makedirs] + runs-on: ubuntu-22.04 + concurrency: + group: typedoc-${{ github.workflow }}-${{ github.ref }} + # We don't want to cancel in-progress jobs against main because that might leave the upload in a bad state. + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup JDK 11 + id: setup-java-11 + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: '11' + + - name: Setup JDK 17 + id: setup-java-17 + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: '17' + + - name: Set JAVA_HOME + run: echo "JAVA_HOME=${{ steps.setup-java-11.outputs.path }}" >> $GITHUB_ENV + + - name: Run typedoc on JS API + uses: burrunan/gradle-cache-action@v1 + with: + job-id: typedoc + arguments: --scan outputVersion :web-client-api:types:typedoc + gradle-version: wrapper + + - name: Get Deephaven Version + id: dhc-version + run: echo "version=$(cat build/version)" >> $GITHUB_OUTPUT + + - name: Upload JavaScript/TypeScript docs + if: ${{ github.event_name == 'push' }} + uses: actions/upload-artifact@v3 + with: + name: typedoc-${{ steps.dhc-version.outputs.version }} + path: 'web/client-api/types/build/documentation/' + + - name: Deploy JavaScript/TypeScript docs + if: ${{ github.event_name == 'push' }} + uses: burnett01/rsync-deployments@5.2 + with: + switches: -rlptDvz --delete + path: web/client-api/types/build/documentation/ + remote_path: deephaven-core-v2/${{ github.ref_name }}/client-api/javascript/ + remote_host: ${{ secrets.DOCS_HOST }} + remote_port: ${{ secrets.DOCS_PORT }} + remote_user: ${{ secrets.DOCS_USER }} + remote_key: ${{ secrets.DEEPHAVEN_CORE_SSH_KEY }} + + pydoc: + needs: [makedirs] + runs-on: ubuntu-22.04 + concurrency: + group: pydoc-${{ github.workflow }}-${{ github.ref }} + # We don't want to cancel in-progress jobs against main because that might leave the upload in a bad state. 
+ cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup JDK 11 + id: setup-java-11 + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: '11' + + - name: Setup JDK 17 + id: setup-java-17 + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: '17' + + - name: Set JAVA_HOME + run: echo "JAVA_HOME=${{ steps.setup-java-11.outputs.path }}" >> $GITHUB_ENV + + - name: Setup gradle properties + run: | + .github/scripts/gradle-properties.sh >> gradle.properties + cat gradle.properties + + - name: Generate Python Docs + uses: burrunan/gradle-cache-action@v1 + with: + job-id: pythonDocs + arguments: --scan outputVersion sphinx:pythonDocs sphinx:pydeephavenDocs + gradle-version: wrapper + + - name: Get Deephaven Version + id: dhc-version + run: echo "version=$(cat build/version)" >> $GITHUB_OUTPUT + + - name: Upload Python Server Docs + if: ${{ github.event_name == 'push' }} + uses: actions/upload-artifact@v3 + with: + name: pyserver-docs-${{ steps.dhc-version.outputs.version }} + path: 'sphinx/build/docs/' + + - name: Upload Python Client Docs + if: ${{ github.event_name == 'push' }} + uses: actions/upload-artifact@v3 + with: + name: pyclient-docs-${{ steps.dhc-version.outputs.version }} + path: 'sphinx/build/pyclient-docs/' + + - name: Deploy Python Docs + if: ${{ github.event_name == 'push' }} + uses: burnett01/rsync-deployments@5.2 + with: + switches: -rlptDvz --delete + path: sphinx/build/docs/ + remote_path: deephaven-core-v2/${{ github.ref_name }}/pydoc/ + remote_host: ${{ secrets.DOCS_HOST }} + remote_port: ${{ secrets.DOCS_PORT }} + remote_user: ${{ secrets.DOCS_USER }} + remote_key: ${{ secrets.DEEPHAVEN_CORE_SSH_KEY }} + + - name: Deploy Client Python Docs + if: ${{ github.event_name == 'push' }} + uses: burnett01/rsync-deployments@5.2 + with: + switches: -rlptDvz --delete + path: sphinx/build/pyclient-docs/ + remote_path: deephaven-core-v2/${{ github.ref_name }}/client-api/python/ + remote_host: ${{ secrets.DOCS_HOST }} + remote_port: ${{ secrets.DOCS_PORT }} + remote_user: ${{ secrets.DOCS_USER }} + remote_key: ${{ secrets.DEEPHAVEN_CORE_SSH_KEY }} + + - name: Upload JVM Error Logs + uses: actions/upload-artifact@v3 + if: failure() + with: + name: docs-ci-pydoc-jvm-err + path: '**/*_pid*.log' + if-no-files-found: ignore + + cppdoc: + needs: [makedirs] + runs-on: ubuntu-22.04 + concurrency: + group: cppdoc-${{ github.workflow }}-${{ github.ref }} + # We don't want to cancel in-progress jobs against main because that might leave the upload in a bad state. 
+ cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup JDK 11 + id: setup-java-11 + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: '11' + + - name: Set JAVA_HOME + run: echo "JAVA_HOME=${{ steps.setup-java-11.outputs.path }}" >> $GITHUB_ENV + + - name: Setup gradle properties + run: | + .github/scripts/gradle-properties.sh >> gradle.properties + cat gradle.properties + + - name: Generate C++ Docs + uses: burrunan/gradle-cache-action@v1 + with: + job-id: cppDocs + arguments: --scan outputVersion sphinx:cppClientDocs sphinx:cppExamplesDocs + gradle-version: wrapper + + - name: Get Deephaven Version + id: dhc-version + run: echo "version=$(cat build/version)" >> $GITHUB_OUTPUT + + - name: Upload Client C++ Docs + if: ${{ github.event_name == 'push' }} + uses: actions/upload-artifact@v3 + with: + name: cppclient-docs-${{ steps.dhc-version.outputs.version }} + path: 'sphinx/build/cppClientDocs/' + + - name: Upload Client C++ Example Docs + if: ${{ github.event_name == 'push' }} + uses: actions/upload-artifact@v3 + with: + name: cppclient-examples-${{ steps.dhc-version.outputs.version }} + path: 'sphinx/build/cppExamplesDocs/' + + - name: Deploy Client C++ Docs + if: ${{ github.event_name == 'push' }} + uses: burnett01/rsync-deployments@5.2 + with: + switches: -rlptDvz --delete + path: sphinx/build/cppClientDocs/ + remote_path: deephaven-core-v2/${{ github.ref_name }}/client-api/cpp/ + remote_host: ${{ secrets.DOCS_HOST }} + remote_port: ${{ secrets.DOCS_PORT }} + remote_user: ${{ secrets.DOCS_USER }} + remote_key: ${{ secrets.DEEPHAVEN_CORE_SSH_KEY }} + + - name: Deploy Client C++ Example Docs + if: ${{ github.event_name == 'push' }} + uses: burnett01/rsync-deployments@5.2 + with: + switches: -rlptDvz --delete + path: sphinx/build/cppExamplesDocs/ + remote_path: deephaven-core-v2/${{ github.ref_name }}/client-api/cpp-examples/ + remote_host: ${{ secrets.DOCS_HOST }} + remote_port: ${{ secrets.DOCS_PORT }} + remote_user: ${{ secrets.DOCS_USER }} + remote_key: ${{ secrets.DEEPHAVEN_CORE_SSH_KEY }} + + rdoc: + needs: [makedirs] + runs-on: ubuntu-22.04 + concurrency: + group: rdoc-${{ github.workflow }}-${{ github.ref }} + # We don't want to cancel in-progress jobs against main because that might leave the upload in a bad state. 
+ cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup JDK 11 + id: setup-java-11 + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: '11' + + - name: Set JAVA_HOME + run: echo "JAVA_HOME=${{ steps.setup-java-11.outputs.path }}" >> $GITHUB_ENV + + - name: Setup gradle properties + run: | + .github/scripts/gradle-properties.sh >> gradle.properties + cat gradle.properties + + - name: Generate R Docs + uses: burrunan/gradle-cache-action@v1 + with: + job-id: rDocs + arguments: --scan outputVersion R:rClientSite + gradle-version: wrapper + + - name: Get Deephaven Version + id: dhc-version + run: echo "version=$(cat build/version)" >> $GITHUB_OUTPUT + + - name: Upload R Docs + if: ${{ github.event_name == 'push' }} + uses: actions/upload-artifact@v3 + with: + name: rdoc-${{ steps.dhc-version.outputs.version }} + path: 'R/rdeephaven/docs/' + + - name: Deploy R Docs + if: ${{ github.event_name == 'push' }} + uses: burnett01/rsync-deployments@5.2 + with: + switches: -rlptDvz --delete + path: R/rdeephaven/docs/ + remote_path: deephaven-core-v2/${{ github.ref_name }}/client-api/r/ + remote_host: ${{ secrets.DOCS_HOST }} + remote_port: ${{ secrets.DOCS_PORT }} + remote_user: ${{ secrets.DOCS_USER }} + remote_key: ${{ secrets.DEEPHAVEN_CORE_SSH_KEY }} + + - name: Upload JVM Error Logs + uses: actions/upload-artifact@v3 + if: failure() diff --git a/.github/workflows/docs-ci.yml b/.github/workflows/docs-ci.yml index 3b8bd964585..f69a984eabd 100644 --- a/.github/workflows/docs-ci.yml +++ b/.github/workflows/docs-ci.yml @@ -19,14 +19,14 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' @@ -81,14 +81,14 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' @@ -138,14 +138,14 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' @@ -227,7 +227,7 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' @@ -301,7 +301,7 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' diff --git a/.github/workflows/nightly-check-ci.yml b/.github/workflows/nightly-check-ci.yml index 5f501789109..81c62c1a120 100644 --- a/.github/workflows/nightly-check-ci.yml +++ b/.github/workflows/nightly-check-ci.yml @@ -29,21 +29,21 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' - name: Setup JDK 21 id: setup-java-21 - uses: 
actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '21' diff --git a/.github/workflows/nightly-image-check.yml b/.github/workflows/nightly-image-check.yml index 5b97bda947c..de160aa3593 100644 --- a/.github/workflows/nightly-image-check.yml +++ b/.github/workflows/nightly-image-check.yml @@ -15,7 +15,7 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' diff --git a/.github/workflows/publish-ci.yml b/.github/workflows/publish-ci.yml index 2f94fd7eb57..14314448a31 100644 --- a/.github/workflows/publish-ci.yml +++ b/.github/workflows/publish-ci.yml @@ -20,14 +20,14 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' diff --git a/.github/workflows/quick-ci.yml b/.github/workflows/quick-ci.yml index 7bea9bbbb1c..f482528bc8e 100644 --- a/.github/workflows/quick-ci.yml +++ b/.github/workflows/quick-ci.yml @@ -22,7 +22,7 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' diff --git a/.github/workflows/tag-base-images.yml b/.github/workflows/tag-base-images.yml index 3a9dbb91b21..3ce799ed989 100644 --- a/.github/workflows/tag-base-images.yml +++ b/.github/workflows/tag-base-images.yml @@ -17,14 +17,14 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' diff --git a/.github/workflows/update-web.yml b/.github/workflows/update-web.yml index 4f1db596353..40d5da41804 100644 --- a/.github/workflows/update-web.yml +++ b/.github/workflows/update-web.yml @@ -20,15 +20,18 @@ jobs: echo "WEB_VERSION=$(npm info @deephaven/code-studio@latest version)" >> $GITHUB_OUTPUT echo "GRID_VERSION=$(npm info @deephaven/embed-grid@latest version)" >> $GITHUB_OUTPUT echo "CHART_VERSION=$(npm info @deephaven/embed-chart@latest version)" >> $GITHUB_OUTPUT + echo "WIDGET_VERSION=$(npm info @deephaven/embed-widget@latest version)" >> $GITHUB_OUTPUT - name: Update deephaven-core env: WEB_VERSION: ${{steps.web_versions.outputs.WEB_VERSION}} GRID_VERSION: ${{steps.web_versions.outputs.GRID_VERSION}} CHART_VERSION: ${{steps.web_versions.outputs.CHART_VERSION}} + WIDGET_VERSION: ${{steps.web_versions.outputs.WIDGET_VERSION}} run: | sed -i "s/^ARG WEB_VERSION=.*/ARG WEB_VERSION=$WEB_VERSION/" ./web/client-ui/Dockerfile sed -i "s/^ARG GRID_VERSION=.*/ARG GRID_VERSION=$GRID_VERSION/" ./web/client-ui/Dockerfile sed -i "s/^ARG CHART_VERSION=.*/ARG CHART_VERSION=$CHART_VERSION/" ./web/client-ui/Dockerfile + sed -i "s/^ARG WIDGET_VERSION=.*/ARG WIDGET_VERSION=$WIDGET_VERSION/" ./web/client-ui/Dockerfile - name: Create Pull Request uses: peter-evans/create-pull-request@v5 env: diff --git a/Base/src/main/java/io/deephaven/base/stats/ItemUpdateListener.java b/Base/src/main/java/io/deephaven/base/stats/ItemUpdateListener.java index 4d092026ba6..cc9007f5e72 100644 --- a/Base/src/main/java/io/deephaven/base/stats/ItemUpdateListener.java +++
b/Base/src/main/java/io/deephaven/base/stats/ItemUpdateListener.java @@ -4,13 +4,10 @@ package io.deephaven.base.stats; public interface ItemUpdateListener { - public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, - String intervalName); - public static final ItemUpdateListener NULL = new ItemUpdateListener() { - public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, - String intervalName) { - // empty - } + void handleItemUpdated( + Item item, long now, long appNow, int intervalIndex, long intervalMillis, String intervalName); + + ItemUpdateListener NULL = (item, now, appNow, intervalIndex, intervalMillis, intervalName) -> { }; } diff --git a/Base/src/test/java/io/deephaven/base/stats/HistogramPower2Test.java b/Base/src/test/java/io/deephaven/base/stats/HistogramPower2Test.java index 6958e16994b..bd24cf5c052 100644 --- a/Base/src/test/java/io/deephaven/base/stats/HistogramPower2Test.java +++ b/Base/src/test/java/io/deephaven/base/stats/HistogramPower2Test.java @@ -33,7 +33,7 @@ public void testSample() throws Exception { // should have a count of 1 in bin[1]..bin[63]; bin[0]=2 Stats.update(new ItemUpdateListener() { - public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, + public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, String intervalName) { // Value v = item.getValue(); HistogramPower2 nh; diff --git a/Base/src/test/java/io/deephaven/base/stats/HistogramStateTest.java b/Base/src/test/java/io/deephaven/base/stats/HistogramStateTest.java index ea63a9100fd..2a23844c93f 100644 --- a/Base/src/test/java/io/deephaven/base/stats/HistogramStateTest.java +++ b/Base/src/test/java/io/deephaven/base/stats/HistogramStateTest.java @@ -23,7 +23,7 @@ public void testSample() throws Exception { // This should print 10 invocations every time Stats.update(new ItemUpdateListener() { - public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, + public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, String intervalName) { Value v = item.getValue(); History history = v.getHistory(); diff --git a/FishUtil/build.gradle b/FishUtil/build.gradle deleted file mode 100644 index bcadd9989db..00000000000 --- a/FishUtil/build.gradle +++ /dev/null @@ -1,14 +0,0 @@ -plugins { - id 'io.deephaven.project.register' - id 'java-library' -} - -dependencies { - implementation project(':Base') - implementation project(':DataStructures') - implementation project(':IO') - implementation project(':Configuration') - implementation project(':log-factory') - - testImplementation project(path: ':Base', configuration: 'tests') -} \ No newline at end of file diff --git a/FishUtil/cpp/MicroTimer.cpp b/FishUtil/cpp/MicroTimer.cpp deleted file mode 100644 index 1455d872f8e..00000000000 --- a/FishUtil/cpp/MicroTimer.cpp +++ /dev/null @@ -1,89 +0,0 @@ -#include "MicroTimer.h" - -#ifdef _WIN32 -#include <windows.h> -#include <sys/timeb.h> -static jdouble scale; -LARGE_INTEGER startupTick; -LARGE_INTEGER startupTime; -const double MICROS_IN_SEC = 1000000.0; - -JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM * vm, void * reserved) { - LARGE_INTEGER freq; - QueryPerformanceFrequency (&freq); - scale = freq.QuadPart / MICROS_IN_SEC; - - QueryPerformanceCounter(&startupTick); - - struct timeb startupTimeMillis; - ftime(&startupTimeMillis); - startupTime.QuadPart = startupTimeMillis.time;
- startupTime.QuadPart *= 1000; - startupTime.QuadPart += startupTimeMillis.millitm; - startupTime.QuadPart *= 1000; - - return JNI_VERSION_1_2; -} - -JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_currentTimeMicrosNative - (JNIEnv * env, jclass cls) { - LARGE_INTEGER now; - QueryPerformanceCounter (&now); - LARGE_INTEGER diff; - diff.QuadPart = (now.QuadPart - startupTick.QuadPart) / scale; - return startupTime.QuadPart + diff.QuadPart; -} - -extern "C" JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_clockRealtimeNative - (JNIEnv * env, jclass cls) { - jlong micros = Java_io_deephaven_util_clock_MicroTimer_currentTimeMicrosNative(env, cls); - return micros * 1000L; -} - -extern "C" JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_clockMonotonicNative - (JNIEnv * env, jclass cls) { - jlong micros = Java_io_deephaven_util_clock_MicroTimer_currentTimeMicrosNative(env, cls); - return micros * 1000L; -} - - -#else -#include <sys/time.h> -#include <time.h> -#include <stdint.h> -const uint64_t MICROS_IN_SEC = 1000000L; - -JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_currentTimeMicrosNative - (JNIEnv * env, jclass cls) { - timeval now; - gettimeofday(&now, NULL); - return ((uint64_t) now.tv_sec * 1000000L) + now.tv_usec; -} - -extern "C" JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_clockRealtimeNative - (JNIEnv * env, jclass cls) { - timespec now; - clock_gettime(CLOCK_REALTIME, &now); - return ((uint64_t) now.tv_sec * 1000000000L) + now.tv_nsec; -} - -extern "C" JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_clockMonotonicNative - (JNIEnv * env, jclass cls) { - timespec now; - clock_gettime(CLOCK_MONOTONIC, &now); - return ((uint64_t) now.tv_sec * 1000000000L) + now.tv_nsec; -} - -#endif - -static __inline__ unsigned long long rdtsc(void) { - unsigned hi, lo; - __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi)); - return ((unsigned long long)lo)|(((unsigned long long)hi)<<32); -} - -extern "C" JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_rdtscNative - (JNIEnv * env, jclass cls) { - return (jlong) rdtsc(); -} - diff --git a/FishUtil/cpp/MicroTimer.h b/FishUtil/cpp/MicroTimer.h deleted file mode 100644 index e20fbca170f..00000000000 --- a/FishUtil/cpp/MicroTimer.h +++ /dev/null @@ -1,22 +0,0 @@ -/* DO NOT EDIT THIS FILE - it is machine generated */ -#include <jni.h> -/* Header for class io_deephaven_util_clock_MicroTimer */ - -#ifndef _Included_io_deephaven_util_clock_MicroTimer -#define _Included_io_deephaven_util_clock_MicroTimer - -#ifdef __cplusplus -extern "C" { -#endif -/* - * Class: io_deephaven_util_clock_MicroTimer - * Method: currentTimeMicrosNative - * Signature: ()J - */ -JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_currentTimeMicrosNative - (JNIEnv *, jclass); - -#ifdef __cplusplus -} -#endif -#endif diff --git a/FishUtil/cpp/SignalUtils.cpp b/FishUtil/cpp/SignalUtils.cpp deleted file mode 100644 index 5d4f2a75ca4..00000000000 --- a/FishUtil/cpp/SignalUtils.cpp +++ /dev/null @@ -1,9 +0,0 @@ -#include "SignalUtils.h" - -#include <sys/types.h> -#include <signal.h> - -extern "C" JNIEXPORT jint JNICALL Java_io_deephaven_util_signals_SignalUtils_sendSignalNative - (JNIEnv * env, jclass cls, jint pid, jint sig) { - return kill(pid, sig); -} diff --git a/FishUtil/cpp/SignalUtils.h b/FishUtil/cpp/SignalUtils.h deleted file mode 100644 index 281c9a8a34f..00000000000 --- a/FishUtil/cpp/SignalUtils.h +++ /dev/null @@ -1,21 +0,0 @@ -/* DO NOT EDIT THIS FILE - it is machine generated */ -#include <jni.h> -/* Header for class
io_deephaven_util_signals_SignalUtils */ - -#ifndef _Included_io_deephaven_util_signals_SignalUtils -#define _Included_io_deephaven_util_signals_SignalUtils -#ifdef __cplusplus -extern "C" { -#endif -/* - * Class: io_deephaven_util_signals_SignalUtils - * Method: sendSignalNative - * Signature: (II)I - */ -JNIEXPORT jint JNICALL Java_io_deephaven_util_signals_SignalUtils_sendSignalNative - (JNIEnv *, jclass, jint, jint); - -#ifdef __cplusplus -} -#endif -#endif diff --git a/FishUtil/gradle.properties b/FishUtil/gradle.properties deleted file mode 100644 index c186bbfdde1..00000000000 --- a/FishUtil/gradle.properties +++ /dev/null @@ -1 +0,0 @@ -io.deephaven.project.ProjectType=JAVA_PUBLIC diff --git a/FishUtil/src/main/java/io/deephaven/util/DateUtil.java b/FishUtil/src/main/java/io/deephaven/util/DateUtil.java deleted file mode 100644 index c5c5b66ee90..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/DateUtil.java +++ /dev/null @@ -1,955 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util; - -import io.deephaven.base.verify.Require; -import io.deephaven.base.verify.RequirementFailure; -import io.deephaven.configuration.PropertyFile; - -import java.io.File; -import java.text.DateFormat; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.*; - -// -------------------------------------------------------------------- -/** - * Useful methods for working with dates. Not for use in the critical path. - */ -public class DateUtil { - - public static final boolean DAYMASK_STRICT = true; - public static final boolean DAYMASK_NOT_STRICT = false; - - public static final int DAY_VALID = '1'; - public static final int DAY_INVALID = '0'; - public static final int DAY_OPTIONAL = '2'; - - public static final String DAYMASK_NORMAL_BUSINESS_WEEK = "0111110"; - - public static final long NANOS_PER_MICRO = 1000; - public static final long NANOS_PER_MILLI = NANOS_PER_MICRO * 1000; - public static final long NANOS_PER_SECOND = NANOS_PER_MILLI * 1000; - - public static final long MICROS_PER_MILLI = 1000; - public static final long MICROS_PER_SECOND = MICROS_PER_MILLI * 1000; - - public static final long MILLIS_PER_SECOND = 1000; - public static final long MILLIS_PER_MINUTE = MILLIS_PER_SECOND * 60; - public static final long MILLIS_PER_HOUR = MILLIS_PER_MINUTE * 60; - public static final long MILLIS_PER_DAY = MILLIS_PER_HOUR * 24; - - public static final int SECONDS_PER_MINUTE = 60; - public static final int SECONDS_PER_HOUR = SECONDS_PER_MINUTE * 60; - public static final int SECONDS_PER_DAY = SECONDS_PER_HOUR * 24; - - public static final int DAYS_PER_WEEK = 7; - - public static final long[] THOUSANDS = {1, 1000, 1000000, 1000000000}; - - /** Number of days in each month. (Jan==1, Feb is non-leap-year) */ - public static final int[] DAYS_PER_MONTH = {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; - - /** Three letter abbreviations of month names. (Jan==1, title case) */ - public static final String[] MONTH_ABBREVIATIONS_3T = - {"Xxx", "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}; - - /** Three letter abbreviations of month names. (Jan==1, upper case) */ - public static final String[] MONTH_ABBREVIATIONS_3U = - {"XXX", "JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC"}; - - /** Three letter abbreviations of month names. 
(Jan==1, lower case) */ - public static final String[] MONTH_ABBREVIATIONS_3L = - {"xxx", "jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec"}; - - // some useful formatting objects - /** Formats a year in YYYY format. */ - private static final DateFormat ms_dateFormatYear = new ThreadSafeDateFormat(new SimpleDateFormat("yyyy")); - /** Formats a month in MM format. */ - private static final DateFormat ms_dateFormatMonth = new ThreadSafeDateFormat(new SimpleDateFormat("MM")); - /** Formats a day in DD format. */ - private static final DateFormat ms_dateFormatDay = new ThreadSafeDateFormat(new SimpleDateFormat("dd")); - - private static final DateFormat ms_dateFormatHour = new ThreadSafeDateFormat(new SimpleDateFormat("HH")); - private static final DateFormat ms_dateFormatMinute = new ThreadSafeDateFormat(new SimpleDateFormat("mm")); - private static final DateFormat ms_dateFormatSecond = new ThreadSafeDateFormat(new SimpleDateFormat("ss")); - - - /** - * An easy way to get the OS-specific directory name component separator. - */ - private static final String DIR_SEP = File.separator; - - /** - * The "local" time zone. We make it explicit for testing purposes. - */ - private static TimeZone ms_localTimeZone = TimeZone.getDefault(); - - // ---------------------------------------------------------------- - /** Gets the "local" time zone. */ - public static TimeZone getLocalTimeZone() { - return ms_localTimeZone; - } - - // ---------------------------------------------------------------- - /** Gets the "local" time zone. */ - public static void setLocalTimeZone(TimeZone localTimeZone) { - Require.neqNull(localTimeZone, "localTimeZone"); - ms_localTimeZone = localTimeZone; - ms_dateFormatDay.setTimeZone(localTimeZone); - ms_dateFormatMonth.setTimeZone(localTimeZone); - ms_dateFormatYear.setTimeZone(localTimeZone); - } - - // ---------------------------------------------------------------- - public static boolean isLeapYear(int nYear) { - return 0 == nYear % 4 && (0 != nYear % 100 || 0 == nYear % 400) && 0 != nYear; - } - - // ---------------------------------------------------------------- - public static int getDaysInMonth(int nMonth, int nYear) { - Require.geq(nMonth, "nMonth", 1); - Require.leq(nMonth, "nMonth", 12); - return DAYS_PER_MONTH[nMonth] + (2 == nMonth && isLeapYear(nYear) ? 1 : 0); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in MMDD format. - */ - public static String getDateAsMMDD(Date date) { - Require.neqNull(date, "date"); - return ms_dateFormatMonth.format(date) + ms_dateFormatDay.format(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in MM format. - */ - public static String getDateAsMM(Date date) { - Require.neqNull(date, "date"); - return ms_dateFormatMonth.format(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in DD format. - */ - public static String getDateAsDD(Date date) { - Require.neqNull(date, "date"); - return ms_dateFormatDay.format(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in YYYYMM format. 
- */ - public static String getDateAsYYYYMM(Date date) { - Require.neqNull(date, "date"); - return ms_dateFormatYear.format(date) + ms_dateFormatMonth.format(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in YYYYMMDD format. - */ - public static String getDateAsYYYYMMDD(Date date) { - Require.neqNull(date, "date"); - return ms_dateFormatYear.format(date) + ms_dateFormatMonth.format(date) + ms_dateFormatDay.format(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in YYYYMMDD format. - */ - public static String getDateAsYYYYMMDD(long timeInMillis) { - Date date = new Date(timeInMillis); - return getDateAsYYYYMMDD(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in YYYYMMDDTHH:MM:SS format. - */ - public static String getDateAsYYYYdMMdDDTHHcMMcSS(Date date) { - return ms_dateFormatYear.format(date) + "-" + ms_dateFormatMonth.format(date) + "-" - + ms_dateFormatDay.format(date) + "T" + ms_dateFormatHour.format(date) + ":" - + ms_dateFormatMinute.format(date) + ":" + ms_dateFormatSecond.format(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in YYYYMMDDTHH:MM:SS format. - */ - public static String getDateAsYYYYdMMdDDTHHcMMcSS(long timeInMillis) { - Date date = new Date(timeInMillis); - return getDateAsYYYYdMMdDDTHHcMMcSS(date); - } - - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in MMDDYYYY format. - */ - public static String getDateAsMMDDYYYY(Date date) { - Require.neqNull(date, "date"); - return ms_dateFormatMonth.format(date) + ms_dateFormatDay.format(date) + ms_dateFormatYear.format(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in YYYY/YYYYMM/YYYYMMDD format. - */ - public static String getDateAsPath(Date date) { - Require.neqNull(date, "date"); - String sYear = ms_dateFormatYear.format(date); - String sMonth = ms_dateFormatMonth.format(date); - String sDay = ms_dateFormatDay.format(date); - return sYear + DIR_SEP + sYear + sMonth + DIR_SEP + sYear + sMonth + sDay; - } - - // ---------------------------------------------------------------- - /** - * Converts the given integer in YYYYMMDD format to a string in YYYY/YYYYMM/YYYYMMDD format. - */ - public static String getYyyymmddIntAsPath(int nDateYyyymmdd) { - String sYyyymmdd = "00000000" + Integer.toString(nDateYyyymmdd); - sYyyymmdd = sYyyymmdd.substring(sYyyymmdd.length() - 8); - String sYear = sYyyymmdd.substring(0, 4); - String sMonth = sYyyymmdd.substring(4, 6); - String sDay = sYyyymmdd.substring(6, 8); - return sYear + DIR_SEP + sYear + sMonth + DIR_SEP + sYear + sMonth + sDay; - } - - // ---------------------------------------------------------------- - /** - * Gets the download path, in [DownloadBaseDir]/sDataSubdir/YYYY/YYYYMM/YYYYMMDD format given a date (local - * timezone). 
- */ - public static String getDateDownloadPath(PropertyFile configuration, String sDataSubdir, Date date) { - Require.nonempty(sDataSubdir, "sDataSubdir"); - Require.neqNull(date, "date"); - return configuration.getProperty("DownloadBaseDir") + DIR_SEP + sDataSubdir + DIR_SEP + getDateAsPath(date); - } - - // ---------------------------------------------------------------- - /** - * Gets the download path, in [DownloadBaseDir]/sDataSubdir/YYYY/YYYYMM/YYYYMMDD format given an integer in YYYYMMDD - * format. - */ - public static String getYyyymmddIntDownloadPath(PropertyFile configuration, String sDataSubdir, int nDateYyyymmdd) { - Require.nonempty(sDataSubdir, "sDataSubdir"); - return configuration.getProperty("DownloadBaseDir") + DIR_SEP + sDataSubdir + DIR_SEP - + getYyyymmddIntAsPath(nDateYyyymmdd); - } - - // ---------------------------------------------------------------- - /** Gets a date object representing the time 24 hours ago. */ - public static Date getDateYesterday() { - long timeNow = System.currentTimeMillis(); - long timeYesterday = timeNow - MILLIS_PER_DAY; - return new Date(timeYesterday); - } - - // ---------------------------------------------------------------- - /** - * Gets a date object representing the next day at the same hour (which may not be exactly 24 hours in the future). - */ - public static Date getNextDaySameTime(Date baseline, TimeZone zone) { - Require.neqNull(baseline, "baseline"); - Require.neqNull(zone, "zone"); - Calendar calendar = Calendar.getInstance(zone); - calendar.setTime(baseline); - calendar.add(Calendar.DATE, 1); - return calendar.getTime(); - } - - // ---------------------------------------------------------------- - /** - * Subtracts zero or more 24hr periods from the given date until the day of week for the resulting date (local - * timezone) is a valid day according to the mask. If the strict flag is true, optional days are not considered - * valid. - *
- * See {@link #validateDayOfWeekMask}. - */ - public static Date getMostRecentValidDate(Date date, String sValidDaysMask, boolean bStrict) { - Require.neqNull(date, "date"); - validateDayOfWeekMask(sValidDaysMask, bStrict); - - Calendar calendar = Calendar.getInstance(ms_localTimeZone); - while (true) { - calendar.setTime(date); - char chDayType = sValidDaysMask.charAt(calendar.get(Calendar.DAY_OF_WEEK) - Calendar.SUNDAY); - if (DAY_VALID == chDayType || (!bStrict && DAY_OPTIONAL == chDayType)) { - break; - } - date = new Date(date.getTime() - MILLIS_PER_DAY); - } - return date; - } - - // ---------------------------------------------------------------- - /** - * Adds one or more 24hr periods from the given date until the day of week for the resulting date (local timezone) - * is a valid day according to the mask. If the strict flag is true, optional days are not considered valid. - *
- * See {@link #validateDayOfWeekMask}. - */ - public static Date getNextValidDate(Date date, String sValidDaysMask, boolean bStrict) { - Require.neqNull(date, "date"); - validateDayOfWeekMask(sValidDaysMask, bStrict); - - Calendar calendar = Calendar.getInstance(ms_localTimeZone); - while (true) { - date = new Date(date.getTime() + MILLIS_PER_DAY); - calendar.setTime(date); - char chDayType = sValidDaysMask.charAt(calendar.get(Calendar.DAY_OF_WEEK) - Calendar.SUNDAY); - if (DAY_VALID == chDayType || (!bStrict && DAY_OPTIONAL == chDayType)) { - break; - } - } - return date; - } - - // ---------------------------------------------------------------- - /** - * Returns the validity flag from the mask for the given date (local timezone). - *
- * See {@link #validateDayOfWeekMask}. - */ - public static int getDayValidity(Date date, String sValidDaysMask) { - Require.neqNull(date, "date"); - validateDayOfWeekMask(sValidDaysMask, DAYMASK_NOT_STRICT); - return sValidDaysMask.charAt(getDayOfWeek(date)); - } - - // ---------------------------------------------------------------- - /** - * Throws a requirement exception if the given day of week mask is not valid. There must be at least one valid day - * in the mask. If the strict flag is set, optional days are not considered valid. - *
- * See {@link #DAY_VALID}, {@link #DAY_INVALID}, {@link #DAY_OPTIONAL}, {@link #DAYMASK_STRICT}, - * {@link #DAYMASK_NOT_STRICT} - */ - public static void validateDayOfWeekMask(String sValidDaysMask, boolean bStrict) { - Require.neqNull(sValidDaysMask, "sValidDaysMask", 1); - Require.eq(sValidDaysMask.length(), "sValidDaysMask.length()", DAYS_PER_WEEK, 1); - int nValidDaysFound = 0; - for (int nIndex = 0; nIndex < DAYS_PER_WEEK; nIndex++) { - char chDayType = sValidDaysMask.charAt(nIndex); - Require.requirement(DAY_INVALID == chDayType || DAY_VALID == chDayType || DAY_OPTIONAL == chDayType, - "DAY_INVALID==chDayType || DAY_VALID==chDayType || DAY_OPTIONAL==chDayType", 1); - if (DAY_VALID == chDayType || (!bStrict && DAY_OPTIONAL == chDayType)) { - nValidDaysFound++; - } - } - Require.gtZero(nValidDaysFound, "nValidDaysFound", 1); - } - - // ---------------------------------------------------------------- - /** - * Gets the day of the week (Su == 0) for the given date (local timezone). - */ - public static int getDayOfWeek(Date date) { - Require.neqNull(date, "date"); - Calendar calendar = Calendar.getInstance(ms_localTimeZone); - calendar.setTime(date); - return calendar.get(Calendar.DAY_OF_WEEK) - Calendar.SUNDAY; - } - - // ---------------------------------------------------------------- - /** - * Gets the current date (local timezone) as an integer, in YYYYMMDD format. - */ - public static int getDateTodayAsYyyymmddInt() { - return getDateAsYyyymmddInt(new Date()); - } - - // ---------------------------------------------------------------- - /** - * Gets the given date (local timezone) as an integer, in YYYYMMDD format. - */ - public static int getDateAsYyyymmddInt(Date date) { - Require.neqNull(date, "date"); - Calendar calendar = Calendar.getInstance(ms_localTimeZone); - calendar.setTime(date); - return calendar.get(Calendar.YEAR) * 10000 + (calendar.get(Calendar.MONTH) + 1) * 100 - + calendar.get(Calendar.DAY_OF_MONTH); - } - - // ---------------------------------------------------------------- - /** Converts an integer in YYYYMMDD format into "YYYY-MM-DD". */ - public static String formatYyyymmddIntAsIso(int nDateYyyymmdd) { - return formatYyyymmddStringAsIso(Integer.toString(nDateYyyymmdd)); - } - - // ---------------------------------------------------------------- - /** Converts an integer in YYYYMMDD format into "MM/DD/YYYY". */ - public static String formatYyyymmddIntAsUs(int nDateYyyymmdd) { - return formatYyyymmddStringAsUs(Integer.toString(nDateYyyymmdd)); - } - - // ---------------------------------------------------------------- - /** Converts a String in YYYYMMDD format into "YYYY-MM-DD". */ - public static String formatYyyymmddStringAsIso(String sDateYyyymmdd) { - Require.neqNull(sDateYyyymmdd, "sDateYyyymmdd"); - Require.eq(sDateYyyymmdd.length(), "sDateYyyymmdd.length()", 8); - return sDateYyyymmdd.substring(0, 4) + "-" + sDateYyyymmdd.substring(4, 6) + "-" - + sDateYyyymmdd.substring(6, 8); - } - - // ---------------------------------------------------------------- - /** Converts a String in YYYYMMDD format into "MM/DD/YYYY". 
*/ - public static String formatYyyymmddStringAsUs(String sDateYyyymmdd) { - Require.neqNull(sDateYyyymmdd, "sDateYyyymmdd"); - Require.eq(sDateYyyymmdd.length(), "sDateYyyymmdd.length()", 8); - return sDateYyyymmdd.substring(4, 6) + "/" + sDateYyyymmdd.substring(6, 8) + "/" - + sDateYyyymmdd.substring(0, 4); - } - - // ---------------------------------------------------------------- - /** Converts a String in (M|MM)/(D|DD)/(YY|YYYY) format into "YYYY-MM-DD". */ - public static String formatMmddyyyyStringAsIso(String sDateMmddyyyy) { - Require.neqNull(sDateMmddyyyy, "sDateMmddyyyy"); - String[] date = sDateMmddyyyy.split("/"); - String res; - - res = ((date[2].length() == 2) ? "20" + date[2] : date[2]); - res += "-" + ((date[0].length() == 1) ? "0" + date[0] : date[0]); - res += "-" + ((date[1].length() == 1) ? "0" + date[1] : date[1]); - - Require.eq(res.length(), "sDateMmddyyyy.length()", 10); - return res; - } - - // ---------------------------------------------------------------- - /** Converts a String in (M|MM)/(D|DD)/YYYY format into "YYYY-MM-DD". */ - public static String formatMmddyyyyStringAsIsoAllowNull(String sDateMmddyyyy) { - if (null == sDateMmddyyyy || sDateMmddyyyy.length() == 0) { - return ""; - } - Require.neqNull(sDateMmddyyyy, "sDateMmddyyyy"); - String[] date = sDateMmddyyyy.split("/"); - String res; - - res = date[2]; - res += "-" + ((date[0].length() == 1) ? "0" + date[0] : date[0]); - res += "-" + ((date[1].length() == 1) ? "0" + date[1] : date[1]); - - Require.eq(res.length(), "sDateMmddyyyy.length()", 10); - return res; - } - - - // ---------------------------------------------------------------- - /** Converts a String in DDM3UYYYY format into "YYYY-MM-DD". */ - public static String formatddM3UyyyyStringAsIso(String sDateddM3Uyyyy) { - Require.neqNull(sDateddM3Uyyyy, "sDateddM3Uyyyy"); - String res; - - res = sDateddM3Uyyyy.substring(5); - int monthValue = Arrays.asList(MONTH_ABBREVIATIONS_3U).indexOf(sDateddM3Uyyyy.substring(2, 5)); - res += "-" + ((monthValue < 10) ? "0" + monthValue : monthValue); - res += "-" + (sDateddM3Uyyyy.substring(0, 2)); - - Require.eq(res.length(), "sDateddM3Uyyyy.length()", 10); - return res; - } - - // ---------------------------------------------------------------- - /** Converts a String in DD-MMM-YYYY format into "YYYY-MM-DD". */ - public static String formatddMMMyyyyStringAsIso(String sDateddMMMyyyy) { - Require.neqNull(sDateddMMMyyyy, "sDateddMMMyyyy"); - String[] date = sDateddMMMyyyy.split("-"); - String res; - - res = date[2]; - int monthValue = Arrays.asList(MONTH_ABBREVIATIONS_3U).indexOf(date[1]); - res += "-" + ((monthValue < 10) ? "0" + monthValue : monthValue); - res += "-" + ((date[0].length() == 1) ? "0" + date[0] : date[0]); - - Require.eq(res.length(), "sDateddmmmyyyy.length()", 10); - return res; - } - - // ---------------------------------------------------------------- - /** Converts a String in DD-MMM-YY format into "YYYY-MM-DD". */ - public static String formatddMMMyyStringAsIso(String sDateddMMMyy) { - Require.neqNull(sDateddMMMyy, "sDateddMMMyy"); - String[] date = sDateddMMMyy.split("-"); - String res; - - res = date[2]; - int monthValue = Arrays.asList(MONTH_ABBREVIATIONS_3U).indexOf(date[1].toUpperCase()); - res += "-" + ((monthValue < 10) ? "0" + monthValue : monthValue); - res += "-" + ((date[0].length() == 1) ? 
"0" + date[0] : date[0]); - - Require.eq(res.length(), "sDateddmmmyyyy.length()", 10); - return res; - } - - - // ------------------------------------------------------------------ - /** Converts a String in "Mmm dd, YYYY" format int "YYYY-MM-DD". */ - public static String formatMmmddcYYYYStringAsIso(String sDateMmmddcYYYY) { - Require.neqNull(sDateMmmddcYYYY, "sDateMmmddcYYYY"); - String[] date = sDateMmmddcYYYY.split("[ ,]"); - String res; - - res = date[3]; - int monthValue = Arrays.asList(MONTH_ABBREVIATIONS_3T).indexOf(date[0]); - res += "-" + ((monthValue < 10) ? "0" + monthValue : monthValue); - res += "-" + date[1]; - - Require.eq(res.length(), "sDateMmmddcYYYY.length()", 10); - return res; - } - - // ------------------------------------------------------------------ - /** Converts a String in "YYYY-MM-DD" format into "MM/DD/YYYY" format. */ - public static String formatIsoAsMMsDDsYYYYString(String sDateYYYYdMMdDD) { - Require.neqNull(sDateYYYYdMMdDD, "sDateYYYYdMMdDD"); - String[] date = sDateYYYYdMMdDD.split("-"); - String res = date[1] + "/" + date[2] + "/" + date[0]; - Require.eq(res.length(), "sDateYYYYdMMdDD.length()", 10); - return res; - } - - /** - * Converts a date string into a date. - * - * @param date - * @param sourceFormat - * @param resultFormat - * @return date - * @throws ParseException - */ - public static String formatDateFromStringToString(String date, String sourceFormat, String resultFormat) { - final DateFormat sourceDateFormat = new SimpleDateFormat(sourceFormat); - final DateFormat resultDateFormat = new SimpleDateFormat(resultFormat); - return formatDateFromFormatToFormat(date, sourceDateFormat, resultDateFormat); - } - - /** - * Converts a date string into a date. - * - * @param date - * @param sourceDateFormat - * @param resultDateFormat - * @return date - * @throws ParseException - */ - public static String formatDateFromFormatToFormat(String date, DateFormat sourceDateFormat, - DateFormat resultDateFormat) { - try { - return resultDateFormat.format(sourceDateFormat.parse(date)); - } catch (ParseException e) { - throw new RuntimeException(e); - } - } - - // ################################################################ - - // ---------------------------------------------------------------- - /** - * Returns the absolute timestamp of the most recent occurrence (before or exactly on the - * referenceTimestamp) of a daily event. The time of day is taken from - * sPropertyNameRoot.time in "h:mm a" format. The time zone for calculations is taken from - * sPropertyNameRoot.timeZone. 
- */ - public static Date getTimestampOfMostRecentDailyEvent(PropertyFile configuration, String sPropertyNameRoot, - Date referenceTimestamp) { - Require.nonempty(sPropertyNameRoot, "sPropertyNameRoot"); - Require.neqNull(referenceTimestamp, "referenceTimestamp"); - - // get the time zone of the event from the system properties - TimeZone timeZone = getTimeZoneOfEvent(configuration, sPropertyNameRoot); - - // get the time of day of the event from the system properties - Calendar eventTimestampCalendar = buildEventTimestampCalendar(timeZone, sPropertyNameRoot, configuration); - - // determine the exact timestamp of when the event happens today - Calendar referenceTimestampCalendar = Calendar.getInstance(timeZone); - referenceTimestampCalendar.setTime(referenceTimestamp); - eventTimestampCalendar.set( - referenceTimestampCalendar.get(Calendar.YEAR), - referenceTimestampCalendar.get(Calendar.MONTH), - referenceTimestampCalendar.get(Calendar.DAY_OF_MONTH)); - - // if the event happens in the future, then the most recent occurrence was the one that happened one day ago - if (eventTimestampCalendar.getTimeInMillis() > referenceTimestampCalendar.getTimeInMillis()) { - eventTimestampCalendar.add(Calendar.DAY_OF_MONTH, -1); - } - - return eventTimestampCalendar.getTime(); - } - - // ---------------------------------------------------------------- - /** - * Returns the absolute timestamp of the occurrence of a daily event that happens in the same "day" as right now. - * The time of day of the event is taken from sPropertyNameRoot.time in "h:mm a" format. The - * time zone for calculations (and for determining the boundaries of "today") is taken from - * sPropertyNameRoot.timeZone. - */ - public static Date getTimestampOfEventToday(PropertyFile configuration, String sPropertyNameRoot) { - Require.nonempty(sPropertyNameRoot, "sPropertyNameRoot"); - - // get the time zone of the event from the system properties - TimeZone timeZone = getTimeZoneOfEvent(configuration, sPropertyNameRoot); - - // get the time of day of the event from the system properties - Calendar eventTimestampCalendar = buildEventTimestampCalendar(timeZone, sPropertyNameRoot, configuration); - - // determine the exact timestamp of when the event happens today - Calendar referenceTimestampCalendar = Calendar.getInstance(timeZone); - eventTimestampCalendar.set( - referenceTimestampCalendar.get(Calendar.YEAR), - referenceTimestampCalendar.get(Calendar.MONTH), - referenceTimestampCalendar.get(Calendar.DAY_OF_MONTH)); - - return eventTimestampCalendar.getTime(); - } - - // ---------------------------------------------------------------- - /** - * Returns the absolute timestamp of the occurrence of a daily event that happens in the same "day" as right now. - * The time of day of the event is taken from sPropertyNameRoot.time in "h:mm a" format. The - * time zone for calculations (and for determining the boundaries of "today") is taken from - * sPropertyNameRoot.timeZone. 
- */ - public static Date getTimestampOfEventToday(PropertyFile configuration, String sPropertyNameRoot, long nNowMillis) { - Require.nonempty(sPropertyNameRoot, "sPropertyNameRoot"); - - // get the time zone of the event from the system properties - TimeZone timeZone = getTimeZoneOfEvent(configuration, sPropertyNameRoot); - - // get the time of day of the event from the system properties - Calendar eventTimestampCalendar = buildEventTimestampCalendar(timeZone, sPropertyNameRoot, configuration); - - // determine the exact timestamp of when the event happens today - Calendar referenceTimestampCalendar = Calendar.getInstance(timeZone); - referenceTimestampCalendar.setTimeInMillis(nNowMillis); - eventTimestampCalendar.set( - referenceTimestampCalendar.get(Calendar.YEAR), - referenceTimestampCalendar.get(Calendar.MONTH), - referenceTimestampCalendar.get(Calendar.DAY_OF_MONTH)); - - return eventTimestampCalendar.getTime(); - } - - // ---------------------------------------------------------------- - private static Calendar buildEventTimestampCalendar(TimeZone timeZone, String sPropertyNameRoot, - PropertyFile configuration) { - String sTimeProperty = sPropertyNameRoot + ".time"; - String sTime = configuration.getProperty(sTimeProperty); - Calendar eventTimestampCalendar = Calendar.getInstance(timeZone); - SimpleDateFormat timeFormat = new SimpleDateFormat("h:mm:ss a"); - timeFormat.setCalendar(eventTimestampCalendar); - try { - timeFormat.parse(sTime); - } catch (ParseException e) { - timeFormat = new SimpleDateFormat("h:mm a"); - timeFormat.setCalendar(eventTimestampCalendar); - try { - timeFormat.parse(sTime); - } catch (ParseException e2) { - throw Require.exceptionNeverCaught("Value of property " + sTimeProperty + " (\"" + sTime - + "\") not in proper format (\"" + timeFormat.toPattern() + "\").", e2); - } - } - return eventTimestampCalendar; - } - - // ---------------------------------------------------------------- - /** - * Gets the timestamp of an event based upon a daily event and a date (retrieved from properties) - */ - public static Date getTimestampOfEvent(PropertyFile configuration, String sEventPropertyRoot, - String sDateProperty) { - Require.nonempty(sEventPropertyRoot, "sEventPropertyRoot"); - Require.nonempty(sDateProperty, "sDateProperty"); - - // get the time zone of the event from the system properties - TimeZone timeZone = getTimeZoneOfEvent(configuration, sEventPropertyRoot); - - // get the time of day of the event from the system properties - Calendar eventTimestampCalendar = buildEventTimestampCalendar(timeZone, sEventPropertyRoot, configuration); - - // parse the date string and set the year, month, and day of the timestamp we are building - // note: time zone is irrelevant for the next step because we just want the numbers - we could use a regexp. 
- SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd"); - String sDate = configuration.getProperty(sDateProperty); - try { - dateFormat.parse(sDate); - } catch (ParseException e) { - throw Require.exceptionNeverCaught( - sDateProperty + " (\"" + sDate + "\") not in \"" + dateFormat.toPattern() + "\" format.", e); - } - Calendar dateCalendar = dateFormat.getCalendar(); - - // set the year, month, and day - eventTimestampCalendar.set(dateCalendar.get(Calendar.YEAR), dateCalendar.get(Calendar.MONTH), - dateCalendar.get(Calendar.DAY_OF_MONTH)); - - return eventTimestampCalendar.getTime(); - } - - // ---------------------------------------------------------------- - /** - * Gets the timestamp of an event based upon a daily event and a date specified by year, month (jan=1), day - */ - public static Date getTimestampOfEvent(PropertyFile configuration, String sEventPropertyRoot, int nYear, int nMonth, - int nDay) { - Require.nonempty(sEventPropertyRoot, "sEventPropertyRoot"); - - // get the time zone of the event from the system properties - TimeZone timeZone = getTimeZoneOfEvent(configuration, sEventPropertyRoot); - - // get the time of day of the event from the system properties - Calendar eventTimestampCalendar = buildEventTimestampCalendar(timeZone, sEventPropertyRoot, configuration); - - // set the year, month, and day - eventTimestampCalendar.set(nYear, nMonth - 1, nDay); - - return eventTimestampCalendar.getTime(); - } - - // ---------------------------------------------------------------- - /** - * Gets the timestamp of an event based upon a daily event and a date in YYYYMMDD format - */ - public static Date getTimestampOfEvent(PropertyFile configuration, String sEventPropertyRoot, int nYYYYMMDD) { - Require.nonempty(sEventPropertyRoot, "sEventPropertyRoot"); - return getTimestampOfEvent(configuration, sEventPropertyRoot, nYYYYMMDD / 10000, (nYYYYMMDD / 100) % 100, - nYYYYMMDD % 100); - } - - // ---------------------------------------------------------------- - /** Gets the time zone associated with a particular daily event. */ - public static TimeZone getTimeZoneOfEvent(PropertyFile configuration, String sPropertyNameRoot) { - Require.nonempty(sPropertyNameRoot, "sPropertyNameRoot"); - return TimeZone.getTimeZone(configuration.getProperty(sPropertyNameRoot + ".timeZone")); - } - - // ---------------------------------------------------------------- - /** - * Returns a date (noon in the local time zone) which is the date of the most recent occurrence (before or exactly - * on the referenceTimestamp) of the specified event, in the event's timezone. 
- */ - public static Date getDateOfMostRecentDailyEvent(PropertyFile configuration, String sPropertyNameRoot, - Date referenceTimestamp) { - Require.nonempty(sPropertyNameRoot, "sPropertyNameRoot"); - Require.neqNull(referenceTimestamp, "referenceTimestamp"); - Date eventTimestamp = getTimestampOfMostRecentDailyEvent(configuration, sPropertyNameRoot, referenceTimestamp); - Calendar sourceCalendar = Calendar.getInstance(getTimeZoneOfEvent(configuration, sPropertyNameRoot)); - sourceCalendar.setTime(eventTimestamp); - Calendar targetCalendar = Calendar.getInstance(ms_localTimeZone); - targetCalendar.clear(); - targetCalendar.set(sourceCalendar.get(Calendar.YEAR), sourceCalendar.get(Calendar.MONTH), - sourceCalendar.get(Calendar.DAY_OF_MONTH), 12, 0, 0); - return targetCalendar.getTime(); - } - - // ---------------------------------------------------------------- - /** - * Returns a date (noon in the local time zone) which is the date of the most recent occurrence (before or exactly - * on the referenceTimestamp) of the specified event, in the event's timezone. If the (strict) valid - * days mask indicates that the date is not valid, days will be subtracted until the date is valid. - *
- * See {@link #validateDayOfWeekMask}. - */ - public static Date getDateOfMostRecentDailyEvent(PropertyFile configuration, String sPropertyNameRoot, - Date referenceTimestamp, String sValidDaysMask) { - Require.nonempty(sPropertyNameRoot, "sPropertyNameRoot"); - Require.neqNull(referenceTimestamp, "referenceTimestamp"); - validateDayOfWeekMask(sValidDaysMask, DAYMASK_STRICT); - Calendar calendar = Calendar.getInstance(ms_localTimeZone); - calendar.setTime(getDateOfMostRecentDailyEvent(configuration, sPropertyNameRoot, referenceTimestamp)); - while (true) { - char chDayType = sValidDaysMask.charAt(calendar.get(Calendar.DAY_OF_WEEK) - Calendar.SUNDAY); - if (DAY_VALID == chDayType) { - break; - } - calendar.add(Calendar.DATE, -1); - } - return calendar.getTime(); - } - - // ---------------------------------------------------------------- - /** - * Wraps a "daily event" as an object. The time of day of the event is taken from - * sPropertyNameRoot.time in "h:mm a" format. The time zone for calculations (and for - * determining the boundaries of "today") is taken from sPropertyNameRoot.timeZone. - */ - public static class DailyEvent { - - private final PropertyFile m_configuration; - private final String m_sPropertyNameRoot; - - // ------------------------------------------------------------ - public DailyEvent(PropertyFile configuration, String sPropertyNameRoot) { - Require.neqNull(configuration, "configuration"); - Require.nonempty(sPropertyNameRoot, "sPropertyNameRoot"); - try { - buildEventTimestampCalendar(getTimeZoneOfEvent(configuration, sPropertyNameRoot), sPropertyNameRoot, - configuration); - } catch (RequirementFailure e) { - throw e.adjustForDelegatingMethod(); - } - m_configuration = configuration; - m_sPropertyNameRoot = sPropertyNameRoot; - } - - // ------------------------------------------------------------ - public long getTimestampOfEventToday(long nNow) { - return DateUtil.getTimestampOfEventToday(m_configuration, m_sPropertyNameRoot, nNow).getTime(); - } - - // ------------------------------------------------------------ - @Override - public String toString() { - return m_configuration.getProperty(m_sPropertyNameRoot + ".time") + ", " - + m_configuration.getProperty(m_sPropertyNameRoot + ".timeZone"); - } - } - - // ################################################################ - - // ---------------------------------------------------------------- - /** Parse the given string into a date with the given format. */ - public static long parse(String sTime, String sFormat) throws ParseException { - SimpleDateFormat simpleDateFormat = new SimpleDateFormat(sFormat); - simpleDateFormat.setTimeZone(ms_localTimeZone); - return simpleDateFormat.parse(sTime).getTime(); - } - - // ---------------------------------------------------------------- - /** - * Determines if two dates are on the same calendar day. - * - * @param d1 first date. - * @param d2 second date. - * @param tz timezone for the calendar. - * @return true if the dates are on the same calendar day, and false otherwise. 
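For reference, the check implemented just below collapses under java.time to a LocalDate comparison; a sketch assuming ZoneId in place of TimeZone:

    static boolean isSameDay(java.time.Instant d1, java.time.Instant d2, java.time.ZoneId tz) {
        return d1.atZone(tz).toLocalDate().equals(d2.atZone(tz).toLocalDate());
    }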
- */ - public static boolean isSameDay(Date d1, Date d2, TimeZone tz) { - Calendar calendar1 = new GregorianCalendar(tz); - calendar1.setTime(d1); - Calendar calendar2 = new GregorianCalendar(tz); - calendar2.setTime(d2); - - if (calendar1.get(Calendar.YEAR) != calendar2.get(Calendar.YEAR)) { - return false; - } else if (calendar1.get(Calendar.DAY_OF_YEAR) != calendar2.get(Calendar.DAY_OF_YEAR)) { - return false; - } else { - return true; - } - } - - // ################################################################ - - // ---------------------------------------------------------------- - /** - * Returns a string in "0d 0h 0m 0.000'000'000s" format from a time interval in nanoseconds. - */ - public static String formatIntervalNanos(long tsInterval) { - return internalFormatInterval(tsInterval, 3); - } - - // ---------------------------------------------------------------- - /** - * Returns a string in "0d 0h 0m 0.000'000s" format from a time interval in microseconds. - */ - public static String formatIntervalMicros(long tsInterval) { - return internalFormatInterval(tsInterval, 2); - } - - // ---------------------------------------------------------------- - /** - * Returns a string in "0d 0h 0m 0.000s" format from a time interval in milliseconds. - */ - public static String formatIntervalMillis(long tsInterval) { - return internalFormatInterval(tsInterval, 1); - } - - // ---------------------------------------------------------------- - private static String internalFormatInterval(long tsInterval, int nThousands) { - - StringBuilder stringBuilder = new StringBuilder(); - if (tsInterval < 0) { - stringBuilder.append("-"); - tsInterval = -tsInterval; - } - - long tsSeconds = tsInterval / THOUSANDS[nThousands]; - - boolean bNeedUnit = false; - if (tsSeconds > SECONDS_PER_DAY) { - long nDays = tsSeconds / SECONDS_PER_DAY; - tsSeconds %= SECONDS_PER_DAY; - stringBuilder.append(nDays).append("d "); - bNeedUnit = true; - } - if (tsSeconds > SECONDS_PER_HOUR || bNeedUnit) { - long nHours = tsSeconds / SECONDS_PER_HOUR; - tsSeconds %= SECONDS_PER_HOUR; - stringBuilder.append(nHours).append("h "); - bNeedUnit = true; - } - if (tsSeconds > SECONDS_PER_MINUTE || bNeedUnit) { - long nMinutes = tsSeconds / SECONDS_PER_MINUTE; - tsSeconds %= SECONDS_PER_MINUTE; - stringBuilder.append(nMinutes).append("m "); - } - stringBuilder.append(tsSeconds).append('.'); - - long tsFractions = tsInterval % THOUSANDS[nThousands]; - - for (int nIndex = nThousands; nIndex > 0; nIndex--) { - // if (nIndex!=nThousands) { stringBuilder.append('\''); } - long tsThousand = tsFractions / THOUSANDS[nIndex - 1]; - tsFractions %= THOUSANDS[nIndex - 1]; - - String sLeadingZeros; - if (tsThousand >= 100) { - sLeadingZeros = ""; - } else if (tsThousand >= 10) { - sLeadingZeros = "0"; - } else { - sLeadingZeros = "00"; - } - stringBuilder.append(sLeadingZeros).append(tsThousand); - } - return stringBuilder.append("s").toString(); - } - - // ---------------------------------------------------------------- - /** - * Formats the given microsecond timestamp with the given date formatter and then appends the last three microsend - * digits. 
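The pair of helpers below splits a microsecond timestamp into the part a millisecond-precision DateFormat can render and the three digits it cannot; numerically, with a hypothetical timestamp:

    long nTimestampMicros = 1_700_000_000_123_456L;
    long millisForFormat  = nTimestampMicros / 1000;  // ...123, rendered by the DateFormat
    long trailing         = nTimestampMicros % 1000;  // 456, zero-padded to three digits and appended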
- */ - public static String formatWithTrailingMicros(DateFormat dateFormat, long nTimestampMicros) { - return dateFormat.format(nTimestampMicros / DateUtil.MICROS_PER_MILLI) - + DateUtil.formatTrailingMicros(nTimestampMicros); - } - - // ---------------------------------------------------------------- - /** - * Returns the last three digits of the given microsecond timestamp as a string, suitable for appending to a - * timestamp formatted to millisecond precision. - */ - public static String formatTrailingMicros(long nTimestampMicros) { - nTimestampMicros = nTimestampMicros % 1000; - String sLeadingZeros; - if (nTimestampMicros >= 100) { - sLeadingZeros = ""; - } else if (nTimestampMicros >= 10) { - sLeadingZeros = "0"; - } else { - sLeadingZeros = "00"; - } - return sLeadingZeros + nTimestampMicros; - } -} diff --git a/FishUtil/src/main/java/io/deephaven/util/ExceptionUtil.java b/FishUtil/src/main/java/io/deephaven/util/ExceptionUtil.java deleted file mode 100644 index 759729e5f32..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/ExceptionUtil.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util; - -import org.jetbrains.annotations.NotNull; - -/** - * Some utilities for inspecting exceptions - */ -public class ExceptionUtil { - public static boolean causedBy(@NotNull Throwable t, Class cause) { - Throwable curr = t; - while (curr != null) { - if (cause.isAssignableFrom(curr.getClass())) { - return true; - } - curr = curr.getCause(); - } - return false; - } -} diff --git a/FishUtil/src/main/java/io/deephaven/util/Mailer.java b/FishUtil/src/main/java/io/deephaven/util/Mailer.java deleted file mode 100644 index c520f647054..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/Mailer.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -public interface Mailer { - void sendEmail(String sender, String[] recipients, String subject, String msg) throws IOException; - - void sendEmail(String sender, String recipient, String subject, String msg) throws IOException; - - void sendHTMLEmail(String sender, String recipient, String subject, String msg) throws IOException; - - void sendEmail(String sender, String recipient, String subject, String msg, - List> extraHeaderEntries) throws IOException; -} diff --git a/FishUtil/src/main/java/io/deephaven/util/ThreadSafeDateFormat.java b/FishUtil/src/main/java/io/deephaven/util/ThreadSafeDateFormat.java deleted file mode 100644 index 18cf9f4a550..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/ThreadSafeDateFormat.java +++ /dev/null @@ -1,155 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util; - -import io.deephaven.base.verify.Require; - -import java.text.AttributedCharacterIterator; -import java.text.DateFormat; -import java.text.FieldPosition; -import java.text.NumberFormat; -import java.text.ParseException; -import java.text.ParsePosition; -import java.util.Calendar; -import java.util.Date; -import java.util.TimeZone; - -// -------------------------------------------------------------------- -/** - * Wraps a {@link DateFormat} to provide a minimal level of thread safety that DateFormat is lacking (namely, preventing - * simultaneous calls to {@link #format} from separate threads from interfering with each other). 
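Worth noting for new code: java.time.format.DateTimeFormatter is immutable and thread-safe, so the lock-per-call wrapper below is unnecessary there; a minimal sketch under that assumption:

    // One shared instance may be used concurrently without synchronization.
    static final DateTimeFormatter FMT = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");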
- */ -public class ThreadSafeDateFormat extends DateFormat { - private final DateFormat m_dateFormat; - - public ThreadSafeDateFormat(DateFormat dateFormat) { - m_dateFormat = dateFormat; - } - - @Override - public Date parse(String source, ParsePosition pos) { - synchronized (m_dateFormat) { - return m_dateFormat.parse(source, pos); - } - } - - @Override - public StringBuffer format(Date date, StringBuffer toAppendTo, FieldPosition fieldPosition) { - synchronized (m_dateFormat) { - return m_dateFormat.format(date, toAppendTo, fieldPosition); - } - } - - @Override - public boolean isLenient() { - synchronized (m_dateFormat) { - return m_dateFormat.isLenient(); - } - } - - @Override - public void setLenient(boolean lenient) { - synchronized (m_dateFormat) { - m_dateFormat.setLenient(lenient); - } - } - - @Override - public NumberFormat getNumberFormat() { - synchronized (m_dateFormat) { - return m_dateFormat.getNumberFormat(); - } - } - - @Override - public void setNumberFormat(NumberFormat newNumberFormat) { - synchronized (m_dateFormat) { - m_dateFormat.setNumberFormat(newNumberFormat); - } - } - - @Override - public Calendar getCalendar() { - synchronized (m_dateFormat) { - return m_dateFormat.getCalendar(); - } - } - - @Override - public void setCalendar(Calendar newCalendar) { - synchronized (m_dateFormat) { - m_dateFormat.setCalendar(newCalendar); - } - } - - @Override - public TimeZone getTimeZone() { - synchronized (m_dateFormat) { - return m_dateFormat.getTimeZone(); - } - } - - @Override - public void setTimeZone(TimeZone zone) { - synchronized (m_dateFormat) { - m_dateFormat.setTimeZone(zone); - } - } - - @Override - public Date parse(String source) throws ParseException { - synchronized (m_dateFormat) { - return m_dateFormat.parse(source); - } - } - - @Override - public Object parseObject(String source, ParsePosition pos) { - synchronized (m_dateFormat) { - return m_dateFormat.parseObject(source, pos); - } - } - - @Override - public Object parseObject(String source) throws ParseException { - synchronized (m_dateFormat) { - return m_dateFormat.parseObject(source); - } - } - - @Override - public AttributedCharacterIterator formatToCharacterIterator(Object obj) { - synchronized (m_dateFormat) { - return m_dateFormat.formatToCharacterIterator(obj); - } - } - - @Override - public String toString() { - synchronized (m_dateFormat) { - return m_dateFormat.toString(); - } - } - - // ################################################################ - - @Override - public boolean equals(Object obj) { - Require.statementNeverExecuted(); - return super.equals(obj); - } - - @Override - public int hashCode() { - Require.statementNeverExecuted(); - return super.hashCode(); - } - - @Override - public Object clone() { - Require.statementNeverExecuted(); - return super.clone(); - } - -} diff --git a/FishUtil/src/main/java/io/deephaven/util/Validate.java b/FishUtil/src/main/java/io/deephaven/util/Validate.java deleted file mode 100644 index bb03259c765..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/Validate.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util; - -public class Validate { - public static int validatePositiveInteger(String name, String s) throws NumberFormatException { - int i = 0; - - try { - i = Integer.parseInt(s); - } catch (NumberFormatException e) { - throw new NumberFormatException(name + " is not a valid number"); - } - - if (i <= 0) { - throw new NumberFormatException(name + " 
must be greater than zero"); - } - - return i; - } - - public static int validateInteger(String name, String s) throws NumberFormatException { - int i = 0; - - try { - i = Integer.parseInt(s); - } catch (NumberFormatException e) { - throw new NumberFormatException(name + " is not a valid number"); - } - - return i; - } - - public static double validatePositiveDouble(String name, String s) throws NumberFormatException { - double d = 0; - - try { - d = Double.parseDouble(s); - } catch (NumberFormatException e) { - throw new NumberFormatException(name + " is not a valid number"); - } - - if (d <= 0) { - throw new NumberFormatException(name + " must be greater than zero"); - } - - return d; - } - - public static double validateDouble(String name, String s) throws NumberFormatException { - double d = 0; - - try { - d = Double.parseDouble(s); - } catch (NumberFormatException e) { - throw new NumberFormatException(name + " is not a valid number"); - } - - return d; - } - - public static void validate(boolean b, String errorMsg) throws Exception { - if (!b) { - throw new Exception(errorMsg); - } - } - - public static void validateDouble(String name, double value, double min, double max, boolean inclusiveMin, - boolean inclusiveMax) throws Exception { - if (Double.isNaN(value)) { - throw new Exception(name + " may not be NaN"); - } - - if (inclusiveMin && value < min) { - throw new Exception(name + " must be greater than or equal to " + min); - } - - if (!inclusiveMin && value <= min) { - throw new Exception(name + " must be greater than " + min); - } - - if (inclusiveMax && value > max) { - throw new Exception(name + " must be less than or equal to " + max); - } - - if (!inclusiveMax && value >= max) { - throw new Exception(name + " must be less than " + max); - } - } - - public static void validateInteger(String name, int value, int min, int max, boolean inclusiveMin, - boolean inclusiveMax) throws Exception { - if (inclusiveMin && value < min) { - throw new Exception(name + " must be greater than or equal to " + min); - } - - if (!inclusiveMin && value <= min) { - throw new Exception(name + " must be greater than " + min); - } - - if (inclusiveMax && value > max) { - throw new Exception(name + " must be less than or equal to " + max); - } - - if (!inclusiveMax && value >= max) { - throw new Exception(name + " must be less than " + max); - } - } -} diff --git a/FishUtil/src/main/java/io/deephaven/util/formatters/ISO8601.java b/FishUtil/src/main/java/io/deephaven/util/formatters/ISO8601.java deleted file mode 100644 index b5f6a2da220..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/formatters/ISO8601.java +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util.formatters; - -import io.deephaven.configuration.Configuration; - -import java.text.DateFormat; -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.TimeZone; - -public class ISO8601 { - private static final ThreadLocal toISO8601Cache = new ThreadLocal(); - private static final ThreadLocal timeISO8601Cache = new ThreadLocal(); - private static final ThreadLocal dateISO8601Cache = new ThreadLocal(); - private static TimeZone TZ_SERVER = null; - - public static synchronized TimeZone serverTimeZone() { - if (TZ_SERVER == null) { - TZ_SERVER = Configuration.getInstance().getServerTimezone(); - } - return TZ_SERVER; - } - - public static DateFormat ISO8601DateFormat(TimeZone tz) { - SimpleDateFormat df = new 
SimpleDateFormat("yyyy-MM-dd"); - df.setTimeZone(tz); - return df; - } - - public static DateFormat ISE8601TimeFormat() { - return ISO8601TimeFormat(serverTimeZone()); - } - - public static DateFormat ISO8601TimeFormat(TimeZone tz) { - SimpleDateFormat df = new SimpleDateFormat("HH:mm:ss.SSSZ"); - df.setTimeZone(tz); - return df; - } - - public static DateFormat ISE8601DateTimeFormat() { - return ISO8601DateTimeFormat(serverTimeZone()); - } - - public static DateFormat ISO8601DateTimeFormat(TimeZone tz) { - SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); - df.setTimeZone(tz); - return df; - } - - public static String toISO8601(long millis) { - return toISO8601(new Date(millis), serverTimeZone()); - } - - public static String toISO8601(Date d) { - return toISO8601(d, serverTimeZone()); - } - - public static String toISO8601(long millis, TimeZone tz) { - return toISO8601(new Date(millis), tz); - } - - public static String toISO8601(Date d, TimeZone tz) { - DateFormat df = toISO8601Cache.get(); - if (df == null) { - df = ISO8601DateTimeFormat(tz); - toISO8601Cache.set(df); - } else { - df.setTimeZone(tz); - } - return df.format(d); - } - - public static String timeISO8601(long millis) { - return timeISO8601(new Date(millis), serverTimeZone()); - } - - public static String timeISO8601(Date d) { - return timeISO8601(d, serverTimeZone()); - } - - public static String timeISO8601(long millis, TimeZone tz) { - return timeISO8601(new Date(millis), tz); - } - - public static String timeISO8601(Date d, TimeZone tz) { - DateFormat df = timeISO8601Cache.get(); - if (df == null) { - df = ISO8601TimeFormat(tz); - timeISO8601Cache.set(df); - } else { - df.setTimeZone(tz); - } - return df.format(d); - } - - public static String dateISO8601(long millis) { - return dateISO8601(new Date(millis), serverTimeZone()); - } - - public static String dateISO8601(Date d) { - return dateISO8601(d, serverTimeZone()); - } - - public static String dateISO8601(long millis, TimeZone tz) { - return dateISO8601(new Date(millis), tz); - } - - public static String dateISO8601(Date d, TimeZone tz) { - DateFormat df = dateISO8601Cache.get(); - if (df == null) { - df = ISO8601DateFormat(tz); - dateISO8601Cache.set(df); - } else { - df.setTimeZone(tz); - } - return df.format(d); - } -} diff --git a/FishUtil/src/main/java/io/deephaven/util/signals/SignalSender.java b/FishUtil/src/main/java/io/deephaven/util/signals/SignalSender.java deleted file mode 100644 index c039e5a79ee..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/signals/SignalSender.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util.signals; - -import io.deephaven.base.verify.Require; -import io.deephaven.io.logger.Logger; -import io.deephaven.io.logger.StreamLoggerImpl; -import org.jetbrains.annotations.NotNull; - -import java.io.IOException; - -public class SignalSender { - - private final Logger log; - private final boolean useNative; - - public SignalSender(@NotNull final Logger log, final boolean useNative) { - this.log = log; - this.useNative = useNative; - if (useNative) { - SignalUtils.loadNative(); - } - } - - /** - * Helper method - sends SIQQUIT to a process. If this process is a JVM, it will send a stack dump to stdout. 
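Typical use, mirroring the main() harness at the bottom of this class (the pid is hypothetical):

    SignalSender sender = new SignalSender(new StreamLoggerImpl(), false /* use /bin/kill, not JNI */);
    boolean ok = sender.sendQuit(12345);  // SIGQUIT: a HotSpot JVM responds with a thread dump on stdout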
- * - * @param processId The process ID to send the signal to - * @return true on success, false on error - */ - public boolean sendQuit(final int processId) { - return sendSignal(processId, SignalUtils.Signal.SIGQUIT); - } - - /** - * Helper method - sends SIQKILL to a process. - * - * @param processId The process ID to send the signal to - * @return true on success, false on error - */ - public boolean kill(final int processId) { - return sendSignal(processId, SignalUtils.Signal.SIGKILL); - } - - /** - * Helper method - sends SIGCONT to a process. - * - * @param processId The process ID to send the signal to - * @return true on success, false on error - */ - public boolean resume(final int processId) { - return sendSignal(processId, SignalUtils.Signal.SIGCONT); - } - - /** - * Helper method - sends SIGSTOP to a process. - * - * @param processId The process ID to send the signal to - * @return true on success, false on error - */ - public boolean suspend(final int processId) { - return sendSignal(processId, SignalUtils.Signal.SIGSTOP); - } - - /** - * Send the specified signal to the target process. - * - * @param processId The process ID to send the signal to - * @param signal The signal to send - * @return true on success, false on error - */ - private boolean sendSignal(final int processId, final SignalUtils.Signal signal) { - Require.gtZero(processId, "processId"); // Don't want to allow fancier usages for now. See 'man -s 2 kill'. - Require.neqNull(signal, "signal"); - - final int rc; - if (useNative) { - rc = SignalUtils.sendSignalNative(processId, signal.getSignalNumber()); - } else { - try { - rc = SignalUtils.sendSignalWithBinKill(processId, signal.getSignalName()); - } catch (IOException e) { - log.error().append("sendSignal: Exception while using /bin/kill to send ").append(signal.toString()) - .append(" to processId ").append(processId).append(": ").append(e).endl(); - return false; - } - } - - if (rc == 0) { - return true; - } - log.error().append("sendSignal: Error while using ").append(useNative ? "native code" : "/bin/kill") - .append(" to send ").append(signal.toString()) - .append(" to processId ").append(processId) - .append(": kill returned ").append(rc).endl(); - return false; - } - - /** - * Simple program for functionality testing. - * - * @param args [ <pid> <signal> <use native?> ] - */ - public static void main(final String... args) { - final int pid = Integer.parseInt(args[0]); - final SignalUtils.Signal signal = SignalUtils.Signal.valueOf(args[1]); - final boolean useNative = Boolean.valueOf(args[2]); - new SignalSender(new StreamLoggerImpl(), useNative).sendSignal(pid, signal); - } -} diff --git a/FishUtil/src/main/java/io/deephaven/util/signals/SignalUtils.java b/FishUtil/src/main/java/io/deephaven/util/signals/SignalUtils.java deleted file mode 100644 index 10aaf8bcd06..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/signals/SignalUtils.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util.signals; - -import io.deephaven.util.OSUtil; - -import java.io.IOException; - -public class SignalUtils { - - /** - * What operating system does the JVM think we're on? - */ - private static final OSUtil.OSFamily OPERATING_SYSTEM = OSUtil.getOSFamily(); - - /** - * Placeholder value when we don't know a signal's number on the current OS. - */ - private static final int UNDEFINED_SIGNAL_NUMBER = Integer.MIN_VALUE; - - /** - * Supported signals. 
Be careful when adding new entries - as you can see, signal numbers don't always line up - * across operating systems. - */ - public enum Signal { - SIGINT("int", 2, 2, 2), SIGTERM("term", 15, 15, 15), SIGQUIT("quit", 3, 3, 3), SIGKILL("kill", 9, 9, - 9), SIGSTOP("stop", 19, 23, 17), SIGCONT("cont", 18, 25, 19); - - private final String signalName; - private final int signalNumber; - - Signal(final String signalName, final int linuxSignalNumber, final int solarisSignalNumber, - final int macOsSignalNumber) { - this.signalName = signalName; - switch (OPERATING_SYSTEM) { - case LINUX: - signalNumber = linuxSignalNumber; - break; - case MAC_OS: - signalNumber = macOsSignalNumber; - break; - case SOLARIS: - signalNumber = solarisSignalNumber; - break; - case WINDOWS: - default: - signalNumber = UNDEFINED_SIGNAL_NUMBER; - break; - } - } - - public String getSignalName() { - return signalName; - } - - public int getSignalNumber() { - if (signalNumber == UNDEFINED_SIGNAL_NUMBER) { - throw new UnsupportedOperationException(this + " is undefined on " + OPERATING_SYSTEM); - } - return signalNumber; - } - } - - /** - * Use /bin/kill to send a signal by name. - * - * @param processId The process ID to send the signal to - * @param signalName The name of the signal to send - * @return The exit value of the child process. - */ - @SuppressWarnings("WeakerAccess") - public static int sendSignalWithBinKill(final int processId, final String signalName) throws IOException { - final ProcessBuilder pb = new ProcessBuilder("/bin/kill", "-s", signalName, Integer.toString(processId)); - final Process p = pb.start(); - - try { - p.getErrorStream().close(); - p.getInputStream().close(); - p.getOutputStream().close(); - } catch (IOException e) { - throw new AssertionError("sendSignalWithBinKill: unexpected exception while closing child process streams: " - + e.getMessage(), e); - } - - while (true) { - try { - return p.waitFor(); - } catch (InterruptedException ignored) { - } - } - } - - /** - * Ensure that libraries have been loaded, before using sendSignalNative(...). - */ - @SuppressWarnings("WeakerAccess") - public static void loadNative() { - System.loadLibrary("FishCommon"); - } - - /** - * Use native code to send a signal by number. - * - * @param processId The process ID to send the signal to - * @param signalNumber The signal number to send - * @return The return value of kill(2). - */ - public static native int sendSignalNative(final int processId, final int signalNumber); -} diff --git a/FishUtil/src/main/java/io/deephaven/util/threads/ThreadDump.java b/FishUtil/src/main/java/io/deephaven/util/threads/ThreadDump.java deleted file mode 100644 index 344cc6e517f..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/threads/ThreadDump.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util.threads; - -import io.deephaven.io.logger.Logger; - -import java.io.PrintStream; -import java.lang.management.ManagementFactory; -import java.lang.management.ThreadInfo; -import java.lang.management.ThreadMXBean; -import java.util.function.Consumer; - -/** - * A simple method for generating a Thread dump for this JVM; it doesn't do all the stuff that the kill -3 does; but you - * can easily run it from inside the JVM without having to send yourself a signal. 
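The overloads below cover the common sinks; for example:

    ThreadDump.threadDump(System.out);      // print directly
    String dump = ThreadDump.threadDump();  // or capture for a logger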
- */ -public class ThreadDump { - @SuppressWarnings("WeakerAccess") - public static void threadDump(final PrintStream out) { - doDump(out::print); - } - - public static void threadDump(final Logger logger) { - doDump(arg -> logger.info().append(arg).endl()); - } - - @SuppressWarnings("WeakerAccess") - public static String threadDump() { - final StringBuilder builder = new StringBuilder(); - doDump(builder::append); - return builder.toString(); - } - - private static void doDump(Consumer output) { - ThreadMXBean threadMXBean = ManagementFactory.getPlatformMXBean(ThreadMXBean.class); - - ThreadInfo[] threadInfos = threadMXBean.dumpAllThreads(true, true); - - for (ThreadInfo threadInfo : threadInfos) { - output.accept(threadInfo.toString()); - } - } - - public static void main(String[] args) { - threadDump(System.out); - } -} diff --git a/IO/src/main/java/io/deephaven/io/NioUtil.java b/IO/src/main/java/io/deephaven/io/NioUtil.java deleted file mode 100644 index 4e24b19ac38..00000000000 --- a/IO/src/main/java/io/deephaven/io/NioUtil.java +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io; - -import io.deephaven.base.LowGarbageArrayIntegerMap; -import io.deephaven.base.LowGarbageArrayList; -import io.deephaven.base.LowGarbageArraySet; -import io.deephaven.base.verify.Assert; -import io.deephaven.base.verify.Require; - -import java.lang.reflect.Field; -import java.nio.channels.Selector; -import java.nio.channels.spi.AbstractSelector; -import java.util.List; -import java.util.Set; - -// -------------------------------------------------------------------- -/** - * General utilities for NIO - */ -public class NioUtil { - - private static final String JAVA_8_SPEC_VERSION = "1.8"; - - // ---------------------------------------------------------------- - /** - * Use reflection to change the collection implementations so iteration operations used in the selector - * implementation will not produce garbage. - * - *
- * This is only applied when the system property {@code java.specification.version} is equal to "1.8". - * - *
- * We can do this because, by looking at the source code, we can tell that there are no simultaneous iterations so - * reusing one iterator is OK. Because of concurrent modification issues and thread safety issues, this is generally - * likely to be the case anyway. The implementation of selector is not likely to change between minor JDK revisions. - * A major JDK release might produce a rewrite, but in that case we can check the JDK version and apply the - * appropriate set of patches. - */ - public static Selector reduceSelectorGarbage(Selector selector) { - final String javaSpecificationVersion = System.getProperty("java.specification.version"); - if (JAVA_8_SPEC_VERSION.equals(javaSpecificationVersion)) { - return reduceSelectorGarbageImpl(selector); - } - return selector; - } - - private static Selector reduceSelectorGarbageImpl(Selector selector) { - try { - Class selectorImplClass = Class.forName("sun.nio.ch.SelectorImpl"); - Require.instanceOf(selector, "selector", selectorImplClass); - - Field cancelledKeysField = AbstractSelector.class.getDeclaredField("cancelledKeys"); - cancelledKeysField.setAccessible(true); - Set newCancelledKeys = new LowGarbageArraySet(); - cancelledKeysField.set(selector, newCancelledKeys); - - Field keysField = selectorImplClass.getDeclaredField("keys"); - keysField.setAccessible(true); - Field publicKeysField = selectorImplClass.getDeclaredField("publicKeys"); - publicKeysField.setAccessible(true); - Set newKeys = new LowGarbageArraySet(); - keysField.set(selector, newKeys); - publicKeysField.set(selector, newKeys); - - Field selectedKeysField = selectorImplClass.getDeclaredField("selectedKeys"); - selectedKeysField.setAccessible(true); - Field publicSelectedKeysField = selectorImplClass.getDeclaredField("publicSelectedKeys"); - publicSelectedKeysField.setAccessible(true); - Set newSelectedKeys = new LowGarbageArraySet(); - selectedKeysField.set(selector, newSelectedKeys); - publicSelectedKeysField.set(selector, newSelectedKeys); - - if (System.getProperty("os.name").startsWith("Windows") - && System.getProperty("java.vendor").startsWith("Oracle")) { - Class windowsSelectorImplClass = Class.forName("sun.nio.ch.WindowsSelectorImpl"); - Require.instanceOf(selector, "selector", windowsSelectorImplClass); - - Field threadsField = windowsSelectorImplClass.getDeclaredField("threads"); - threadsField.setAccessible(true); - List newThreads = new LowGarbageArrayList(); - threadsField.set(selector, newThreads); - - } else if (System.getProperty("os.name").startsWith("Linux")) { - Class ePollSelectorImplClass = Class.forName("sun.nio.ch.EPollSelectorImpl"); - Require.instanceOf(selector, "selector", ePollSelectorImplClass); - - Field fdToKeyField = ePollSelectorImplClass.getDeclaredField("fdToKey"); - fdToKeyField.setAccessible(true); - LowGarbageArrayIntegerMap newFdToKey = new LowGarbageArrayIntegerMap(); - fdToKeyField.set(selector, newFdToKey); - - } else if (System.getProperty("os.name").startsWith("SunOS")) { - Class devPollSelectorImplClass = Class.forName("sun.nio.ch.DevPollSelectorImpl"); - Require.instanceOf(selector, "selector", devPollSelectorImplClass); - - Field fdToKeyField = devPollSelectorImplClass.getDeclaredField("fdToKey"); - fdToKeyField.setAccessible(true); - LowGarbageArrayIntegerMap newFdToKey = new LowGarbageArrayIntegerMap(); - fdToKeyField.set(selector, newFdToKey); - } - - return selector; - } catch (final NoSuchFieldException | IllegalAccessException | ClassNotFoundException e) { - throw Assert.exceptionNeverCaught(e); - } - } -} diff 
--git a/IO/src/main/java/io/deephaven/io/sched/Job.java b/IO/src/main/java/io/deephaven/io/sched/Job.java deleted file mode 100644 index ead141f4164..00000000000 --- a/IO/src/main/java/io/deephaven/io/sched/Job.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io.sched; - -import io.deephaven.base.log.LogOutput; -import io.deephaven.base.log.LogOutputAppendable; - -import java.nio.channels.SelectableChannel; -import java.io.IOException; - -/** - * This is the base class for jobs that can be invoked by the scheduler. - */ -public abstract class Job implements LogOutputAppendable { - - // -------------------------------------------------------------------------- - // public interface - // -------------------------------------------------------------------------- - - /** - * This method is invoked by the scheduler when the job's channel becomes ready. - * - * @param channel the channel which has become ready - * @param readyOps the operations which can be performed on this channel without blocking - * @returns the modified readyOps after the invocation; if non-zero, the job will be invoked again with these - * @throws IOException - if something bad happens - */ - public abstract int invoke(SelectableChannel channel, int readyOps, Runnable handoff) throws IOException; - - /** - * This method is invoked if the job times out. - */ - public abstract void timedOut(); - - /** - * This method is called if the job is explicitly cancelled before it becomes ready or times out. - */ - public abstract void cancelled(); - - // -------------------------------------------------------------------------- - // scheduler state management - // -------------------------------------------------------------------------- - - // TODO: currently, we assume that the scheduler is a singleton, or at the least - // TODO: that no job will be used with more than one scheduler throughout its lifetime. - // TODO: If this changes, we will have to change the state pointer to a set. - - /** the link to the scheduler's state for this job */ - JobState state; - - /** return the state for the given scheduler, or null */ - final JobState getStateFor(Scheduler sched) { - return state; - } - - /** return or create the state for the given scheduler */ - final JobState makeStateFor(Scheduler sched) { - return state == null ? (state = new JobState(this)) : state; - } - - @Override - public LogOutput append(LogOutput logOutput) { - return logOutput.append(LogOutput.BASIC_FORMATTER, this); - } -} diff --git a/IO/src/main/java/io/deephaven/io/sched/JobState.java b/IO/src/main/java/io/deephaven/io/sched/JobState.java deleted file mode 100644 index 00be8941aee..00000000000 --- a/IO/src/main/java/io/deephaven/io/sched/JobState.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io.sched; - -import java.nio.channels.SelectableChannel; - -/** - * The per-scheduler state for a job. Note that this class is package-private. 
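For orientation, a minimal (hypothetical) Job subclass wiring up the three callbacks documented above; per the scheduler's contract, exactly one of them fires per installation, and a job must be reinstalled to run again:

    class ReadOnceJob extends Job {
        @Override
        public int invoke(SelectableChannel channel, int readyOps, Runnable handoff) throws IOException {
            if (handoff != null) {
                handoff.run();  // let the scheduler resume selecting while we work
            }
            // ... consume whatever is ready on the channel ...
            return 0;           // 0 => finished; non-zero readyOps => invoke again
        }
        @Override
        public void timedOut() { /* deadline expired before the channel was ready */ }
        @Override
        public void cancelled() { /* removed via cancelJob() before becoming ready */ }
    }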
- */ -class JobState implements Cloneable { - /** the job */ - final Job job; - - /** the update count for this job state */ - long updateClock = 0; - - /** the current deadline for this job */ - long deadline = Long.MAX_VALUE; - - /** the job's current position in the scheduler's timeout queue */ - int tqPos = 0; - - /** true, if this job has been invoked or has timed out */ - boolean gathered = false; - - /** true if the job has been forgotten after being dispatched and not reinstalled */ - boolean forgotten = false; - - /** true if this job has been explicitly cancelled */ - boolean cancelled = false; - - /** this is the channel we are waiting on in the selector */ - SelectableChannel waitChannel = null; - - /** the channel on which the job is ready to be dispatched, or null */ - SelectableChannel readyChannel = null; - - /** the operation set on which the job is ready to be dispatched, or zero */ - int readyOps = 0; - - /** the channel on which this job will select in the next scheduler loop */ - SelectableChannel nextChannel = null; - - /** the interest set on which this job will select in the next scheduler loop */ - int nextOps = 0; - - /** the timeout deadline of this job in the next scheduler loop */ - long nextDeadline = Long.MAX_VALUE; - - /** the nano-time when this job was last enqueued */ - long gatheredNanos = 0; - - /** constructor stores the back-link to the job */ - JobState(Job job) { - this.job = job; - } - - /** - * Clone this object - */ - public JobState clone() throws CloneNotSupportedException { - return (JobState) super.clone(); - } -} diff --git a/IO/src/main/java/io/deephaven/io/sched/JobStateTimeoutQueue.java b/IO/src/main/java/io/deephaven/io/sched/JobStateTimeoutQueue.java deleted file mode 100644 index 9c57851c20e..00000000000 --- a/IO/src/main/java/io/deephaven/io/sched/JobStateTimeoutQueue.java +++ /dev/null @@ -1,211 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io.sched; - -import io.deephaven.io.logger.Logger; - -import java.util.Set; - -/** - * A priority queue (heap) for JobState instances, ordered by their deadlines. Note that this class is package-private. - */ -class JobStateTimeoutQueue implements Cloneable { - private final Logger log; - - /** the queue storage */ - private JobState[] queue; - - /** the size of the queue (invariant: size < queue.length - 1) */ - private int size = 0; - - public JobStateTimeoutQueue(Logger log, int initialSize) { - this.log = log; - this.queue = new JobState[initialSize]; - } - - /** clone the queue (for testing) */ - public Object clone() throws CloneNotSupportedException { - JobStateTimeoutQueue q = (JobStateTimeoutQueue) super.clone(); - q.queue = new JobState[queue.length]; - for (int i = 1; i <= size; ++i) { - q.queue[i] = queue[i].clone(); - } - q.size = size; - return q; - } - - /** return the priority queue's size */ - int size() { - return size; - } - - /** Returns true if the priority queue contains no elements. 
*/ - boolean isEmpty() { - return size == 0; - } - - /** Adds a job to the timeout queue */ - void enter(JobState state, long deadline) { - state.deadline = deadline; - if (state.tqPos == 0) { - if (++size == queue.length) { - JobState[] newQueue = new JobState[2 * queue.length]; - System.arraycopy(queue, 0, newQueue, 0, size); - queue = newQueue; - } - queue[size] = state; - state.tqPos = size; - fixUp(size); - assert testInvariant("after fixUp in enter-add"); - } else { - assert queue[state.tqPos] == state; - int k = state.tqPos; - fixDown(k); - fixUp(k); - assert testInvariant("after fixDown/fixUp in enter-change"); - } - } - - /** Return the top of the timeout queue - the next timeout */ - JobState top() { - return queue[1]; - } - - /** Remove the top element from the timeout queue. */ - void removeTop() { - queue[1].tqPos = 0; - if (--size == 0) { - queue[1] = null; - } else { - queue[1] = queue[size + 1]; - queue[size + 1] = null; // Drop extra reference to prevent memory leak - queue[1].tqPos = 1; - fixDown(1); - } - assert testInvariant("after removeTop()"); - } - - /** remove an arbitrary element from the timeout queue */ - void remove(JobState state) { - int k = state.tqPos; - if (k != 0) { - assert queue[k] == state; - state.tqPos = 0; - if (k == size) { - queue[size--] = null; - } else { - queue[k] = queue[size]; - queue[k].tqPos = k; - queue[size--] = null; - fixDown(k); - fixUp(k); - assert testInvariant("after fixDown/fixUp in remove()"); - } - } - assert testInvariant("at end of remove()"); - } - - /** move queue[k] up the heap until its deadline is >= that of its parent. */ - private void fixUp(int k) { - if (k > 1) { - JobState state = queue[k]; - int j = k >> 1; - JobState parent = queue[j]; - if (parent.deadline > state.deadline) { - queue[k] = parent; - parent.tqPos = k; - k = j; - j = k >> 1; - while (k > 1 && (parent = queue[j]).deadline > state.deadline) { - queue[k] = parent; - parent.tqPos = k; - k = j; - j = k >> 1; - } - queue[k] = state; - state.tqPos = k; - } - } - } - - /** move queue[k] down the heap until its deadline is <= those of its children.
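fixUp above and fixDown below rely on the standard 1-based binary-heap index arithmetic (slot 0 is unused, which is what lets tqPos == 0 mean "not enqueued"):

    int parent = k >> 1;        // k / 2
    int left   = k << 1;        // 2 * k
    int right  = (k << 1) + 1;  // 2 * k + 1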
*/ - private void fixDown(int k) { - int j = k << 1; - if (j <= size) { - JobState state = queue[k], child = queue[j], child2; - if (j < size && (child2 = queue[j + 1]).deadline < child.deadline) { - child = child2; - j++; - } - if (child.deadline < state.deadline) { - queue[k] = child; - child.tqPos = k; - k = j; - j = k << 1; - while (j <= size) { - child = queue[j]; - if (j < size && (child2 = queue[j + 1]).deadline < child.deadline) { - child = child2; - j++; - } - if (child.deadline >= state.deadline) { - break; - } - queue[k] = child; - child.tqPos = k; - k = j; - j = k << 1; - } - queue[k] = state; - state.tqPos = k; - } - } - } - - boolean testInvariantAux(int i, String what) { - if (i <= size) { - if (queue[i].tqPos != i) { - log.error().append(what).append(": queue[").append(i).append("].tqPos=").append(queue[i].tqPos) - .append(" != ").append(i).endl(); - } - if (!testInvariantAux(i * 2, what)) { - return false; - } - if (!testInvariantAux(i * 2 + 1, what)) { - return false; - } - if (i > 1) { - if (queue[i].deadline < queue[i / 2].deadline) { - log.error().append(what).append(": child[").append(i).append("]=").append(queue[i].deadline) - .append(" < parent[").append((i / 2)).append("]=").append(queue[i / 2].deadline).endl(); - return false; - } - } - } - return true; - } - - boolean testInvariant(String what) { - boolean result = testInvariantAux(1, what); - if (result) { - for (int i = size + 1; i < queue.length; ++i) { - if (queue[i] != null) { - log.error().append(what).append(": size = ").append(size).append(", child[").append(i).append("]=") - .append(queue[i].deadline).append(" != null").endl(); - result = false; - } - } - } - if (result) { - // log.info("timeoutQueue.testInvariant: OK "+what); - } - return result; - } - - void junitGetAllJobs(Set jobs) { - for (int i = 1; i <= size; ++i) { - jobs.add(queue[i].job); - } - } -} diff --git a/IO/src/main/java/io/deephaven/io/sched/Scheduler.java b/IO/src/main/java/io/deephaven/io/sched/Scheduler.java deleted file mode 100644 index f38cde3c881..00000000000 --- a/IO/src/main/java/io/deephaven/io/sched/Scheduler.java +++ /dev/null @@ -1,199 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io.sched; - -import java.nio.channels.*; -import java.util.*; -import java.util.concurrent.Executor; - -/** - * This class provides a singleton wrapper for scheduling invocations of multiple Job instances from a single thread. - * Job are scheduled in accordance with an interest set on a java.nio.Channel, deadline based time scheduling, and/or - * custom criteria defined by the Jobs' implementation of the ready() method. - * - * Jobs are instantiated by the application and made known to the scheduler by one of the install() methods. Once the - * job is installed, the scheduler will call exactly one of its invoke(), timedOut() or cancelled() methods exactly - * once. After this, the scheduler forgets about the job completely, unless the application installs it again. - */ -public interface Scheduler { - - // -------------------------------------------------------------------------- - // public interface - // -------------------------------------------------------------------------- - - /** - * Return the scheduler's idea of the current time. - */ - public long currentTimeMillis(); - - /** - * Install a job in association with a channel and an interest set. 
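A representative installation with hypothetical values: dispatch the job when the channel is readable, or time it out five seconds from now, whichever comes first:

    scheduler.installJob(job, scheduler.currentTimeMillis() + 5_000, channel, SelectionKey.OP_READ);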
- */ - public void installJob(Job job, long deadline, SelectableChannel channel, int interest); - - /** - * Install a job with only an associated deadline (removing any channel association). - */ - public void installJob(Job job, long deadline); - - /** - * Cancel a job, making the scheduler forget it completely. - */ - public void cancelJob(Job job); - - /** - * Wait for jobs to become ready, then invoke() them all. This method will form the core of the main loop of a - * scheduler-driven application. The method first waits until: - * - * -- the given timeout expires, -- the earliest job-specific timeout expires, or -- one or more jobs become ready - * - * If jobs have become ready, then the entire ready set will be invoked. If any job throws an uncaught exception, - * the job's terminated() method will be called and the job deregistered. This does not abort the invocation of the - * remaining jobs. The return value is then the number of jobs that were invoked. - * - * If no jobs are ready and any job-specific timeouts expire, the associated jobs' timedOut() methods are called. - * The return value is the negative of the number of expired timeouts. - * - * If the time given by the timeout argument expires, then zero is returned. - * - * Note that this method is not synchronized. The application must ensure that it is never called concurrently by - * more than one thread. - * - * @return true, if some job was dispatched - */ - public boolean work(long timeout, Runnable handoff); - - /** - * Shut down the scheduler, calling close() on the underlying Selector. - */ - public void close(); - - /** - * Return true if the scheduler is closing or closed. - */ - public boolean isClosed(); - - // -------------------------------------------------------------------------- - // test support methods - // -------------------------------------------------------------------------- - - /** - * Return a reference to the selector. - */ - public Selector junitGetSelector(); - - /** - * Return all jobs known to the scheduler, in whatever state. - */ - public Set junitGetAllJobs(); - - /** - * Return the contents of the timeout queue, in deadline order. - * - * @return the jobs in the timeout queue - */ - public ArrayList junitGetTimeoutQueue(); - - /** - * Return the selection keys currently known to the scheduler. - */ - public ArrayList junitGetAllKeys(); - - /** - * Return the selection keys currently in the scheduler's ready set. - */ - public ArrayList junitGetReadyKeys(); - - /** - * Return a map containing all channels and the jobs to which they are associated. - */ - public Map junitGetChannelsAndJobs(); - - /** - * Return true if the timeout queue invariant holds.
- */ - public boolean junitTestTimeoutQueueInvariant(); - - public class Null implements Scheduler { - @Override - public long currentTimeMillis() { - return 0; - } - - @Override - public void installJob(Job job, long deadline, SelectableChannel channel, int interest) {} - - @Override - public void installJob(Job job, long deadline) {} - - @Override - public void cancelJob(Job job) {} - - @Override - public boolean work(long timeout, Runnable handoff) { - return false; - } - - @Override - public void close() {} - - @Override - public boolean isClosed() { - return false; - } - - @Override - public Selector junitGetSelector() { - return null; - } - - @Override - public Set junitGetAllJobs() { - return null; - } - - @Override - public ArrayList junitGetTimeoutQueue() { - return null; - } - - @Override - public ArrayList junitGetAllKeys() { - return null; - } - - @Override - public ArrayList junitGetReadyKeys() { - return null; - } - - @Override - public Map junitGetChannelsAndJobs() { - return null; - } - - @Override - public boolean junitTestTimeoutQueueInvariant() { - return false; - } - } - - public final class ExecutorAdaptor implements Executor { - final Scheduler scheduler; - - public ExecutorAdaptor(final Scheduler scheduler) { - this.scheduler = scheduler; - } - - @Override - public void execute(final Runnable runnable) { - scheduler.installJob(new TimedJob() { - @Override - public final void timedOut() { - runnable.run(); - } - }, 0); - } - } -} diff --git a/IO/src/main/java/io/deephaven/io/sched/TimedJob.java b/IO/src/main/java/io/deephaven/io/sched/TimedJob.java deleted file mode 100644 index 9fb8c2899fb..00000000000 --- a/IO/src/main/java/io/deephaven/io/sched/TimedJob.java +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io.sched; - -import io.deephaven.base.log.LogOutput; - -import java.nio.channels.SelectableChannel; - -/** - * This is the base class for jobs which are only interested in timing events. It provides default invoke() and - * cancelled() method which do nothing. - */ -public abstract class TimedJob extends Job { - public int invoke(SelectableChannel channel, int readyOps, Runnable handoff) { - if (handoff != null) { - handoff.run(); - } - return 0; - } - - public void cancelled() { - // do nothing - } - - @Override - public LogOutput append(LogOutput logOutput) { - return logOutput.append(LogOutput.BASIC_FORMATTER, this); - } -} diff --git a/IO/src/main/java/io/deephaven/io/sched/YASchedulerImpl.java b/IO/src/main/java/io/deephaven/io/sched/YASchedulerImpl.java deleted file mode 100644 index f10c7fc70b8..00000000000 --- a/IO/src/main/java/io/deephaven/io/sched/YASchedulerImpl.java +++ /dev/null @@ -1,979 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io.sched; - -import io.deephaven.base.RingBuffer; -import io.deephaven.base.stats.*; -import io.deephaven.io.logger.Logger; - -import java.io.IOException; -import java.io.UncheckedIOException; -import java.nio.channels.*; -import java.util.*; - -/** - * Yet Another implementation of the Scheduler interface -- the best one yet. - * - * This class provides a singleton wrapper for scheduling invocations of multiple Job instances from a single thread. - * Job are scheduled in accordance with an interest set on a java.nio.Channel, deadline based time scheduling, and/or - * custom criteria defined by the Jobs' implementation of the ready() method. 
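The ExecutorAdaptor defined above is the simplest client of this contract: each Runnable becomes a TimedJob installed with an immediate deadline. For example:

    Executor executor = new Scheduler.ExecutorAdaptor(scheduler);
    executor.execute(() -> System.out.println("runs on the scheduler's work() thread"));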
- * - * Jobs are instantiated by the application and made known to the scheduler by one of the installJob() methods. A - * previously installed job can be removed from the scheduler with the cancelJob() method. The installJob() and - * cancelJob() methods are thread-safe. It is allowed to call installJob() on a job that is already installed, or - * cancelJob() on a job that is not currently in the scheduler. In the former case, the channel and/or deadline will be - * updated accordingly; in the latter, the call will be ignored. - * - * Once the job is installed, the scheduler promises to call exactly one of its invoke(), timedOut() or cancelled() - * methods exactly once. The invoke() method will be called only if the job was (last) installed with a channel and - * non-zero interest set. The timedOut() method can be called for any job, since all jobs have an associated deadline - * (although the timeout value can be set to Integer.MAX_VALUE to make it effectively infinite). The cancelled() method - * is called only if the job is removed by a cancelJob() call before either the channel is ready or the deadline expires. - * - * After the job is called back, the scheduler forgets about the job completely, unless the application installs it - * again. That is, from the scheduler's point of view *all* jobs are one-shots. This design is based on the observation - * that it is easier to reschedule jobs on every invocation in the style of a tail-recursive loop, as opposed to - * maintaining persistent state in the scheduler. - * - * The application must drive the scheduler by calling the work() method in a loop. The work() method is *not* - * thread-safe; the application must either call it from a single thread or synchronize calls accordingly. - */ -public class YASchedulerImpl implements Scheduler { - - /** the scheduler name, for debug and stats output */ - protected final String name; - - /** the java.nio.Selector instance */ - private final Selector selector; - - /** the logger */ - protected final Logger log; - - /** lock for internal state */ - private final Object stateLock = new Object(); - - /** if non-zero, there is a select() in progress that will terminate at the specified deadline */ - private long selectingTill = 0; - - private volatile boolean spinWakeSelector = false; - - /** the update clock for this scheduler */ - private long updateClock = 1; - - /** the waiting jobs, ordered by deadline */ - private final JobStateTimeoutQueue timeoutQueue; - - /** invokable/timed-out jobs are stored here */ - private RingBuffer dispatchQueue = new RingBuffer(128); - - /** the list of jobs which might have changed since the last update() call */ - private ArrayList changedStates = new ArrayList(128); - - /** add a state to the changedStates list */ - private boolean changedState(JobState state) { - if (state.updateClock < updateClock) { - state.updateClock = updateClock; - changedStates.add(state); - return true; - } - - // Assert.eqTrue(isInChangedStates(state), "isInChangedStates(state)"); // temporary - - return false; - } - - private boolean isInChangedStates(JobState state) { - final int L = changedStates.size(); - for (int i = 0; i < L; ++i) { - if (state == changedStates.get(i)) { - return true; - } - } - return false; - } - - /** if there are lots of tiny jobs, taking timing measurements may be time consuming.
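That cost is what the doTimingStats flag below guards; when enabled, the mark() helper converts each nanosecond delta to microseconds with round-to-nearest:

    long deltaNanos = 1_499;                   // elapsed since the previous mark
    long micros = (deltaNanos + 500) / 1000;   // 1_499ns -> 1us; 1_500ns -> 2us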
*/ - private final boolean doTimingStats; - - private final boolean doSpinSelect; - - /** time base for loop duration measurements */ - private long lastNanos = 0; - - private void mark(Value v) { - if (doTimingStats) { - long t = System.nanoTime(); - if (lastNanos != 0) { - v.sample((t - lastNanos + 500) / 1000); - } - lastNanos = t; - } - } - - /** have we been closed? */ - private volatile boolean isClosed = false; - - // statistics - private Value invokeCount; - private Value timeoutCount; - private Value selectDuration; - private Value workDuration; - private Value gatheredDuration; - private Value channelInstalls; - private Value timedInstalls; - private Value jobCancels; - private Value jobUpdates; - private Value keyUpdates; - private Value keyOrphans; - private Value selectorWakeups; - private Value channelInterestWakeups; - private Value channelTimeoutWakeups; - private Value plainTimeoutWakeups; - private Value cancelWakeups; - - /** - * The constructor. - */ - public YASchedulerImpl(Selector selector, Logger log) throws IOException { - this("Scheduler", selector, log); - } - - public YASchedulerImpl(String name, Selector selector, Logger log) throws IOException { - this(name, selector, log, true, false); - } - - public YASchedulerImpl(String name, Selector selector, Logger log, boolean doTimingStats, boolean doSpinSelect) { - this.name = name; - this.selector = selector; - this.log = log; - this.doTimingStats = doTimingStats; - this.doSpinSelect = doSpinSelect; - - this.timeoutQueue = new JobStateTimeoutQueue(log, 1024); - - this.invokeCount = Stats.makeItem(name, "invokeCount", Counter.FACTORY, - "The number of jobs invoked for I/O").getValue(); - this.timeoutCount = Stats.makeItem(name, "timeoutCount", Counter.FACTORY, - "The number of jobs that have timed out").getValue(); - this.selectDuration = Stats.makeItem(name, "SelectDuration", State.FACTORY, - "The number of microseconds spent in select()").getValue(); - this.workDuration = Stats.makeItem(name, "WorkDuration", State.FACTORY, - "The number of microseconds between successive select() calls").getValue(); - this.gatheredDuration = Stats.makeItem(name, "GatheredDuration", State.FACTORY, - "The number of microseconds jobs spend waiting after being gathered").getValue(); - this.channelInstalls = Stats.makeItem(name, "channelInstalls", Counter.FACTORY, - "The number of installJob() calls with a channel").getValue(); - this.timedInstalls = Stats.makeItem(name, "timedInstalls", Counter.FACTORY, - "The number of installJob() calls with just a timeout").getValue(); - this.jobCancels = Stats.makeItem(name, "jobCancels", Counter.FACTORY, - "The number of cancelJob() calls").getValue(); - this.jobUpdates = Stats.makeItem(name, "jobUpdates", Counter.FACTORY, - "The number of updates applied to the job state pre- and post-select").getValue(); - this.keyUpdates = Stats.makeItem(name, "keyUpdates", Counter.FACTORY, - "The number of times an NIO SelectionKey was updated with non-zero interest").getValue(); - this.keyOrphans = Stats.makeItem(name, "keyOrphans", Counter.FACTORY, - "The number of times an NIO SelectionKey's interest was cleared").getValue(); - this.selectorWakeups = Stats.makeItem(name, "selectorWakeups", Counter.FACTORY, - "The number of times the selector had to be woken up").getValue(); - - this.channelInterestWakeups = Stats.makeItem(name, "channelInterestWakeups", Counter.FACTORY, - "The number of selector wakeups due to a change in a channel's interest set").getValue(); - this.channelTimeoutWakeups = 
-                Stats.makeItem(name, "channelTimeoutWakeups", Counter.FACTORY,
-                        "The number of selector wakeups due to a channel's timeout becoming the earliest").getValue();
-        this.plainTimeoutWakeups = Stats.makeItem(name, "plainTimeoutWakeups", Counter.FACTORY,
-                "The number of selector wakeups due to a plain timeout becoming the earliest").getValue();
-        this.cancelWakeups = Stats.makeItem(name, "cancelWakeups", Counter.FACTORY,
-                "The number of selector wakeups due to a job cancellation").getValue();
-    }
-
-    /**
-     * Return the scheduler's idea of the current time.
-     */
-    public long currentTimeMillis() {
-        return System.currentTimeMillis();
-    }
-
-    /**
-     * Install a job in association with a channel and an interest set.
-     */
-    public void installJob(Job job, long deadline, SelectableChannel channel, int interest) {
-        synchronized (stateLock) {
-            JobState state = job.makeStateFor(this);
-            SelectionKey key = channel.keyFor(selector);
-
-            // see if we will need to wake up the selector
-            boolean wakeup = false;
-            if (key == null || !key.isValid()) {
-                wakeup = true;
-            } else if (deadline < selectingTill) {
-                wakeup = true;
-                channelTimeoutWakeups.sample(1);
-            } else if (key.interestOps() != interest && (channel != state.nextChannel || interest != state.nextOps)) {
-                wakeup = true;
-                channelInterestWakeups.sample(1);
-            }
-
-            state.nextChannel = channel;
-            state.nextOps = interest;
-            state.nextDeadline = deadline;
-            state.cancelled = false;
-            state.forgotten = false;
-            changedState(state);
-
-            if (log.isDebugEnabled()) {
-                log.debug().append(name).append(" installed job ").append(job)
-                        .append(", d=").append(deadline)
-                        .append(", ni=").append(state.nextOps)
-                        // .append(", k=").append(key)
-                        .append(", ki=").append((key == null || !key.isValid() ? 0 : key.interestOps()))
-                        .append(", w=").append(wakeup)
-                        .endl();
-            }
-
-            if (wakeup) {
-                maybeWakeSelector();
-            }
-
-            // must always wake if doing spin select since we aren't setting selectingTill
-            else if (doSpinSelect) {
-                spinWakeSelector = true;
-            }
-
-            channelInstalls.sample(1);
-        }
-    }
-
-    /**
-     * Install a job with only an associated deadline (removing any channel association).
-     */
-    public void installJob(Job job, long deadline) {
-        synchronized (stateLock) {
-            JobState state = job.makeStateFor(this);
-            state.nextChannel = null;
-            state.nextOps = 0;
-            state.nextDeadline = deadline;
-            state.cancelled = false;
-            state.forgotten = false;
-            final boolean changed = changedState(state);
-
-            // Note: We don't need to be concerned with waking up due to channelInterest changes, since
-            // we would have to be reducing the interest set which can only lead to a later wakeup time.
-
-            // if the new deadline is earlier than the current top, wake up the selector
-            boolean wakeup = false;
-            if (deadline < selectingTill) {
-                wakeup = true;
-                plainTimeoutWakeups.sample(1);
-                maybeWakeSelector();
-            }
-
-            // must always wake if doing spin select since we aren't setting selectingTill
-            else if (doSpinSelect) {
-                spinWakeSelector = true;
-            }
-
-            if (log.isDebugEnabled()) {
-                log.debug().append(name).append(" installed job ").append(job)
-                        .append(", d=").append(deadline)
-                        .append(", w=").append(wakeup)
-                        .append(", c=").append(changed)
-                        .endl();
-            }
-
-            timedInstalls.sample(1);
-        }
-    }
-
-    /**
-     * Cancel a job with the scheduler.
-     *
-     * @param job the job to be cancelled.
- */ - public void cancelJob(Job job) { - synchronized (stateLock) { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" explicitly cancelling ").append(job) - .append(" in YAScheduler.cancelJob").endl(); - } - JobState state = job.getStateFor(this); - if (state != null) { - state.nextChannel = null; - state.nextOps = 0; - state.nextDeadline = 0; - state.cancelled = true; - state.forgotten = false; - changedState(state); - - if (state.waitChannel != null) { - cancelWakeups.sample(1); - maybeWakeSelector(); - } - jobCancels.sample(1); - } - } - } - - /** - * drop the association of a state with a channel - */ - private void dropChannel(JobState state) { - if (state.waitChannel != null) { - SelectionKey key = state.waitChannel.keyFor(selector); - try { - if (key != null && key.isValid() && key.attachment() == state) { - key.attach(null); - if (key.interestOps() != 0) { - key.interestOps(0); - if (log.isDebugEnabled()) { - log.debug().append(name).append(" setting interest on orphaned key ").append(key.toString()) - .append(" to 0").endl(); - } - keyUpdates.sample(1); - } - } - } catch (CancelledKeyException x) { - // ignore it - if (log.isDebugEnabled()) { - log.info().append(name).append(" got CancelledKeyException while dropping channel ") - .append(state.waitChannel.toString()).endl(); - } - } - state.waitChannel = null; - } - } - - /** - * associate a channel with a state - */ - private boolean grabChannel(JobState state) { - try { - SelectionKey key = state.nextChannel.keyFor(selector); - if (key == null) { - key = state.nextChannel.register(selector, state.nextOps, state); - log.debug().append(name).append(" update ").append(state.job) - .append(": registered channel ").append(state.nextChannel.toString()) - .append(", ni=").append(state.nextOps) - .append(", k=").append(key.toString()) - .endl(); - } else { - key.attach(state); - if (key.interestOps() != state.nextOps) { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" update ").append(state.job) - .append(": setting interest on key ").append(key.toString()).append(" to ") - .append(state.nextOps) - .endl(); - } - key.interestOps(state.nextOps); - keyUpdates.sample(1); - } else { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" update ").append(state.job) - .append(": interest on key ").append(key.toString()).append(" already at ") - .append(state.nextOps) - .endl(); - } - } - } - if (state.waitChannel != null && state.waitChannel != state.nextChannel) { - SelectionKey waitKey = state.waitChannel.keyFor(selector); - if (waitKey != null && waitKey.attachment() == state) { - try { - waitKey.interestOps(0); - } catch (CancelledKeyException x) { - // ignore this - } - } - } - state.waitChannel = state.nextChannel; - return true; - } catch (ClosedChannelException x) { - // fall through - } catch (CancelledKeyException x) { - // fall through - } - state.waitChannel = null; - log.error().append(name).append(" tried to register ").append(state.job).append(" on closed channel ") - .append(state.nextChannel.toString()).endl(); - return false; - } - - /** - * Apply changes to the job states. 
-     *
-     * NOTE: assumes that stateLock is held
-     */
-    private void update() {
-        // DO NOT USE FOREACH HERE AS IT CREATES AN ITERATOR -> no allocation changes
-        int size = changedStates.size();
-        for (int i = 0; i < size; i++) {
-            JobState state = changedStates.get(i);
-            jobUpdates.sample(1);
-
-            if (log.isDebugEnabled()) {
-                SelectionKey key = null;
-                if (state.nextChannel != null) {
-                    key = state.nextChannel.keyFor(selector);
-                }
-                log.debug().append(name).append(" updating job ").append(state.job)
-                        .append(", d=").append(state.nextDeadline)
-                        .append(", ni=").append(state.nextOps)
-                        .append(", k=").append(key == null ? "null" : key.toString())
-                        .append(", ki=").append(key == null || !key.isValid() ? 0 : key.interestOps())
-                        .endl();
-            }
-
-            if (state.gathered) {
-                // job is waiting to be invoked; leave it alone
-            } else if (state.nextChannel != null && state.nextOps != 0) {
-                if (!grabChannel(state)) {
-                    log.error().append(name).append(" cancelling ").append(state.job)
-                            .append(" after failed I/O registration").endl();
-                    timeoutQueue.remove(state);
-                    state.cancelled = true;
-                    dispatchQueue.add(state);
-                } else {
-                    timeoutQueue.enter(state, state.nextDeadline);
-                }
-            } else if (state.forgotten) {
-                dropChannel(state);
-                timeoutQueue.remove(state);
-            } else if (state.cancelled) {
-                dropChannel(state);
-                timeoutQueue.remove(state);
-                if (log.isDebugEnabled()) {
-                    log.debug().append(name).append(" cancelling ").append(state.job).append(" from update()").endl();
-                }
-                state.cancelled = true;
-                dispatchQueue.add(state);
-            } else {
-                dropChannel(state);
-                timeoutQueue.enter(state, state.nextDeadline);
-            }
-
-            state.forgotten = true;
-            state.nextChannel = null;
-            state.nextOps = 0;
-            state.nextDeadline = 0;
-
-            assert state.waitChannel == null || state.waitChannel.keyFor(selector).attachment() == state;
-        }
-        if (log.isDebugEnabled()) {
-            log.debug().append(name).append(" updated ").append(changedStates.size()).append(" jobs").endl();
-        }
-        changedStates.clear();
-        updateClock++;
-    }
-
-    /**
-     * compute the timeout value for the next select() call
-     *
-     * NOTE: assumes that stateLock is held
-     */
-    private long computeTimeout(long now, long timeout) {
-        if (!dispatchQueue.isEmpty()) {
-            if (log.isDebugEnabled()) {
-                log.debug().append(name).append(" update: dispatch queue is not empty, setting timeout to zero").endl();
-            }
-            timeout = 0;
-        } else if (!timeoutQueue.isEmpty()) {
-            JobState next = timeoutQueue.top();
-            long remain = next.deadline - now;
-            if (log.isDebugEnabled()) {
-                log.debug().append(name).append(" update: next timeout due in ").append(remain).append(" millis: ")
-                        .append(next.job).endl();
-            }
-            timeout = Math.max(0, Math.min(timeout, remain));
-        }
-        return timeout;
-    }
-
-    /**
-     * Wait for something to happen
-     */
-    private void select(long timeout) {
-        try {
-            if (log.isDebugEnabled()) {
-                log.debug().append(name).append(" calling select(").append(timeout).append(")").endl();
-            }
-
-            mark(workDuration);
-
-            if (timeout > 0) {
-                selector.select(timeout);
-            } else {
-                selector.selectNow();
-            }
-
-            mark(selectDuration);
-        } catch (IOException x) {
-            if (java.util.regex.Pattern.matches(".*Operation not permitted.*", x.toString())) {
-                // There is a documented bug (http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6481709) in some
-                // versions of the epoll selector which causes occasional "Operation not permitted" errors to be
-                // thrown.
- log.warn().append(name).append( - " Ignoring 'Operation not permitted' exception, see http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6481709") - .endl(); - } else { - if (!isClosed()) { - log.fatal(x).append(name).append(" Unexpected IOException in select(): ").append(x.getMessage()) - .endl(); - System.exit(1); - } - } - } catch (ClosedSelectorException x) { - if (!isClosed()) { - log.fatal(x).append(name).append(" ClosedSelectorException in select(): ").append(x.getMessage()) - .endl(); - System.exit(1); - } - } - } - - private void spinSelect(long times) { - try { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" calling spinSelect(").append(times).append(")").endl(); - } - - mark(workDuration); - - while (selector.selectNow() == 0 && !spinWakeSelector && (times-- > 0)) { - } - - mark(selectDuration); - } catch (IOException x) { - if (java.util.regex.Pattern.matches(".*Operation not permitted.*", x.toString())) { - // There is a documented bug (http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6481709) in some - // versions of the epoll selector which causes occasional "Operation not permitted" errors to be - // thrown. - log.warn().append(name).append( - " Ignoring 'Operation not permitted' exception, see http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6481709") - .endl(); - } else { - if (!isClosed()) { - log.fatal(x).append(name).append(" Unexpected IOException in spinSelect(): ").append(x.getMessage()) - .endl(); - System.exit(1); - } - } - } catch (ClosedSelectorException x) { - if (!isClosed()) { - log.fatal(x).append(name).append(" ClosedSelectorException in spinSelect(): ").append(x.getMessage()) - .endl(); - System.exit(1); - } - } - } - - /** - * Gather up selected and timed-out jobs - * - * NOTE: assumes that stateLock is held - */ - private void gather(long now) { - JobState state; - int numInvokes = 0; - // first gather all of the invokable jobs - for (SelectionKey key : selector.selectedKeys()) { - ++numInvokes; - try { - if ((state = (JobState) key.attachment()) == null) { - // clear interest ops, so we don't select in a tight loop - if (key.isValid() && key.interestOps() != 0) { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" clearing interest in orphaned key ") - .append(key.toString()).append(" in YASchedulerImpl.gather").endl(); - } - if (key.isValid()) { - key.interestOps(0); - } - keyOrphans.sample(1); - } - } else { - key.attach(null); - state.readyChannel = key.channel(); - state.readyOps = key.readyOps(); - state.gathered = true; - state.gatheredNanos = lastNanos; - dispatchQueue.add(state); - timeoutQueue.remove(state); - if (log.isDebugEnabled()) { - log.debug().append(name).append(" gather ").append(key.toString()).append(" -> ") - .append(state.job) - .append(", ops=").append(key.readyOps()) - .append(", ki=").append(key.interestOps()) - .append(", dq=").append(dispatchQueue.size()) - .endl(); - } - } - } catch (CancelledKeyException x) { - // We can't guarantee that some thread won't try to write to the channel and - // cause it to cancel the key -- if that happens, then we'll get the exception - // here. But that's okay, because it's either an orphan channel which we just - // want to get rid of, or the IOJob will get the exception later and handle it. 
-            }
-        }
-        selector.selectedKeys().clear();
-        invokeCount.sample(numInvokes);
-
-        // now get all of the expired timeouts
-        int numTimeouts = 0;
-        while (!timeoutQueue.isEmpty() && now >= (state = timeoutQueue.top()).deadline) {
-            ++numTimeouts;
-            timeoutQueue.removeTop();
-            state.gathered = true;
-            state.gatheredNanos = lastNanos;
-            dispatchQueue.add(state);
-        }
-        timeoutCount.sample(numTimeouts);
-
-        if (log.isDebugEnabled()) {
-            log.debug().append(name).append(" gathered ").append(numInvokes).append(" for I/O and ").append(numTimeouts)
-                    .append(" timeouts").endl();
-        }
-    }
-
-    /**
-     * dispatch a gathered job, if there are any
-     */
-    private boolean dispatch(Runnable handoff) {
-        JobState state;
-        SelectableChannel readyChannel;
-        int readyOps;
-        boolean cancelled;
-        synchronized (stateLock) {
-            if ((state = dispatchQueue.poll()) == null) {
-                return false;
-            }
-
-            readyChannel = state.readyChannel;
-            readyOps = state.readyOps;
-            cancelled = state.cancelled;
-            state.readyChannel = null;
-            state.readyOps = 0;
-            state.gathered = false;
-            // NOTE: we only need to record the state as changed if it has a channel;
-            // cancelled and timed-out states will just be forgotten.
-            if (!cancelled && readyChannel != null) {
-                changedState(state);
-            }
-            if (log.isDebugEnabled()) {
-                log.debug().append(name).append(" dispatch ").append(state.job)
-                        .append(", ops=").append(readyOps)
-                        .append(", dq=").append(dispatchQueue.size())
-                        .endl();
-            }
-            assert readyChannel == null || readyOps != 0;
-        }
-
-        // dispatch the job outside of the state lock
-        try {
-            if (cancelled) {
-                if (log.isDebugEnabled()) {
-                    log.debug().append(name).append(" cancelled ").append(state.job).endl();
-                }
-                state.job.cancelled();
-            } else {
-                if (doTimingStats)
-                    gatheredDuration.sample((System.nanoTime() - state.gatheredNanos + 500) / 1000);
-                if (readyOps != 0) {
-                    if (log.isDebugEnabled()) {
-                        log.debug().append(name).append(" invoke ").append(state.job).endl();
-                    }
-                    state.job.invoke(readyChannel, readyOps, handoff);
-                } else {
-                    if (log.isDebugEnabled()) {
-                        log.debug().append(name).append(" timedOut ").append(state.job).endl();
-                    }
-                    if (handoff != null) {
-                        handoff.run();
-                    }
-                    state.job.timedOut();
-                }
-            }
-        } catch (Throwable x) {
-            log.fatal(x).append(": unhandled Throwable in dispatch on job [").append(state.job).append("]: ")
-                    .append(x.getMessage()).endl();
-            throw new RuntimeException(x);
-        }
-
-        return true;
-    }
-
-    /**
-     * Wake up the selector, if necessary.
-     *
-     * NOTE: assumes that stateLock is held!
-     */
-    private void maybeWakeSelector() {
-        if (selectingTill > 0) {
-            if (log.isDebugEnabled()) {
-                log.debug().append(name).append(" waking up the scheduler").endl();
-            }
-            selector.wakeup();
-            selectorWakeups.sample(1);
-        }
-
-        if (doSpinSelect) {
-            spinWakeSelector = true;
-        }
-    }
-
-    /**
-     * Wait for jobs to become ready, then invoke() them all. This method will form the core of the main loop of a
-     * scheduler-driven application. The method first waits until:
-     *
-     * -- the given timeout expires,
-     * -- the earliest job-specific timeout expires, or
-     * -- one or more jobs become ready
-     *
-     * Note that this method is not synchronized. The application must ensure that it is never called concurrently by
-     * more than one thread.
-     *
-     * @return true if some work was done.
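-     *
-     * For illustration, a minimal driver loop (an editorial sketch; the scheduler instance and the shutdown
-     * flag are assumed names, and the call pattern follows CommBase.SingleThreadedScheduler):
-     *
-     * <pre>
-     * while (!done) {
-     *     scheduler.work(10, null); // waits up to 10ms, then dispatches at most one ready job
-     * }
-     * scheduler.close();
-     * </pre>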
- */ - public boolean work(long timeout, Runnable handoff) { - if (doSpinSelect) { - // just use the millis timeout as the number of times to spin - long times = timeout; - return spinWork(times, handoff); - } - - boolean didOne = dispatch(handoff); - if (!didOne) { - // apply any changes to the states - synchronized (stateLock) { - update(); - long now = currentTimeMillis(); - timeout = computeTimeout(now, timeout); - assert selectingTill == 0 : "no more than one thread should ever call work!"; - if (timeout > 0) { - selectingTill = now + timeout; - } - } - - // wait for something to happen - select(timeout); - - // apply changes while we were waiting, then gather up all of the jobs that can be dispatched - synchronized (stateLock) { - selectingTill = 0; - update(); - long now = currentTimeMillis(); - gather(now); - } - - // and try again - didOne = dispatch(handoff); - } - return didOne; - } - - private boolean spinWork(long times, Runnable handoff) { - boolean didOne = dispatch(handoff); - if (!didOne) { - // apply any changes to the states - synchronized (stateLock) { - update(); - if (!dispatchQueue.isEmpty() || spinWakeSelector) { - times = 1; // only want to spin on select once since we have stuff to dispatch - spinWakeSelector = false; - } - assert selectingTill == 0 : "no more than one thread should ever call work!"; - } - - // spin for something to happen - spinSelect(times); - - // apply changes while we were waiting, then gather up all of the jobs that can be dispatched - synchronized (stateLock) { - selectingTill = 0; - update(); - long now = currentTimeMillis(); - gather(now); - } - - // and try again - didOne = dispatch(handoff); - } - return didOne; - } - - /** - * Shuts down the scheduler, calling close() on the underlying Selector instance. - */ - public void close() { - isClosed = true; - clear(); - try { - selector.close(); - } catch (IOException x) { - log.warn(x).append(name).append(" Scheduler.close: ignoring exception from selector.close(): ") - .append(x.getMessage()).endl(); - } - } - - /** - * Return true if the scheduler is closed, or in the process of closing. 
- */ - public boolean isClosed() { - return isClosed; - } - - /** - * Clear out the scheduler state - */ - private void clear() { - Set allJobs = getAllJobs(); - for (Job j : allJobs) { - cancelJob(j); - } - log.info().append(name).append(" Scheduler.clear: starting with ").append(allJobs.size()).append(" jobs") - .endl(); - synchronized (stateLock) { - update(); - } - ArrayList allKeys = getAllKeys(); - for (SelectionKey k : allKeys) { - k.cancel(); - } - synchronized (stateLock) { - update(); - } - try { - selector.selectNow(); - } catch (IOException x) { - throw new UncheckedIOException(x); - } - while (true) { - try { - if (!dispatch(null)) { - break; - } - } catch (Exception x) { - log.warn().append(name).append(" Scheduler.clear: ignoring shutdown exception: ").append(x).endl(); - } - } - log.info().append(name).append(" Scheduler.clear: finished").endl(); - } - - /** - * return the set of all jobs known to the scheduler, in whatever state - */ - private Set getAllJobs() { - synchronized (stateLock) { - update(); - Set result = new HashSet(); - timeoutQueue.junitGetAllJobs(result); - for (JobState state : changedStates) { - assert state != null; - if (state.job != null) { - result.add(state.job); - } - } - for (SelectionKey key : junitGetAllKeys()) { - Object attachment; - if (key != null && (attachment = key.attachment()) != null && attachment instanceof JobState) { - JobState state = (JobState) attachment; - if (state.job != null) { - result.add(state.job); - } - } - } - return result; - } - } - - /** - * Return the selection keys currently known to the scheduler. - */ - private ArrayList getAllKeys() { - synchronized (stateLock) { - update(); - Set keys = selector.keys(); - selector.wakeup(); - synchronized (keys) { - return new ArrayList(keys); - } - } - } - - // -------------------------------------------------------------------------- - // test support methods (white-box) - // -------------------------------------------------------------------------- - - public Selector junitGetSelector() { - return selector; - } - - /** - * return the set of all jobs known to the scheduler, in whatever state - */ - public Set junitGetAllJobs() { - return getAllJobs(); - } - - /** - * Return the contents of the timeout queue, in deadline order - * - * @return the jobs in the timeout queue - */ - public ArrayList junitGetTimeoutQueue() { - synchronized (stateLock) { - update(); - ArrayList result = new ArrayList(timeoutQueue.size()); - try { - JobStateTimeoutQueue q = (JobStateTimeoutQueue) timeoutQueue.clone(); - while (!q.isEmpty()) { - result.add(q.top().job); - q.removeTop(); - } - } catch (CloneNotSupportedException x) { - // ignore - } - return result; - } - } - - /** - * Return the selection keys currently known to the scheduler. - */ - public ArrayList junitGetAllKeys() { - return getAllKeys(); - } - - /** - * Return the selection keys currently known to the scheduler. - */ - public ArrayList junitGetReadyKeys() { - return new ArrayList(selector.selectedKeys()); - } - - /** - * Return a map containing all channels and the jobs to which they are associated. 
- */ - public Map junitGetChannelsAndJobs() { - synchronized (stateLock) { - update(); - Map result = new HashMap(); - for (SelectionKey key : junitGetAllKeys()) { - Object attachment; - if (key != null && (attachment = key.attachment()) != null && attachment instanceof JobState) { - JobState state = (JobState) attachment; - if (state.job != null) { - result.put(key.channel(), ((JobState) attachment).job); - } - } - } - return result; - } - } - - /** - * Return true if the timeout queue invariant holds. - */ - public boolean junitTestTimeoutQueueInvariant() { - synchronized (stateLock) { - return timeoutQueue.testInvariant("in call from junit"); - } - } -} diff --git a/IO/src/test/java/io/deephaven/io/sched/TestJobStateTimeoutQueue.java b/IO/src/test/java/io/deephaven/io/sched/TestJobStateTimeoutQueue.java deleted file mode 100644 index a572d425d89..00000000000 --- a/IO/src/test/java/io/deephaven/io/sched/TestJobStateTimeoutQueue.java +++ /dev/null @@ -1,124 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io.sched; - -import io.deephaven.io.logger.Logger; -import junit.framework.TestCase; - -import java.nio.channels.SelectableChannel; -import java.io.IOException; - -public class TestJobStateTimeoutQueue extends TestCase { - - public void setUp() throws Exception { - super.setUp(); - } - - public void tearDown() throws Exception { - super.tearDown(); - } - - /** - * A null Job implementation - */ - private static class NullJob extends Job { - public int invoke(SelectableChannel channel, int readyOps, Runnable handoff) throws IOException { - return 0; - } - - public void timedOut() {} - - public void cancelled() {} - } - - /** - * Macro test - */ - public void testTimeoutQueue() { - JobState[] ja = new JobState[10]; - for (int i = 0; i < ja.length; ++i) { - ja[i] = new JobState(new NullJob()); - } - JobStateTimeoutQueue q = new JobStateTimeoutQueue(Logger.NULL, 10); - - q.enter(ja[0], 1); - assertTrue(q.testInvariant("insert 1")); - q.enter(ja[1], 9); - assertTrue(q.testInvariant("insert 9")); - q.enter(ja[2], 8); - assertTrue(q.testInvariant("insert 8")); - q.enter(ja[3], 5); - assertTrue(q.testInvariant("insert 5")); - q.enter(ja[4], 2); - assertTrue(q.testInvariant("insert 2")); - q.enter(ja[5], 3); - assertTrue(q.testInvariant("insert 3")); - q.enter(ja[6], 6); - assertTrue(q.testInvariant("insert 6")); - q.enter(ja[7], 4); - assertTrue(q.testInvariant("insert 4")); - q.enter(ja[8], 7); - assertTrue(q.testInvariant("insert 7")); - q.enter(ja[9], 10); - assertTrue(q.testInvariant("insert 10")); - - assertEquals(ja[0], q.top()); - q.removeTop(); - q.testInvariant("remove 1"); - assertEquals(ja[4], q.top()); - q.removeTop(); - q.testInvariant("remove 2"); - assertEquals(ja[5], q.top()); - q.removeTop(); - q.testInvariant("remove 3"); - assertEquals(ja[7], q.top()); - q.removeTop(); - q.testInvariant("remove 4"); - assertEquals(ja[3], q.top()); - q.removeTop(); - q.testInvariant("remove 5"); - assertEquals(ja[6], q.top()); - q.removeTop(); - q.testInvariant("remove 6"); - assertEquals(ja[8], q.top()); - q.removeTop(); - q.testInvariant("remove 7"); - assertEquals(ja[2], q.top()); - q.removeTop(); - q.testInvariant("remove 8"); - assertEquals(ja[1], q.top()); - q.removeTop(); - q.testInvariant("remove 9"); - assertEquals(ja[9], q.top()); - q.removeTop(); - q.testInvariant("remove 10"); - - assertTrue(q.testInvariant("after clone")); - } - - /** - * Test change of deadline within queue - */ - public void testDeadlineChange() { - 
JobState j1 = new JobState(new NullJob()); - JobState j2 = new JobState(new NullJob()); - JobState j3 = new JobState(new NullJob()); - JobStateTimeoutQueue q = new JobStateTimeoutQueue(Logger.NULL, 10); - - q.enter(j1, 1000); - q.enter(j2, 2000); - q.enter(j3, 3000); - - assertEquals(j1, q.top()); - - q.enter(j2, 200); - assertEquals(j2, q.top()); - - q.enter(j2, 20000); - assertEquals(j1, q.top()); - - q.enter(j1, 100000); - assertEquals(j3, q.top()); - } -} diff --git a/Integrations/src/main/java/io/deephaven/integrations/python/PythonDeephavenSession.java b/Integrations/src/main/java/io/deephaven/integrations/python/PythonDeephavenSession.java index 607174ee1a3..f37f58e3907 100644 --- a/Integrations/src/main/java/io/deephaven/integrations/python/PythonDeephavenSession.java +++ b/Integrations/src/main/java/io/deephaven/integrations/python/PythonDeephavenSession.java @@ -24,6 +24,8 @@ import io.deephaven.util.SafeCloseable; import io.deephaven.util.annotations.ScriptApi; import io.deephaven.util.annotations.VisibleForTesting; +import io.deephaven.util.thread.NamingThreadFactory; +import io.deephaven.util.thread.ThreadInitializationFactory; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.jpy.KeyError; @@ -43,6 +45,9 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.function.Consumer; import java.util.stream.Collectors; /** @@ -76,11 +81,12 @@ public class PythonDeephavenSession extends AbstractScriptSession scope) { - super(updateGraph, NoOp.INSTANCE, null); + public PythonDeephavenSession(final UpdateGraph updateGraph, + final ThreadInitializationFactory threadInitializationFactory, final PythonScope scope) { + super(updateGraph, threadInitializationFactory, NoOp.INSTANCE, null); evaluator = null; this.scope = (PythonScope) scope; @@ -120,9 +127,26 @@ public PythonDeephavenSession( } scriptFinder = null; + registerJavaExecutor(threadInitializationFactory); publishInitial(); } + private void registerJavaExecutor(ThreadInitializationFactory threadInitializationFactory) { + // TODO (deephaven-core#4040) Temporary exec service until we have cleaner startup wiring + try (PyModule pyModule = PyModule.importModule("deephaven.server.executors"); + final PythonDeephavenThreadsModule module = pyModule.createProxy(PythonDeephavenThreadsModule.class)) { + NamingThreadFactory threadFactory = new NamingThreadFactory(PythonDeephavenSession.class, "serverThread") { + @Override + public Thread newThread(@NotNull Runnable r) { + return super.newThread(threadInitializationFactory.createInitializer(r)); + } + }; + ExecutorService executorService = Executors.newFixedThreadPool(1, threadFactory); + module._register_named_java_executor("serial", executorService::submit); + module._register_named_java_executor("concurrent", executorService::submit); + } + } + @Override @VisibleForTesting public QueryScope newQueryScope() { @@ -325,4 +349,10 @@ interface PythonScriptSessionModule extends Closeable { void close(); } + + interface PythonDeephavenThreadsModule extends Closeable { + void close(); + + void _register_named_java_executor(String executorName, Consumer execute); + } } diff --git a/Net/build.gradle b/Net/build.gradle deleted file mode 100644 index 31bf972685b..00000000000 --- a/Net/build.gradle +++ /dev/null @@ -1,30 +0,0 @@ -plugins { - id 'io.deephaven.project.register' -} - -dependencies { - implementation project(':Base') - 
-    implementation project(':DataStructures')
-    implementation project(':IO')
-    implementation project(':Configuration')
-    implementation project(':FishUtil')
-    implementation project(':log-factory')
-
-    testImplementation project(path: ':Base', configuration: 'tests')
-
-    testRuntimeOnly project(':log-to-slf4j')
-    Classpaths.inheritSlf4j(project, 'slf4j-simple', 'testRuntimeOnly')
-}
-
-test {
-    useJUnit()
-
-    enableAssertions = true
-    maxHeapSize = '3g'
-
-    systemProperty 'Configuration.rootFile', 'lib-tests.prop'
-    systemProperty 'deephaven.dataDir', "$rootDir/tmp/workspace"
-    systemProperty 'configuration.quiet', 'true'
-
-    exclude '**/NoTest*'
-}
\ No newline at end of file
diff --git a/Net/gradle.properties b/Net/gradle.properties
deleted file mode 100644
index c186bbfdde1..00000000000
--- a/Net/gradle.properties
+++ /dev/null
@@ -1 +0,0 @@
-io.deephaven.project.ProjectType=JAVA_PUBLIC
diff --git a/Net/src/main/java/io/deephaven/net/CommBase.java b/Net/src/main/java/io/deephaven/net/CommBase.java
deleted file mode 100644
index cdbdb9b42f3..00000000000
--- a/Net/src/main/java/io/deephaven/net/CommBase.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
- */
-package io.deephaven.net;
-
-import io.deephaven.base.FatalErrorHandler;
-import io.deephaven.base.FatalErrorHandlerFactory;
-import io.deephaven.configuration.Configuration;
-import io.deephaven.net.impl.nio.NIODriver;
-import io.deephaven.io.NioUtil;
-import io.deephaven.io.logger.Logger;
-import io.deephaven.io.sched.*;
-
-import java.io.IOException;
-import java.nio.channels.Selector;
-
-public class CommBase {
-
-    private static volatile FatalErrorHandler defaultFatalErrorHandler;
-
-    public static FatalErrorHandler getDefaultFatalHandler() {
-        if (defaultFatalErrorHandler == null) {
-            synchronized (CommBase.class) {
-                if (defaultFatalErrorHandler == null) {
-                    final String defaultFatalErrorHandlerClassName =
-                            Configuration.getInstance().getProperty("Comm.fatalErrorHandlerFactoryClass");
-                    final Class<?> defaultFatalErrorHandlerClass;
-                    try {
-                        defaultFatalErrorHandlerClass = Class.forName(defaultFatalErrorHandlerClassName);
-                    } catch (ClassNotFoundException e) {
-                        throw new IllegalArgumentException(
-                                "Could not find fatalErrorHandlerFactoryClass " + defaultFatalErrorHandlerClassName, e);
-                    }
-                    final FatalErrorHandlerFactory defaultFatalErrorHandlerFactory;
-                    try {
-                        defaultFatalErrorHandlerFactory =
-                                (FatalErrorHandlerFactory) defaultFatalErrorHandlerClass.newInstance();
-                    } catch (InstantiationException | IllegalAccessException | ClassCastException e) {
-                        throw new IllegalArgumentException(
-                                "Could not instantiate fatalErrorHandlerFactoryClass " + defaultFatalErrorHandlerClass,
-                                e);
-                    }
-                    defaultFatalErrorHandler = defaultFatalErrorHandlerFactory.get();
-                }
-            }
-        }
-        return defaultFatalErrorHandler;
-    }
-
-    public static void signalFatalError(final String message, Throwable x) {
-        try {
-            FatalErrorHandler feh = getDefaultFatalHandler();
-            feh.signalFatalError(message, x);
-        } catch (Throwable fehx) {
-            // dump this to stderr, it's not great, but we had an error raising an error and really do want both of
-            // these in the log
-            fehx.printStackTrace(System.err);
-            x.printStackTrace(System.err);
-            throw new RuntimeException("Could not raise fatal error: " + message, x);
-        }
-    }
-
-    /**
-     * Return the scheduler used by the NIO implementation
-     */
-    public static Scheduler getScheduler() {
-        NIODriver.init();
-        return NIODriver.getScheduler();
-    }
-
-    /**
-     * Create a private,
single-threaded scheduler and driver thread - */ - public static class SingleThreadedScheduler extends YASchedulerImpl { - private final Thread driver; - private volatile boolean done = false; - - public SingleThreadedScheduler(final String name, Logger log) throws IOException { - super(name, NioUtil.reduceSelectorGarbage(Selector.open()), log); - this.driver = new Thread(() -> { - try { - while (!SingleThreadedScheduler.this.done) { - work(10, null); - } - } catch (Throwable x) { - signalFatalError(name + " exception", x); - } - }); - driver.setName(name + "-Driver"); - driver.setDaemon(true); - } - - public SingleThreadedScheduler start() { - driver.start(); - return this; - } - - public void stop() { - done = true; - } - } - - public static SingleThreadedScheduler singleThreadedScheduler(final String name, Logger log) { - try { - return new SingleThreadedScheduler(name, log); - } catch (IOException x) { - signalFatalError(name + " exception", x); - return null; - } - } -} diff --git a/Net/src/main/java/io/deephaven/net/impl/nio/FastNIODriver.java b/Net/src/main/java/io/deephaven/net/impl/nio/FastNIODriver.java deleted file mode 100644 index 492d5718f0b..00000000000 --- a/Net/src/main/java/io/deephaven/net/impl/nio/FastNIODriver.java +++ /dev/null @@ -1,285 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.net.impl.nio; - -import io.deephaven.base.UnfairMutex; -import io.deephaven.configuration.Configuration; -import io.deephaven.net.CommBase; -import io.deephaven.io.NioUtil; -import io.deephaven.io.logger.LogCrashDump; -import io.deephaven.io.logger.Logger; -import io.deephaven.io.sched.Scheduler; -import io.deephaven.io.sched.TimedJob; -import io.deephaven.io.sched.YASchedulerImpl; - -import java.io.IOException; -import java.nio.channels.Selector; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -public final class FastNIODriver implements Runnable { - private static Logger log; - - public static int numTotalThreads(String property) { - final String[] values = Configuration.getInstance().getProperty(property).split(","); - return Integer.parseInt(values[0]) * Integer.parseInt(values[1]); - } - - public static int threadsPerScheduler(String property) { - final String[] values = Configuration.getInstance().getProperty(property).split(","); - if (values.length != 6) - return 0; - return Integer.parseInt(values[1]); - } - - public static Scheduler[] createSchedulers(String name, String property, Logger log) { - return createSchedulers(name, property, log, Configuration.getInstance()); - } - - public static Scheduler[] createSchedulers(String name, String property, Logger log, Configuration config) { - final String[] values = config.getProperty(property).split(","); - if (values.length != 6) - return null; - - final int numSchedulers = Integer.parseInt(values[0]); - final int threadsPerScheduler = Integer.parseInt(values[1]); - final long timeoutsOrSpins = Long.parseLong(values[2]); - final int spinsUntilPark = Integer.parseInt(values[3]); - final boolean doTimingStats = Boolean.parseBoolean(values[4]); - final boolean doSpinSelect = Boolean.parseBoolean(values[5]); - final Scheduler[] schedulers = new Scheduler[numSchedulers]; - for (int i = 0; i < numSchedulers; ++i) { - schedulers[i] = createDrivers(name + "-" + i, log, threadsPerScheduler, threadsPerScheduler, - timeoutsOrSpins, spinsUntilPark, false, doTimingStats, doSpinSelect).getScheduler(); - } - return schedulers; - } - 
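-    // For reference, an editorial sketch (not part of the original source): the property string
-    // consumed by createSchedulers() must hold exactly six comma-separated fields, e.g. a
-    // hypothetical entry
-    //
-    //     NIO.scheduler.pool=2,4,10,1000,true,false
-    //
-    // parses as numSchedulers=2, threadsPerScheduler=4, timeoutsOrSpins=10, spinsUntilPark=1000,
-    // doTimingStats=true, doSpinSelect=false; any other field count makes createSchedulers()
-    // return null. The third field is the work() timeout in milliseconds or, when doSpinSelect
-    // is true, the number of selectNow() spins per work() call.
-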
- public static FastNIODriver createDrivers(String name, Logger log, int initialThreads, int maxThreads, - long workTimeout, int spinsUntilPark, boolean crashOnMax) { - return createDrivers(name, log, initialThreads, maxThreads, workTimeout, spinsUntilPark, crashOnMax, true, - false); - } - - public static FastNIODriver createDrivers(String name, Logger log, int initialThreads, int maxThreads, - long workTimeout, int spinsUntilPark, boolean crashOnMax, boolean doTimingStats, boolean doSpinSelect) { - FastNIODriver.log = log; - log.info().append(name).append(": Starting FastNIODriver Scheduler: threads: ").append(initialThreads) - .append(", maxThreads: ").append(maxThreads) - .append(", workTimeout/spinsOnSelect: ").append(workTimeout) - .append(", spinsUntilPark: ").append(spinsUntilPark) - .append(", doSpinSelect: ").append(doSpinSelect) - .endl(); - try { - final Scheduler scheduler = new YASchedulerImpl(name, NioUtil.reduceSelectorGarbage(Selector.open()), log, - doTimingStats, doSpinSelect); - - final UnfairMutex mutex = new UnfairMutex(spinsUntilPark, maxThreads); - final AtomicBoolean shutdown = new AtomicBoolean(false); - final AtomicInteger created = new AtomicInteger(0); - final AtomicInteger destroyed = new AtomicInteger(0); - final AtomicInteger available = new AtomicInteger(0); - final InternalThread[] threads = new InternalThread[initialThreads]; - // separate the creation and start so the created / available values are setup - for (int i = 0; i < initialThreads; ++i) { - threads[i] = createNewThread(name, scheduler, mutex, shutdown, workTimeout, created, destroyed, - available, maxThreads, crashOnMax); - } - for (int i = 0; i < initialThreads; ++i) { - threads[i].start(); - } - - return threads[0].driver; - } catch (IOException x) { - CommBase.signalFatalError(name + ": FastNIODriver can't create scheduler", x); - return null; - } - } - - private static class InternalThread extends Thread { - private final FastNIODriver driver; - - private InternalThread(final FastNIODriver driver) { - super(driver); - this.driver = driver; - } - } - - private static InternalThread createNewThread(final String name, final Scheduler scheduler, final UnfairMutex mutex, - final AtomicBoolean shutdown, final long workTimeout, final AtomicInteger created, - final AtomicInteger destroyed, final AtomicInteger available, final int maxThreads, - final boolean crashOnMax) { - InternalThread t = new InternalThread(new FastNIODriver(name, scheduler, mutex, shutdown, workTimeout, created, - destroyed, available, maxThreads, crashOnMax)); - t.setDaemon(true); - t.setName(name + "-FastNIODriver-" + created.getAndIncrement()); - int a = available.incrementAndGet(); - log.info().append("Creating thread ").append(t.getName()).append(". 
available: ").append(a).endl(); - return t; - } - - private final Scheduler scheduler; - private final UnfairMutex mutex; - private final AtomicBoolean shutdown; - private final long workTimeout; - private final Runnable mutexUnlockHandoff; - private boolean alreadyHandedOff; - - private final AtomicInteger created; - private final AtomicInteger destroyed; - private final AtomicInteger available; - private final int maxThreads; - private final boolean crashOnMax; - - private FastNIODriver(final String name, final Scheduler scheduler, final UnfairMutex mutex, - final AtomicBoolean shutdown, final long workTimeout, final AtomicInteger created, - final AtomicInteger destroyed, final AtomicInteger available, final int maxThreads, - final boolean crashOnMax) { - this.scheduler = scheduler; - this.mutex = mutex; - this.shutdown = shutdown; - this.workTimeout = workTimeout; - this.created = created; - this.destroyed = destroyed; - this.available = available; - this.maxThreads = maxThreads; - this.crashOnMax = crashOnMax; - alreadyHandedOff = false; - mutexUnlockHandoff = () -> { - if (!alreadyHandedOff) { - if (shouldCreate()) { - // nobody to handoff to! let's create a new driver - createNewThread(name, scheduler, mutex, shutdown, workTimeout, created, destroyed, available, - maxThreads, crashOnMax).start(); - } - mutex.unlock(); - alreadyHandedOff = true; - } - }; - } - - // only called when we have the mutex... - private boolean shouldCreate() { - if (available.get() == 0) { - // don't need to worry about races w/ index b/c we have lock - if (created.get() == maxThreads) { - if (crashOnMax) { - log.fatal().append("FastNIODriver: exceeded maximum thread pool limit: ").append(summary()).endl(); - LogCrashDump.logCrashDump(log); - CommBase.signalFatalError("FastNIODriver: exceeded maximum thread pool limit: " + summary(), - new Throwable()); - } - return false; - } - return true; - } - return false; - } - - public String summary() { - return "(available: " + available.get() + ", created: " + created.get() + ", destroyed: " + destroyed.get() - + ")"; - } - - @Override - public void run() { - final Thread me = Thread.currentThread(); - Throwable throwable = null; - while (true) { - if (shutdown.get()) { - break; - } - mutex.lock(); - alreadyHandedOff = false; - if (shutdown.get()) { - mutexUnlockHandoff.run(); - break; - } - - try { - - available.getAndDecrement(); - do { - scheduler.work(workTimeout, mutexUnlockHandoff); - } while (mutex.getOwner() == me); - available.getAndIncrement(); - - } catch (Throwable x) { - throwable = x; - shutdown.set(true); - scheduler.installJob(new TimedJob() { - public void timedOut() {} - }, 0); // wake us up yo - mutexUnlockHandoff.run(); // we aren't sure whether the scheduler.work has already called the handoff - // or not yet, so go ahead and call it (it won't double release it) - long deadline = System.currentTimeMillis() + 5000; - // b/c we haven't destroyed ourself yet... - // meh spinning :/ - while (created.get() != destroyed.get() + 1) { - if (deadline - System.currentTimeMillis() < 0) { - break; - } - Thread.yield(); // better than spinning? 
- } - - break; - } - } - - if (destroyed.incrementAndGet() == created.get()) { - scheduler.close(); - } - - if (throwable == null) { - log.error().append("Thread ").append(me.getName()).append(" is terminating: ").append(summary()).endl(); - } else { - log.fatal(throwable).append("Thread ").append(me.getName()).append(" is terminating on a fatal exception: ") - .append(summary()).endl(); - } - - if (throwable != null) - CommBase.signalFatalError("Unhandled throwable from FastNIODriver scheduler", throwable); - } - - public boolean isShutdown() { - return shutdown.get(); - } - - public boolean shutdown(long maxWait) { - shutdown.set(true); - scheduler.installJob(new TimedJob() { - public void timedOut() {} - }, 0); - long deadline = System.currentTimeMillis() + maxWait; - while (created.get() != destroyed.get()) { - if (deadline - System.currentTimeMillis() < 0) { - break; - } - try { - Thread.sleep(1); // better than spinning? - } catch (InterruptedException e) { - // ignore - } - } - - return created.get() == destroyed.get(); - } - - public Scheduler getScheduler() { - return scheduler; - } - - // whitebox test support methods - public int junit_getWaiting() { - return available.get(); - } - - public int junit_getCreated() { - return created.get(); - } - - public int junit_getDestroyed() { - return destroyed.get(); - } -} diff --git a/Net/src/main/java/io/deephaven/net/impl/nio/NIODriver.java b/Net/src/main/java/io/deephaven/net/impl/nio/NIODriver.java deleted file mode 100644 index f1c57602a45..00000000000 --- a/Net/src/main/java/io/deephaven/net/impl/nio/NIODriver.java +++ /dev/null @@ -1,295 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.net.impl.nio; - -import io.deephaven.internal.log.LoggerFactory; -import java.io.IOException; -import java.nio.channels.Selector; -import java.util.concurrent.atomic.AtomicInteger; - -import io.deephaven.net.CommBase; -import io.deephaven.configuration.Configuration; -import io.deephaven.io.NioUtil; -import io.deephaven.io.logger.LogCrashDump; -import io.deephaven.io.logger.Logger; -import io.deephaven.io.sched.Scheduler; -import io.deephaven.io.sched.TimedJob; -import io.deephaven.io.sched.YASchedulerImpl; - -public class NIODriver implements Runnable { - private static Logger log; - - private static boolean initialized = false; - private static volatile boolean stopped = false; - - private static Scheduler sched = null; - private static FastNIODriver driver = null; - - private static final Object lock = new Object(); - private static Thread leader = null; - private static AtomicInteger available = new AtomicInteger(0); - private static int created = 0; - private static int destroyed = 0; - - public static int WORK_TIMEOUT; - public static int NUM_INITIAL_THREADS; - public static int HARD_MAX_THREADS; - - private static final boolean useFastNIODriver = Configuration.getInstance().getBoolean("NIO.driver.useFast"); - - /** - * Let another thread take over the leadership. - */ - private static void handoff() { - Thread me = Thread.currentThread(); - synchronized (lock) { - if (leader != me) { - LogCrashDump.logCrashDump(log); - CommBase.signalFatalError("NIODriver: WTF? 
in handoff(), but not the leader?", new Throwable()); - } - - if (log.isDebugEnabled()) { - log.debug().append("Thread ").append(me.getName()).append(" is giving up leadership").endl(); - } - - leader = null; - - if (stopped || available.get() != 0) { - lock.notify(); - } else { - // no joy, have to add another thread - log.warn().append("Thread ").append(me.getName()).append(" is handing off with no threads available: ") - .append(summary()).endl(); - addThread(); - } - } - } - - /** - * A procedure which calls handoff(), to give the scheduler when we are running full-bore - */ - private static final Runnable handoffProc = NIODriver::handoff; - - /** - * return a string telling how many threads are doing what - */ - public static String summary() { - if (useFastNIODriver) { - return driver.summary(); - } else { - return "(available: " + available + ", created: " + created + ", destroyed: " + destroyed + ")"; - } - } - - /** - * one-time initialization - */ - public static void init() { - if (!initialized) { - init(LoggerFactory.getLogger(NIODriver.class)); - } - } - - public static void init(Logger log) { - synchronized (lock) { - if (!initialized) { - NIODriver.log = log; - WORK_TIMEOUT = Configuration.getInstance().getInteger("NIO.driver.workTimeout"); - NUM_INITIAL_THREADS = Configuration.getInstance().getInteger("NIO.driver.initialThreadCount"); - HARD_MAX_THREADS = Configuration.getInstance().getInteger("NIO.driver.maxThreadCount"); - if (useFastNIODriver) { - driver = FastNIODriver.createDrivers("Static", log, NUM_INITIAL_THREADS, HARD_MAX_THREADS, - WORK_TIMEOUT, 1000, true); - sched = driver.getScheduler(); - } else { - try { - sched = new YASchedulerImpl(NioUtil.reduceSelectorGarbage(Selector.open()), log); - } catch (IOException x) { - sched = null; - CommBase.signalFatalError("NIODriver.init: can't create scheduler", x); - } - for (int i = 0; i < NUM_INITIAL_THREADS; ++i) { - addThread(); - } - } - initialized = true; - } - } - - } - - /** - * Shut down, and wait for all threads to terminate. This method is really just for testing; it's a bad idea to do - * this in production because waiting for threads to terminate is prone to deadlocks. If desired, though, it can be - * called from an AbstractService shutdown hook installed in init(). 
- */ - public static boolean shutdown(long maxWait) { - synchronized (lock) { - if (!initialized) - return true; - - if (useFastNIODriver) { - if (driver.shutdown(maxWait)) { - initialized = false; - log.info().append("NIODriver.shutdown: finished").endl(); - return true; - } else { - return false; - } - } else { - long deadline = System.currentTimeMillis() + maxWait, remain = maxWait; - stopped = true; - lock.notifyAll(); - // force the scheduler to wake up - sched.installJob(new TimedJob() { - public void timedOut() {} - }, 0); - while (created != destroyed) { - try { - log.info().append("NIODriver.shutdown: waiting for threads to terminate: ").append(summary()) - .endl(); - lock.wait(Math.max(remain, 0)); - } catch (InterruptedException x) { - // ignore - } - if ((remain = deadline - System.currentTimeMillis()) < 0) { - return false; - } - } - sched.close(); - log.info().append("NIODriver.shutdown: finished").endl(); - leader = null; - sched = null; - initialized = stopped = false; - created = destroyed = 0; - available.set(0); - return true; - } - } - } - - /** - * Return the scheduler used by the NIO driver - */ - public static Scheduler getScheduler() { - return sched; - } - - /** - * Return the scheduler used by the NIO driver - */ - public static Logger getLogger() { - return log; - } - - /** - * add a thread to the pool - * - * NOTE: caller must hold the lock! - * - * NOTE: We increment the "waiting" variable *before* we start the new thread, and then make sure to correct it in - * the first iteration of the thread loop. This prevents a race in which we handoff() method creates too many - * threads, because it keeps getting called before the first thread it creates can get started. - */ - private static void addThread() { - if (created == HARD_MAX_THREADS) { - log.fatal().append("NIODriver: exceeded maximum thread pool limit: ").append(summary()).endl(); - LogCrashDump.logCrashDump(log); - CommBase.signalFatalError("NIODriver: exceeded maximum thread pool limit: " + summary(), new Throwable()); - } - Thread thread = new Thread(new NIODriver()); - thread.setDaemon(true); - thread.setName("NIODriver-" + created); - created++; - available.incrementAndGet(); - log.info().append("Thread ").append(thread.getName()).append(" is starting: ").append(summary()).endl(); - thread.start(); - } - - /** - * the threads' run method just does an endless loop, trying to become the leader whenever it can - */ - public void run() { - Thread me = Thread.currentThread(); - STOP: { - while (true) { - synchronized (lock) { - while (leader != me) { - if (stopped) { - destroyed++; - log.info().append("Thread ").append(me.getName()).append(" is terminating: ") - .append(summary()).endl(); - lock.notifyAll(); - break STOP; - } else if (leader == null) { - if (log.isDebugEnabled()) { - log.debug().append("Thread ").append(me.getName()).append(" is assuming leadership") - .endl(); - } - leader = me; - } else { - try { - if (log.isDebugEnabled()) { - log.debug().append("Thread ").append(me.getName()).append(" is waiting ") - .append(summary()).endl(); - } - lock.wait(); - if (log.isDebugEnabled()) { - log.debug().append("Thread ").append(me.getName()).append(" has awoken ") - .append(summary()).endl(); - } - } catch (InterruptedException x) { - // ignore - } - } - } - } - try { - available.decrementAndGet(); - sched.work(WORK_TIMEOUT, handoffProc); - available.incrementAndGet(); - } catch (Throwable x) { - synchronized (lock) { - destroyed++; - log.fatal(x).append("Thread ").append(me.getName()) - .append(" is 
terminating on a fatal exception: ").append(summary()).endl(); - lock.notifyAll(); - } - - NIODriver.shutdown(5000); - CommBase.signalFatalError("Unhandled throwable from NIO scheduler", x); - break STOP; - } - } - } - } - - // whitebox test support methods - public static int junit_getWaiting() { - if (useFastNIODriver) { - return driver.junit_getWaiting(); - } else { - return available.get(); - } - } - - public static int junit_getCreated() { - if (useFastNIODriver) { - return driver.junit_getCreated(); - } else { - return created; - } - } - - public static int junit_getDestroyed() { - if (useFastNIODriver) { - return driver.junit_getDestroyed(); - } else { - return destroyed; - } - } - - // ################################################################ - -} diff --git a/R/rdeephaven/DESCRIPTION b/R/rdeephaven/DESCRIPTION index bdb670b86da..fca519dff82 100644 --- a/R/rdeephaven/DESCRIPTION +++ b/R/rdeephaven/DESCRIPTION @@ -1,7 +1,7 @@ Package: rdeephaven Type: Package Title: R Client for Deephaven Core -Version: 0.31.0 +Version: 0.32.0 Date: 2023-05-12 Author: Deephaven Data Labs Maintainer: Alex Peters diff --git a/Stats/build.gradle b/Stats/build.gradle index e9d46238886..bac1cdce147 100644 --- a/Stats/build.gradle +++ b/Stats/build.gradle @@ -7,8 +7,6 @@ dependencies { implementation project(':DataStructures') implementation project(':IO') implementation project(':Configuration') - implementation project(':FishUtil') - implementation project(':Net') implementation project(':log-factory') implementation project(':engine-context') compileOnly 'com.google.code.java-allocation-instrumenter:java-allocation-instrumenter:3.3.0' diff --git a/Stats/src/main/java/io/deephaven/stats/StatsCPUCollector.java b/Stats/src/main/java/io/deephaven/stats/StatsCPUCollector.java index 1f003b5fb1e..7b1a3e8c8f8 100644 --- a/Stats/src/main/java/io/deephaven/stats/StatsCPUCollector.java +++ b/Stats/src/main/java/io/deephaven/stats/StatsCPUCollector.java @@ -7,7 +7,7 @@ import io.deephaven.configuration.Configuration; import io.deephaven.internal.log.LoggerFactory; import io.deephaven.io.logger.Logger; -import io.deephaven.util.OSUtil; +import io.deephaven.stats.util.OSUtil; import io.deephaven.base.stats.*; import io.deephaven.hash.KeyedLongObjectHash; import io.deephaven.hash.KeyedLongObjectHashMap; diff --git a/Stats/src/main/java/io/deephaven/stats/StatsDriver.java b/Stats/src/main/java/io/deephaven/stats/StatsDriver.java index 29c10096d13..beeda59894b 100644 --- a/Stats/src/main/java/io/deephaven/stats/StatsDriver.java +++ b/Stats/src/main/java/io/deephaven/stats/StatsDriver.java @@ -5,23 +5,27 @@ import io.deephaven.base.clock.Clock; import io.deephaven.engine.context.ExecutionContext; -import io.deephaven.net.CommBase; import io.deephaven.util.SafeCloseable; -import io.deephaven.util.formatters.ISO8601; import io.deephaven.base.stats.*; import io.deephaven.base.text.TimestampBuffer; import io.deephaven.configuration.Configuration; import io.deephaven.io.log.*; -import io.deephaven.io.sched.TimedJob; import io.deephaven.io.log.impl.LogEntryPoolImpl; import io.deephaven.io.log.impl.LogSinkImpl; +import io.deephaven.util.annotations.ReferentialIntegrity; +import io.deephaven.util.thread.NamingThreadFactory; import java.util.Properties; +import java.util.TimeZone; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; /** * Drives the collection of statistics on a 1-second timer 
task. */ -public class StatsDriver extends TimedJob { +public class StatsDriver { public interface StatusAdapter { void sendAlert(String alertText); @@ -39,11 +43,11 @@ public boolean cmsAlertEnabled() { } private final LogEntryPool entryPool; - private final LogSink sink; + private final LogSink sink; private final LogEntry[] entries; private final LogEntryPool entryPoolHisto; - private final LogSink sinkHisto; + private final LogSink sinkHisto; private final LogEntry[] entriesHisto; private final TimestampBuffer systemTimestamp; @@ -52,9 +56,8 @@ public boolean cmsAlertEnabled() { public final static String header = "Stat,IntervalName,NowSec,NowString,AppNowSec,AppNowString,TypeTag,Name,N,Sum,Last,Min,Max,Avg,Sum2,Stdev"; - private long nextInvocation = System.currentTimeMillis(); - private long nextCpuUpdate = nextInvocation + CPU_INTERVAL; - private long nextMemUpdate = nextInvocation + MEM_INTERVAL; + private long nextCpuUpdate; + private long nextMemUpdate; private static final long STEP = 1000; private static final long MEM_INTERVAL = 1000; @@ -71,9 +74,14 @@ public boolean cmsAlertEnabled() { private final StatsIntradayLogger intraday; private final Value clockValue; private final ExecutionContext executionContext; + @ReferentialIntegrity + private final ScheduledExecutorService scheduler; + @ReferentialIntegrity + private final ScheduledFuture updateJobFuture; private final StatsMemoryCollector memStats; private final StatsCPUCollector cpuStats; + @ReferentialIntegrity private ObjectAllocationCollector objectAllocation; public StatsDriver(Clock clock) { @@ -116,8 +124,9 @@ public StatsDriver(Clock clock, StatsIntradayLogger intraday, boolean getFdStats } } - this.systemTimestamp = new TimestampBuffer(ISO8601.serverTimeZone()); - this.appTimestamp = new TimestampBuffer(ISO8601.serverTimeZone()); + final TimeZone serverTimeZone = Configuration.getInstance().getServerTimezone(); + this.systemTimestamp = new TimestampBuffer(serverTimeZone); + this.appTimestamp = new TimestampBuffer(serverTimeZone); if (path == null) { this.entryPool = null; @@ -150,9 +159,11 @@ public StatsDriver(Clock clock, StatsIntradayLogger intraday, boolean getFdStats clockValue = null; } - long now = System.currentTimeMillis(); - long delay = STEP - (now % STEP); - nextInvocation = now + delay; + final long now = System.currentTimeMillis(); + final long delay = STEP - (now % STEP); + nextCpuUpdate = now + delay + CPU_INTERVAL; + nextMemUpdate = now + delay + MEM_INTERVAL; + cpuStats = new StatsCPUCollector(CPU_INTERVAL, getFdStats); memStats = new StatsMemoryCollector(MEM_INTERVAL, statusAdapter::sendAlert, statusAdapter::cmsAlertEnabled); if (Configuration.getInstance().getBoolean("allocation.stats.enabled")) { @@ -160,13 +171,18 @@ public StatsDriver(Clock clock, StatsIntradayLogger intraday, boolean getFdStats } executionContext = ExecutionContext.getContext(); - // now that the StatsDriver is completely constructed, we can schedule the first iteration + // now that the StatsDriver is completely constructed, we can schedule the update job if (Configuration.getInstance().getBoolean("statsdriver.enabled")) { - schedule(); + scheduler = Executors.newSingleThreadScheduledExecutor( + new NamingThreadFactory(StatsDriver.class, "updateScheduler", true)); + updateJobFuture = scheduler.scheduleAtFixedRate(this::update, delay, STEP, TimeUnit.MILLISECONDS); + } else { + scheduler = null; + updateJobFuture = null; } } - public void timedOut() { + private void update() { long t0 = System.nanoTime(); long now = 
System.currentTimeMillis(); long appNow = clock == null ? now : clock.currentTimeMillis(); @@ -207,20 +223,12 @@ public void timedOut() { } } - schedule(); - statsTiming.sample((System.nanoTime() - t0 + 500) / 1000); } - private void schedule() { - CommBase.getScheduler().installJob(this, nextInvocation); - long steps = Math.max(1L, (((System.currentTimeMillis() - nextInvocation) / STEP) + 1)); - nextInvocation += steps * STEP; - } - private final ItemUpdateListener LISTENER = new ItemUpdateListener() { @Override - public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, + public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, String intervalName) { final Value v = item.getValue(); final History history = v.getHistory(); diff --git a/FishUtil/src/main/java/io/deephaven/util/OSUtil.java b/Stats/src/main/java/io/deephaven/stats/util/OSUtil.java similarity index 85% rename from FishUtil/src/main/java/io/deephaven/util/OSUtil.java rename to Stats/src/main/java/io/deephaven/stats/util/OSUtil.java index 4e136975d2c..ef3566f4696 100644 --- a/FishUtil/src/main/java/io/deephaven/util/OSUtil.java +++ b/Stats/src/main/java/io/deephaven/stats/util/OSUtil.java @@ -1,20 +1,22 @@ /** * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending */ -package io.deephaven.util; +package io.deephaven.stats.util; import org.jetbrains.annotations.NotNull; import java.util.Arrays; import java.util.function.Predicate; -@SuppressWarnings("WeakerAccess") public class OSUtil { public enum OSFamily { - - LINUX(name -> name.startsWith("Linux")), WINDOWS(name -> name.contains("Windows")), MAC_OS( - name -> name.startsWith("Mac OS")), SOLARIS(name -> name.startsWith("SunOs")); + // @formatter:off + LINUX(name -> name.startsWith("Linux")), + WINDOWS(name -> name.contains("Windows")), + MAC_OS(name -> name.startsWith("Mac OS")), + SOLARIS(name -> name.startsWith("SunOs")); + // @formatter:on private final Predicate<String> nameMatcher; diff --git a/TableLogger/TableLogger.gradle b/TableLogger/TableLogger.gradle index 411b1b752d0..a4173ac368d 100644 --- a/TableLogger/TableLogger.gradle +++ b/TableLogger/TableLogger.gradle @@ -2,12 +2,8 @@ plugins { id 'io.deephaven.project.register' } -configurations { - implementation.extendsFrom fishUtil, fishData - testImplementation.extendsFrom fishDataTest -} - dependencies { + implementation project(':Base') implementation project(':Util') testRuntimeOnly project(path: ':configs') testRuntimeOnly project(path: ':test-configs') diff --git a/FishUtil/src/main/java/io/deephaven/util/process/BaseProcessEnvironment.java b/Util/src/main/java/io/deephaven/util/process/BaseProcessEnvironment.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/BaseProcessEnvironment.java rename to Util/src/main/java/io/deephaven/util/process/BaseProcessEnvironment.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/DefaultFatalErrorReporter.java b/Util/src/main/java/io/deephaven/util/process/DefaultFatalErrorReporter.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/DefaultFatalErrorReporter.java rename to Util/src/main/java/io/deephaven/util/process/DefaultFatalErrorReporter.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/DefaultProcessEnvironment.java b/Util/src/main/java/io/deephaven/util/process/DefaultProcessEnvironment.java similarity index 100% rename from
FishUtil/src/main/java/io/deephaven/util/process/DefaultProcessEnvironment.java rename to Util/src/main/java/io/deephaven/util/process/DefaultProcessEnvironment.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/FatalErrorReporter.java b/Util/src/main/java/io/deephaven/util/process/FatalErrorReporter.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/FatalErrorReporter.java rename to Util/src/main/java/io/deephaven/util/process/FatalErrorReporter.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/FatalErrorReporterBase.java b/Util/src/main/java/io/deephaven/util/process/FatalErrorReporterBase.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/FatalErrorReporterBase.java rename to Util/src/main/java/io/deephaven/util/process/FatalErrorReporterBase.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/LoggerShutdownTask.java b/Util/src/main/java/io/deephaven/util/process/LoggerShutdownTask.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/LoggerShutdownTask.java rename to Util/src/main/java/io/deephaven/util/process/LoggerShutdownTask.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/OnetimeShutdownTask.java b/Util/src/main/java/io/deephaven/util/process/OnetimeShutdownTask.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/OnetimeShutdownTask.java rename to Util/src/main/java/io/deephaven/util/process/OnetimeShutdownTask.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/ProcessEnvironment.java b/Util/src/main/java/io/deephaven/util/process/ProcessEnvironment.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/ProcessEnvironment.java rename to Util/src/main/java/io/deephaven/util/process/ProcessEnvironment.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/ShutdownManager.java b/Util/src/main/java/io/deephaven/util/process/ShutdownManager.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/ShutdownManager.java rename to Util/src/main/java/io/deephaven/util/process/ShutdownManager.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/ShutdownManagerImpl.java b/Util/src/main/java/io/deephaven/util/process/ShutdownManagerImpl.java similarity index 99% rename from FishUtil/src/main/java/io/deephaven/util/process/ShutdownManagerImpl.java rename to Util/src/main/java/io/deephaven/util/process/ShutdownManagerImpl.java index 520b86a96bf..5b3e4dc68ae 100644 --- a/FishUtil/src/main/java/io/deephaven/util/process/ShutdownManagerImpl.java +++ b/Util/src/main/java/io/deephaven/util/process/ShutdownManagerImpl.java @@ -9,8 +9,8 @@ import io.deephaven.io.log.LogEntry; import io.deephaven.io.log.LogLevel; import io.deephaven.io.logger.Logger; -import io.deephaven.util.threads.ThreadDump; import io.deephaven.internal.log.LoggerFactory; +import io.deephaven.util.thread.ThreadDump; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; diff --git a/Util/src/main/java/io/deephaven/util/thread/ThreadInitializationFactory.java b/Util/src/main/java/io/deephaven/util/thread/ThreadInitializationFactory.java index 56a5436adb7..932ce5bb54e 100644 --- a/Util/src/main/java/io/deephaven/util/thread/ThreadInitializationFactory.java +++ b/Util/src/main/java/io/deephaven/util/thread/ThreadInitializationFactory.java @@ -1,55 +1,21 @@ package io.deephaven.util.thread; 
-import io.deephaven.configuration.Configuration; - -import java.lang.reflect.InvocationTargetException; -import java.util.Arrays; -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; +import java.util.Collection; /** * Extension point to allow threads that will run user code from within the platform to be controlled by configuration. */ public interface ThreadInitializationFactory { - /* private */ String[] CONFIGURED_INITIALIZATION_TYPES = - Configuration.getInstance().getStringArrayFromProperty("thread.initialization"); - /* private */ List INITIALIZERS = Arrays.stream(CONFIGURED_INITIALIZATION_TYPES) - .filter(str -> !str.isBlank()) - .map(type -> { - try { - // noinspection unchecked - Class clazz = - (Class) Class.forName(type); - return clazz.getDeclaredConstructor().newInstance(); - } catch (ClassNotFoundException | NoSuchMethodException | InvocationTargetException - | InstantiationException | IllegalAccessException e) { - - // TODO (https://github.com/deephaven/deephaven-core/issues/4040): - // Currently the default property file is shared between both the java client and the server. This - // means that client-side usage will attempt to load the thread.initialization property intended for - // the server which is not available on the class path. - if (e instanceof ClassNotFoundException && type.startsWith("io.deephaven.server.")) { - return null; - } - - throw new IllegalArgumentException( - "Error instantiating initializer " + type + ", please check configuration", e); - } - }) - .filter(Objects::nonNull) - .collect(Collectors.toUnmodifiableList()); + ThreadInitializationFactory NO_OP = r -> r; - /** - * Chains configured initializers to run before/around any given runnable, returning a runnable intended to be run - * by a new thread. 
- */ - static Runnable wrapRunnable(Runnable runnable) { - Runnable acc = runnable; - for (ThreadInitializationFactory INITIALIZER : INITIALIZERS) { - acc = INITIALIZER.createInitializer(acc); - } - return acc; + static ThreadInitializationFactory of(Collection<ThreadInitializationFactory> factories) { + return runnable -> { + Runnable acc = runnable; + for (ThreadInitializationFactory factory : factories) { + acc = factory.createInitializer(acc); + } + return acc; + }; } Runnable createInitializer(Runnable runnable); diff --git a/buildSrc/src/main/groovy/io.deephaven.java-classpath-conventions.gradle b/buildSrc/src/main/groovy/io.deephaven.java-classpath-conventions.gradle index baa0413b77f..d008aa19202 100644 --- a/buildSrc/src/main/groovy/io.deephaven.java-classpath-conventions.gradle +++ b/buildSrc/src/main/groovy/io.deephaven.java-classpath-conventions.gradle @@ -29,14 +29,12 @@ configurations { fishDataStructure.extendsFrom fishIo fishConfig.extendsFrom fishDataStructure fishDataGenerator.extendsFrom jdom - fishNet.extendsFrom fishIo fishNumerics.extendsFrom fishBase - fishUtil.extendsFrom fishConfig fishBaseTest.extendsFrom junit fishIoTest.extendsFrom fishBaseTest dhNumerics.extendsFrom fishNumerics, jama - dhUtil.extendsFrom commonsIo, commonsLang3, commonsText, fishUtil, fishNet, fishIo, jdom + dhUtil.extendsFrom commonsIo, commonsLang3, commonsText, fishConfig, fishIo, jdom dhPlot.extendsFrom dhUtil dhBenchmarkSupport.extendsFrom fishData dhIntegrations.extendsFrom math3 @@ -70,10 +68,6 @@ dependencies { fishConfig project(':Configuration') - fishUtil project(':FishUtil') - - fishNet project(':Net') - fishBaseTest project(path: ':Base', configuration: 'tests') fishIoTest project(path: ':IO', configuration: 'tests') diff --git a/cpp-client/README.md b/cpp-client/README.md index 0130532bfdd..f44271712ee 100644 --- a/cpp-client/README.md +++ b/cpp-client/README.md @@ -34,10 +34,20 @@ on them anymore so we do not guarantee they are current for those platforms. 6. Build and install dependencies for Deephaven C++ client. - Get the `build-dependencies.sh` script from Deephaven's base images repository - at the correct version. - You can download it directly from the link + Get the `build-dependencies.sh` script from Deephaven's base images repository. + + ***Note: you need the version of `build-dependencies.sh` that matches + your sources***. + + The link in the paragraph that follows points to the specific + version that works with the code this README.md file accompanies; + if the README.md you are reading does not match the source version + you are about to compile, switch to the right `README.md` now. + + Download `build-dependencies.sh` directly from + https://github.com/deephaven/deephaven-base-images/raw/47f51e769612785c6f320302a3f4f52bc0cff187/cpp-client/build-dependencies.sh + + (this script is also used by our automated tools to generate a docker image that supports test runs; that is why it lives in a separate repo).
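For concreteness, a minimal sketch of the download step is shown below. It assumes a Unix shell with `wget` available; the working directory (`~/dhcpp` here) is purely illustrative, and only the pinned URL comes from this README.

```sh
# Fetch the pinned build-dependencies.sh (commit hash taken from the link above);
# curl -LO with the same URL works equally well if wget is not installed.
mkdir -p ~/dhcpp && cd ~/dhcpp
wget https://github.com/deephaven/deephaven-base-images/raw/47f51e769612785c6f320302a3f4f52bc0cff187/cpp-client/build-dependencies.sh
chmod +x build-dependencies.sh
./build-dependencies.sh   # see the next paragraph for what this does
```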
The script downloads, builds and installs the dependent libraries diff --git a/cpp-client/deephaven/dhclient/proto/deephaven/proto/table.pb.cc b/cpp-client/deephaven/dhclient/proto/deephaven/proto/table.pb.cc index ef808bcfcb0..fa294f934fa 100644 --- a/cpp-client/deephaven/dhclient/proto/deephaven/proto/table.pb.cc +++ b/cpp-client/deephaven/dhclient/proto/deephaven/proto/table.pb.cc @@ -1558,6 +1558,17 @@ struct CreateInputTableRequest_InputTableKind_InMemoryKeyBackedDefaultTypeIntern }; }; PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 CreateInputTableRequest_InputTableKind_InMemoryKeyBackedDefaultTypeInternal _CreateInputTableRequest_InputTableKind_InMemoryKeyBacked_default_instance_; +PROTOBUF_CONSTEXPR CreateInputTableRequest_InputTableKind_Blink::CreateInputTableRequest_InputTableKind_Blink( + ::_pbi::ConstantInitialized){} +struct CreateInputTableRequest_InputTableKind_BlinkDefaultTypeInternal { + PROTOBUF_CONSTEXPR CreateInputTableRequest_InputTableKind_BlinkDefaultTypeInternal() + : _instance(::_pbi::ConstantInitialized{}) {} + ~CreateInputTableRequest_InputTableKind_BlinkDefaultTypeInternal() {} + union { + CreateInputTableRequest_InputTableKind_Blink _instance; + }; +}; +PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 CreateInputTableRequest_InputTableKind_BlinkDefaultTypeInternal _CreateInputTableRequest_InputTableKind_Blink_default_instance_; PROTOBUF_CONSTEXPR CreateInputTableRequest_InputTableKind::CreateInputTableRequest_InputTableKind( ::_pbi::ConstantInitialized) : _oneof_case_{}{} @@ -1644,7 +1655,7 @@ PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORIT } // namespace proto } // namespace deephaven } // namespace io -static ::_pb::Metadata file_level_metadata_deephaven_2fproto_2ftable_2eproto[120]; +static ::_pb::Metadata file_level_metadata_deephaven_2fproto_2ftable_2eproto[121]; static const ::_pb::EnumDescriptor* file_level_enum_descriptors_deephaven_2fproto_2ftable_2eproto[12]; static constexpr ::_pb::ServiceDescriptor const** file_level_service_descriptors_deephaven_2fproto_2ftable_2eproto = nullptr; @@ -2692,6 +2703,12 @@ const uint32_t TableStruct_deephaven_2fproto_2ftable_2eproto::offsets[] PROTOBUF ~0u, // no _inlined_string_donated_ PROTOBUF_FIELD_OFFSET(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryKeyBacked, key_columns_), ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + ~0u, // no _inlined_string_donated_ + ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind, _internal_metadata_), ~0u, // no _extensions_ PROTOBUF_FIELD_OFFSET(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind, _oneof_case_[0]), @@ -2699,6 +2716,7 @@ const uint32_t TableStruct_deephaven_2fproto_2ftable_2eproto::offsets[] PROTOBUF ~0u, // no _inlined_string_donated_ ::_pbi::kInvalidFieldOffsetTag, ::_pbi::kInvalidFieldOffsetTag, + ::_pbi::kInvalidFieldOffsetTag, PROTOBUF_FIELD_OFFSET(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind, kind_), ~0u, // no _has_bits_ PROTOBUF_FIELD_OFFSET(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest, _internal_metadata_), @@ -2906,12 +2924,13 @@ static const ::_pbi::MigrationSchema 
schemas[] PROTOBUF_SECTION_VARIABLE(protode { 1017, -1, -1, sizeof(::io::deephaven::proto::backplane::grpc::RunChartDownsampleRequest)}, { 1029, -1, -1, sizeof(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryAppendOnly)}, { 1035, -1, -1, sizeof(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryKeyBacked)}, - { 1042, -1, -1, sizeof(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind)}, - { 1051, -1, -1, sizeof(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest)}, - { 1062, -1, -1, sizeof(::io::deephaven::proto::backplane::grpc::WhereInRequest)}, - { 1073, 1083, -1, sizeof(::io::deephaven::proto::backplane::grpc::ColumnStatisticsRequest)}, - { 1087, -1, -1, sizeof(::io::deephaven::proto::backplane::grpc::BatchTableRequest_Operation)}, - { 1134, -1, -1, sizeof(::io::deephaven::proto::backplane::grpc::BatchTableRequest)}, + { 1042, -1, -1, sizeof(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink)}, + { 1048, -1, -1, sizeof(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind)}, + { 1058, -1, -1, sizeof(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest)}, + { 1069, -1, -1, sizeof(::io::deephaven::proto::backplane::grpc::WhereInRequest)}, + { 1080, 1090, -1, sizeof(::io::deephaven::proto::backplane::grpc::ColumnStatisticsRequest)}, + { 1094, -1, -1, sizeof(::io::deephaven::proto::backplane::grpc::BatchTableRequest_Operation)}, + { 1141, -1, -1, sizeof(::io::deephaven::proto::backplane::grpc::BatchTableRequest)}, }; static const ::_pb::Message* const file_default_instances[] = { @@ -3029,6 +3048,7 @@ static const ::_pb::Message* const file_default_instances[] = { &::io::deephaven::proto::backplane::grpc::_RunChartDownsampleRequest_default_instance_._instance, &::io::deephaven::proto::backplane::grpc::_CreateInputTableRequest_InputTableKind_InMemoryAppendOnly_default_instance_._instance, &::io::deephaven::proto::backplane::grpc::_CreateInputTableRequest_InputTableKind_InMemoryKeyBacked_default_instance_._instance, + &::io::deephaven::proto::backplane::grpc::_CreateInputTableRequest_InputTableKind_Blink_default_instance_._instance, &::io::deephaven::proto::backplane::grpc::_CreateInputTableRequest_InputTableKind_default_instance_._instance, &::io::deephaven::proto::backplane::grpc::_CreateInputTableRequest_default_instance_._instance, &::io::deephaven::proto::backplane::grpc::_WhereInRequest_default_instance_._instance, @@ -3621,291 +3641,294 @@ const char descriptor_table_protodef_deephaven_2fproto_2ftable_2eproto[] PROTOBU "\001(\t\022\026\n\016y_column_names\030\006 \003(\t\032s\n\tZoomRange" "\022\037\n\016min_date_nanos\030\001 \001(\003B\0020\001H\000\210\001\001\022\037\n\016max" "_date_nanos\030\002 \001(\003B\0020\001H\001\210\001\001B\021\n\017_min_date_" - "nanosB\021\n\017_max_date_nanos\"\365\004\n\027CreateInput" + "nanosB\021\n\017_max_date_nanos\"\340\005\n\027CreateInput" "TableRequest\022<\n\tresult_id\030\001 \001(\0132).io.dee" "phaven.proto.backplane.grpc.Ticket\022L\n\017so" "urce_table_id\030\002 \001(\01321.io.deephaven.proto" ".backplane.grpc.TableReferenceH\000\022\020\n\006sche" "ma\030\003 \001(\014H\000\022W\n\004kind\030\004 \001(\0132I.io.deephaven." 
"proto.backplane.grpc.CreateInputTableReq" - "uest.InputTableKind\032\324\002\n\016InputTableKind\022}" + "uest.InputTableKind\032\277\003\n\016InputTableKind\022}" "\n\025in_memory_append_only\030\001 \001(\0132\\.io.deeph" "aven.proto.backplane.grpc.CreateInputTab" "leRequest.InputTableKind.InMemoryAppendO" "nlyH\000\022{\n\024in_memory_key_backed\030\002 \001(\0132[.io" ".deephaven.proto.backplane.grpc.CreateIn" "putTableRequest.InputTableKind.InMemoryK" - "eyBackedH\000\032\024\n\022InMemoryAppendOnly\032(\n\021InMe" - "moryKeyBacked\022\023\n\013key_columns\030\001 \003(\tB\006\n\004ki" - "ndB\014\n\ndefinition\"\203\002\n\016WhereInRequest\022<\n\tr" - "esult_id\030\001 \001(\0132).io.deephaven.proto.back" - "plane.grpc.Ticket\022B\n\007left_id\030\002 \001(\01321.io." - "deephaven.proto.backplane.grpc.TableRefe" - "rence\022C\n\010right_id\030\003 \001(\01321.io.deephaven.p" - "roto.backplane.grpc.TableReference\022\020\n\010in" - "verted\030\004 \001(\010\022\030\n\020columns_to_match\030\005 \003(\t\"\352" - "\001\n\027ColumnStatisticsRequest\022<\n\tresult_id\030" - "\001 \001(\0132).io.deephaven.proto.backplane.grp" - "c.Ticket\022D\n\tsource_id\030\002 \001(\01321.io.deephav" - "en.proto.backplane.grpc.TableReference\022\023" - "\n\013column_name\030\003 \001(\t\022\037\n\022unique_value_limi" - "t\030\004 \001(\005H\000\210\001\001B\025\n\023_unique_value_limit\"\310\031\n\021" - "BatchTableRequest\022K\n\003ops\030\001 \003(\0132>.io.deep" - "haven.proto.backplane.grpc.BatchTableReq" - "uest.Operation\032\345\030\n\tOperation\022K\n\013empty_ta" - "ble\030\001 \001(\01324.io.deephaven.proto.backplane" - ".grpc.EmptyTableRequestH\000\022I\n\ntime_table\030" - "\002 \001(\01323.io.deephaven.proto.backplane.grp" - "c.TimeTableRequestH\000\022M\n\014drop_columns\030\003 \001" - "(\01325.io.deephaven.proto.backplane.grpc.D" - "ropColumnsRequestH\000\022J\n\006update\030\004 \001(\01328.io" + "eyBackedH\000\022`\n\005blink\030\003 \001(\0132O.io.deephaven" + ".proto.backplane.grpc.CreateInputTableRe" + "quest.InputTableKind.BlinkH\000\032\024\n\022InMemory" + "AppendOnly\032(\n\021InMemoryKeyBacked\022\023\n\013key_c" + "olumns\030\001 \003(\t\032\007\n\005BlinkB\006\n\004kindB\014\n\ndefinit" + "ion\"\203\002\n\016WhereInRequest\022<\n\tresult_id\030\001 \001(" + "\0132).io.deephaven.proto.backplane.grpc.Ti" + "cket\022B\n\007left_id\030\002 \001(\01321.io.deephaven.pro" + "to.backplane.grpc.TableReference\022C\n\010righ" + "t_id\030\003 \001(\01321.io.deephaven.proto.backplan" + "e.grpc.TableReference\022\020\n\010inverted\030\004 \001(\010\022" + "\030\n\020columns_to_match\030\005 \003(\t\"\352\001\n\027ColumnStat" + "isticsRequest\022<\n\tresult_id\030\001 \001(\0132).io.de" + "ephaven.proto.backplane.grpc.Ticket\022D\n\ts" + "ource_id\030\002 \001(\01321.io.deephaven.proto.back" + "plane.grpc.TableReference\022\023\n\013column_name" + "\030\003 \001(\t\022\037\n\022unique_value_limit\030\004 \001(\005H\000\210\001\001B" + "\025\n\023_unique_value_limit\"\310\031\n\021BatchTableReq" + "uest\022K\n\003ops\030\001 \003(\0132>.io.deephaven.proto.b" + "ackplane.grpc.BatchTableRequest.Operatio" + "n\032\345\030\n\tOperation\022K\n\013empty_table\030\001 \001(\01324.i" + "o.deephaven.proto.backplane.grpc.EmptyTa" + "bleRequestH\000\022I\n\ntime_table\030\002 \001(\01323.io.de" + "ephaven.proto.backplane.grpc.TimeTableRe" + "questH\000\022M\n\014drop_columns\030\003 \001(\01325.io.deeph" + "aven.proto.backplane.grpc.DropColumnsReq" + "uestH\000\022J\n\006update\030\004 
\001(\01328.io.deephaven.pr" + "oto.backplane.grpc.SelectOrUpdateRequest" + "H\000\022O\n\013lazy_update\030\005 \001(\01328.io.deephaven.p" + "roto.backplane.grpc.SelectOrUpdateReques" + "tH\000\022H\n\004view\030\006 \001(\01328.io.deephaven.proto.b" + "ackplane.grpc.SelectOrUpdateRequestH\000\022O\n" + "\013update_view\030\007 \001(\01328.io.deephaven.proto." + "backplane.grpc.SelectOrUpdateRequestH\000\022J" + "\n\006select\030\010 \001(\01328.io.deephaven.proto.back" + "plane.grpc.SelectOrUpdateRequestH\000\022S\n\017se" + "lect_distinct\030\t \001(\01328.io.deephaven.proto" + ".backplane.grpc.SelectDistinctRequestH\000\022" + "G\n\006filter\030\n \001(\01325.io.deephaven.proto.bac" + "kplane.grpc.FilterTableRequestH\000\022`\n\023unst" + "ructured_filter\030\013 \001(\0132A.io.deephaven.pro" + "to.backplane.grpc.UnstructuredFilterTabl" + "eRequestH\000\022C\n\004sort\030\014 \001(\01323.io.deephaven." + "proto.backplane.grpc.SortTableRequestH\000\022" + "D\n\004head\030\r \001(\01324.io.deephaven.proto.backp" + "lane.grpc.HeadOrTailRequestH\000\022D\n\004tail\030\016 " + "\001(\01324.io.deephaven.proto.backplane.grpc." + "HeadOrTailRequestH\000\022I\n\007head_by\030\017 \001(\01326.i" + "o.deephaven.proto.backplane.grpc.HeadOrT" + "ailByRequestH\000\022I\n\007tail_by\030\020 \001(\01326.io.dee" + "phaven.proto.backplane.grpc.HeadOrTailBy" + "RequestH\000\022D\n\007ungroup\030\021 \001(\01321.io.deephave" + "n.proto.backplane.grpc.UngroupRequestH\000\022" + "F\n\005merge\030\022 \001(\01325.io.deephaven.proto.back" + "plane.grpc.MergeTablesRequestH\000\022S\n\017combo" + "_aggregate\030\023 \001(\01328.io.deephaven.proto.ba" + "ckplane.grpc.ComboAggregateRequestH\000\022D\n\007" + "flatten\030\025 \001(\01321.io.deephaven.proto.backp" + "lane.grpc.FlattenRequestH\000\022\\\n\024run_chart_" + "downsample\030\026 \001(\0132<.io.deephaven.proto.ba" + "ckplane.grpc.RunChartDownsampleRequestH\000" + "\022O\n\ncross_join\030\027 \001(\01329.io.deephaven.prot" + "o.backplane.grpc.CrossJoinTablesRequestH" + "\000\022S\n\014natural_join\030\030 \001(\0132;.io.deephaven.p" + "roto.backplane.grpc.NaturalJoinTablesReq" + "uestH\000\022O\n\nexact_join\030\031 \001(\01329.io.deephave" + "n.proto.backplane.grpc.ExactJoinTablesRe" + "questH\000\022M\n\tleft_join\030\032 \001(\01328.io.deephave" + "n.proto.backplane.grpc.LeftJoinTablesReq" + "uestH\000\022R\n\nas_of_join\030\033 \001(\01328.io.deephave" + "n.proto.backplane.grpc.AsOfJoinTablesReq" + "uestB\002\030\001H\000\022K\n\013fetch_table\030\034 \001(\01324.io.dee" + "phaven.proto.backplane.grpc.FetchTableRe" + "questH\000\022^\n\025apply_preview_columns\030\036 \001(\0132=" + ".io.deephaven.proto.backplane.grpc.Apply" + "PreviewColumnsRequestH\000\022X\n\022create_input_" + "table\030\037 \001(\0132:.io.deephaven.proto.backpla" + "ne.grpc.CreateInputTableRequestH\000\022G\n\tupd" + "ate_by\030 \001(\01322.io.deephaven.proto.backpl" + "ane.grpc.UpdateByRequestH\000\022E\n\010where_in\030!" + " \001(\01321.io.deephaven.proto.backplane.grpc" + ".WhereInRequestH\000\022O\n\raggregate_all\030\" \001(\013" + "26.io.deephaven.proto.backplane.grpc.Agg" + "regateAllRequestH\000\022H\n\taggregate\030# \001(\01323." 
+ "io.deephaven.proto.backplane.grpc.Aggreg" + "ateRequestH\000\022K\n\010snapshot\030$ \001(\01327.io.deep" + "haven.proto.backplane.grpc.SnapshotTable" + "RequestH\000\022T\n\rsnapshot_when\030% \001(\0132;.io.de" + "ephaven.proto.backplane.grpc.SnapshotWhe" + "nTableRequestH\000\022I\n\nmeta_table\030& \001(\01323.io" + ".deephaven.proto.backplane.grpc.MetaTabl" + "eRequestH\000\022O\n\nrange_join\030\' \001(\01329.io.deep" + "haven.proto.backplane.grpc.RangeJoinTabl" + "esRequestH\000\022C\n\002aj\030( \001(\01325.io.deephaven.p" + "roto.backplane.grpc.AjRajTablesRequestH\000" + "\022D\n\003raj\030) \001(\01325.io.deephaven.proto.backp" + "lane.grpc.AjRajTablesRequestH\000\022W\n\021column" + "_statistics\030* \001(\0132:.io.deephaven.proto.b" + "ackplane.grpc.ColumnStatisticsRequestH\000B" + "\004\n\002opJ\004\010\024\020\025J\004\010\035\020\036*b\n\017BadDataBehavior\022#\n\037" + "BAD_DATA_BEHAVIOR_NOT_SPECIFIED\020\000\022\t\n\005THR" + "OW\020\001\022\t\n\005RESET\020\002\022\010\n\004SKIP\020\003\022\n\n\006POISON\020\004*t\n" + "\024UpdateByNullBehavior\022\037\n\033NULL_BEHAVIOR_N" + "OT_SPECIFIED\020\000\022\022\n\016NULL_DOMINATES\020\001\022\023\n\017VA" + "LUE_DOMINATES\020\002\022\022\n\016ZERO_DOMINATES\020\003*\033\n\tN" + "ullValue\022\016\n\nNULL_VALUE\020\000*2\n\017CaseSensitiv" + "ity\022\016\n\nMATCH_CASE\020\000\022\017\n\013IGNORE_CASE\020\001*&\n\t" + "MatchType\022\013\n\007REGULAR\020\000\022\014\n\010INVERTED\020\0012\2500\n" + "\014TableService\022\221\001\n GetExportedTableCreati" + "onResponse\022).io.deephaven.proto.backplan" + "e.grpc.Ticket\032@.io.deephaven.proto.backp" + "lane.grpc.ExportedTableCreationResponse\"" + "\000\022\206\001\n\nFetchTable\0224.io.deephaven.proto.ba" + "ckplane.grpc.FetchTableRequest\032@.io.deep" + "haven.proto.backplane.grpc.ExportedTable" + "CreationResponse\"\000\022\230\001\n\023ApplyPreviewColum" + "ns\022=.io.deephaven.proto.backplane.grpc.A" + "pplyPreviewColumnsRequest\032@.io.deephaven" + ".proto.backplane.grpc.ExportedTableCreat" + "ionResponse\"\000\022\206\001\n\nEmptyTable\0224.io.deepha" + "ven.proto.backplane.grpc.EmptyTableReque" + "st\032@.io.deephaven.proto.backplane.grpc.E" + "xportedTableCreationResponse\"\000\022\204\001\n\tTimeT" + "able\0223.io.deephaven.proto.backplane.grpc" + ".TimeTableRequest\032@.io.deephaven.proto.b" + "ackplane.grpc.ExportedTableCreationRespo" + "nse\"\000\022\210\001\n\013DropColumns\0225.io.deephaven.pro" + "to.backplane.grpc.DropColumnsRequest\032@.i" + "o.deephaven.proto.backplane.grpc.Exporte" + "dTableCreationResponse\"\000\022\206\001\n\006Update\0228.io" ".deephaven.proto.backplane.grpc.SelectOr" - "UpdateRequestH\000\022O\n\013lazy_update\030\005 \001(\01328.i" - "o.deephaven.proto.backplane.grpc.SelectO" - "rUpdateRequestH\000\022H\n\004view\030\006 \001(\01328.io.deep" - "haven.proto.backplane.grpc.SelectOrUpdat" - "eRequestH\000\022O\n\013update_view\030\007 \001(\01328.io.dee" + "UpdateRequest\032@.io.deephaven.proto.backp" + "lane.grpc.ExportedTableCreationResponse\"" + "\000\022\212\001\n\nLazyUpdate\0228.io.deephaven.proto.ba" + "ckplane.grpc.SelectOrUpdateRequest\032@.io." 
+ "deephaven.proto.backplane.grpc.ExportedT" + "ableCreationResponse\"\000\022\204\001\n\004View\0228.io.dee" "phaven.proto.backplane.grpc.SelectOrUpda" - "teRequestH\000\022J\n\006select\030\010 \001(\01328.io.deephav" - "en.proto.backplane.grpc.SelectOrUpdateRe" - "questH\000\022S\n\017select_distinct\030\t \001(\01328.io.de" - "ephaven.proto.backplane.grpc.SelectDisti" - "nctRequestH\000\022G\n\006filter\030\n \001(\01325.io.deepha" - "ven.proto.backplane.grpc.FilterTableRequ" - "estH\000\022`\n\023unstructured_filter\030\013 \001(\0132A.io." - "deephaven.proto.backplane.grpc.Unstructu" - "redFilterTableRequestH\000\022C\n\004sort\030\014 \001(\01323." - "io.deephaven.proto.backplane.grpc.SortTa" - "bleRequestH\000\022D\n\004head\030\r \001(\01324.io.deephave" - "n.proto.backplane.grpc.HeadOrTailRequest" - "H\000\022D\n\004tail\030\016 \001(\01324.io.deephaven.proto.ba" - "ckplane.grpc.HeadOrTailRequestH\000\022I\n\007head" - "_by\030\017 \001(\01326.io.deephaven.proto.backplane" - ".grpc.HeadOrTailByRequestH\000\022I\n\007tail_by\030\020" - " \001(\01326.io.deephaven.proto.backplane.grpc" - ".HeadOrTailByRequestH\000\022D\n\007ungroup\030\021 \001(\0132" - "1.io.deephaven.proto.backplane.grpc.Ungr" - "oupRequestH\000\022F\n\005merge\030\022 \001(\01325.io.deephav" - "en.proto.backplane.grpc.MergeTablesReque" - "stH\000\022S\n\017combo_aggregate\030\023 \001(\01328.io.deeph" - "aven.proto.backplane.grpc.ComboAggregate" - "RequestH\000\022D\n\007flatten\030\025 \001(\01321.io.deephave" - "n.proto.backplane.grpc.FlattenRequestH\000\022" - "\\\n\024run_chart_downsample\030\026 \001(\0132<.io.deeph" - "aven.proto.backplane.grpc.RunChartDownsa" - "mpleRequestH\000\022O\n\ncross_join\030\027 \001(\01329.io.d" - "eephaven.proto.backplane.grpc.CrossJoinT" - "ablesRequestH\000\022S\n\014natural_join\030\030 \001(\0132;.i" - "o.deephaven.proto.backplane.grpc.Natural" - "JoinTablesRequestH\000\022O\n\nexact_join\030\031 \001(\0132" - "9.io.deephaven.proto.backplane.grpc.Exac" - "tJoinTablesRequestH\000\022M\n\tleft_join\030\032 \001(\0132" - "8.io.deephaven.proto.backplane.grpc.Left" - "JoinTablesRequestH\000\022R\n\nas_of_join\030\033 \001(\0132" - "8.io.deephaven.proto.backplane.grpc.AsOf" - "JoinTablesRequestB\002\030\001H\000\022K\n\013fetch_table\030\034" - " \001(\01324.io.deephaven.proto.backplane.grpc" - ".FetchTableRequestH\000\022^\n\025apply_preview_co" - "lumns\030\036 \001(\0132=.io.deephaven.proto.backpla" - "ne.grpc.ApplyPreviewColumnsRequestH\000\022X\n\022" - "create_input_table\030\037 \001(\0132:.io.deephaven." - "proto.backplane.grpc.CreateInputTableReq" - "uestH\000\022G\n\tupdate_by\030 \001(\01322.io.deephaven" - ".proto.backplane.grpc.UpdateByRequestH\000\022" - "E\n\010where_in\030! \001(\01321.io.deephaven.proto.b" - "ackplane.grpc.WhereInRequestH\000\022O\n\raggreg" - "ate_all\030\" \001(\01326.io.deephaven.proto.backp" - "lane.grpc.AggregateAllRequestH\000\022H\n\taggre" - "gate\030# \001(\01323.io.deephaven.proto.backplan" - "e.grpc.AggregateRequestH\000\022K\n\010snapshot\030$ " - "\001(\01327.io.deephaven.proto.backplane.grpc." - "SnapshotTableRequestH\000\022T\n\rsnapshot_when\030" - "% \001(\0132;.io.deephaven.proto.backplane.grp" - "c.SnapshotWhenTableRequestH\000\022I\n\nmeta_tab" - "le\030& \001(\01323.io.deephaven.proto.backplane." - "grpc.MetaTableRequestH\000\022O\n\nrange_join\030\' " - "\001(\01329.io.deephaven.proto.backplane.grpc." 
- "RangeJoinTablesRequestH\000\022C\n\002aj\030( \001(\01325.i" - "o.deephaven.proto.backplane.grpc.AjRajTa" - "blesRequestH\000\022D\n\003raj\030) \001(\01325.io.deephave" - "n.proto.backplane.grpc.AjRajTablesReques" - "tH\000\022W\n\021column_statistics\030* \001(\0132:.io.deep" - "haven.proto.backplane.grpc.ColumnStatist" - "icsRequestH\000B\004\n\002opJ\004\010\024\020\025J\004\010\035\020\036*b\n\017BadDat" - "aBehavior\022#\n\037BAD_DATA_BEHAVIOR_NOT_SPECI" - "FIED\020\000\022\t\n\005THROW\020\001\022\t\n\005RESET\020\002\022\010\n\004SKIP\020\003\022\n" - "\n\006POISON\020\004*t\n\024UpdateByNullBehavior\022\037\n\033NU" - "LL_BEHAVIOR_NOT_SPECIFIED\020\000\022\022\n\016NULL_DOMI" - "NATES\020\001\022\023\n\017VALUE_DOMINATES\020\002\022\022\n\016ZERO_DOM" - "INATES\020\003*\033\n\tNullValue\022\016\n\nNULL_VALUE\020\000*2\n" - "\017CaseSensitivity\022\016\n\nMATCH_CASE\020\000\022\017\n\013IGNO" - "RE_CASE\020\001*&\n\tMatchType\022\013\n\007REGULAR\020\000\022\014\n\010I" - "NVERTED\020\0012\2500\n\014TableService\022\221\001\n GetExport" - "edTableCreationResponse\022).io.deephaven.p" - "roto.backplane.grpc.Ticket\032@.io.deephave" - "n.proto.backplane.grpc.ExportedTableCrea" - "tionResponse\"\000\022\206\001\n\nFetchTable\0224.io.deeph" - "aven.proto.backplane.grpc.FetchTableRequ" - "est\032@.io.deephaven.proto.backplane.grpc." - "ExportedTableCreationResponse\"\000\022\230\001\n\023Appl" - "yPreviewColumns\022=.io.deephaven.proto.bac" - "kplane.grpc.ApplyPreviewColumnsRequest\032@" - ".io.deephaven.proto.backplane.grpc.Expor" - "tedTableCreationResponse\"\000\022\206\001\n\nEmptyTabl" - "e\0224.io.deephaven.proto.backplane.grpc.Em" - "ptyTableRequest\032@.io.deephaven.proto.bac" - "kplane.grpc.ExportedTableCreationRespons" - "e\"\000\022\204\001\n\tTimeTable\0223.io.deephaven.proto.b" - "ackplane.grpc.TimeTableRequest\032@.io.deep" + "teRequest\032@.io.deephaven.proto.backplane" + ".grpc.ExportedTableCreationResponse\"\000\022\212\001" + "\n\nUpdateView\0228.io.deephaven.proto.backpl" + "ane.grpc.SelectOrUpdateRequest\032@.io.deep" "haven.proto.backplane.grpc.ExportedTable" - "CreationResponse\"\000\022\210\001\n\013DropColumns\0225.io." - "deephaven.proto.backplane.grpc.DropColum" - "nsRequest\032@.io.deephaven.proto.backplane" - ".grpc.ExportedTableCreationResponse\"\000\022\206\001" - "\n\006Update\0228.io.deephaven.proto.backplane." - "grpc.SelectOrUpdateRequest\032@.io.deephave" - "n.proto.backplane.grpc.ExportedTableCrea" - "tionResponse\"\000\022\212\001\n\nLazyUpdate\0228.io.deeph" + "CreationResponse\"\000\022\206\001\n\006Select\0228.io.deeph" "aven.proto.backplane.grpc.SelectOrUpdate" "Request\032@.io.deephaven.proto.backplane.g" - "rpc.ExportedTableCreationResponse\"\000\022\204\001\n\004" - "View\0228.io.deephaven.proto.backplane.grpc" - ".SelectOrUpdateRequest\032@.io.deephaven.pr" - "oto.backplane.grpc.ExportedTableCreation" - "Response\"\000\022\212\001\n\nUpdateView\0228.io.deephaven" - ".proto.backplane.grpc.SelectOrUpdateRequ" - "est\032@.io.deephaven.proto.backplane.grpc." - "ExportedTableCreationResponse\"\000\022\206\001\n\006Sele" - "ct\0228.io.deephaven.proto.backplane.grpc.S" - "electOrUpdateRequest\032@.io.deephaven.prot" + "rpc.ExportedTableCreationResponse\"\000\022\202\001\n\010" + "UpdateBy\0222.io.deephaven.proto.backplane." 
+ "grpc.UpdateByRequest\032@.io.deephaven.prot" "o.backplane.grpc.ExportedTableCreationRe" - "sponse\"\000\022\202\001\n\010UpdateBy\0222.io.deephaven.pro" - "to.backplane.grpc.UpdateByRequest\032@.io.d" - "eephaven.proto.backplane.grpc.ExportedTa" - "bleCreationResponse\"\000\022\216\001\n\016SelectDistinct" - "\0228.io.deephaven.proto.backplane.grpc.Sel" - "ectDistinctRequest\032@.io.deephaven.proto." - "backplane.grpc.ExportedTableCreationResp" - "onse\"\000\022\203\001\n\006Filter\0225.io.deephaven.proto.b" - "ackplane.grpc.FilterTableRequest\032@.io.de" - "ephaven.proto.backplane.grpc.ExportedTab" - "leCreationResponse\"\000\022\233\001\n\022UnstructuredFil" - "ter\022A.io.deephaven.proto.backplane.grpc." - "UnstructuredFilterTableRequest\032@.io.deep" - "haven.proto.backplane.grpc.ExportedTable" - "CreationResponse\"\000\022\177\n\004Sort\0223.io.deephave" - "n.proto.backplane.grpc.SortTableRequest\032" - "@.io.deephaven.proto.backplane.grpc.Expo" - "rtedTableCreationResponse\"\000\022\200\001\n\004Head\0224.i" - "o.deephaven.proto.backplane.grpc.HeadOrT" - "ailRequest\032@.io.deephaven.proto.backplan" - "e.grpc.ExportedTableCreationResponse\"\000\022\200" - "\001\n\004Tail\0224.io.deephaven.proto.backplane.g" - "rpc.HeadOrTailRequest\032@.io.deephaven.pro" - "to.backplane.grpc.ExportedTableCreationR" - "esponse\"\000\022\204\001\n\006HeadBy\0226.io.deephaven.prot" - "o.backplane.grpc.HeadOrTailByRequest\032@.i" - "o.deephaven.proto.backplane.grpc.Exporte" - "dTableCreationResponse\"\000\022\204\001\n\006TailBy\0226.io" - ".deephaven.proto.backplane.grpc.HeadOrTa" - "ilByRequest\032@.io.deephaven.proto.backpla" - "ne.grpc.ExportedTableCreationResponse\"\000\022" - "\200\001\n\007Ungroup\0221.io.deephaven.proto.backpla" - "ne.grpc.UngroupRequest\032@.io.deephaven.pr" - "oto.backplane.grpc.ExportedTableCreation" - "Response\"\000\022\210\001\n\013MergeTables\0225.io.deephave" - "n.proto.backplane.grpc.MergeTablesReques" - "t\032@.io.deephaven.proto.backplane.grpc.Ex" - "portedTableCreationResponse\"\000\022\220\001\n\017CrossJ" - "oinTables\0229.io.deephaven.proto.backplane" - ".grpc.CrossJoinTablesRequest\032@.io.deepha" - "ven.proto.backplane.grpc.ExportedTableCr" - "eationResponse\"\000\022\224\001\n\021NaturalJoinTables\022;" - ".io.deephaven.proto.backplane.grpc.Natur" - "alJoinTablesRequest\032@.io.deephaven.proto" + "sponse\"\000\022\216\001\n\016SelectDistinct\0228.io.deephav" + "en.proto.backplane.grpc.SelectDistinctRe" + "quest\032@.io.deephaven.proto.backplane.grp" + "c.ExportedTableCreationResponse\"\000\022\203\001\n\006Fi" + "lter\0225.io.deephaven.proto.backplane.grpc" + ".FilterTableRequest\032@.io.deephaven.proto" ".backplane.grpc.ExportedTableCreationRes" - "ponse\"\000\022\220\001\n\017ExactJoinTables\0229.io.deephav" - "en.proto.backplane.grpc.ExactJoinTablesR" - "equest\032@.io.deephaven.proto.backplane.gr" - "pc.ExportedTableCreationResponse\"\000\022\216\001\n\016L" - "eftJoinTables\0228.io.deephaven.proto.backp" - "lane.grpc.LeftJoinTablesRequest\032@.io.dee" - "phaven.proto.backplane.grpc.ExportedTabl" - "eCreationResponse\"\000\022\221\001\n\016AsOfJoinTables\0228" - ".io.deephaven.proto.backplane.grpc.AsOfJ" - "oinTablesRequest\032@.io.deephaven.proto.ba" - "ckplane.grpc.ExportedTableCreationRespon" - "se\"\003\210\002\001\022\205\001\n\010AjTables\0225.io.deephaven.prot" - "o.backplane.grpc.AjRajTablesRequest\032@.io" - ".deephaven.proto.backplane.grpc.Exported" - "TableCreationResponse\"\000\022\206\001\n\tRajTables\0225." 
- "io.deephaven.proto.backplane.grpc.AjRajT" - "ablesRequest\032@.io.deephaven.proto.backpl" - "ane.grpc.ExportedTableCreationResponse\"\000" - "\022\220\001\n\017RangeJoinTables\0229.io.deephaven.prot" - "o.backplane.grpc.RangeJoinTablesRequest\032" - "@.io.deephaven.proto.backplane.grpc.Expo" - "rtedTableCreationResponse\"\000\022\221\001\n\016ComboAgg" - "regate\0228.io.deephaven.proto.backplane.gr" - "pc.ComboAggregateRequest\032@.io.deephaven." - "proto.backplane.grpc.ExportedTableCreati" - "onResponse\"\003\210\002\001\022\212\001\n\014AggregateAll\0226.io.de" - "ephaven.proto.backplane.grpc.AggregateAl" + "ponse\"\000\022\233\001\n\022UnstructuredFilter\022A.io.deep" + "haven.proto.backplane.grpc.UnstructuredF" + "ilterTableRequest\032@.io.deephaven.proto.b" + "ackplane.grpc.ExportedTableCreationRespo" + "nse\"\000\022\177\n\004Sort\0223.io.deephaven.proto.backp" + "lane.grpc.SortTableRequest\032@.io.deephave" + "n.proto.backplane.grpc.ExportedTableCrea" + "tionResponse\"\000\022\200\001\n\004Head\0224.io.deephaven.p" + "roto.backplane.grpc.HeadOrTailRequest\032@." + "io.deephaven.proto.backplane.grpc.Export" + "edTableCreationResponse\"\000\022\200\001\n\004Tail\0224.io." + "deephaven.proto.backplane.grpc.HeadOrTai" "lRequest\032@.io.deephaven.proto.backplane." "grpc.ExportedTableCreationResponse\"\000\022\204\001\n" - "\tAggregate\0223.io.deephaven.proto.backplan" - "e.grpc.AggregateRequest\032@.io.deephaven.p" + "\006HeadBy\0226.io.deephaven.proto.backplane.g" + "rpc.HeadOrTailByRequest\032@.io.deephaven.p" "roto.backplane.grpc.ExportedTableCreatio" - "nResponse\"\000\022\207\001\n\010Snapshot\0227.io.deephaven." - "proto.backplane.grpc.SnapshotTableReques" - "t\032@.io.deephaven.proto.backplane.grpc.Ex" - "portedTableCreationResponse\"\000\022\217\001\n\014Snapsh" - "otWhen\022;.io.deephaven.proto.backplane.gr" - "pc.SnapshotWhenTableRequest\032@.io.deephav" - "en.proto.backplane.grpc.ExportedTableCre" - "ationResponse\"\000\022\200\001\n\007Flatten\0221.io.deephav" - "en.proto.backplane.grpc.FlattenRequest\032@" + "nResponse\"\000\022\204\001\n\006TailBy\0226.io.deephaven.pr" + "oto.backplane.grpc.HeadOrTailByRequest\032@" ".io.deephaven.proto.backplane.grpc.Expor" - "tedTableCreationResponse\"\000\022\226\001\n\022RunChartD" - "ownsample\022<.io.deephaven.proto.backplane" - ".grpc.RunChartDownsampleRequest\032@.io.dee" - "phaven.proto.backplane.grpc.ExportedTabl" - "eCreationResponse\"\000\022\222\001\n\020CreateInputTable" - "\022:.io.deephaven.proto.backplane.grpc.Cre" - "ateInputTableRequest\032@.io.deephaven.prot" - "o.backplane.grpc.ExportedTableCreationRe" - "sponse\"\000\022\200\001\n\007WhereIn\0221.io.deephaven.prot" - "o.backplane.grpc.WhereInRequest\032@.io.dee" - "phaven.proto.backplane.grpc.ExportedTabl" - "eCreationResponse\"\000\022\203\001\n\005Batch\0224.io.deeph" - "aven.proto.backplane.grpc.BatchTableRequ" + "tedTableCreationResponse\"\000\022\200\001\n\007Ungroup\0221" + ".io.deephaven.proto.backplane.grpc.Ungro" + "upRequest\032@.io.deephaven.proto.backplane" + ".grpc.ExportedTableCreationResponse\"\000\022\210\001" + "\n\013MergeTables\0225.io.deephaven.proto.backp" + "lane.grpc.MergeTablesRequest\032@.io.deepha" + "ven.proto.backplane.grpc.ExportedTableCr" + "eationResponse\"\000\022\220\001\n\017CrossJoinTables\0229.i" + "o.deephaven.proto.backplane.grpc.CrossJo" + "inTablesRequest\032@.io.deephaven.proto.bac" + "kplane.grpc.ExportedTableCreationRespons" + "e\"\000\022\224\001\n\021NaturalJoinTables\022;.io.deephaven" + 
".proto.backplane.grpc.NaturalJoinTablesR" + "equest\032@.io.deephaven.proto.backplane.gr" + "pc.ExportedTableCreationResponse\"\000\022\220\001\n\017E" + "xactJoinTables\0229.io.deephaven.proto.back" + "plane.grpc.ExactJoinTablesRequest\032@.io.d" + "eephaven.proto.backplane.grpc.ExportedTa" + "bleCreationResponse\"\000\022\216\001\n\016LeftJoinTables" + "\0228.io.deephaven.proto.backplane.grpc.Lef" + "tJoinTablesRequest\032@.io.deephaven.proto." + "backplane.grpc.ExportedTableCreationResp" + "onse\"\000\022\221\001\n\016AsOfJoinTables\0228.io.deephaven" + ".proto.backplane.grpc.AsOfJoinTablesRequ" "est\032@.io.deephaven.proto.backplane.grpc." - "ExportedTableCreationResponse\"\0000\001\022\231\001\n\024Ex" - "portedTableUpdates\022>.io.deephaven.proto." - "backplane.grpc.ExportedTableUpdatesReque" - "st\032=.io.deephaven.proto.backplane.grpc.E" - "xportedTableUpdateMessage\"\0000\001\022r\n\007SeekRow" - "\0221.io.deephaven.proto.backplane.grpc.See" - "kRowRequest\0322.io.deephaven.proto.backpla" - "ne.grpc.SeekRowResponse\"\000\022\204\001\n\tMetaTable\022" - "3.io.deephaven.proto.backplane.grpc.Meta" - "TableRequest\032@.io.deephaven.proto.backpl" - "ane.grpc.ExportedTableCreationResponse\"\000" - "\022\231\001\n\027ComputeColumnStatistics\022:.io.deepha" - "ven.proto.backplane.grpc.ColumnStatistic" - "sRequest\032@.io.deephaven.proto.backplane." - "grpc.ExportedTableCreationResponse\"\000BAH\001" - "P\001Z;github.com/deephaven/deephaven-core/" - "go/internal/proto/tableb\006proto3" + "ExportedTableCreationResponse\"\003\210\002\001\022\205\001\n\010A" + "jTables\0225.io.deephaven.proto.backplane.g" + "rpc.AjRajTablesRequest\032@.io.deephaven.pr" + "oto.backplane.grpc.ExportedTableCreation" + "Response\"\000\022\206\001\n\tRajTables\0225.io.deephaven." + "proto.backplane.grpc.AjRajTablesRequest\032" + "@.io.deephaven.proto.backplane.grpc.Expo" + "rtedTableCreationResponse\"\000\022\220\001\n\017RangeJoi" + "nTables\0229.io.deephaven.proto.backplane.g" + "rpc.RangeJoinTablesRequest\032@.io.deephave" + "n.proto.backplane.grpc.ExportedTableCrea" + "tionResponse\"\000\022\221\001\n\016ComboAggregate\0228.io.d" + "eephaven.proto.backplane.grpc.ComboAggre" + "gateRequest\032@.io.deephaven.proto.backpla" + "ne.grpc.ExportedTableCreationResponse\"\003\210" + "\002\001\022\212\001\n\014AggregateAll\0226.io.deephaven.proto" + ".backplane.grpc.AggregateAllRequest\032@.io" + ".deephaven.proto.backplane.grpc.Exported" + "TableCreationResponse\"\000\022\204\001\n\tAggregate\0223." + "io.deephaven.proto.backplane.grpc.Aggreg" + "ateRequest\032@.io.deephaven.proto.backplan" + "e.grpc.ExportedTableCreationResponse\"\000\022\207" + "\001\n\010Snapshot\0227.io.deephaven.proto.backpla" + "ne.grpc.SnapshotTableRequest\032@.io.deepha" + "ven.proto.backplane.grpc.ExportedTableCr" + "eationResponse\"\000\022\217\001\n\014SnapshotWhen\022;.io.d" + "eephaven.proto.backplane.grpc.SnapshotWh" + "enTableRequest\032@.io.deephaven.proto.back" + "plane.grpc.ExportedTableCreationResponse" + "\"\000\022\200\001\n\007Flatten\0221.io.deephaven.proto.back" + "plane.grpc.FlattenRequest\032@.io.deephaven" + ".proto.backplane.grpc.ExportedTableCreat" + "ionResponse\"\000\022\226\001\n\022RunChartDownsample\022<.i" + "o.deephaven.proto.backplane.grpc.RunChar" + "tDownsampleRequest\032@.io.deephaven.proto." 
+ "backplane.grpc.ExportedTableCreationResp" + "onse\"\000\022\222\001\n\020CreateInputTable\022:.io.deephav" + "en.proto.backplane.grpc.CreateInputTable" + "Request\032@.io.deephaven.proto.backplane.g" + "rpc.ExportedTableCreationResponse\"\000\022\200\001\n\007" + "WhereIn\0221.io.deephaven.proto.backplane.g" + "rpc.WhereInRequest\032@.io.deephaven.proto." + "backplane.grpc.ExportedTableCreationResp" + "onse\"\000\022\203\001\n\005Batch\0224.io.deephaven.proto.ba" + "ckplane.grpc.BatchTableRequest\032@.io.deep" + "haven.proto.backplane.grpc.ExportedTable" + "CreationResponse\"\0000\001\022\231\001\n\024ExportedTableUp" + "dates\022>.io.deephaven.proto.backplane.grp" + "c.ExportedTableUpdatesRequest\032=.io.deeph" + "aven.proto.backplane.grpc.ExportedTableU" + "pdateMessage\"\0000\001\022r\n\007SeekRow\0221.io.deephav" + "en.proto.backplane.grpc.SeekRowRequest\0322" + ".io.deephaven.proto.backplane.grpc.SeekR" + "owResponse\"\000\022\204\001\n\tMetaTable\0223.io.deephave" + "n.proto.backplane.grpc.MetaTableRequest\032" + "@.io.deephaven.proto.backplane.grpc.Expo" + "rtedTableCreationResponse\"\000\022\231\001\n\027ComputeC" + "olumnStatistics\022:.io.deephaven.proto.bac" + "kplane.grpc.ColumnStatisticsRequest\032@.io" + ".deephaven.proto.backplane.grpc.Exported" + "TableCreationResponse\"\000BAH\001P\001Z;github.co" + "m/deephaven/deephaven-core/go/internal/p" + "roto/tableb\006proto3" ; static const ::_pbi::DescriptorTable* const descriptor_table_deephaven_2fproto_2ftable_2eproto_deps[1] = { &::descriptor_table_deephaven_2fproto_2fticket_2eproto, }; static ::_pbi::once_flag descriptor_table_deephaven_2fproto_2ftable_2eproto_once; const ::_pbi::DescriptorTable descriptor_table_deephaven_2fproto_2ftable_2eproto = { - false, false, 34351, descriptor_table_protodef_deephaven_2fproto_2ftable_2eproto, + false, false, 34458, descriptor_table_protodef_deephaven_2fproto_2ftable_2eproto, "deephaven/proto/table.proto", - &descriptor_table_deephaven_2fproto_2ftable_2eproto_once, descriptor_table_deephaven_2fproto_2ftable_2eproto_deps, 1, 120, + &descriptor_table_deephaven_2fproto_2ftable_2eproto_once, descriptor_table_deephaven_2fproto_2ftable_2eproto_deps, 1, 121, schemas, file_default_instances, TableStruct_deephaven_2fproto_2ftable_2eproto::offsets, file_level_metadata_deephaven_2fproto_2ftable_2eproto, file_level_enum_descriptors_deephaven_2fproto_2ftable_2eproto, file_level_service_descriptors_deephaven_2fproto_2ftable_2eproto, @@ -33555,10 +33578,50 @@ ::PROTOBUF_NAMESPACE_ID::Metadata CreateInputTableRequest_InputTableKind_InMemor // =================================================================== +class CreateInputTableRequest_InputTableKind_Blink::_Internal { + public: +}; + +CreateInputTableRequest_InputTableKind_Blink::CreateInputTableRequest_InputTableKind_Blink(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned) + : ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase(arena, is_message_owned) { + // @@protoc_insertion_point(arena_constructor:io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.Blink) +} +CreateInputTableRequest_InputTableKind_Blink::CreateInputTableRequest_InputTableKind_Blink(const CreateInputTableRequest_InputTableKind_Blink& from) + : ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase() { + _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); + // @@protoc_insertion_point(copy_constructor:io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.Blink) +} + + + + + +const 
::PROTOBUF_NAMESPACE_ID::Message::ClassData CreateInputTableRequest_InputTableKind_Blink::_class_data_ = { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl, + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl, +}; +const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*CreateInputTableRequest_InputTableKind_Blink::GetClassData() const { return &_class_data_; } + + + + + + + +::PROTOBUF_NAMESPACE_ID::Metadata CreateInputTableRequest_InputTableKind_Blink::GetMetadata() const { + return ::_pbi::AssignDescriptors( + &descriptor_table_deephaven_2fproto_2ftable_2eproto_getter, &descriptor_table_deephaven_2fproto_2ftable_2eproto_once, + file_level_metadata_deephaven_2fproto_2ftable_2eproto[114]); +} + +// =================================================================== + class CreateInputTableRequest_InputTableKind::_Internal { public: static const ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryAppendOnly& in_memory_append_only(const CreateInputTableRequest_InputTableKind* msg); static const ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryKeyBacked& in_memory_key_backed(const CreateInputTableRequest_InputTableKind* msg); + static const ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink& blink(const CreateInputTableRequest_InputTableKind* msg); }; const ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryAppendOnly& @@ -33569,6 +33632,10 @@ const ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTabl CreateInputTableRequest_InputTableKind::_Internal::in_memory_key_backed(const CreateInputTableRequest_InputTableKind* msg) { return *msg->kind_.in_memory_key_backed_; } +const ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink& +CreateInputTableRequest_InputTableKind::_Internal::blink(const CreateInputTableRequest_InputTableKind* msg) { + return *msg->kind_.blink_; +} void CreateInputTableRequest_InputTableKind::set_allocated_in_memory_append_only(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryAppendOnly* in_memory_append_only) { ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); clear_kind(); @@ -33599,6 +33666,21 @@ void CreateInputTableRequest_InputTableKind::set_allocated_in_memory_key_backed( } // @@protoc_insertion_point(field_set_allocated:io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.in_memory_key_backed) } +void CreateInputTableRequest_InputTableKind::set_allocated_blink(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* blink) { + ::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaForAllocation(); + clear_kind(); + if (blink) { + ::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = + ::PROTOBUF_NAMESPACE_ID::Arena::InternalGetOwningArena(blink); + if (message_arena != submessage_arena) { + blink = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage( + message_arena, blink, submessage_arena); + } + set_has_blink(); + kind_.blink_ = blink; + } + // @@protoc_insertion_point(field_set_allocated:io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.blink) +} CreateInputTableRequest_InputTableKind::CreateInputTableRequest_InputTableKind(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned) : ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) { @@ -33618,6 +33700,10 @@ 
CreateInputTableRequest_InputTableKind::CreateInputTableRequest_InputTableKind(c _internal_mutable_in_memory_key_backed()->::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryKeyBacked::MergeFrom(from._internal_in_memory_key_backed()); break; } + case kBlink: { + _internal_mutable_blink()->::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink::MergeFrom(from._internal_blink()); + break; + } case KIND_NOT_SET: { break; } @@ -33664,6 +33750,12 @@ void CreateInputTableRequest_InputTableKind::clear_kind() { } break; } + case kBlink: { + if (GetArenaForAllocation() == nullptr) { + delete kind_.blink_; + } + break; + } case KIND_NOT_SET: { break; } @@ -33704,6 +33796,14 @@ const char* CreateInputTableRequest_InputTableKind::_InternalParse(const char* p } else goto handle_unusual; continue; + // .io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.Blink blink = 3; + case 3: + if (PROTOBUF_PREDICT_TRUE(static_cast(tag) == 26)) { + ptr = ctx->ParseMessage(_internal_mutable_blink(), ptr); + CHK_(ptr); + } else + goto handle_unusual; + continue; default: goto handle_unusual; } // switch @@ -33747,6 +33847,13 @@ uint8_t* CreateInputTableRequest_InputTableKind::_InternalSerialize( _Internal::in_memory_key_backed(this).GetCachedSize(), target, stream); } + // .io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.Blink blink = 3; + if (_internal_has_blink()) { + target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite:: + InternalWriteMessage(3, _Internal::blink(this), + _Internal::blink(this).GetCachedSize(), target, stream); + } + if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { target = ::_pbi::WireFormat::InternalSerializeUnknownFieldsToArray( _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); @@ -33778,6 +33885,13 @@ size_t CreateInputTableRequest_InputTableKind::ByteSizeLong() const { *kind_.in_memory_key_backed_); break; } + // .io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.Blink blink = 3; + case kBlink: { + total_size += 1 + + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize( + *kind_.blink_); + break; + } case KIND_NOT_SET: { break; } @@ -33813,6 +33927,10 @@ void CreateInputTableRequest_InputTableKind::MergeFrom(const CreateInputTableReq _internal_mutable_in_memory_key_backed()->::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryKeyBacked::MergeFrom(from._internal_in_memory_key_backed()); break; } + case kBlink: { + _internal_mutable_blink()->::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink::MergeFrom(from._internal_blink()); + break; + } case KIND_NOT_SET: { break; } @@ -33841,7 +33959,7 @@ void CreateInputTableRequest_InputTableKind::InternalSwap(CreateInputTableReques ::PROTOBUF_NAMESPACE_ID::Metadata CreateInputTableRequest_InputTableKind::GetMetadata() const { return ::_pbi::AssignDescriptors( &descriptor_table_deephaven_2fproto_2ftable_2eproto_getter, &descriptor_table_deephaven_2fproto_2ftable_2eproto_once, - file_level_metadata_deephaven_2fproto_2ftable_2eproto[114]); + file_level_metadata_deephaven_2fproto_2ftable_2eproto[115]); } // =================================================================== @@ -34206,7 +34324,7 @@ void CreateInputTableRequest::InternalSwap(CreateInputTableRequest* other) { ::PROTOBUF_NAMESPACE_ID::Metadata 
CreateInputTableRequest::GetMetadata() const { return ::_pbi::AssignDescriptors( &descriptor_table_deephaven_2fproto_2ftable_2eproto_getter, &descriptor_table_deephaven_2fproto_2ftable_2eproto_once, - file_level_metadata_deephaven_2fproto_2ftable_2eproto[115]); + file_level_metadata_deephaven_2fproto_2ftable_2eproto[116]); } // =================================================================== @@ -34549,7 +34667,7 @@ void WhereInRequest::InternalSwap(WhereInRequest* other) { ::PROTOBUF_NAMESPACE_ID::Metadata WhereInRequest::GetMetadata() const { return ::_pbi::AssignDescriptors( &descriptor_table_deephaven_2fproto_2ftable_2eproto_getter, &descriptor_table_deephaven_2fproto_2ftable_2eproto_once, - file_level_metadata_deephaven_2fproto_2ftable_2eproto[116]); + file_level_metadata_deephaven_2fproto_2ftable_2eproto[117]); } // =================================================================== @@ -34875,7 +34993,7 @@ void ColumnStatisticsRequest::InternalSwap(ColumnStatisticsRequest* other) { ::PROTOBUF_NAMESPACE_ID::Metadata ColumnStatisticsRequest::GetMetadata() const { return ::_pbi::AssignDescriptors( &descriptor_table_deephaven_2fproto_2ftable_2eproto_getter, &descriptor_table_deephaven_2fproto_2ftable_2eproto_once, - file_level_metadata_deephaven_2fproto_2ftable_2eproto[117]); + file_level_metadata_deephaven_2fproto_2ftable_2eproto[118]); } // =================================================================== @@ -37294,7 +37412,7 @@ void BatchTableRequest_Operation::InternalSwap(BatchTableRequest_Operation* othe ::PROTOBUF_NAMESPACE_ID::Metadata BatchTableRequest_Operation::GetMetadata() const { return ::_pbi::AssignDescriptors( &descriptor_table_deephaven_2fproto_2ftable_2eproto_getter, &descriptor_table_deephaven_2fproto_2ftable_2eproto_once, - file_level_metadata_deephaven_2fproto_2ftable_2eproto[118]); + file_level_metadata_deephaven_2fproto_2ftable_2eproto[119]); } // =================================================================== @@ -37472,7 +37590,7 @@ void BatchTableRequest::InternalSwap(BatchTableRequest* other) { ::PROTOBUF_NAMESPACE_ID::Metadata BatchTableRequest::GetMetadata() const { return ::_pbi::AssignDescriptors( &descriptor_table_deephaven_2fproto_2ftable_2eproto_getter, &descriptor_table_deephaven_2fproto_2ftable_2eproto_once, - file_level_metadata_deephaven_2fproto_2ftable_2eproto[119]); + file_level_metadata_deephaven_2fproto_2ftable_2eproto[120]); } // @@protoc_insertion_point(namespace_scope) @@ -37938,6 +38056,10 @@ template<> PROTOBUF_NOINLINE ::io::deephaven::proto::backplane::grpc::CreateInpu Arena::CreateMaybeMessage< ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryKeyBacked >(Arena* arena) { return Arena::CreateMessageInternal< ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryKeyBacked >(arena); } +template<> PROTOBUF_NOINLINE ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* +Arena::CreateMaybeMessage< ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink >(Arena* arena) { + return Arena::CreateMessageInternal< ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink >(arena); +} template<> PROTOBUF_NOINLINE ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind* Arena::CreateMaybeMessage< ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind >(Arena* arena) { return Arena::CreateMessageInternal< 
::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind >(arena); diff --git a/cpp-client/deephaven/dhclient/proto/deephaven/proto/table.pb.h b/cpp-client/deephaven/dhclient/proto/deephaven/proto/table.pb.h index 7528e511312..eed505bb572 100644 --- a/cpp-client/deephaven/dhclient/proto/deephaven/proto/table.pb.h +++ b/cpp-client/deephaven/dhclient/proto/deephaven/proto/table.pb.h @@ -187,6 +187,9 @@ extern CreateInputTableRequestDefaultTypeInternal _CreateInputTableRequest_defau class CreateInputTableRequest_InputTableKind; struct CreateInputTableRequest_InputTableKindDefaultTypeInternal; extern CreateInputTableRequest_InputTableKindDefaultTypeInternal _CreateInputTableRequest_InputTableKind_default_instance_; +class CreateInputTableRequest_InputTableKind_Blink; +struct CreateInputTableRequest_InputTableKind_BlinkDefaultTypeInternal; +extern CreateInputTableRequest_InputTableKind_BlinkDefaultTypeInternal _CreateInputTableRequest_InputTableKind_Blink_default_instance_; class CreateInputTableRequest_InputTableKind_InMemoryAppendOnly; struct CreateInputTableRequest_InputTableKind_InMemoryAppendOnlyDefaultTypeInternal; extern CreateInputTableRequest_InputTableKind_InMemoryAppendOnlyDefaultTypeInternal _CreateInputTableRequest_InputTableKind_InMemoryAppendOnly_default_instance_; @@ -463,6 +466,7 @@ template<> ::io::deephaven::proto::backplane::grpc::Condition* Arena::CreateMayb template<> ::io::deephaven::proto::backplane::grpc::ContainsCondition* Arena::CreateMaybeMessage<::io::deephaven::proto::backplane::grpc::ContainsCondition>(Arena*); template<> ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest* Arena::CreateMaybeMessage<::io::deephaven::proto::backplane::grpc::CreateInputTableRequest>(Arena*); template<> ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind* Arena::CreateMaybeMessage<::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind>(Arena*); +template<> ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* Arena::CreateMaybeMessage<::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink>(Arena*); template<> ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryAppendOnly* Arena::CreateMaybeMessage<::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryAppendOnly>(Arena*); template<> ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryKeyBacked* Arena::CreateMaybeMessage<::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryKeyBacked>(Arena*); template<> ::io::deephaven::proto::backplane::grpc::CrossJoinTablesRequest* Arena::CreateMaybeMessage<::io::deephaven::proto::backplane::grpc::CrossJoinTablesRequest>(Arena*); @@ -22456,6 +22460,122 @@ class CreateInputTableRequest_InputTableKind_InMemoryKeyBacked final : }; // ------------------------------------------------------------------- +class CreateInputTableRequest_InputTableKind_Blink final : + public ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase /* @@protoc_insertion_point(class_definition:io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.Blink) */ { + public: + inline CreateInputTableRequest_InputTableKind_Blink() : CreateInputTableRequest_InputTableKind_Blink(nullptr) {} + explicit PROTOBUF_CONSTEXPR CreateInputTableRequest_InputTableKind_Blink(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized); + + 
CreateInputTableRequest_InputTableKind_Blink(const CreateInputTableRequest_InputTableKind_Blink& from); + CreateInputTableRequest_InputTableKind_Blink(CreateInputTableRequest_InputTableKind_Blink&& from) noexcept + : CreateInputTableRequest_InputTableKind_Blink() { + *this = ::std::move(from); + } + + inline CreateInputTableRequest_InputTableKind_Blink& operator=(const CreateInputTableRequest_InputTableKind_Blink& from) { + CopyFrom(from); + return *this; + } + inline CreateInputTableRequest_InputTableKind_Blink& operator=(CreateInputTableRequest_InputTableKind_Blink&& from) noexcept { + if (this == &from) return *this; + if (GetOwningArena() == from.GetOwningArena() + #ifdef PROTOBUF_FORCE_COPY_IN_MOVE + && GetOwningArena() != nullptr + #endif // !PROTOBUF_FORCE_COPY_IN_MOVE + ) { + InternalSwap(&from); + } else { + CopyFrom(from); + } + return *this; + } + + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() { + return GetDescriptor(); + } + static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() { + return default_instance().GetMetadata().descriptor; + } + static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() { + return default_instance().GetMetadata().reflection; + } + static const CreateInputTableRequest_InputTableKind_Blink& default_instance() { + return *internal_default_instance(); + } + static inline const CreateInputTableRequest_InputTableKind_Blink* internal_default_instance() { + return reinterpret_cast( + &_CreateInputTableRequest_InputTableKind_Blink_default_instance_); + } + static constexpr int kIndexInFileMessages = + 114; + + friend void swap(CreateInputTableRequest_InputTableKind_Blink& a, CreateInputTableRequest_InputTableKind_Blink& b) { + a.Swap(&b); + } + inline void Swap(CreateInputTableRequest_InputTableKind_Blink* other) { + if (other == this) return; + #ifdef PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() != nullptr && + GetOwningArena() == other->GetOwningArena()) { + #else // PROTOBUF_FORCE_COPY_IN_SWAP + if (GetOwningArena() == other->GetOwningArena()) { + #endif // !PROTOBUF_FORCE_COPY_IN_SWAP + InternalSwap(other); + } else { + ::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other); + } + } + void UnsafeArenaSwap(CreateInputTableRequest_InputTableKind_Blink* other) { + if (other == this) return; + GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena()); + InternalSwap(other); + } + + // implements Message ---------------------------------------------- + + CreateInputTableRequest_InputTableKind_Blink* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final { + return CreateMaybeMessage(arena); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyFrom; + inline void CopyFrom(const CreateInputTableRequest_InputTableKind_Blink& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::CopyImpl(this, from); + } + using ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeFrom; + void MergeFrom(const CreateInputTableRequest_InputTableKind_Blink& from) { + ::PROTOBUF_NAMESPACE_ID::internal::ZeroFieldsBase::MergeImpl(this, from); + } + public: + + private: + friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata; + static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() { + return "io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.Blink"; + } + protected: + explicit CreateInputTableRequest_InputTableKind_Blink(::PROTOBUF_NAMESPACE_ID::Arena* arena, + bool is_message_owned = false); + public: + + static const ClassData _class_data_; + const 
::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final; + + ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // @@protoc_insertion_point(class_scope:io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.Blink) + private: + class _Internal; + + template friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper; + typedef void InternalArenaConstructable_; + typedef void DestructorSkippable_; + friend struct ::TableStruct_deephaven_2fproto_2ftable_2eproto; +}; +// ------------------------------------------------------------------- + class CreateInputTableRequest_InputTableKind final : public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind) */ { public: @@ -22502,6 +22622,7 @@ class CreateInputTableRequest_InputTableKind final : enum KindCase { kInMemoryAppendOnly = 1, kInMemoryKeyBacked = 2, + kBlink = 3, KIND_NOT_SET = 0, }; @@ -22510,7 +22631,7 @@ class CreateInputTableRequest_InputTableKind final : &_CreateInputTableRequest_InputTableKind_default_instance_); } static constexpr int kIndexInFileMessages = - 114; + 115; friend void swap(CreateInputTableRequest_InputTableKind& a, CreateInputTableRequest_InputTableKind& b) { a.Swap(&b); @@ -22580,12 +22701,14 @@ class CreateInputTableRequest_InputTableKind final : typedef CreateInputTableRequest_InputTableKind_InMemoryAppendOnly InMemoryAppendOnly; typedef CreateInputTableRequest_InputTableKind_InMemoryKeyBacked InMemoryKeyBacked; + typedef CreateInputTableRequest_InputTableKind_Blink Blink; // accessors ------------------------------------------------------- enum : int { kInMemoryAppendOnlyFieldNumber = 1, kInMemoryKeyBackedFieldNumber = 2, + kBlinkFieldNumber = 3, }; // .io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.InMemoryAppendOnly in_memory_append_only = 1; bool has_in_memory_append_only() const; @@ -22623,6 +22746,24 @@ class CreateInputTableRequest_InputTableKind final : ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryKeyBacked* in_memory_key_backed); ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryKeyBacked* unsafe_arena_release_in_memory_key_backed(); + // .io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.Blink blink = 3; + bool has_blink() const; + private: + bool _internal_has_blink() const; + public: + void clear_blink(); + const ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink& blink() const; + PROTOBUF_NODISCARD ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* release_blink(); + ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* mutable_blink(); + void set_allocated_blink(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* blink); + private: + const ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink& _internal_blink() const; + ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* _internal_mutable_blink(); + public: + void unsafe_arena_set_allocated_blink( + ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* blink); + 
::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* unsafe_arena_release_blink(); + void clear_kind(); KindCase kind_case() const; // @@protoc_insertion_point(class_scope:io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind) @@ -22630,6 +22771,7 @@ class CreateInputTableRequest_InputTableKind final : class _Internal; void set_has_in_memory_append_only(); void set_has_in_memory_key_backed(); + void set_has_blink(); inline bool has_kind() const; inline void clear_has_kind(); @@ -22642,6 +22784,7 @@ class CreateInputTableRequest_InputTableKind final : ::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized _constinit_; ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryAppendOnly* in_memory_append_only_; ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_InMemoryKeyBacked* in_memory_key_backed_; + ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* blink_; } kind_; mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_; uint32_t _oneof_case_[1]; @@ -22704,7 +22847,7 @@ class CreateInputTableRequest final : &_CreateInputTableRequest_default_instance_); } static constexpr int kIndexInFileMessages = - 115; + 116; friend void swap(CreateInputTableRequest& a, CreateInputTableRequest& b) { a.Swap(&b); @@ -22931,7 +23074,7 @@ class WhereInRequest final : &_WhereInRequest_default_instance_); } static constexpr int kIndexInFileMessages = - 116; + 117; friend void swap(WhereInRequest& a, WhereInRequest& b) { a.Swap(&b); @@ -23160,7 +23303,7 @@ class ColumnStatisticsRequest final : &_ColumnStatisticsRequest_default_instance_); } static constexpr int kIndexInFileMessages = - 117; + 118; friend void swap(ColumnStatisticsRequest& a, ColumnStatisticsRequest& b) { a.Swap(&b); @@ -23408,7 +23551,7 @@ class BatchTableRequest_Operation final : &_BatchTableRequest_Operation_default_instance_); } static constexpr int kIndexInFileMessages = - 118; + 119; friend void swap(BatchTableRequest_Operation& a, BatchTableRequest_Operation& b) { a.Swap(&b); @@ -24391,7 +24534,7 @@ class BatchTableRequest final : &_BatchTableRequest_default_instance_); } static constexpr int kIndexInFileMessages = - 119; + 120; friend void swap(BatchTableRequest& a, BatchTableRequest& b) { a.Swap(&b); @@ -46381,6 +46524,10 @@ CreateInputTableRequest_InputTableKind_InMemoryKeyBacked::mutable_key_columns() // ------------------------------------------------------------------- +// CreateInputTableRequest_InputTableKind_Blink + +// ------------------------------------------------------------------- + // CreateInputTableRequest_InputTableKind // .io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.InMemoryAppendOnly in_memory_append_only = 1; @@ -46531,6 +46678,80 @@ inline ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTab return _msg; } +// .io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.Blink blink = 3; +inline bool CreateInputTableRequest_InputTableKind::_internal_has_blink() const { + return kind_case() == kBlink; +} +inline bool CreateInputTableRequest_InputTableKind::has_blink() const { + return _internal_has_blink(); +} +inline void CreateInputTableRequest_InputTableKind::set_has_blink() { + _oneof_case_[0] = kBlink; +} +inline void CreateInputTableRequest_InputTableKind::clear_blink() { + if (_internal_has_blink()) { + if (GetArenaForAllocation() == nullptr) { + delete kind_.blink_; + } + 
clear_has_kind(); + } +} +inline ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* CreateInputTableRequest_InputTableKind::release_blink() { + // @@protoc_insertion_point(field_release:io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.blink) + if (_internal_has_blink()) { + clear_has_kind(); + ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* temp = kind_.blink_; + if (GetArenaForAllocation() != nullptr) { + temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp); + } + kind_.blink_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline const ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink& CreateInputTableRequest_InputTableKind::_internal_blink() const { + return _internal_has_blink() + ? *kind_.blink_ + : reinterpret_cast< ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink&>(::io::deephaven::proto::backplane::grpc::_CreateInputTableRequest_InputTableKind_Blink_default_instance_); +} +inline const ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink& CreateInputTableRequest_InputTableKind::blink() const { + // @@protoc_insertion_point(field_get:io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.blink) + return _internal_blink(); +} +inline ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* CreateInputTableRequest_InputTableKind::unsafe_arena_release_blink() { + // @@protoc_insertion_point(field_unsafe_arena_release:io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.blink) + if (_internal_has_blink()) { + clear_has_kind(); + ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* temp = kind_.blink_; + kind_.blink_ = nullptr; + return temp; + } else { + return nullptr; + } +} +inline void CreateInputTableRequest_InputTableKind::unsafe_arena_set_allocated_blink(::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* blink) { + clear_kind(); + if (blink) { + set_has_blink(); + kind_.blink_ = blink; + } + // @@protoc_insertion_point(field_unsafe_arena_set_allocated:io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.blink) +} +inline ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* CreateInputTableRequest_InputTableKind::_internal_mutable_blink() { + if (!_internal_has_blink()) { + clear_kind(); + set_has_blink(); + kind_.blink_ = CreateMaybeMessage< ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink >(GetArenaForAllocation()); + } + return kind_.blink_; +} +inline ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* CreateInputTableRequest_InputTableKind::mutable_blink() { + ::io::deephaven::proto::backplane::grpc::CreateInputTableRequest_InputTableKind_Blink* _msg = _internal_mutable_blink(); + // @@protoc_insertion_point(field_mutable:io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.blink) + return _msg; +} + inline bool CreateInputTableRequest_InputTableKind::has_kind() const { return kind_case() != KIND_NOT_SET; } @@ -50758,6 +50979,8 @@ BatchTableRequest::ops() const { // ------------------------------------------------------------------- +// ------------------------------------------------------------------- + // @@protoc_insertion_point(namespace_scope) diff --git 
a/docker/registry/server-base/gradle.properties b/docker/registry/server-base/gradle.properties
index f24e145c310..4367ce45c0d 100644
--- a/docker/registry/server-base/gradle.properties
+++ b/docker/registry/server-base/gradle.properties
@@ -1,3 +1,3 @@
 io.deephaven.project.ProjectType=DOCKER_REGISTRY
 deephaven.registry.imageName=ghcr.io/deephaven/server-base:edge
-deephaven.registry.imageId=ghcr.io/deephaven/server-base@sha256:b02de3d96469d38a2ba5999f04a6d99e0c5f5e5e34482e57d47bd4bb64108a7c
+deephaven.registry.imageId=ghcr.io/deephaven/server-base@sha256:8f9b993a4ce7c78b50b869be840241a0a9d19d3f4f35601f20cd05475abd5753
diff --git a/docker/server-jetty/src/main/server-jetty/requirements.txt b/docker/server-jetty/src/main/server-jetty/requirements.txt
index abd518c2e76..a33870876ee 100644
--- a/docker/server-jetty/src/main/server-jetty/requirements.txt
+++ b/docker/server-jetty/src/main/server-jetty/requirements.txt
@@ -1,18 +1,18 @@
 adbc-driver-manager==0.8.0
 adbc-driver-postgresql==0.8.0
 connectorx==0.3.2; platform.machine == 'x86_64'
-deephaven-plugin==0.5.0
+deephaven-plugin==0.6.0
 java-utilities==0.2.0
 jedi==0.18.2
 jpy==0.14.0
 llvmlite==0.41.1
 numba==0.58.1
-numpy==1.26.1
-pandas==2.1.2
+numpy==1.26.2
+pandas==2.1.4
 parso==0.8.3
-pyarrow==13.0.0
+pyarrow==14.0.1
 python-dateutil==2.8.2
 pytz==2023.3.post1
 six==1.16.0
-turbodbc==4.7.0
+turbodbc==4.8.0
 tzdata==2023.3
diff --git a/docker/server/src/main/server-netty/requirements.txt b/docker/server/src/main/server-netty/requirements.txt
index abd518c2e76..a33870876ee 100644
--- a/docker/server/src/main/server-netty/requirements.txt
+++ b/docker/server/src/main/server-netty/requirements.txt
@@ -1,18 +1,18 @@
 adbc-driver-manager==0.8.0
 adbc-driver-postgresql==0.8.0
 connectorx==0.3.2; platform.machine == 'x86_64'
-deephaven-plugin==0.5.0
+deephaven-plugin==0.6.0
 java-utilities==0.2.0
 jedi==0.18.2
 jpy==0.14.0
 llvmlite==0.41.1
 numba==0.58.1
-numpy==1.26.1
-pandas==2.1.2
+numpy==1.26.2
+pandas==2.1.4
 parso==0.8.3
-pyarrow==13.0.0
+pyarrow==14.0.1
 python-dateutil==2.8.2
 pytz==2023.3.post1
 six==1.16.0
-turbodbc==4.7.0
+turbodbc==4.8.0
 tzdata==2023.3
diff --git a/engine/context/src/main/java/io/deephaven/engine/context/ExecutionContext.java b/engine/context/src/main/java/io/deephaven/engine/context/ExecutionContext.java
index 05643ce6166..a698a85b841 100644
--- a/engine/context/src/main/java/io/deephaven/engine/context/ExecutionContext.java
+++ b/engine/context/src/main/java/io/deephaven/engine/context/ExecutionContext.java
@@ -4,6 +4,7 @@
 package io.deephaven.engine.context;
 
 import io.deephaven.auth.AuthContext;
+import io.deephaven.engine.updategraph.OperationInitializer;
 import io.deephaven.engine.updategraph.UpdateGraph;
 import io.deephaven.util.SafeCloseable;
 import io.deephaven.util.annotations.ScriptApi;
@@ -13,10 +14,26 @@
 import java.util.Objects;
 import java.util.function.Supplier;
 
+/**
+ * Container for context-specific objects that can be activated on a thread or passed to certain operations.
+ * ExecutionContexts are immutable and support a builder pattern to create new instances, plus "with" methods to
+ * customize existing ones. Any thread that interacts with the Deephaven engine will need to have an active
+ * ExecutionContext.
+ */
 public class ExecutionContext {
 
+    /**
+     * Creates a new builder for an ExecutionContext, capturing the current thread's auth context, update graph, and
+     * operation initializer.
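// Illustrative sketch, not part of the patch: given the capture behavior
// described above, a thread with an active ExecutionContext can derive and
// activate a customized context without explicitly re-capturing the update
// graph or operation initializer.
try (final SafeCloseable ignored = ExecutionContext.newBuilder()
        .captureQueryScope()
        .markSystemic()
        .build()
        .open()) {
    // engine work here sees the captured update graph and operation initializer
}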
Typically, this method should be called on a thread that already has an active + * ExecutionContext, to more easily reuse those. + * + * @return a new builder to create an ExecutionContext + */ public static Builder newBuilder() { - return new Builder(); + ExecutionContext existing = getContext(); + return new Builder() + .setUpdateGraph(existing.getUpdateGraph()) + .setOperationInitializer(existing.getInitializer()); } public static ExecutionContext makeExecutionContext(boolean isSystemic) { @@ -83,6 +100,7 @@ private static void setContext(final ExecutionContext context) { private final QueryScope queryScope; private final QueryCompiler queryCompiler; private final UpdateGraph updateGraph; + private final OperationInitializer operationInitializer; private ExecutionContext( final boolean isSystemic, @@ -90,13 +108,15 @@ private ExecutionContext( final QueryLibrary queryLibrary, final QueryScope queryScope, final QueryCompiler queryCompiler, - final UpdateGraph updateGraph) { + final UpdateGraph updateGraph, + OperationInitializer operationInitializer) { this.isSystemic = isSystemic; this.authContext = authContext; this.queryLibrary = Objects.requireNonNull(queryLibrary); this.queryScope = Objects.requireNonNull(queryScope); this.queryCompiler = Objects.requireNonNull(queryCompiler); - this.updateGraph = updateGraph; + this.updateGraph = Objects.requireNonNull(updateGraph); + this.operationInitializer = Objects.requireNonNull(operationInitializer); } /** @@ -110,7 +130,8 @@ public ExecutionContext withSystemic(boolean isSystemic) { if (isSystemic == this.isSystemic) { return this; } - return new ExecutionContext(isSystemic, authContext, queryLibrary, queryScope, queryCompiler, updateGraph); + return new ExecutionContext(isSystemic, authContext, queryLibrary, queryScope, queryCompiler, updateGraph, + operationInitializer); } /** @@ -124,7 +145,8 @@ public ExecutionContext withAuthContext(final AuthContext authContext) { if (authContext == this.authContext) { return this; } - return new ExecutionContext(isSystemic, authContext, queryLibrary, queryScope, queryCompiler, updateGraph); + return new ExecutionContext(isSystemic, authContext, queryLibrary, queryScope, queryCompiler, updateGraph, + operationInitializer); } /** @@ -138,7 +160,16 @@ public ExecutionContext withUpdateGraph(final UpdateGraph updateGraph) { if (updateGraph == this.updateGraph) { return this; } - return new ExecutionContext(isSystemic, authContext, queryLibrary, queryScope, queryCompiler, updateGraph); + return new ExecutionContext(isSystemic, authContext, queryLibrary, queryScope, queryCompiler, updateGraph, + operationInitializer); + } + + public ExecutionContext withOperationInitializer(final OperationInitializer operationInitializer) { + if (operationInitializer == this.operationInitializer) { + return this; + } + return new ExecutionContext(isSystemic, authContext, queryLibrary, queryScope, queryCompiler, updateGraph, + operationInitializer); } /** @@ -198,6 +229,10 @@ public UpdateGraph getUpdateGraph() { return updateGraph; } + public OperationInitializer getInitializer() { + return operationInitializer; + } + @SuppressWarnings("unused") public static class Builder { private boolean isSystemic = false; @@ -208,6 +243,7 @@ public static class Builder { private QueryScope queryScope = PoisonedQueryScope.INSTANCE; private QueryCompiler queryCompiler = PoisonedQueryCompiler.INSTANCE; private UpdateGraph updateGraph = PoisonedUpdateGraph.INSTANCE; + private OperationInitializer operationInitializer = 
PoisonedOperationInitializer.INSTANCE;
 
     private Builder() {
         // propagate the auth context from the current context
@@ -356,19 +392,32 @@ public Builder setUpdateGraph(UpdateGraph updateGraph) {
 
         /**
          * Use the current ExecutionContext's UpdateGraph instance.
+         *
+         * @deprecated The update graph is automatically captured; this method should no longer be needed.
          */
         @ScriptApi
+        @Deprecated(forRemoval = true, since = "0.31")
         public Builder captureUpdateGraph() {
             this.updateGraph = getContext().getUpdateGraph();
             return this;
         }
 
+        /**
+         * Use the specified operation initializer instead of the captured instance.
+         */
+        @ScriptApi
+        public Builder setOperationInitializer(OperationInitializer operationInitializer) {
+            this.operationInitializer = operationInitializer;
+            return this;
+        }
+
         /**
          * @return the newly instantiated ExecutionContext
          */
         @ScriptApi
         public ExecutionContext build() {
-            return new ExecutionContext(isSystemic, authContext, queryLibrary, queryScope, queryCompiler, updateGraph);
+            return new ExecutionContext(isSystemic, authContext, queryLibrary, queryScope, queryCompiler, updateGraph,
+                    operationInitializer);
         }
     }
 }
diff --git a/engine/context/src/main/java/io/deephaven/engine/context/PoisonedOperationInitializer.java b/engine/context/src/main/java/io/deephaven/engine/context/PoisonedOperationInitializer.java
new file mode 100644
index 00000000000..49474844755
--- /dev/null
+++ b/engine/context/src/main/java/io/deephaven/engine/context/PoisonedOperationInitializer.java
@@ -0,0 +1,30 @@
+package io.deephaven.engine.context;
+
+import io.deephaven.engine.updategraph.OperationInitializer;
+import io.deephaven.util.ExecutionContextRegistrationException;
+
+import java.util.concurrent.Future;
+
+public class PoisonedOperationInitializer implements OperationInitializer {
+
+    public static final PoisonedOperationInitializer INSTANCE = new PoisonedOperationInitializer();
+
+    private <T> T fail() {
+        throw ExecutionContextRegistrationException.onFailedComponentAccess("OperationInitializer");
+    }
+
+    @Override
+    public boolean canParallelize() {
+        return fail();
+    }
+
+    @Override
+    public Future<?> submit(Runnable runnable) {
+        return fail();
+    }
+
+    @Override
+    public int parallelismFactor() {
+        return fail();
+    }
+}
diff --git a/engine/context/src/main/java/io/deephaven/engine/context/PoisonedUpdateGraph.java b/engine/context/src/main/java/io/deephaven/engine/context/PoisonedUpdateGraph.java
index c1f28d33a77..31dc734785b 100644
--- a/engine/context/src/main/java/io/deephaven/engine/context/PoisonedUpdateGraph.java
+++ b/engine/context/src/main/java/io/deephaven/engine/context/PoisonedUpdateGraph.java
@@ -2,7 +2,6 @@
 
 import io.deephaven.base.log.LogOutput;
 import io.deephaven.engine.updategraph.LogicalClock;
-import io.deephaven.engine.updategraph.LogicalClockImpl;
 import io.deephaven.engine.updategraph.UpdateGraph;
 import io.deephaven.io.log.LogEntry;
 import io.deephaven.util.ExecutionContextRegistrationException;
@@ -118,4 +117,7 @@ public boolean supportsRefreshing() {
     public void requestRefresh() {
         fail();
     }
+
+    @Override
+    public void stop() {}
 }
diff --git a/engine/context/src/test/java/io/deephaven/engine/context/TestQueryCompiler.java b/engine/context/src/test/java/io/deephaven/engine/context/TestQueryCompiler.java
index 39d90603ced..02374a67bf0 100644
--- a/engine/context/src/test/java/io/deephaven/engine/context/TestQueryCompiler.java
+++ b/engine/context/src/test/java/io/deephaven/engine/context/TestQueryCompiler.java
@@ -66,7 +66,6 @@ public class TestQueryCompiler {
 
     @Before
     public void
setUp() throws IOException { executionContextClosable = ExecutionContext.newBuilder() - .captureUpdateGraph() .captureQueryLibrary() .captureQueryScope() .setQueryCompiler(QueryCompiler.create(folder.newFolder(), TestQueryCompiler.class.getClassLoader())) diff --git a/engine/table/build.gradle b/engine/table/build.gradle index 4456f02c262..261678a3c06 100644 --- a/engine/table/build.gradle +++ b/engine/table/build.gradle @@ -28,8 +28,6 @@ dependencies { implementation project(':Configuration') implementation project(':log-factory') implementation project(':Stats') - implementation project(':Net') - implementation project(':FishUtil') implementation 'com.github.f4b6a3:uuid-creator:5.2.0' // TODO(deephaven-core#3204): t-digest 3.3 appears to have higher errors than 3.2 diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/AbstractFilterExecution.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/AbstractFilterExecution.java index d862284777f..a136c87701c 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/AbstractFilterExecution.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/AbstractFilterExecution.java @@ -2,6 +2,7 @@ import io.deephaven.base.log.LogOutput; import io.deephaven.base.verify.Assert; +import io.deephaven.engine.context.ExecutionContext; import io.deephaven.engine.exceptions.CancellationException; import io.deephaven.engine.rowset.RowSet; import io.deephaven.engine.rowset.RowSetFactory; @@ -325,10 +326,10 @@ abstract void enqueueSubFilters( */ abstract boolean doParallelization(long numberOfRows); - static boolean doParallelizationBase(long numberOfRows) { + boolean doParallelizationBase(long numberOfRows) { return !QueryTable.DISABLE_PARALLEL_WHERE && numberOfRows != 0 && (QueryTable.FORCE_PARALLEL_WHERE || numberOfRows / 2 > QueryTable.PARALLEL_WHERE_ROWS_PER_SEGMENT) - && OperationInitializationThreadPool.canParallelize(); + && ExecutionContext.getContext().getInitializer().canParallelize(); } /** diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/InitialFilterExecution.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/InitialFilterExecution.java index b00d195dba0..dc00456fdd2 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/InitialFilterExecution.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/InitialFilterExecution.java @@ -1,6 +1,7 @@ package io.deephaven.engine.table.impl; import io.deephaven.base.verify.Assert; +import io.deephaven.engine.context.ExecutionContext; import io.deephaven.engine.exceptions.CancellationException; import io.deephaven.engine.rowset.RowSet; import io.deephaven.engine.table.ModifiedColumnSet; @@ -85,7 +86,7 @@ void enqueueSubFilters( private void enqueueJobs(Iterable subFilters) { for (NotificationQueue.Notification notification : subFilters) { - OperationInitializationThreadPool.executorService().submit(() -> { + ExecutionContext.getContext().getInitializer().submit(() -> { root.runningChildren.put(Thread.currentThread(), Thread.currentThread()); try { if (!root.cancelled.get()) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/OperationInitializationThreadPool.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/OperationInitializationThreadPool.java index c12a0a605d3..286d4386d17 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/OperationInitializationThreadPool.java +++ 
b/engine/table/src/main/java/io/deephaven/engine/table/impl/OperationInitializationThreadPool.java
@@ -5,17 +5,22 @@
 
 import io.deephaven.chunk.util.pools.MultiChunkPool;
 import io.deephaven.configuration.Configuration;
+import io.deephaven.engine.context.ExecutionContext;
+import io.deephaven.engine.updategraph.OperationInitializer;
 import io.deephaven.util.thread.NamingThreadFactory;
 import io.deephaven.util.thread.ThreadInitializationFactory;
 import org.jetbrains.annotations.NotNull;
 
-import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-public class OperationInitializationThreadPool {
+/**
+ * Implementation of OperationInitializer that delegates to a pool of threads.
+ */
+public class OperationInitializationThreadPool implements OperationInitializer {
 
     /**
      * The number of threads that will be used for parallel initialization in this process
@@ -31,56 +36,42 @@ public class OperationInitializationThreadPool {
             NUM_THREADS = numThreads;
         }
     }
+    private final ThreadLocal<Boolean> isInitializationThread = ThreadLocal.withInitial(() -> false);
 
-    private static final ThreadLocal<Boolean> isInitializationThread = ThreadLocal.withInitial(() -> false);
-
-    /**
-     * @return Whether the current thread is part of the OperationInitializationThreadPool's {@link #executorService()}
-     */
-    public static boolean isInitializationThread() {
-        return isInitializationThread.get();
-    }
-
-    /**
-     * @return Whether the current thread can parallelize operations using the OperationInitializationThreadPool's
-     *         {@link #executorService()}
-     */
-    public static boolean canParallelize() {
-        return NUM_THREADS > 1 && !isInitializationThread();
-    }
-
-    private static final ThreadPoolExecutor executorService;
+    private final ThreadPoolExecutor executorService;
 
-    static {
+    public OperationInitializationThreadPool(ThreadInitializationFactory factory) {
        final ThreadGroup threadGroup = new ThreadGroup("OperationInitializationThreadPool");
        final ThreadFactory threadFactory = new NamingThreadFactory(
                threadGroup, OperationInitializationThreadPool.class, "initializationExecutor", true) {
            @Override
            public Thread newThread(@NotNull final Runnable r) {
-                return super.newThread(ThreadInitializationFactory.wrapRunnable(() -> {
+                return super.newThread(factory.createInitializer(() -> {
                    isInitializationThread.set(true);
                    MultiChunkPool.enableDedicatedPoolForThisThread();
-                    r.run();
+                    ExecutionContext.newBuilder().setOperationInitializer(OperationInitializer.NON_PARALLELIZABLE)
+                            .build().apply(r);
                }));
            }
        };
        executorService = new ThreadPoolExecutor(
                NUM_THREADS, NUM_THREADS, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(), threadFactory);
+
+        executorService.prestartAllCoreThreads();
     }
 
-    /**
-     * @return The OperationInitializationThreadPool's {@link ExecutorService}; will be {@code null} if the
-     *         OperationInitializationThreadPool has not been {@link #start() started}
-     */
-    public static ExecutorService executorService() {
-        return executorService;
+    @Override
+    public boolean canParallelize() {
+        return NUM_THREADS > 1 && !isInitializationThread.get();
     }
 
-    /**
-     * Start the OperationInitializationThreadPool. In practice, this just pre-starts all threads in the
-     * {@link #executorService()}.
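// Illustrative sketch, not part of the patch: constructing the pool as an
// OperationInitializer and installing it on a context. The identity lambda
// stands in for a real ThreadInitializationFactory; its single-method shape
// (Runnable createInitializer(Runnable)) is inferred from the usage above.
final ThreadInitializationFactory noOpFactory = runnable -> runnable;
final OperationInitializer pool = new OperationInitializationThreadPool(noOpFactory);
final ExecutionContext withPool = ExecutionContext.newBuilder()
        .setOperationInitializer(pool)
        .build();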
- */ - public static void start() { - executorService.prestartAllCoreThreads(); + @Override + public Future submit(Runnable runnable) { + return executorService.submit(runnable); + } + + @Override + public int parallelismFactor() { + return NUM_THREADS; } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java index 53e6dc4f776..afeff5a30c8 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java @@ -1482,9 +1482,10 @@ this, mode, columns, rowSet, getModifiedColumnSetForUpdates(), publishTheseSourc final CompletableFuture waitForResult = new CompletableFuture<>(); final JobScheduler jobScheduler; if ((QueryTable.FORCE_PARALLEL_SELECT_AND_UPDATE || QueryTable.ENABLE_PARALLEL_SELECT_AND_UPDATE) - && OperationInitializationThreadPool.canParallelize() + && ExecutionContext.getContext().getInitializer().canParallelize() && analyzer.allowCrossColumnParallelization()) { - jobScheduler = new OperationInitializationPoolJobScheduler(); + jobScheduler = new OperationInitializationPoolJobScheduler( + ExecutionContext.getContext().getInitializer()); } else { jobScheduler = ImmediateJobScheduler.INSTANCE; } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/TableCreatorImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/TableCreatorImpl.java index fe745db0b2a..70c5d46a704 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/TableCreatorImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/TableCreatorImpl.java @@ -8,10 +8,11 @@ import io.deephaven.engine.table.Table; import io.deephaven.engine.table.TableDefinition; import io.deephaven.engine.table.TableFactory; -import io.deephaven.engine.table.impl.util.AppendOnlyArrayBackedMutableTable; -import io.deephaven.engine.table.impl.util.KeyedArrayBackedMutableTable; +import io.deephaven.engine.table.impl.util.AppendOnlyArrayBackedInputTable; +import io.deephaven.engine.table.impl.util.KeyedArrayBackedInputTable; import io.deephaven.engine.util.TableTools; import io.deephaven.qst.TableCreator; +import io.deephaven.qst.table.BlinkInputTable; import io.deephaven.qst.table.EmptyTable; import io.deephaven.qst.table.InMemoryAppendOnlyInputTable; import io.deephaven.qst.table.InMemoryKeyBackedInputTable; @@ -24,8 +25,10 @@ import io.deephaven.qst.table.Clock; import io.deephaven.qst.table.ClockSystem; import io.deephaven.qst.table.TimeTable; +import io.deephaven.stream.TablePublisher; import java.util.Arrays; +import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Stream; import java.util.stream.StreamSupport; @@ -78,8 +81,8 @@ public final Table of(TicketTable ticketTable) { } @Override - public final UpdatableTable of(InputTable inputTable) { - return UpdatableTableAdapter.of(inputTable); + public final Table of(InputTable inputTable) { + return InputTableAdapter.of(inputTable); } @@ -153,24 +156,35 @@ public io.deephaven.base.clock.Clock visit(ClockSystem system) { } } - enum UpdatableTableAdapter implements InputTable.Visitor { + enum InputTableAdapter implements InputTable.Visitor { INSTANCE; - public static UpdatableTable of(InputTable inputTable) { + private static final AtomicInteger blinkTableCount = new AtomicInteger(); + + public static Table of(InputTable inputTable) { return inputTable.walk(INSTANCE); } @Override public UpdatableTable 
visit(InMemoryAppendOnlyInputTable inMemoryAppendOnly) { final TableDefinition definition = DefinitionAdapter.of(inMemoryAppendOnly.schema()); - return AppendOnlyArrayBackedMutableTable.make(definition); + return AppendOnlyArrayBackedInputTable.make(definition); } @Override public UpdatableTable visit(InMemoryKeyBackedInputTable inMemoryKeyBacked) { final TableDefinition definition = DefinitionAdapter.of(inMemoryKeyBacked.schema()); final String[] keyColumnNames = inMemoryKeyBacked.keys().toArray(String[]::new); - return KeyedArrayBackedMutableTable.make(definition, keyColumnNames); + return KeyedArrayBackedInputTable.make(definition, keyColumnNames); + } + + @Override + public Table visit(BlinkInputTable blinkInputTable) { + final TableDefinition definition = DefinitionAdapter.of(blinkInputTable.schema()); + return TablePublisher + .of(TableCreatorImpl.class.getSimpleName() + ".BLINK-" + blinkTableCount.getAndIncrement(), + definition, null, null) + .inputTable(); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableImpl.java index e3ecf0a6b03..d401673b5f0 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableImpl.java @@ -30,6 +30,7 @@ import io.deephaven.engine.table.impl.sources.UnionSourceManager; import io.deephaven.engine.table.iterators.ChunkedObjectColumnIterator; import io.deephaven.engine.updategraph.NotificationQueue.Dependency; +import io.deephaven.engine.updategraph.OperationInitializer; import io.deephaven.engine.updategraph.UpdateGraph; import io.deephaven.util.SafeCloseable; import io.deephaven.util.annotations.InternalUseOnly; @@ -296,7 +297,7 @@ public PartitionedTable transform( // Perform the transformation final Table resultTable = prepared.update(List.of(new TableTransformationColumn( constituentColumnName, - executionContext, + disableRecursiveParallelOperationInitialization(executionContext), prepared.isRefreshing() ? transformer : assertResultsStatic(transformer)))); // Make sure we have a valid result constituent definition @@ -318,6 +319,29 @@ public PartitionedTable transform( return resultPartitionedTable; } + /** + * Ensures that the returned executionContext will have an OperationInitializer compatible with being called by work + * already running on an initialization thread - it must either already return false for + * {@link OperationInitializer#canParallelize()}, or must be a different instance than the current context's + * OperationInitializer. 
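// Illustrative sketch, not part of the patch: the effect of the guard
// described above, simplified to drop the identity comparison. When the
// provided context shares the current thread's pooled initializer, nested
// transform work is forced inline so the pool cannot deadlock on itself.
final ExecutionContext provided = ExecutionContext.getContext();
final ExecutionContext forNestedWork = provided.getInitializer().canParallelize()
        ? provided.withOperationInitializer(OperationInitializer.NON_PARALLELIZABLE)
        : provided;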
+ */ + private static ExecutionContext disableRecursiveParallelOperationInitialization(ExecutionContext provided) { + if (provided == null) { + return null; + } + ExecutionContext current = ExecutionContext.getContext(); + if (!provided.getInitializer().canParallelize()) { + return provided; + } + if (current.getInitializer() != provided.getInitializer()) { + return provided; + } + + // The current operation initializer isn't safe to submit more tasks that we will block on, replace + // with an instance that will never attempt to push work to another thread + return provided.withOperationInitializer(OperationInitializer.NON_PARALLELIZABLE); + } + @Override public PartitionedTable partitionedTransform( @NotNull final PartitionedTable other, @@ -353,7 +377,7 @@ public PartitionedTable partitionedTransform( .update(List.of(new BiTableTransformationColumn( constituentColumnName, RHS_CONSTITUENT, - executionContext, + disableRecursiveParallelOperationInitialization(executionContext), prepared.isRefreshing() ? transformer : assertResultsStatic(transformer)))) .dropColumns(RHS_CONSTITUENT); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableProxyImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableProxyImpl.java index 89e789cea6e..8cc0b210593 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableProxyImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/partitioned/PartitionedTableProxyImpl.java @@ -134,7 +134,6 @@ private static ExecutionContext getOrCreateExecutionContext(final boolean requir if (context == null) { final ExecutionContext.Builder builder = ExecutionContext.newBuilder() .captureQueryCompiler() - .captureUpdateGraph() .markSystemic(); if (requiresFullContext) { builder.newQueryLibrary(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java index ddb576b62e0..8cf902f16b3 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java @@ -17,13 +17,14 @@ import io.deephaven.engine.tablelogger.EngineTableLoggers; import io.deephaven.engine.tablelogger.UpdatePerformanceLogLogger; import io.deephaven.engine.updategraph.UpdateGraph; -import io.deephaven.engine.updategraph.impl.PeriodicUpdateGraph; +import io.deephaven.engine.updategraph.impl.BaseUpdateGraph; import io.deephaven.engine.util.string.StringUtils; import io.deephaven.internal.log.LoggerFactory; import io.deephaven.io.logger.Logger; import io.deephaven.stream.StreamToBlinkTableAdapter; import io.deephaven.util.QueryConstants; import io.deephaven.util.SafeCloseable; +import io.deephaven.util.annotations.TestUseOnly; import org.apache.commons.lang3.mutable.MutableObject; import org.jetbrains.annotations.NotNull; @@ -37,7 +38,7 @@ /** *

- * This tool is meant to track periodic update events that take place in an {@link PeriodicUpdateGraph}. This generally
+ * This tool is meant to track periodic update events that take place in an {@link UpdateGraph}. This generally
  * includes:
  * <ol>
  * <li>Update source {@code run()} invocations</li>
  * <li>…</li>
  * </ol>
@@ -88,8 +89,8 @@ private static class InternalState {
 
         private InternalState() {
             final UpdateGraph publishingGraph =
-                    PeriodicUpdateGraph.getInstance(PeriodicUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME);
-            Assert.neqNull(publishingGraph, "The " + PeriodicUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME + " UpdateGraph "
+                    BaseUpdateGraph.getInstance(BaseUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME);
+            Assert.neqNull(publishingGraph, "The " + BaseUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME + " UpdateGraph "
                     + "must be created before UpdatePerformanceTracker can be initialized.");
             try (final SafeCloseable ignored = ExecutionContext.getContext().withUpdateGraph(publishingGraph).open()) {
                 tableLogger = EngineTableLoggers.get().updatePerformanceLogLogger();
@@ -286,4 +287,11 @@ public long getIntervalEndTimeEpochNanos() {
     public static QueryTable getQueryTable() {
         return (QueryTable) BlinkTableTools.blinkToAppendOnly(getInternalState().blink);
     }
+
+    @TestUseOnly
+    public static void resetForUnitTests() {
+        synchronized (UpdatePerformanceTracker.class) {
+            INSTANCE = null;
+        }
+    }
 }
diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/rangejoin/RangeJoinOperation.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/rangejoin/RangeJoinOperation.java
index 6ec652d0282..aebaf7a975e 100644
--- a/engine/table/src/main/java/io/deephaven/engine/table/impl/rangejoin/RangeJoinOperation.java
+++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/rangejoin/RangeJoinOperation.java
@@ -26,7 +26,6 @@
 import io.deephaven.engine.rowset.RowSetFactory;
 import io.deephaven.engine.table.*;
 import io.deephaven.engine.table.impl.MemoizedOperationKey;
-import io.deephaven.engine.table.impl.OperationInitializationThreadPool;
 import io.deephaven.engine.table.impl.QueryTable;
 import io.deephaven.engine.table.impl.SortingOrder;
 import io.deephaven.engine.table.impl.OperationSnapshotControl;
@@ -253,14 +252,13 @@ public Result<QueryTable> initialize(final boolean usePrev, final long beforeClo
         QueryTable.checkInitiateBinaryOperation(leftTable, rightTable);
 
         final JobScheduler jobScheduler;
-        if (OperationInitializationThreadPool.canParallelize()) {
-            jobScheduler = new OperationInitializationPoolJobScheduler();
+        if (ExecutionContext.getContext().getInitializer().canParallelize()) {
+            jobScheduler = new OperationInitializationPoolJobScheduler(ExecutionContext.getContext().getInitializer());
         } else {
             jobScheduler = ImmediateJobScheduler.INSTANCE;
         }
 
         final ExecutionContext executionContext = ExecutionContext.newBuilder()
-                .captureUpdateGraph()
                 .markSystemic().build();
 
         return new Result<>(staticRangeJoin(jobScheduler, executionContext));
diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/sources/UnionSourceManager.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/sources/UnionSourceManager.java
index 9392a00bec3..546dbcf19ac 100644
--- a/engine/table/src/main/java/io/deephaven/engine/table/impl/sources/UnionSourceManager.java
+++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/sources/UnionSourceManager.java
@@ -104,7 +104,6 @@ public UnionSourceManager(@NotNull final PartitionedTable partitionedTable) {
             executionContext = ExecutionContext.newBuilder()
                     .markSystemic()
-                    .captureUpdateGraph()
                     .build();
         } else {
             listenerRecorders = null;
diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java
index 245d312793f..f6f0ca98558 100644
---
a/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java @@ -300,13 +300,13 @@ class PhasedUpdateProcessor implements LogOutputAppendable { dirtyWindowOperators[winIdx].set(0, windows[winIdx].operators.length); } // Create the proper JobScheduler for the following parallel tasks - if (OperationInitializationThreadPool.canParallelize()) { - jobScheduler = new OperationInitializationPoolJobScheduler(); + if (ExecutionContext.getContext().getInitializer().canParallelize()) { + jobScheduler = + new OperationInitializationPoolJobScheduler(ExecutionContext.getContext().getInitializer()); } else { jobScheduler = ImmediateJobScheduler.INSTANCE; } executionContext = ExecutionContext.newBuilder() - .captureUpdateGraph() .markSystemic().build(); } else { // Determine which windows need to be computed. diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AppendOnlyArrayBackedMutableTable.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AppendOnlyArrayBackedInputTable.java similarity index 59% rename from engine/table/src/main/java/io/deephaven/engine/table/impl/util/AppendOnlyArrayBackedMutableTable.java rename to engine/table/src/main/java/io/deephaven/engine/table/impl/util/AppendOnlyArrayBackedInputTable.java index f40908ed679..a65210dc3ea 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AppendOnlyArrayBackedMutableTable.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AppendOnlyArrayBackedInputTable.java @@ -9,7 +9,6 @@ import io.deephaven.engine.rowset.RowSet; import io.deephaven.engine.rowset.RowSetFactory; import io.deephaven.engine.rowset.RowSequenceFactory; -import io.deephaven.engine.util.config.InputTableStatusListener; import io.deephaven.engine.table.impl.QueryTable; import io.deephaven.engine.table.impl.sources.NullValueColumnSource; import io.deephaven.engine.table.ChunkSink; @@ -18,15 +17,13 @@ import java.util.Collections; import java.util.List; -import java.util.Map; -import java.util.function.Consumer; /** * An in-memory table that allows you to add rows as if it were an InputTable, which can be updated on the UGP. *

 * <p>
 * The table is not keyed, all rows are added to the end of the table. Deletions and edits are not permitted.
 */
-public class AppendOnlyArrayBackedMutableTable extends BaseArrayBackedMutableTable {
+public class AppendOnlyArrayBackedInputTable extends BaseArrayBackedInputTable {
 
     static final String DEFAULT_DESCRIPTION = "Append Only In-Memory Input Table";
 
     /**
@@ -36,64 +33,40 @@ public class AppendOnlyArrayBackedMutableTab
      *
      * @return an empty AppendOnlyArrayBackedMutableTable with the given definition
      */
-    public static AppendOnlyArrayBackedMutableTable make(@NotNull TableDefinition definition) {
-        return make(definition, Collections.emptyMap());
-    }
-
-    /**
-     * Create an empty AppendOnlyArrayBackedMutableTable with the given definition.
-     *
-     * @param definition the definition of the new table.
-     * @param enumValues a map of column names to enumeration values
-     *
-     * @return an empty AppendOnlyArrayBackedMutableTable with the given definition
-     */
-    public static AppendOnlyArrayBackedMutableTable make(@NotNull TableDefinition definition,
-            final Map<String, Object[]> enumValues) {
+    public static AppendOnlyArrayBackedInputTable make(
+            @NotNull TableDefinition definition) {
         // noinspection resource
         return make(new QueryTable(definition, RowSetFactory.empty().toTracking(),
-                NullValueColumnSource.createColumnSourceMap(definition)), enumValues);
-    }
-
-    /**
-     * Create an AppendOnlyArrayBackedMutableTable with the given initial data.
-     *
-     * @param initialTable the initial values to copy into the AppendOnlyArrayBackedMutableTable
-     *
-     * @return an empty AppendOnlyArrayBackedMutableTable with the given definition
-     */
-    public static AppendOnlyArrayBackedMutableTable make(final Table initialTable) {
-        return make(initialTable, Collections.emptyMap());
+                NullValueColumnSource.createColumnSourceMap(definition)));
     }
 
     /**
     * Create an AppendOnlyArrayBackedMutableTable with the given initial data.
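// Illustrative sketch, not part of the patch: creating the renamed input
// table from a definition and reaching its updater via the inputTable()
// accessor that this patch defines on BaseArrayBackedInputTable.
final TableDefinition definition = TableDefinition.of(
        ColumnDefinition.ofString("Name"),
        ColumnDefinition.ofLong("Count"));
final AppendOnlyArrayBackedInputTable table = AppendOnlyArrayBackedInputTable.make(definition);
final InputTableUpdater updater = table.inputTable();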
* * @param initialTable the initial values to copy into the AppendOnlyArrayBackedMutableTable - * @param enumValues a map of column names to enumeration values * * @return an empty AppendOnlyArrayBackedMutableTable with the given definition */ - public static AppendOnlyArrayBackedMutableTable make(final Table initialTable, - final Map enumValues) { - final AppendOnlyArrayBackedMutableTable result = new AppendOnlyArrayBackedMutableTable( - initialTable.getDefinition(), enumValues, new ProcessPendingUpdater()); + public static AppendOnlyArrayBackedInputTable make(final Table initialTable) { + final AppendOnlyArrayBackedInputTable result = + new AppendOnlyArrayBackedInputTable( + initialTable.getDefinition(), new ProcessPendingUpdater()); result.setAttribute(Table.ADD_ONLY_TABLE_ATTRIBUTE, Boolean.TRUE); + result.setAttribute(Table.APPEND_ONLY_TABLE_ATTRIBUTE, Boolean.TRUE); result.setFlat(); processInitial(initialTable, result); return result; } - private AppendOnlyArrayBackedMutableTable(@NotNull TableDefinition definition, - final Map enumValues, final ProcessPendingUpdater processPendingUpdater) { + private AppendOnlyArrayBackedInputTable(@NotNull TableDefinition definition, + final ProcessPendingUpdater processPendingUpdater) { // noinspection resource super(RowSetFactory.empty().toTracking(), makeColumnSourceMap(definition), - enumValues, processPendingUpdater); + processPendingUpdater); } @Override - protected void processPendingTable(Table table, boolean allowEdits, RowSetChangeRecorder rowSetChangeRecorder, - Consumer errorNotifier) { + protected void processPendingTable(Table table, RowSetChangeRecorder rowSetChangeRecorder) { try (final RowSet addRowSet = table.getRowSet().copy()) { final long firstRow = nextRow; final long lastRow = firstRow + addRowSet.intSize() - 1; @@ -135,28 +108,15 @@ protected List getKeyNames() { } @Override - ArrayBackedMutableInputTable makeHandler() { - return new AppendOnlyArrayBackedMutableInputTable(); + ArrayBackedInputTableUpdater makeUpdater() { + return new Updater(); } - private class AppendOnlyArrayBackedMutableInputTable extends ArrayBackedMutableInputTable { - @Override - public void setRows(@NotNull Table defaultValues, int[] rowArray, Map[] valueArray, - InputTableStatusListener listener) { - throw new UnsupportedOperationException(); - } + private class Updater extends ArrayBackedInputTableUpdater { @Override public void validateDelete(Table tableToDelete) { throw new UnsupportedOperationException("Table doesn't support delete operation"); } - - @Override - public void addRows(Map[] valueArray, boolean allowEdits, InputTableStatusListener listener) { - if (allowEdits) { - throw new UnsupportedOperationException(); - } - super.addRows(valueArray, allowEdits, listener); - } } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/BaseArrayBackedMutableTable.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/BaseArrayBackedInputTable.java similarity index 59% rename from engine/table/src/main/java/io/deephaven/engine/table/impl/util/BaseArrayBackedMutableTable.java rename to engine/table/src/main/java/io/deephaven/engine/table/impl/util/BaseArrayBackedInputTable.java index fc1c75d69df..297f0408504 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/BaseArrayBackedMutableTable.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/BaseArrayBackedInputTable.java @@ -4,9 +4,6 @@ package io.deephaven.engine.table.impl.util; import io.deephaven.base.verify.Assert; 
-import io.deephaven.base.verify.Require; -import io.deephaven.datastructures.util.CollectionUtil; -import io.deephaven.engine.rowset.RowSet; import io.deephaven.engine.rowset.RowSetBuilderSequential; import io.deephaven.engine.rowset.RowSetFactory; import io.deephaven.engine.rowset.TrackingRowSet; @@ -15,9 +12,8 @@ import io.deephaven.engine.table.TableDefinition; import io.deephaven.engine.table.WritableColumnSource; import io.deephaven.engine.table.impl.sources.ArrayBackedColumnSource; -import io.deephaven.engine.util.config.InputTableStatusListener; -import io.deephaven.engine.util.config.MutableInputTable; -import io.deephaven.engine.table.impl.QueryTable; +import io.deephaven.engine.util.input.InputTableStatusListener; +import io.deephaven.engine.util.input.InputTableUpdater; import io.deephaven.engine.table.impl.UpdatableTable; import io.deephaven.engine.table.ColumnSource; import io.deephaven.util.annotations.TestUseOnly; @@ -26,11 +22,8 @@ import java.io.IOException; import java.util.*; import java.util.concurrent.CompletableFuture; -import java.util.function.Consumer; -abstract class BaseArrayBackedMutableTable extends UpdatableTable { - - private static final Object[] BOOLEAN_ENUM_ARRAY = new Object[] {true, false, null}; +abstract class BaseArrayBackedInputTable extends UpdatableTable { /** * Queue of pending changes. Only synchronized access is permitted. @@ -45,30 +38,27 @@ abstract class BaseArrayBackedMutableTable extends UpdatableTable { */ private long processedSequence = 0L; - private final Map enumValues; - private String description = getDefaultDescription(); private Runnable onPendingChange = updateGraph::requestRefresh; long nextRow = 0; private long pendingProcessed = -1L; - public BaseArrayBackedMutableTable(TrackingRowSet rowSet, Map> nameToColumnSource, - Map enumValues, ProcessPendingUpdater processPendingUpdater) { + public BaseArrayBackedInputTable(TrackingRowSet rowSet, Map> nameToColumnSource, + ProcessPendingUpdater processPendingUpdater) { super(rowSet, nameToColumnSource, processPendingUpdater); - this.enumValues = enumValues; - MutableInputTable mutableInputTable = makeHandler(); - setAttribute(Table.INPUT_TABLE_ATTRIBUTE, mutableInputTable); + InputTableUpdater inputTableUpdater = makeUpdater(); + setAttribute(Table.INPUT_TABLE_ATTRIBUTE, inputTableUpdater); setRefreshing(true); processPendingUpdater.setThis(this); } - public MutableInputTable mutableInputTable() { - return (MutableInputTable) getAttribute(Table.INPUT_TABLE_ATTRIBUTE); + public InputTableUpdater inputTable() { + return (InputTableUpdater) getAttribute(Table.INPUT_TABLE_ATTRIBUTE); } public Table readOnlyCopy() { - return copy(BaseArrayBackedMutableTable::applicableForReadOnly); + return copy(BaseArrayBackedInputTable::applicableForReadOnly); } private static boolean applicableForReadOnly(String attributeName) { @@ -84,9 +74,9 @@ private static boolean applicableForReadOnly(String attributeName) { return resultMap; } - static void processInitial(Table initialTable, BaseArrayBackedMutableTable result) { + static void processInitial(Table initialTable, BaseArrayBackedInputTable result) { final RowSetBuilderSequential builder = RowSetFactory.builderSequential(); - result.processPendingTable(initialTable, true, new RowSetChangeRecorder() { + result.processPendingTable(initialTable, new RowSetChangeRecorder() { @Override public void addRowKey(long key) { builder.appendKey(key); @@ -101,14 +91,13 @@ public void removeRowKey(long key) { public void modifyRowKey(long key) { throw new 
UnsupportedOperationException(); } - }, (e) -> { }); result.getRowSet().writableCast().insert(builder.build()); result.getRowSet().writableCast().initializePreviousValue(); result.getUpdateGraph().addSource(result); } - public BaseArrayBackedMutableTable setDescription(String newDescription) { + public BaseArrayBackedInputTable setDescription(String newDescription) { this.description = newDescription; return this; } @@ -132,8 +121,7 @@ private void processPending(RowSetChangeRecorder rowSetChangeRecorder) { if (pendingChange.delete) { processPendingDelete(pendingChange.table, rowSetChangeRecorder); } else { - processPendingTable(pendingChange.table, pendingChange.allowEdits, rowSetChangeRecorder, - (e) -> pendingChange.error = e); + processPendingTable(pendingChange.table, rowSetChangeRecorder); } pendingProcessed = pendingChange.sequence; } @@ -154,8 +142,7 @@ public void run() { } } - protected abstract void processPendingTable(Table table, boolean allowEdits, - RowSetChangeRecorder rowSetChangeRecorder, Consumer errorNotifier); + protected abstract void processPendingTable(Table table, RowSetChangeRecorder rowSetChangeRecorder); protected abstract void processPendingDelete(Table table, RowSetChangeRecorder rowSetChangeRecorder); @@ -164,74 +151,73 @@ protected abstract void processPendingTable(Table table, boolean allowEdits, protected abstract List getKeyNames(); protected static class ProcessPendingUpdater implements Updater { - private BaseArrayBackedMutableTable baseArrayBackedMutableTable; + private BaseArrayBackedInputTable baseArrayBackedInputTable; @Override public void accept(RowSetChangeRecorder rowSetChangeRecorder) { - baseArrayBackedMutableTable.processPending(rowSetChangeRecorder); + baseArrayBackedInputTable.processPending(rowSetChangeRecorder); } - public void setThis(BaseArrayBackedMutableTable keyedArrayBackedMutableTable) { - this.baseArrayBackedMutableTable = keyedArrayBackedMutableTable; + public void setThis(BaseArrayBackedInputTable keyedArrayBackedMutableTable) { + this.baseArrayBackedInputTable = keyedArrayBackedMutableTable; } } private final class PendingChange { final boolean delete; + @NotNull final Table table; final long sequence; - final boolean allowEdits; String error; - private PendingChange(Table table, boolean delete, boolean allowEdits) { + private PendingChange(@NotNull Table table, boolean delete) { Assert.holdsLock(pendingChanges, "pendingChanges"); + Assert.neqNull(table, "table"); this.table = table; this.delete = delete; - this.allowEdits = allowEdits; this.sequence = ++enqueuedSequence; } } - ArrayBackedMutableInputTable makeHandler() { - return new ArrayBackedMutableInputTable(); + ArrayBackedInputTableUpdater makeUpdater() { + return new ArrayBackedInputTableUpdater(); } - protected class ArrayBackedMutableInputTable implements MutableInputTable { + protected class ArrayBackedInputTableUpdater implements InputTableUpdater { @Override public List getKeyNames() { - return BaseArrayBackedMutableTable.this.getKeyNames(); + return BaseArrayBackedInputTable.this.getKeyNames(); } @Override public TableDefinition getTableDefinition() { - return BaseArrayBackedMutableTable.this.getDefinition(); + return BaseArrayBackedInputTable.this.getDefinition(); } @Override public void add(@NotNull final Table newData) throws IOException { checkBlockingEditSafety(); - PendingChange pendingChange = enqueueAddition(newData, true); + PendingChange pendingChange = enqueueAddition(newData); blockingContinuation(pendingChange); } @Override public void addAsync( 
@NotNull final Table newData, - final boolean allowEdits, @NotNull final InputTableStatusListener listener) { checkAsyncEditSafety(newData); - final PendingChange pendingChange = enqueueAddition(newData, allowEdits); + final PendingChange pendingChange = enqueueAddition(newData); asynchronousContinuation(pendingChange, listener); } - private PendingChange enqueueAddition(@NotNull final Table newData, final boolean allowEdits) { + private PendingChange enqueueAddition(@NotNull final Table newData) { validateAddOrModify(newData); // we want to get a clean copy of the table; that can not change out from under us or result in long reads // during our UGP run final Table newDataSnapshot = snapshotData(newData); final PendingChange pendingChange; synchronized (pendingChanges) { - pendingChange = new PendingChange(newDataSnapshot, false, allowEdits); + pendingChange = new PendingChange(newDataSnapshot, false); pendingChanges.add(pendingChange); } onPendingChange.run(); @@ -239,38 +225,33 @@ private PendingChange enqueueAddition(@NotNull final Table newData, final boolea } @Override - public void delete(@NotNull final Table table, @NotNull final TrackingRowSet rowsToDelete) throws IOException { + public void delete(@NotNull final Table table) throws IOException { checkBlockingEditSafety(); - final PendingChange pendingChange = enqueueDeletion(table, rowsToDelete); + final PendingChange pendingChange = enqueueDeletion(table); blockingContinuation(pendingChange); } @Override public void deleteAsync( @NotNull final Table table, - @NotNull final TrackingRowSet rowsToDelete, @NotNull final InputTableStatusListener listener) { checkAsyncEditSafety(table); - final PendingChange pendingChange = enqueueDeletion(table, rowsToDelete); + final PendingChange pendingChange = enqueueDeletion(table); asynchronousContinuation(pendingChange, listener); } - private PendingChange enqueueDeletion(@NotNull final Table table, @NotNull final TrackingRowSet rowsToDelete) { + private PendingChange enqueueDeletion(@NotNull final Table table) { validateDelete(table); - final Table oldDataSnapshot = snapshotData(table, rowsToDelete); + final Table oldDataSnapshot = snapshotData(table); final PendingChange pendingChange; synchronized (pendingChanges) { - pendingChange = new PendingChange(oldDataSnapshot, true, false); + pendingChange = new PendingChange(oldDataSnapshot, true); pendingChanges.add(pendingChange); } onPendingChange.run(); return pendingChange; } - private Table snapshotData(@NotNull final Table data, @NotNull final TrackingRowSet rowSet) { - return snapshotData(data.getSubTable(rowSet)); - } - private Table snapshotData(@NotNull final Table data) { Table dataSnapshot; if (data.isRefreshing()) { @@ -322,18 +303,13 @@ private void checkAsyncEditSafety(@NotNull final Table changeData) { } } - @Override - public String getDescription() { - return description; - } - void waitForSequence(long sequence) { if (updateGraph.exclusiveLock().isHeldByCurrentThread()) { // We're holding the lock. currentTable had better be refreshing. Wait on its UGP condition // in order to allow updates. 
while (processedSequence < sequence) { try { - BaseArrayBackedMutableTable.this.awaitUpdate(); + BaseArrayBackedInputTable.this.awaitUpdate(); } catch (InterruptedException ignored) { } } @@ -350,84 +326,6 @@ void waitForSequence(long sequence) { } } - @Override - public void setRows(@NotNull Table defaultValues, int[] rowArray, Map[] valueArray, - InputTableStatusListener listener) { - Assert.neqNull(defaultValues, "defaultValues"); - if (defaultValues.isRefreshing()) { - updateGraph.checkInitiateSerialTableOperation(); - } - - final List> columnDefinitions = getTableDefinition().getColumns(); - final Map> sources = - buildSourcesMap(valueArray.length, columnDefinitions); - final String[] kabmtColumns = - getTableDefinition().getColumnNames().toArray(CollectionUtil.ZERO_LENGTH_STRING_ARRAY); - // noinspection unchecked - final WritableColumnSource[] sourcesByPosition = - Arrays.stream(kabmtColumns).map(sources::get).toArray(WritableColumnSource[]::new); - - final Set missingColumns = new HashSet<>(getTableDefinition().getColumnNames()); - - for (final Map.Entry> entry : defaultValues.getColumnSourceMap() - .entrySet()) { - final String colName = entry.getKey(); - if (!sources.containsKey(colName)) { - continue; - } - final ColumnSource cs = Require.neqNull(entry.getValue(), "defaultValue column source: " + colName); - final WritableColumnSource dest = - Require.neqNull(sources.get(colName), "destination column source: " + colName); - - final RowSet defaultValuesRowSet = defaultValues.getRowSet(); - for (int rr = 0; rr < rowArray.length; ++rr) { - final long key = defaultValuesRowSet.get(rowArray[rr]); - dest.set(rr, cs.get(key)); - } - - missingColumns.remove(colName); - } - - for (int ii = 0; ii < valueArray.length; ++ii) { - final Map passedInValues = valueArray[ii]; - - for (int cc = 0; cc < sourcesByPosition.length; cc++) { - final String colName = kabmtColumns[cc]; - if (passedInValues.containsKey(colName)) { - sourcesByPosition[cc].set(ii, passedInValues.get(colName)); - } else if (missingColumns.contains(colName)) { - throw new IllegalArgumentException("No value specified for " + colName + " row " + ii); - } - } - } - - // noinspection resource - final QueryTable newData = new QueryTable(getTableDefinition(), - RowSetFactory.flat(valueArray.length).toTracking(), sources); - addAsync(newData, true, listener); - } - - @Override - public void addRows(Map[] valueArray, boolean allowEdits, InputTableStatusListener listener) { - final List> columnDefinitions = getTableDefinition().getColumns(); - final Map> sources = - buildSourcesMap(valueArray.length, columnDefinitions); - - for (int rowNumber = 0; rowNumber < valueArray.length; rowNumber++) { - final Map values = valueArray[rowNumber]; - for (final ColumnDefinition columnDefinition : columnDefinitions) { - sources.get(columnDefinition.getName()).set(rowNumber, values.get(columnDefinition.getName())); - } - - } - - // noinspection resource - final QueryTable newData = new QueryTable(getTableDefinition(), - RowSetFactory.flat(valueArray.length).toTracking(), sources); - - addAsync(newData, allowEdits, listener); - } - @NotNull private Map> buildSourcesMap(int capacity, List> columnDefinitions) { @@ -443,24 +341,5 @@ private Map> buildSourcesMap(int capacity, return sources; } - @Override - public Object[] getEnumsForColumn(String columnName) { - if (getTableDefinition().getColumn(columnName).getDataType().equals(Boolean.class)) { - return BOOLEAN_ENUM_ARRAY; - } - return enumValues.get(columnName); - } - - @Override - public Table 
getTable() { - return BaseArrayBackedMutableTable.this; - } - - @Override - public boolean canEdit() { - // TODO: Should we be more restrictive, or provide a mechanism for determining which users can edit this - // table beyond "they have a handle to it"? - return true; - } } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/KeyedArrayBackedMutableTable.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/KeyedArrayBackedInputTable.java similarity index 76% rename from engine/table/src/main/java/io/deephaven/engine/table/impl/util/KeyedArrayBackedMutableTable.java rename to engine/table/src/main/java/io/deephaven/engine/table/impl/util/KeyedArrayBackedInputTable.java index ad4221bbb90..1eaeba52a01 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/KeyedArrayBackedMutableTable.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/KeyedArrayBackedInputTable.java @@ -20,14 +20,13 @@ import org.jetbrains.annotations.NotNull; import java.util.*; -import java.util.function.Consumer; /** * An in-memory table that has keys for each row, which can be updated on the UGP. *
<p>
    * This is used to implement in-memory editable table columns from web plugins. */ -public class KeyedArrayBackedMutableTable extends BaseArrayBackedMutableTable { +public class KeyedArrayBackedInputTable extends BaseArrayBackedInputTable { private static final String DEFAULT_DESCRIPTION = "In-Memory Input Table"; @@ -47,44 +46,13 @@ public class KeyedArrayBackedMutableTable extends BaseArrayBackedMutableTable { * * @return an empty KeyedArrayBackedMutableTable with the given definition and key columns */ - public static KeyedArrayBackedMutableTable make(@NotNull TableDefinition definition, + public static KeyedArrayBackedInputTable make(@NotNull TableDefinition definition, final String... keyColumnNames) { // noinspection resource return make(new QueryTable(definition, RowSetFactory.empty().toTracking(), NullValueColumnSource.createColumnSourceMap(definition)), keyColumnNames); } - /** - * Create an empty KeyedArrayBackedMutableTable. - * - * @param definition the definition of the table to create - * @param enumValues a map of column names to enumeration values - * @param keyColumnNames the name of the key columns - * - * @return an empty KeyedArrayBackedMutableTable with the given definition and key columns - */ - public static KeyedArrayBackedMutableTable make(@NotNull TableDefinition definition, - final Map enumValues, final String... keyColumnNames) { - // noinspection resource - return make(new QueryTable(definition, RowSetFactory.empty().toTracking(), - NullValueColumnSource.createColumnSourceMap(definition)), enumValues, keyColumnNames); - } - - /** - * Create an empty KeyedArrayBackedMutableTable. - *
<p>
    - * The initialTable is processed in order, so if there are duplicate keys only the last row is reflected in the - * output. - * - * @param initialTable the initial values to copy into the KeyedArrayBackedMutableTable - * @param keyColumnNames the name of the key columns - * - * @return an empty KeyedArrayBackedMutableTable with the given definition and key columns - */ - public static KeyedArrayBackedMutableTable make(final Table initialTable, final String... keyColumnNames) { - return make(initialTable, Collections.emptyMap(), keyColumnNames); - } - /** * Create an empty KeyedArrayBackedMutableTable. *
<p>
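
The hunks above and below collapse the factory surface to one make(...) variant per input shape, with the enumValues overloads deleted outright. For reference, a hedged sketch of constructing both input-table flavors after the rename; the imports are inferred from the file paths in this diff, and the "Sym" key column is an illustrative assumption:

import io.deephaven.engine.table.Table;
import io.deephaven.engine.table.impl.util.AppendOnlyArrayBackedInputTable;
import io.deephaven.engine.table.impl.util.KeyedArrayBackedInputTable;

public class InputTableFactorySketch {
    // Keyed: a row whose "Sym" matches an existing key modifies that row in place.
    static Table keyed(Table initial) {
        return KeyedArrayBackedInputTable.make(initial, "Sym");
    }

    // Append-only: every accepted row is appended; deletes throw UnsupportedOperationException.
    static Table appendOnly(Table initial) {
        return AppendOnlyArrayBackedInputTable.make(initial);
    }
}
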
    @@ -92,25 +60,23 @@ public static KeyedArrayBackedMutableTable make(final Table initialTable, final * output. * * @param initialTable the initial values to copy into the KeyedArrayBackedMutableTable - * @param enumValues a map of column names to enumeration values * @param keyColumnNames the name of the key columns * * @return an empty KeyedArrayBackedMutableTable with the given definition and key columns */ - public static KeyedArrayBackedMutableTable make(final Table initialTable, final Map enumValues, - final String... keyColumnNames) { - final KeyedArrayBackedMutableTable result = new KeyedArrayBackedMutableTable(initialTable.getDefinition(), - keyColumnNames, enumValues, new ProcessPendingUpdater()); + public static KeyedArrayBackedInputTable make(final Table initialTable, final String... keyColumnNames) { + final KeyedArrayBackedInputTable result = new KeyedArrayBackedInputTable(initialTable.getDefinition(), + keyColumnNames, new ProcessPendingUpdater()); processInitial(initialTable, result); result.startTrackingPrev(); return result; } - private KeyedArrayBackedMutableTable(@NotNull TableDefinition definition, final String[] keyColumnNames, - final Map enumValues, final ProcessPendingUpdater processPendingUpdater) { + private KeyedArrayBackedInputTable(@NotNull TableDefinition definition, final String[] keyColumnNames, + final ProcessPendingUpdater processPendingUpdater) { // noinspection resource super(RowSetFactory.empty().toTracking(), makeColumnSourceMap(definition), - enumValues, processPendingUpdater); + processPendingUpdater); final List missingKeyColumns = new ArrayList<>(Arrays.asList(keyColumnNames)); missingKeyColumns.removeAll(definition.getColumnNames()); if (!missingKeyColumns.isEmpty()) { @@ -135,13 +101,11 @@ private void startTrackingPrev() { } @Override - protected void processPendingTable(Table table, boolean allowEdits, RowSetChangeRecorder rowSetChangeRecorder, - Consumer errorNotifier) { + protected void processPendingTable(Table table, RowSetChangeRecorder rowSetChangeRecorder) { final ChunkSource keySource = makeKeySource(table); final int chunkCapacity = table.intSize(); long rowToInsert = nextRow; - final StringBuilder errorBuilder = new StringBuilder(); try (final RowSet addRowSet = table.getRowSet().copy(); final WritableLongChunk destinations = WritableLongChunk.makeWritableChunk(chunkCapacity); @@ -161,25 +125,13 @@ protected void processPendingTable(Table table, boolean allowEdits, RowSetChange keyToRowMap.put(key, rowNumber); rowSetChangeRecorder.addRowKey(rowNumber); destinations.set(ii, rowNumber); - } else if (allowEdits) { + } else { rowSetChangeRecorder.modifyRowKey(rowNumber); destinations.set(ii, rowNumber); - } else { - // invalid edit - if (errorBuilder.length() > 0) { - errorBuilder.append(", ").append(key); - } else { - errorBuilder.append("Can not edit keys ").append(key); - } } } } - if (errorBuilder.length() > 0) { - errorNotifier.accept(errorBuilder.toString()); - return; - } - for (long ii = nextRow; ii < rowToInsert; ++ii) { rowSetChangeRecorder.addRowKey(ii); } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/OperationInitializationPoolJobScheduler.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/OperationInitializationPoolJobScheduler.java index 2722d61fd35..f3a9a45fc30 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/OperationInitializationPoolJobScheduler.java +++ 
b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/OperationInitializationPoolJobScheduler.java @@ -4,6 +4,7 @@ import io.deephaven.engine.context.ExecutionContext; import io.deephaven.engine.table.impl.OperationInitializationThreadPool; import io.deephaven.engine.table.impl.perf.BasePerformanceEntry; +import io.deephaven.engine.updategraph.OperationInitializer; import io.deephaven.io.log.impl.LogOutputStringImpl; import io.deephaven.util.SafeCloseable; import io.deephaven.util.process.ProcessEnvironment; @@ -11,7 +12,12 @@ import java.util.function.Consumer; public class OperationInitializationPoolJobScheduler implements JobScheduler { - final BasePerformanceEntry accumulatedBaseEntry = new BasePerformanceEntry(); + private final BasePerformanceEntry accumulatedBaseEntry = new BasePerformanceEntry(); + private final OperationInitializer threadPool; + + public OperationInitializationPoolJobScheduler(OperationInitializer threadPool) { + this.threadPool = threadPool; + } @Override public void submit( @@ -19,7 +25,7 @@ public void submit( final Runnable runnable, final LogOutputAppendable description, final Consumer onError) { - OperationInitializationThreadPool.executorService().submit(() -> { + threadPool.submit(() -> { final BasePerformanceEntry basePerformanceEntry = new BasePerformanceEntry(); basePerformanceEntry.onBaseEntryStart(); try (final SafeCloseable ignored = executionContext == null ? null : executionContext.open()) { diff --git a/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/BaseUpdateGraph.java b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/BaseUpdateGraph.java new file mode 100644 index 00000000000..2676354eeb7 --- /dev/null +++ b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/BaseUpdateGraph.java @@ -0,0 +1,1075 @@ +/** + * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending + */ + +package io.deephaven.engine.updategraph.impl; + +import io.deephaven.base.log.LogOutput; +import io.deephaven.base.log.LogOutputAppendable; +import io.deephaven.base.reference.SimpleReference; +import io.deephaven.base.verify.Assert; +import io.deephaven.configuration.Configuration; +import io.deephaven.engine.liveness.LivenessManager; +import io.deephaven.engine.liveness.LivenessScope; +import io.deephaven.engine.liveness.LivenessScopeStack; +import io.deephaven.engine.table.impl.perf.PerformanceEntry; +import io.deephaven.engine.table.impl.perf.UpdatePerformanceTracker; +import io.deephaven.engine.table.impl.util.StepUpdater; +import io.deephaven.engine.updategraph.*; +import io.deephaven.engine.util.reference.CleanupReferenceProcessorInstance; +import io.deephaven.hash.KeyedObjectHashMap; +import io.deephaven.hash.KeyedObjectKey; +import io.deephaven.hotspot.JvmIntrospectionContext; +import io.deephaven.io.log.LogEntry; +import io.deephaven.io.log.impl.LogOutputStringImpl; +import io.deephaven.io.logger.Logger; +import io.deephaven.util.SafeCloseable; +import io.deephaven.util.annotations.TestUseOnly; +import io.deephaven.util.datastructures.SimpleReferenceManager; +import io.deephaven.util.datastructures.linked.IntrusiveDoublyLinkedNode; +import io.deephaven.util.datastructures.linked.IntrusiveDoublyLinkedQueue; +import io.deephaven.util.locks.AwareFunctionalLock; +import io.deephaven.util.process.ProcessEnvironment; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +import java.lang.ref.WeakReference; +import java.util.*; +import java.util.concurrent.TimeUnit; +import 
java.util.function.Supplier; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * The BaseUpdateGraph contains common code for other UpdateGraph implementations and a map of named UpdateGraph + * instances. + */ +public abstract class BaseUpdateGraph implements UpdateGraph, LogOutputAppendable { + public static final String DEFAULT_UPDATE_GRAPH_NAME = "DEFAULT"; + + /** + * If the provided update graph is a {@link BaseUpdateGraph} then create a PerformanceEntry using the given + * description. Otherwise, return null. + * + * @param updateGraph The update graph to create a performance entry for. + * @param description The description for the performance entry. + * @return The performance entry, or null if the update graph is not a {@link BaseUpdateGraph}. + */ + @Nullable + public static PerformanceEntry createUpdatePerformanceEntry( + final UpdateGraph updateGraph, + final String description) { + if (updateGraph instanceof BaseUpdateGraph) { + final BaseUpdateGraph bug = (BaseUpdateGraph) updateGraph; + if (bug.updatePerformanceTracker != null) { + return bug.updatePerformanceTracker.getEntry(description); + } + throw new IllegalStateException("Cannot create a performance entry for a BaseUpdateGraph that has " + + "not been completely constructed."); + } + return null; + } + + private static final KeyedObjectHashMap INSTANCES = new KeyedObjectHashMap<>( + new KeyedObjectKey.BasicAdapter<>(UpdateGraph::getName)); + + private final Logger log; + + /** + * Update sources that are part of this BaseUpdateGraph. + */ + private final SimpleReferenceManager sources = + new SimpleReferenceManager<>(UpdateSourceRefreshNotification::new); + + /** + * Recorder for updates source satisfaction as a phase of notification processing. + */ + private volatile long sourcesLastSatisfiedStep; + + /** + * The queue of non-terminal notifications to process. + */ + final IntrusiveDoublyLinkedQueue pendingNormalNotifications = + new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance()); + + /** + * The queue of terminal notifications to process. + */ + final IntrusiveDoublyLinkedQueue terminalNotifications = + new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance()); + + volatile boolean running = true; + + public static final String MINIMUM_CYCLE_DURATION_TO_LOG_MILLIS_PROP = + "UpdateGraph.minimumCycleDurationToLogMillis"; + public static final long DEFAULT_MINIMUM_CYCLE_DURATION_TO_LOG_NANOSECONDS = TimeUnit.MILLISECONDS.toNanos( + Configuration.getInstance().getIntegerWithDefault(MINIMUM_CYCLE_DURATION_TO_LOG_MILLIS_PROP, 25)); + private final long minimumCycleDurationToLogNanos; + + /** when to next flush the performance tracker; initializes to zero to force a flush on start */ + private long nextUpdatePerformanceTrackerFlushTimeNanos; + + /** + * How many cycles we have not logged, but were non-zero. + */ + long suppressedCycles; + long suppressedCyclesTotalNanos; + long suppressedCyclesTotalSafePointTimeMillis; + + /** + * Accumulated UpdateGraph exclusive lock waits for the current cycle (or previous, if idle). + */ + private long currentCycleLockWaitTotalNanos; + + public static class AccumulatedCycleStats { + /** + * Number of cycles run. + */ + public int cycles = 0; + /** + * Number of cycles run not exceeding their time budget. + */ + public int cyclesOnBudget = 0; + /** + * Accumulated safepoints over all cycles. + */ + public int safePoints = 0; + /** + * Accumulated safepoint time over all cycles. 
+ */ + public long safePointPauseTimeMillis = 0L; + + public int[] cycleTimesMicros = new int[32]; + public static final int MAX_DOUBLING_LEN = 1024; + + synchronized void accumulate( + final boolean onBudget, + final long cycleTimeNanos, + final long safePoints, + final long safePointPauseTimeMillis) { + if (onBudget) { + ++cyclesOnBudget; + } + this.safePoints += safePoints; + this.safePointPauseTimeMillis += safePointPauseTimeMillis; + if (cycles >= cycleTimesMicros.length) { + final int newLen; + if (cycleTimesMicros.length < MAX_DOUBLING_LEN) { + newLen = cycleTimesMicros.length * 2; + } else { + newLen = cycleTimesMicros.length + MAX_DOUBLING_LEN; + } + cycleTimesMicros = Arrays.copyOf(cycleTimesMicros, newLen); + } + cycleTimesMicros[cycles] = (int) ((cycleTimeNanos + 500) / 1_000); + ++cycles; + } + + public synchronized void take(final AccumulatedCycleStats out) { + out.cycles = cycles; + out.cyclesOnBudget = cyclesOnBudget; + out.safePoints = safePoints; + out.safePointPauseTimeMillis = safePointPauseTimeMillis; + if (out.cycleTimesMicros.length < cycleTimesMicros.length) { + out.cycleTimesMicros = new int[cycleTimesMicros.length]; + } + System.arraycopy(cycleTimesMicros, 0, out.cycleTimesMicros, 0, cycles); + cycles = 0; + cyclesOnBudget = 0; + safePoints = 0; + safePointPauseTimeMillis = 0; + } + } + + public final AccumulatedCycleStats accumulatedCycleStats = new AccumulatedCycleStats(); + + /** + * Abstracts away the processing of non-terminal notifications. + */ + NotificationProcessor notificationProcessor; + + /** + * Facilitate GC Introspection during refresh cycles. + */ + private final JvmIntrospectionContext jvmIntrospectionContext; + + /** + * The {@link LivenessScope} that should be on top of the {@link LivenessScopeStack} for all run and notification + * processing. Only non-null while some thread is in {@link #doRefresh(Runnable)}. + */ + volatile LivenessScope refreshScope; + + /** + * Is this one of the threads engaged in notification processing? (Either the solitary run thread, or one of the + * pooled threads it uses in some configurations) + */ + final ThreadLocal isUpdateThread = ThreadLocal.withInitial(() -> false); + + private final ThreadLocal serialTableOperationsSafe = ThreadLocal.withInitial(() -> false); + + final LogicalClockImpl logicalClock = new LogicalClockImpl(); + + /** + * Encapsulates locking support. + */ + private final UpdateGraphLock lock; + + /** + * When UpdateGraph.printDependencyInformation is set to true, the UpdateGraph will print debug information for each + * notification that has dependency information; as well as which notifications have been completed and are + * outstanding. + */ + private final boolean printDependencyInformation = + Configuration.getInstance().getBooleanWithDefault("UpdateGraph.printDependencyInformation", false); + + private final String name; + + final UpdatePerformanceTracker updatePerformanceTracker; + + /** + * The BaseUpdateGraph is an abstract class that is suitable for extension by UpdateGraphs that process a set of + * sources and then the resulting {@link io.deephaven.engine.updategraph.NotificationQueue.Notification + * Notifications} using a {@link NotificationProcessor}. 
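
An aside on the AccumulatedCycleStats hunk above: cycleTimesMicros doubles until it reaches MAX_DOUBLING_LEN and then grows by a fixed 1024-slot increment, bounding the cost of any single resize once the recorded history is long. A self-contained sketch of just that growth policy (class and method names here are illustrative, not from the diff):

import java.util.Arrays;

final class GrowthPolicySketch {
    static final int MAX_DOUBLING_LEN = 1024; // mirrors the constant above

    // Exponential while small, linear once large -- the same rule accumulate() applies.
    static int[] grow(int[] arr) {
        final int newLen = arr.length < MAX_DOUBLING_LEN
                ? arr.length * 2
                : arr.length + MAX_DOUBLING_LEN;
        return Arrays.copyOf(arr, newLen);
    }

    public static void main(String[] args) {
        int[] a = new int[32];
        for (int i = 0; i < 8; ++i) {
            a = grow(a);
            System.out.println(a.length); // 64 128 256 512 1024 2048 3072 4096
        }
    }
}
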
+ * + * @param name the name of the update graph, which must be unique + * @param allowUnitTestMode is unit test mode allowed, used for configuring the lock + * @param log the logger for this update graph + * @param minimumCycleDurationToLogNanos the minimum cycle time, in nanoseconds, that results in cycle times being + * logged to at an INFO level + */ + public BaseUpdateGraph( + final String name, + final boolean allowUnitTestMode, + final Logger log, + long minimumCycleDurationToLogNanos) { + this.name = name; + this.log = log; + this.minimumCycleDurationToLogNanos = minimumCycleDurationToLogNanos; + notificationProcessor = PoisonedNotificationProcessor.INSTANCE; + jvmIntrospectionContext = new JvmIntrospectionContext(); + lock = UpdateGraphLock.create(this, allowUnitTestMode); + updatePerformanceTracker = new UpdatePerformanceTracker(this); + } + + public String getName() { + return name; + } + + public UpdateGraph getUpdateGraph() { + return this; + } + + @Override + public String toString() { + return new LogOutputStringImpl().append(this).toString(); + } + + @Override + public LogicalClock clock() { + return logicalClock; + } + // region Accessors for the shared and exclusive locks + + /** + *
<p>
    + * Get the shared lock for this {@link UpdateGraph}. + *
<p>
    + * Using this lock will prevent run processing from proceeding concurrently, but will allow other read-only + * processing to proceed. + *
<p>
    + * The shared lock implementation is expected to support reentrance. + *
<p>
    + * This lock does not support {@link java.util.concurrent.locks.Lock#newCondition()}. Use the exclusive + * lock if you need to wait on events that are driven by run processing. + * + * @return The shared lock for this {@link UpdateGraph} + */ + public AwareFunctionalLock sharedLock() { + return lock.sharedLock(); + } + + /** + *
<p>
    + * Get the exclusive lock for this {@link UpdateGraph}. + *
<p>
    + * Using this lock will prevent run or read-only processing from proceeding concurrently. + *
<p>
    + * The exclusive lock implementation is expected to support reentrance. + *
<p>
    + * Note that using the exclusive lock while the shared lock is held by the current thread will result in exceptions, + * as lock upgrade is not supported. + *
<p>
    + * This lock does support {@link java.util.concurrent.locks.Lock#newCondition()}. + * + * @return The exclusive lock for this {@link UpdateGraph} + */ + public AwareFunctionalLock exclusiveLock() { + return lock.exclusiveLock(); + } + + // endregion Accessors for the shared and exclusive locks + + /** + * Test if this thread is part of our run thread executor service. + * + * @return whether this is one of our run threads. + */ + @Override + public boolean currentThreadProcessesUpdates() { + return isUpdateThread.get(); + } + + @Override + public boolean serialTableOperationsSafe() { + return serialTableOperationsSafe.get(); + } + + @Override + public boolean setSerialTableOperationsSafe(final boolean newValue) { + final boolean old = serialTableOperationsSafe.get(); + serialTableOperationsSafe.set(newValue); + return old; + } + + + /** + * Add a table to the list of tables to run and mark it as {@link DynamicNode#setRefreshing(boolean) refreshing} if + * it was a {@link DynamicNode}. + * + * @param updateSource The table to be added to the run list + */ + @Override + public void addSource(@NotNull final Runnable updateSource) { + if (!running) { + throw new IllegalStateException("UpdateGraph is no longer running"); + } + + if (updateSource instanceof DynamicNode) { + ((DynamicNode) updateSource).setRefreshing(true); + } + + sources.add(updateSource); + } + + + @Override + public void removeSource(@NotNull final Runnable updateSource) { + sources.remove(updateSource); + } + + /** + * Remove a collection of sources from the list of refreshing sources. + * + * @implNote This will not set the sources as {@link DynamicNode#setRefreshing(boolean) non-refreshing}. + * @param sourcesToRemove The sources to remove from the list of refreshing sources + */ + public void removeSources(final Collection sourcesToRemove) { + sources.removeAll(sourcesToRemove); + } + + /** + * Return the number of valid sources. + * + * @return the number of valid sources + */ + public int sourceCount() { + return sources.size(); + } + + /** + * Enqueue a notification to be flushed according to its priority. Non-terminal notifications should only be + * enqueued during the updating phase of a cycle. That is, they should be enqueued from an update source or + * subsequent notification delivery. 
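
The shared/exclusive contract spelled out above is easy to misuse, so a hedged usage sketch follows. It assumes the UpdateGraph interface exposes these accessors, as the implementation here does, and the locked bodies are placeholders:

import io.deephaven.engine.updategraph.UpdateGraph;

public class LockDisciplineSketch {
    static void readUnderSharedLock(UpdateGraph graph) {
        graph.sharedLock().doLocked(() -> {
            // read-only table access; update processing is held off while we run
        });
    }

    static void awaitUnderExclusiveLock(UpdateGraph graph) {
        // Never acquire this while already holding the shared lock: upgrade is unsupported.
        graph.exclusiveLock().doLocked(() -> {
            // safe to create and await a newCondition() here; the shared lock cannot
        });
    }
}
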
+ * + * @param notification The notification to enqueue + * @see NotificationQueue.Notification#isTerminal() + * @see LogicalClock.State + */ + @Override + public void addNotification(@NotNull final Notification notification) { + if (notification.isTerminal()) { + synchronized (terminalNotifications) { + terminalNotifications.offer(notification); + } + } else { + logDependencies().append(Thread.currentThread().getName()).append(": Adding notification ") + .append(notification).endl(); + synchronized (pendingNormalNotifications) { + Assert.eq(logicalClock.currentState(), "logicalClock.currentState()", + LogicalClock.State.Updating, "LogicalClock.State.Updating"); + pendingNormalNotifications.offer(notification); + } + notificationProcessor.onNotificationAdded(); + } + } + + @Override + public boolean maybeAddNotification(@NotNull final Notification notification, final long deliveryStep) { + if (notification.isTerminal()) { + throw new IllegalArgumentException("Notification must not be terminal"); + } + logDependencies().append(Thread.currentThread().getName()).append(": Adding notification ").append(notification) + .append(" if step is ").append(deliveryStep).endl(); + final boolean added; + synchronized (pendingNormalNotifications) { + // Note that the clock is advanced to idle under the pendingNormalNotifications lock, after which point no + // further normal notifications will be processed on this cycle. + final long logicalClockValue = logicalClock.currentValue(); + if (LogicalClock.getState(logicalClockValue) == LogicalClock.State.Updating + && LogicalClock.getStep(logicalClockValue) == deliveryStep) { + pendingNormalNotifications.offer(notification); + added = true; + } else { + added = false; + } + } + if (added) { + notificationProcessor.onNotificationAdded(); + } + return added; + } + + @Override + public boolean satisfied(final long step) { + StepUpdater.checkForOlderStep(step, sourcesLastSatisfiedStep); + return sourcesLastSatisfiedStep == step; + } + + /** + * Enqueue a collection of notifications to be flushed. + * + * @param notifications The notification to enqueue + * + * @see #addNotification(Notification) + */ + @Override + public void addNotifications(@NotNull final Collection notifications) { + synchronized (pendingNormalNotifications) { + synchronized (terminalNotifications) { + notifications.forEach(this::addNotification); + } + } + } + + /** + * @return Whether this UpdateGraph has a mechanism that supports refreshing + */ + @Override + public boolean supportsRefreshing() { + return true; + } + + /** + * Reset state at the beginning or end of a unit test. 
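
The subtle piece in the hunk above is maybeAddNotification: the offer succeeds only while the clock is still Updating on the exact step the notification targets, and the check happens under the pending-notifications lock so it races cleanly with cycle completion. A deliberately simplified model of that gate, with plain fields standing in for the real packed LogicalClock:

import java.util.ArrayDeque;
import java.util.Queue;

final class StepGatedQueue<T> {
    private final Queue<T> pending = new ArrayDeque<>();
    private long step = 0;
    private boolean updating = false;

    synchronized void startCycle() { ++step; updating = true; }
    synchronized void completeCycle() { updating = false; }

    // Enqueue only if we are still updating on deliveryStep.
    synchronized boolean maybeOffer(T item, long deliveryStep) {
        if (updating && step == deliveryStep) {
            pending.offer(item);
            return true;
        }
        return false; // too late: the cycle for that step already completed
    }
}
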
+ * + * @param after if this is done after a test, in which case the liveness scope is popped + * @param errors the list of errors generated during reset + */ + @TestUseOnly + void resetForUnitTests(final boolean after, final List errors) { + sources.clear(); + notificationProcessor.shutdown(); + synchronized (pendingNormalNotifications) { + pendingNormalNotifications.clear(); + } + isUpdateThread.remove(); + synchronized (terminalNotifications) { + terminalNotifications.clear(); + } + logicalClock.resetForUnitTests(); + sourcesLastSatisfiedStep = logicalClock.currentStep(); + + refreshScope = null; + if (after) { + LivenessManager stackTop; + while ((stackTop = LivenessScopeStack.peek()) instanceof LivenessScope) { + LivenessScopeStack.pop((LivenessScope) stackTop); + } + CleanupReferenceProcessorInstance.resetAllForUnitTests(); + } + + ensureUnlocked("unit test reset thread", errors); + } + + @TestUseOnly + void resetLock() { + lock.reset(); + } + + /** + * Flush all non-terminal notifications, complete the logical clock update cycle, then flush all terminal + * notifications. + * + * @param check whether to check that update sources have not yet been satisfied (false in unit test mode) + */ + void flushNotificationsAndCompleteCycle(boolean check) { + // We cannot proceed with normal notifications, nor are we satisfied, until all update source refresh + // notifications have been processed. Note that non-update source notifications that require dependency + // satisfaction are delivered first to the pendingNormalNotifications queue, and hence will not be processed + // until we advance to the flush* methods. + // TODO: If and when we properly integrate update sources into the dependency tracking system, we can + // discontinue this distinct phase, along with the requirement to treat the UpdateGraph itself as a Dependency. + // Until then, we must delay the beginning of "normal" notification processing until all update sources are + // done. See IDS-8039. + notificationProcessor.doAllWork(); + + updateSourcesLastSatisfiedStep(check); + + flushNormalNotificationsAndCompleteCycle(); + flushTerminalNotifications(); + synchronized (pendingNormalNotifications) { + Assert.assertion(pendingNormalNotifications.isEmpty(), "pendingNormalNotifications.isEmpty()"); + } + } + + void updateSourcesLastSatisfiedStep(boolean check) { + if (check && sourcesLastSatisfiedStep >= logicalClock.currentStep()) { + throw new IllegalStateException("Already marked sources as satisfied!"); + } + sourcesLastSatisfiedStep = logicalClock.currentStep(); + } + + /** + * Flush all non-terminal {@link Notification notifications} from the queue. + */ + private void flushNormalNotificationsAndCompleteCycle() { + final IntrusiveDoublyLinkedQueue pendingToEvaluate = + new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance()); + while (true) { + final int outstandingCountAtStart = notificationProcessor.outstandingNotificationsCount(); + notificationProcessor.beforeNotificationsDrained(); + synchronized (pendingNormalNotifications) { + pendingToEvaluate.transferAfterTailFrom(pendingNormalNotifications); + if (outstandingCountAtStart == 0 && pendingToEvaluate.isEmpty()) { + // We complete the cycle here before releasing the lock on pendingNotifications, so that + // maybeAddNotification can detect scenarios where the notification cannot be delivered on the + // desired step. 
+ logicalClock.completeUpdateCycle(); + break; + } + } + logDependencies().append(Thread.currentThread().getName()) + .append(": Notification queue size=").append(pendingToEvaluate.size()) + .append(", outstanding=").append(outstandingCountAtStart) + .endl(); + + boolean nothingBecameSatisfied = true; + for (final Iterator it = pendingToEvaluate.iterator(); it.hasNext();) { + final Notification notification = it.next(); + + Assert.eqFalse(notification.isTerminal(), "notification.isTerminal()"); + Assert.eqFalse(notification.mustExecuteWithUpdateGraphLock(), + "notification.mustExecuteWithUpdateGraphLock()"); + + final boolean satisfied = notification.canExecute(sourcesLastSatisfiedStep); + if (satisfied) { + nothingBecameSatisfied = false; + it.remove(); + logDependencies().append(Thread.currentThread().getName()) + .append(": Submitting to notification processor ").append(notification).endl(); + notificationProcessor.submit(notification); + } else { + logDependencies().append(Thread.currentThread().getName()).append(": Unmet dependencies for ") + .append(notification).endl(); + } + } + if (outstandingCountAtStart == 0 && nothingBecameSatisfied) { + throw new IllegalStateException( + "No outstanding notifications, yet the notification queue is not empty!"); + } + if (notificationProcessor.outstandingNotificationsCount() > 0) { + notificationProcessor.doWork(); + } + } + synchronized (pendingNormalNotifications) { + Assert.eqZero(pendingNormalNotifications.size() + pendingToEvaluate.size(), + "pendingNormalNotifications.size() + pendingToEvaluate.size()"); + } + } + + /** + * Flush all {@link Notification#isTerminal() terminal} {@link Notification notifications} from the queue. + * + * @implNote Any notification that may have been queued while the clock's state is Updating must be invoked during + * this cycle's Idle phase. + */ + private void flushTerminalNotifications() { + synchronized (terminalNotifications) { + for (final Iterator it = terminalNotifications.iterator(); it.hasNext();) { + final Notification notification = it.next(); + Assert.assertion(notification.isTerminal(), "notification.isTerminal()"); + + if (!notification.mustExecuteWithUpdateGraphLock()) { + it.remove(); + // for the single threaded queue case; this enqueues the notification; + // for the executor service case, this causes the notification to be kicked off + notificationProcessor.submit(notification); + } + } + } + + // run the notifications that must be run on this thread + while (true) { + final Notification notificationForThisThread; + synchronized (terminalNotifications) { + notificationForThisThread = terminalNotifications.poll(); + } + if (notificationForThisThread == null) { + break; + } + runNotification(notificationForThisThread); + } + + // We can not proceed until all of the terminal notifications have executed. + notificationProcessor.doAllWork(); + } + + /** + * Abstract away the details of satisfied notification processing. + */ + interface NotificationProcessor { + + /** + * Submit a satisfied notification for processing. + * + * @param notification The notification + */ + void submit(@NotNull NotificationQueue.Notification notification); + + /** + * Submit a queue of satisfied notification for processing. + * + * @param notifications The queue of notifications to + * {@link IntrusiveDoublyLinkedQueue#transferAfterTailFrom(IntrusiveDoublyLinkedQueue) transfer} from. 
+ * Will become empty as a result of successful completion + */ + void submitAll(@NotNull IntrusiveDoublyLinkedQueue notifications); + + /** + * Query the number of outstanding notifications submitted to this processor. + * + * @return The number of outstanding notifications + */ + int outstandingNotificationsCount(); + + /** + *
<p>
    + * Do work (or in the multi-threaded case, wait for some work to have happened). + *
<p>
    + * Caller must know that work is outstanding. + */ + void doWork(); + + /** + * Do all outstanding work. + */ + void doAllWork(); + + /** + * Shutdown this notification processor (for unit tests). + */ + void shutdown(); + + /** + * Called after a pending notification is added. + */ + void onNotificationAdded(); + + /** + * Called before pending notifications are drained. + */ + void beforeNotificationsDrained(); + } + + void runNotification(@NotNull final Notification notification) { + logDependencies().append(Thread.currentThread().getName()).append(": Executing ").append(notification).endl(); + + final LivenessScope scope; + final boolean releaseScopeOnClose; + if (notification.isTerminal()) { + // Terminal notifications can't create new notifications, so they have no need to participate in a shared + // run scope. + scope = new LivenessScope(); + releaseScopeOnClose = true; + } else { + // Non-terminal notifications must use a shared run scope. + Assert.neqNull(refreshScope, "refreshScope"); + scope = refreshScope == LivenessScopeStack.peek() ? null : refreshScope; + releaseScopeOnClose = false; + } + + try (final SafeCloseable ignored = scope == null ? null : LivenessScopeStack.open(scope, releaseScopeOnClose)) { + notification.run(); + logDependencies().append(Thread.currentThread().getName()).append(": Completed ").append(notification) + .endl(); + } catch (final Exception e) { + log.error().append(Thread.currentThread().getName()) + .append(": Exception while executing UpdateGraph notification: ").append(notification) + .append(": ").append(e).endl(); + ProcessEnvironment.getGlobalFatalErrorReporter() + .report("Exception while processing UpdateGraph notification", e); + } + } + + class QueueNotificationProcessor implements NotificationProcessor { + + final IntrusiveDoublyLinkedQueue satisfiedNotifications = + new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance()); + + @Override + public void submit(@NotNull final Notification notification) { + satisfiedNotifications.offer(notification); + } + + @Override + public void submitAll(@NotNull IntrusiveDoublyLinkedQueue notifications) { + satisfiedNotifications.transferAfterTailFrom(notifications); + } + + @Override + public int outstandingNotificationsCount() { + return satisfiedNotifications.size(); + } + + @Override + public void doWork() { + Notification satisfiedNotification; + while ((satisfiedNotification = satisfiedNotifications.poll()) != null) { + runNotification(satisfiedNotification); + } + } + + @Override + public void doAllWork() { + doWork(); + } + + @Override + public void shutdown() { + satisfiedNotifications.clear(); + } + + @Override + public void onNotificationAdded() {} + + @Override + public void beforeNotificationsDrained() {} + } + + + static LogEntry appendAsMillisFromNanos(final LogEntry entry, final long nanos) { + if (nanos > 0) { + return entry.appendDouble(nanos / 1_000_000.0, 3); + } + return entry.append(0); + } + + /** + * Iterate over all monitored tables and run them. 
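
QueueNotificationProcessor above is the degenerate single-threaded strategy: submit() merely queues, and doWork() drains to exhaustion on the calling thread. Stripped of the liveness-scope and error handling that runNotification layers on, the shape is roughly:

import java.util.ArrayDeque;
import java.util.Queue;

final class SingleThreadedProcessorSketch {
    private final Queue<Runnable> satisfied = new ArrayDeque<>();

    void submit(Runnable notification) {
        satisfied.offer(notification); // nothing runs yet
    }

    // doWork() and doAllWork() coincide in the single-threaded case.
    void doAllWork() {
        Runnable next;
        while ((next = satisfied.poll()) != null) {
            next.run();
        }
    }
}
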
+ */ + void refreshTablesAndFlushNotifications() { + final long startTimeNanos = System.nanoTime(); + + currentCycleLockWaitTotalNanos = 0; + jvmIntrospectionContext.startSample(); + + if (sources.isEmpty()) { + exclusiveLock().doLocked(this::flushTerminalNotifications); + } else { + refreshAllTables(); + } + + jvmIntrospectionContext.endSample(); + final long cycleTimeNanos = System.nanoTime() - startTimeNanos; + computeStatsAndLogCycle(cycleTimeNanos); + } + + private void computeStatsAndLogCycle(final long cycleTimeNanos) { + final long safePointPauseTimeMillis = jvmIntrospectionContext.deltaSafePointPausesTimeMillis(); + accumulatedCycleStats.accumulate( + isCycleOnBudget(cycleTimeNanos), + cycleTimeNanos, + jvmIntrospectionContext.deltaSafePointPausesCount(), + safePointPauseTimeMillis); + if (cycleTimeNanos >= minimumCycleDurationToLogNanos) { + if (suppressedCycles > 0) { + logSuppressedCycles(); + } + final double cycleTimeMillis = cycleTimeNanos / 1_000_000.0; + LogEntry entry = log.info().append(getName()) + .append(": Update Graph Processor cycleTime=").appendDouble(cycleTimeMillis, 3); + if (jvmIntrospectionContext.hasSafePointData()) { + final long safePointSyncTimeMillis = jvmIntrospectionContext.deltaSafePointSyncTimeMillis(); + entry = entry + .append("ms, safePointTime=") + .append(safePointPauseTimeMillis) + .append("ms, safePointTimePct="); + if (safePointPauseTimeMillis > 0 && cycleTimeMillis > 0.0) { + final double safePointTimePct = 100.0 * safePointPauseTimeMillis / cycleTimeMillis; + entry = entry.appendDouble(safePointTimePct, 2); + } else { + entry = entry.append("0"); + } + entry = entry.append("%, safePointSyncTime=").append(safePointSyncTimeMillis); + } + entry = entry.append("ms, lockWaitTime="); + entry = appendAsMillisFromNanos(entry, currentCycleLockWaitTotalNanos); + entry.append("ms").endl(); + return; + } + if (cycleTimeNanos > 0) { + ++suppressedCycles; + suppressedCyclesTotalNanos += cycleTimeNanos; + suppressedCyclesTotalSafePointTimeMillis += safePointPauseTimeMillis; + if (suppressedCyclesTotalNanos >= minimumCycleDurationToLogNanos) { + logSuppressedCycles(); + } + } + } + + /** + * Is the provided cycle time on budget? 
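
The base isCycleOnBudget below simply answers true; an implementation with a real deadline would be expected to override it, roughly as follows (the budget constant and class name are assumptions for illustration):

final class BudgetedGraphSketch /* extends BaseUpdateGraph */ {
    private static final long TARGET_CYCLE_NANOS = 1_000_000_000L; // assumed 1s budget

    public boolean isCycleOnBudget(long cycleTimeNanos) {
        return cycleTimeNanos <= TARGET_CYCLE_NANOS;
    }
}
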
+ * + * @param cycleTimeNanos the cycle time, in nanoseconds + * + * @return true if the cycle time is within the desired budget + */ + public boolean isCycleOnBudget(long cycleTimeNanos) { + return true; + } + + private void logSuppressedCycles() { + LogEntry entry = log.info() + .append("Minimal Update Graph Processor cycle times: ") + .appendDouble((double) (suppressedCyclesTotalNanos) / 1_000_000.0, 3).append("ms / ") + .append(suppressedCycles).append(" cycles = ") + .appendDouble( + (double) suppressedCyclesTotalNanos / (double) suppressedCycles / 1_000_000.0, 3) + .append("ms/cycle average)"); + if (jvmIntrospectionContext.hasSafePointData()) { + entry = entry + .append(", safePointTime=") + .append(suppressedCyclesTotalSafePointTimeMillis) + .append("ms"); + } + entry.endl(); + suppressedCycles = suppressedCyclesTotalNanos = 0; + suppressedCyclesTotalSafePointTimeMillis = 0; + } + + + void maybeFlushUpdatePerformance(final long nowNanos, final long checkTime) { + if (checkTime >= nextUpdatePerformanceTrackerFlushTimeNanos) { + nextUpdatePerformanceTrackerFlushTimeNanos = + nowNanos + MILLISECONDS.toNanos(UpdatePerformanceTracker.REPORT_INTERVAL_MILLIS); + try { + updatePerformanceTracker.flush(); + } catch (Exception err) { + log.error().append("Error flushing UpdatePerformanceTracker: ").append(err).endl(); + } + } + } + + /** + * In unit tests it can be convenient to force the update performance tracker to flush, without waiting for the + * complete REPORT_INTERVAL_MILLIS to elapse. + */ + @TestUseOnly + public void resetNextFlushTime() { + nextUpdatePerformanceTrackerFlushTimeNanos = 0; + } + + /** + * Refresh all the update sources within an {@link LogicalClock update cycle} after the UpdateGraph has been locked. + * At the end of the updates all {@link Notification notifications} will be flushed. + */ + void refreshAllTables() { + doRefresh(() -> sources.forEach((final UpdateSourceRefreshNotification updateSourceNotification, + final Runnable unused) -> notificationProcessor.submit(updateSourceNotification))); + } + + /** + * Perform a run cycle, using {@code refreshFunction} to ensure the desired update sources are refreshed at the + * start. + * + * @param refreshFunction Function to submit one or more {@link UpdateSourceRefreshNotification update source + * refresh notifications} to the {@link NotificationProcessor notification processor} or run them directly. + */ + private void doRefresh(@NotNull final Runnable refreshFunction) { + final long lockStartTimeNanos = System.nanoTime(); + exclusiveLock().doLocked(() -> { + currentCycleLockWaitTotalNanos += System.nanoTime() - lockStartTimeNanos; + if (!running) { + return; + } + synchronized (pendingNormalNotifications) { + Assert.eqZero(pendingNormalNotifications.size(), "pendingNormalNotifications.size()"); + } + Assert.eqNull(refreshScope, "refreshScope"); + refreshScope = new LivenessScope(); + final long updatingCycleValue = logicalClock.startUpdateCycle(); + logDependencies().append("Beginning UpdateGraph cycle step=") + .append(logicalClock.currentStep()).endl(); + try (final SafeCloseable ignored = LivenessScopeStack.open(refreshScope, true)) { + refreshFunction.run(); + flushNotificationsAndCompleteCycle(true); + } finally { + logicalClock.ensureUpdateCycleCompleted(updatingCycleValue); + refreshScope = null; + } + logDependencies().append("Completed UpdateGraph cycle step=") + .append(logicalClock.currentStep()).endl(); + }); + } + + /** + * Re-usable class for adapting update sources to {@link Notification}s. 
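
doRefresh above is the refresh engine in miniature: take the exclusive lock, start a clock cycle, run the refresh function, flush notifications, and always complete the cycle on the way out. As a toy, with the locking, liveness, and logging machinery elided:

final class CycleSketch {
    private long step;
    private boolean updating;

    void doRefresh(Runnable refreshFunction, Runnable flushNotifications) {
        ++step;                       // logicalClock.startUpdateCycle()
        updating = true;
        try {
            refreshFunction.run();    // submit update-source refresh notifications
            flushNotifications.run(); // flushNotificationsAndCompleteCycle(true)
        } finally {
            updating = false;         // logicalClock.ensureUpdateCycleCompleted(...)
        }
    }
}
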
+ */ + static final class UpdateSourceRefreshNotification extends AbstractNotification + implements SimpleReference { + + private final WeakReference updateSourceRef; + + private UpdateSourceRefreshNotification(@NotNull final Runnable updateSource) { + super(false); + updateSourceRef = new WeakReference<>(updateSource); + } + + @Override + public LogOutput append(@NotNull final LogOutput logOutput) { + return logOutput.append("UpdateSourceRefreshNotification{").append(System.identityHashCode(this)) + .append(", for UpdateSource{").append(System.identityHashCode(get())).append("}}"); + } + + @Override + public boolean canExecute(final long step) { + return true; + } + + @Override + public void run() { + final Runnable updateSource = updateSourceRef.get(); + if (updateSource == null) { + return; + } + updateSource.run(); + } + + @Override + public Runnable get() { + // NB: Arguably we should make get() and clear() synchronized. + return updateSourceRef.get(); + } + + @Override + public void clear() { + updateSourceRef.clear(); + } + } + + public LogEntry logDependencies() { + if (printDependencyInformation) { + return log.info(); + } else { + return LogEntry.NULL; + } + } + + /** + * Ensure the lock is not held by the current thread. + * + * @param callerDescription the description of the caller + * @param errors an optional list to populate with errors when the lock is held. + */ + @TestUseOnly + void ensureUnlocked(@NotNull final String callerDescription, @Nullable final List errors) { + if (exclusiveLock().isHeldByCurrentThread()) { + if (errors != null) { + errors.add(callerDescription + ": UpdateGraph exclusive lock is still held"); + } + while (exclusiveLock().isHeldByCurrentThread()) { + exclusiveLock().unlock(); + } + } + if (sharedLock().isHeldByCurrentThread()) { + if (errors != null) { + errors.add(callerDescription + ": UpdateGraph shared lock is still held"); + } + while (sharedLock().isHeldByCurrentThread()) { + sharedLock().unlock(); + } + } + } + + public void takeAccumulatedCycleStats(AccumulatedCycleStats updateGraphAccumCycleStats) { + accumulatedCycleStats.take(updateGraphAccumCycleStats); + } + + public static UpdateGraph getInstance(final String name) { + return INSTANCES.get(name); + } + + + + /** + * Remove a named UpdateGraph. + * + *
<p>
    + * In addition to removing the UpdateGraph from the instances, an attempt is made to {@link #stop()} it. + *
<p>
    + * + * @param name the name of the UpdateGraph to remove + * @return true if the update graph was found + */ + public static boolean removeInstance(final String name) { + final UpdateGraph graph; + synchronized (INSTANCES) { + graph = INSTANCES.removeKey(name); + if (graph == null) { + return false; + } + } + graph.stop(); + return true; + } + + /** + * Builds and caches a new UpdateGraph named {@code name} and provided by {@code construct}. It is an error if there + * is already an UpdateGraph with the same name. + * + * @param name the name of the new update graph + * @param construct A {@link Supplier} to construct an UpdateGraph if no update graph with the name already exists. + * The Supplier must provide an update graph with the given name. + * + * @throws IllegalStateException if an UpdateGraph with the provided name already exists + */ + public static T buildOrThrow(final String name, final Supplier construct) { + synchronized (INSTANCES) { + if (INSTANCES.containsKey(name)) { + throw new IllegalStateException( + String.format("UpdateGraph with name %s already exists", name)); + } + final T newGraph = construct.get(); + Assert.equals(newGraph.getName(), "newGraph.getName()", name, "name"); + INSTANCES.put(name, newGraph); + return newGraph; + } + } + + /** + * Returns an existing UpdateGraph with the provided {@code name} if one exists, else returns a new named + * UpdateGraph supplied by {@code construct}. + * + * @param construct A {@link Supplier} to construct an UpdateGraph if no update graph with the name already exists. + * The Supplier must provide an update graph with the given name. + * + * @return the UpdateGraph + */ + public static T existingOrBuild(final String name, Supplier construct) { + return INSTANCES.putIfAbsent(name, (nameToInsert) -> { + final T newGraph = construct.get(); + Assert.equals(newGraph.getName(), "newGraph.getName()", nameToInsert, "name"); + return newGraph; + }).cast(); + } +} diff --git a/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/EventDrivenUpdateGraph.java b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/EventDrivenUpdateGraph.java new file mode 100644 index 00000000000..701a0d17878 --- /dev/null +++ b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/EventDrivenUpdateGraph.java @@ -0,0 +1,143 @@ +package io.deephaven.engine.updategraph.impl; + +import io.deephaven.base.log.LogOutput; +import io.deephaven.engine.context.ExecutionContext; +import io.deephaven.internal.log.LoggerFactory; +import io.deephaven.io.logger.Logger; +import io.deephaven.util.SafeCloseable; +import org.jetbrains.annotations.NotNull; + +/** + * An EventDrivenUpdateGraph provides an isolated refresh processor. + * + *
<p>
    + * As with a {@link PeriodicUpdateGraph}, the EventDrivenUpdateGraph contains a set of sources, but it is refreshed only + * when a call to {@link #requestRefresh()} is made. All sources are synchronously refreshed on that thread, and then + * the resultant notifications are also synchronously processed. + *
<p>
    + */ +public class EventDrivenUpdateGraph extends BaseUpdateGraph { + private static final Logger log = LoggerFactory.getLogger(EventDrivenUpdateGraph.class); + private boolean started = false; + + /** + * Create a builder for an EventDrivenUpdateGraph with the given name. + * + * @param name the name of the new EventDrivenUpdateGraph + * @return a builder for the EventDrivenUpdateGraph + */ + public static EventDrivenUpdateGraph.Builder newBuilder(final String name) { + return new EventDrivenUpdateGraph.Builder(name); + } + + private EventDrivenUpdateGraph(String name, long minimumCycleDurationToLogNanos) { + super(name, false, log, minimumCycleDurationToLogNanos); + notificationProcessor = new QueueNotificationProcessor(); + } + + @Override + public LogOutput append(@NotNull final LogOutput logOutput) { + return logOutput.append("EventDrivenUpdateGraph-").append(getName()); + } + + @Override + public int parallelismFactor() { + return 1; + } + + /** + * Refresh all sources and execute the resulting notifications synchronously on this thread. + */ + @Override + public void requestRefresh() { + maybeStart(); + // do the work to refresh everything, on this thread + isUpdateThread.set(true); + try (final SafeCloseable ignored = ExecutionContext.newBuilder().setUpdateGraph(this).build().open()) { + refreshAllTables(); + } finally { + isUpdateThread.remove(); + } + final long nowNanos = System.nanoTime(); + synchronized (this) { + maybeFlushUpdatePerformance(nowNanos, nowNanos); + } + } + + /** + * We defer starting the update performance tracker until our first cycle. This is essential when we are the DEFAULT + * graph used for UPT publishing, as the UPT requires the publication graph to be in the BaseUpdateGraph map, which + * is not done until after our constructor completes. + */ + private synchronized void maybeStart() { + if (started) { + return; + } + updatePerformanceTracker.start(); + started = true; + } + + @Override + public void stop() { + running = false; + // if we wait for the lock to be done, then we should have completed our cycle and will not execute again + exclusiveLock().doLocked(() -> { + }); + } + + /** + * Builds or retrieves a new EventDrivenUpdateGraph. + */ + public static class Builder { + private final String name; + private long minimumCycleDurationToLogNanos = DEFAULT_MINIMUM_CYCLE_DURATION_TO_LOG_NANOSECONDS; + + public Builder(String name) { + this.name = name; + } + + /** + * Set the minimum duration of an update cycle that should be logged at the INFO level. + * + * @param minimumCycleDurationToLogNanos threshold to log a slow cycle + * @return this builder + */ + public Builder minimumCycleDurationToLogNanos(long minimumCycleDurationToLogNanos) { + this.minimumCycleDurationToLogNanos = minimumCycleDurationToLogNanos; + return this; + } + + /** + * Constructs and returns an EventDrivenUpdateGraph. It is an error to do so if an UpdateGraph already exists + * with the name provided to this builder. + * + * @return the new EventDrivenUpdateGraph + * @throws IllegalStateException if an UpdateGraph with the provided name already exists + */ + public EventDrivenUpdateGraph build() { + return BaseUpdateGraph.buildOrThrow(name, this::construct); + } + + /** + * Returns an existing EventDrivenUpdateGraph with the name provided to this Builder, if one exists, else + * returns a new EventDrivenUpdateGraph. + * + *

    + * If the options for the existing graph are different than the options specified in this Builder, this + * Builder's options are ignored. + *

    + * + * @return the EventDrivenUpdateGraph + * @throws ClassCastException if the existing graph is not an EventDrivenUpdateGraph + */ + public EventDrivenUpdateGraph existingOrBuild() { + return BaseUpdateGraph.existingOrBuild(name, this::construct); + } + + private EventDrivenUpdateGraph construct() { + return new EventDrivenUpdateGraph( + name, + minimumCycleDurationToLogNanos); + } + } +} diff --git a/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java index a2fbaee6581..acc4ea026ee 100644 --- a/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java +++ b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java @@ -6,51 +6,37 @@ import io.deephaven.UncheckedDeephavenException; import io.deephaven.base.SleepUtil; import io.deephaven.base.log.LogOutput; -import io.deephaven.base.reference.SimpleReference; import io.deephaven.base.verify.Assert; import io.deephaven.chunk.util.pools.MultiChunkPool; import io.deephaven.configuration.Configuration; import io.deephaven.engine.context.ExecutionContext; -import io.deephaven.engine.liveness.LivenessManager; import io.deephaven.engine.liveness.LivenessScope; import io.deephaven.engine.liveness.LivenessScopeStack; -import io.deephaven.engine.table.impl.perf.PerformanceEntry; -import io.deephaven.engine.table.impl.perf.UpdatePerformanceTracker; -import io.deephaven.engine.table.impl.util.StepUpdater; import io.deephaven.engine.updategraph.*; -import io.deephaven.engine.util.reference.CleanupReferenceProcessorInstance; import io.deephaven.engine.util.systemicmarking.SystemicObjectTracker; -import io.deephaven.hash.KeyedObjectHashMap; -import io.deephaven.hash.KeyedObjectKey; -import io.deephaven.hotspot.JvmIntrospectionContext; import io.deephaven.internal.log.LoggerFactory; -import io.deephaven.io.log.LogEntry; -import io.deephaven.io.log.impl.LogOutputStringImpl; import io.deephaven.io.logger.Logger; -import io.deephaven.io.sched.Scheduler; -import io.deephaven.io.sched.TimedJob; -import io.deephaven.net.CommBase; import io.deephaven.util.SafeCloseable; import io.deephaven.util.annotations.TestUseOnly; -import io.deephaven.util.datastructures.SimpleReferenceManager; import io.deephaven.util.datastructures.linked.IntrusiveDoublyLinkedNode; import io.deephaven.util.datastructures.linked.IntrusiveDoublyLinkedQueue; import io.deephaven.util.function.ThrowingRunnable; -import io.deephaven.util.locks.AwareFunctionalLock; -import io.deephaven.util.process.ProcessEnvironment; import io.deephaven.util.thread.NamingThreadFactory; import io.deephaven.util.thread.ThreadInitializationFactory; import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; -import java.lang.ref.WeakReference; -import java.util.*; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Random; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BooleanSupplier; import java.util.function.LongConsumer; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + /** *
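Note (not part of the patch): the class added above refreshes nothing in the background; every cycle is driven by a caller. A minimal usage sketch, where `mySource` is a hypothetical `Runnable` update source and `addSource` is assumed to be inherited from `BaseUpdateGraph` (as the `PeriodicUpdateGraph.addSource` override later in this patch suggests):

    // Build an isolated, on-demand graph; the name must be unique across update graphs.
    EventDrivenUpdateGraph graph = EventDrivenUpdateGraph.newBuilder("event-graph-example").build();
    graph.addSource(mySource);
    // Synchronously refreshes all sources and processes the resulting notifications on this thread.
    graph.requestRefresh();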
diff --git a/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java
index a2fbaee6581..acc4ea026ee 100644
--- a/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java
+++ b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java
@@ -6,51 +6,37 @@
 import io.deephaven.UncheckedDeephavenException;
 import io.deephaven.base.SleepUtil;
 import io.deephaven.base.log.LogOutput;
-import io.deephaven.base.reference.SimpleReference;
 import io.deephaven.base.verify.Assert;
 import io.deephaven.chunk.util.pools.MultiChunkPool;
 import io.deephaven.configuration.Configuration;
 import io.deephaven.engine.context.ExecutionContext;
-import io.deephaven.engine.liveness.LivenessManager;
 import io.deephaven.engine.liveness.LivenessScope;
 import io.deephaven.engine.liveness.LivenessScopeStack;
-import io.deephaven.engine.table.impl.perf.PerformanceEntry;
-import io.deephaven.engine.table.impl.perf.UpdatePerformanceTracker;
-import io.deephaven.engine.table.impl.util.StepUpdater;
 import io.deephaven.engine.updategraph.*;
-import io.deephaven.engine.util.reference.CleanupReferenceProcessorInstance;
 import io.deephaven.engine.util.systemicmarking.SystemicObjectTracker;
-import io.deephaven.hash.KeyedObjectHashMap;
-import io.deephaven.hash.KeyedObjectKey;
-import io.deephaven.hotspot.JvmIntrospectionContext;
 import io.deephaven.internal.log.LoggerFactory;
-import io.deephaven.io.log.LogEntry;
-import io.deephaven.io.log.impl.LogOutputStringImpl;
 import io.deephaven.io.logger.Logger;
-import io.deephaven.io.sched.Scheduler;
-import io.deephaven.io.sched.TimedJob;
-import io.deephaven.net.CommBase;
 import io.deephaven.util.SafeCloseable;
 import io.deephaven.util.annotations.TestUseOnly;
-import io.deephaven.util.datastructures.SimpleReferenceManager;
 import io.deephaven.util.datastructures.linked.IntrusiveDoublyLinkedNode;
 import io.deephaven.util.datastructures.linked.IntrusiveDoublyLinkedQueue;
 import io.deephaven.util.function.ThrowingRunnable;
-import io.deephaven.util.locks.AwareFunctionalLock;
-import io.deephaven.util.process.ProcessEnvironment;
 import io.deephaven.util.thread.NamingThreadFactory;
 import io.deephaven.util.thread.ThreadInitializationFactory;
 import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;
 
-import java.lang.ref.WeakReference;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Random;
 import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.BooleanSupplier;
 import java.util.function.LongConsumer;
 
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
 /**
  * <p>
  * This class uses a thread (or pool of threads) to periodically update a set of monitored update sources at a specified
@@ -63,9 +49,8 @@
  * defined)
  *
  */
-public class PeriodicUpdateGraph implements UpdateGraph {
+public class PeriodicUpdateGraph extends BaseUpdateGraph {
 
-    public static final String DEFAULT_UPDATE_GRAPH_NAME = "DEFAULT";
     public static final int NUM_THREADS_DEFAULT_UPDATE_GRAPH =
             Configuration.getInstance().getIntegerWithDefault("PeriodicUpdateGraph.updateThreads", -1);
 
@@ -73,56 +58,8 @@ public static Builder newBuilder(final String name) {
         return new Builder(name);
     }
 
-    /**
-     * If the provided update graph is a {@link PeriodicUpdateGraph} then create a PerformanceEntry using the given
-     * description. Otherwise, return null.
-     *
-     * @param updateGraph The update graph to create a performance entry for.
-     * @param description The description for the performance entry.
-     * @return The performance entry, or null if the update graph is not a {@link PeriodicUpdateGraph}.
-     */
-    @Nullable
-    public static PerformanceEntry createUpdatePerformanceEntry(
-            final UpdateGraph updateGraph,
-            final String description) {
-        if (updateGraph instanceof PeriodicUpdateGraph) {
-            final PeriodicUpdateGraph pug = (PeriodicUpdateGraph) updateGraph;
-            if (pug.updatePerformanceTracker != null) {
-                return pug.updatePerformanceTracker.getEntry(description);
-            }
-            throw new IllegalStateException("Cannot create a performance entry for a PeriodicUpdateGraph that has "
-                    + "not been completely constructed.");
-        }
-        return null;
-    }
-
-    private static final KeyedObjectHashMap<String, PeriodicUpdateGraph> INSTANCES = new KeyedObjectHashMap<>(
-            new KeyedObjectKey.BasicAdapter<>(PeriodicUpdateGraph::getName));
-
-    private final Logger log = LoggerFactory.getLogger(PeriodicUpdateGraph.class);
-
-    /**
-     * Update sources that are part of this PeriodicUpdateGraph.
-     */
-    private final SimpleReferenceManager<Runnable, UpdateSourceRefreshNotification> sources =
-            new SimpleReferenceManager<>(UpdateSourceRefreshNotification::new);
-
-    /**
-     * Recorder for updates source satisfaction as a phase of notification processing.
-     */
-    private volatile long sourcesLastSatisfiedStep;
-
-    /**
-     * The queue of non-terminal notifications to process.
-     */
-    private final IntrusiveDoublyLinkedQueue<Notification> pendingNormalNotifications =
-            new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance());
-
-    /**
-     * The queue of terminal notifications to process.
-     */
-    private final IntrusiveDoublyLinkedQueue<Notification> terminalNotifications =
-            new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance());
+    private static final Logger log = LoggerFactory.getLogger(PeriodicUpdateGraph.class);
 
     /**
      * A flag indicating that an accelerated cycle has been requested.
@@ -130,7 +67,11 @@
     private final AtomicBoolean refreshRequested = new AtomicBoolean();
 
     private final Thread refreshThread;
-    private volatile boolean running = true;
+
+    /**
+     * {@link ScheduledExecutorService} used for scheduling the {@link #watchDogTimeoutProcedure}.
+     */
+    private final ScheduledExecutorService watchdogScheduler;
 
     /**
      * If this is set to a positive value, then we will call the {@link #watchDogTimeoutProcedure} if any single run
@@ -138,130 +79,27 @@
      * PeriodicUpdateGraph loop that is "stuck" is the equivalent of an error. Set the value with
     * {@link #setWatchDogMillis(int)}.
     */
-    private int watchDogMillis = 0;
+    private volatile int watchDogMillis = 0;
 
     /**
      * If a timeout time has been {@link #setWatchDogMillis(int) set}, this procedure will be called if any single run
      * loop takes longer than the value specified. Set the value with
      * {@link #setWatchDogTimeoutProcedure(LongConsumer)}.
      */
-    private LongConsumer watchDogTimeoutProcedure = null;
+    private volatile LongConsumer watchDogTimeoutProcedure;
 
     public static final String ALLOW_UNIT_TEST_MODE_PROP = "PeriodicUpdateGraph.allowUnitTestMode";
     private final boolean allowUnitTestMode;
-    private int notificationAdditionDelay = 0;
+    private int notificationAdditionDelay;
     private Random notificationRandomizer = new Random(0);
-    private boolean unitTestMode = false;
+    private boolean unitTestMode;
     private ExecutorService unitTestRefreshThreadPool;
 
     public static final String DEFAULT_TARGET_CYCLE_DURATION_MILLIS_PROP =
             "PeriodicUpdateGraph.targetCycleDurationMillis";
-    public static final String MINIMUM_CYCLE_DURATION_TO_LOG_MILLIS_PROP =
-            "PeriodicUpdateGraph.minimumCycleDurationToLogMillis";
     private final long defaultTargetCycleDurationMillis;
     private volatile long targetCycleDurationMillis;
-    private final long minimumCycleDurationToLogNanos;
-
-    /** when to next flush the performance tracker; initializes to zero to force a flush on start */
-    private long nextUpdatePerformanceTrackerFlushTime = 0;
-
-    /**
-     * How many cycles we have not logged, but were non-zero.
-     */
-    private long suppressedCycles = 0;
-    private long suppressedCyclesTotalNanos = 0;
-    private long suppressedCyclesTotalSafePointTimeMillis = 0;
-
-    /**
-     * Accumulated UpdateGraph exclusive lock waits for the current cycle (or previous, if idle).
-     */
-    private long currentCycleLockWaitTotalNanos = 0;
-
-    /**
-     * Accumulated delays due to intracycle yields for the current cycle (or previous, if idle).
-     */
-    private long currentCycleYieldTotalNanos = 0L;
-
-    /**
-     * Accumulated delays due to intracycle sleeps for the current cycle (or previous, if idle).
-     */
-    private long currentCycleSleepTotalNanos = 0L;
-
-    public static class AccumulatedCycleStats {
-        /**
-         * Number of cycles run.
-         */
-        public int cycles = 0;
-
-        /**
-         * Number of cycles run not exceeding their time budget.
-         */
-        public int cyclesOnBudget = 0;
-
-        /**
-         * Accumulated safepoints over all cycles.
-         */
-        public int safePoints = 0;
-
-        /**
-         * Accumulated safepoint time over all cycles.
-         */
-        public long safePointPauseTimeMillis = 0L;
-
-        public int[] cycleTimesMicros = new int[32];
-        public static final int MAX_DOUBLING_LEN = 1024;
-
-        synchronized void accumulate(
-                final long targetCycleDurationMillis,
-                final long cycleTimeNanos,
-                final long safePoints,
-                final long safePointPauseTimeMillis) {
-            final boolean onBudget = targetCycleDurationMillis * 1000 * 1000 >= cycleTimeNanos;
-            if (onBudget) {
-                ++cyclesOnBudget;
-            }
-            this.safePoints += safePoints;
-            this.safePointPauseTimeMillis += safePointPauseTimeMillis;
-            if (cycles >= cycleTimesMicros.length) {
-                final int newLen;
-                if (cycleTimesMicros.length < MAX_DOUBLING_LEN) {
-                    newLen = cycleTimesMicros.length * 2;
-                } else {
-                    newLen = cycleTimesMicros.length + MAX_DOUBLING_LEN;
-                }
-                cycleTimesMicros = Arrays.copyOf(cycleTimesMicros, newLen);
-            }
-            cycleTimesMicros[cycles] = (int) ((cycleTimeNanos + 500) / 1_000);
-            ++cycles;
-        }
-
-        public synchronized void take(final AccumulatedCycleStats out) {
-            out.cycles = cycles;
-            out.cyclesOnBudget = cyclesOnBudget;
-            out.safePoints = safePoints;
-            out.safePointPauseTimeMillis = safePointPauseTimeMillis;
-            if (out.cycleTimesMicros.length < cycleTimesMicros.length) {
-                out.cycleTimesMicros = new int[cycleTimesMicros.length];
-            }
-            System.arraycopy(cycleTimesMicros, 0, out.cycleTimesMicros, 0, cycles);
-            cycles = 0;
-            cyclesOnBudget = 0;
-            safePoints = 0;
-            safePointPauseTimeMillis = 0;
-        }
-    }
-
-    public final AccumulatedCycleStats accumulatedCycleStats = new AccumulatedCycleStats();
+    private final ThreadInitializationFactory threadInitializationFactory;
 
-    /**
-     * Abstracts away the processing of non-terminal notifications.
-     */
-    private NotificationProcessor notificationProcessor;
-
-    /**
-     * Facilitate GC Introspection during refresh cycles.
-     */
-    private final JvmIntrospectionContext jvmIntrospectionContext;
-
-    /**
-     * The {@link LivenessScope} that should be on top of the {@link LivenessScopeStack} for all run and notification
-     * processing. Only non-null while some thread is in {@link #doRefresh(Runnable)}.
-     */
-    private volatile LivenessScope refreshScope;
 
     /**
      * The number of threads in our executor service for dispatching notifications. If 1, then we don't actually use the
@@ -269,50 +107,23 @@
     private final int updateThreads;
 
-    /**
-     * Is this one of the threads engaged in notification processing? (Either the solitary run thread, or one of the
-     * pooled threads it uses in some configurations)
-     */
-    private final ThreadLocal<Boolean> isUpdateThread = ThreadLocal.withInitial(() -> false);
-
-    private final ThreadLocal<Boolean> serialTableOperationsSafe = ThreadLocal.withInitial(() -> false);
-
     private final long minimumInterCycleSleep =
             Configuration.getInstance().getIntegerWithDefault("PeriodicUpdateGraph.minimumInterCycleSleep", 0);
     private final boolean interCycleYield =
             Configuration.getInstance().getBooleanWithDefault("PeriodicUpdateGraph.interCycleYield", false);
 
-    private final LogicalClockImpl logicalClock = new LogicalClockImpl();
-
-    /**
-     * Encapsulates locking support.
-     */
-    private final UpdateGraphLock lock;
-
-    /**
-     * When PeriodicUpdateGraph.printDependencyInformation is set to true, the PeriodicUpdateGraph will print debug
-     * information for each notification that has dependency information; as well as which notifications have been
-     * completed and are outstanding.
-     */
-    private final boolean printDependencyInformation =
-            Configuration.getInstance().getBooleanWithDefault("PeriodicUpdateGraph.printDependencyInformation", false);
-
-    private final String name;
-
-    private final UpdatePerformanceTracker updatePerformanceTracker;
-
     public PeriodicUpdateGraph(
             final String name,
             final boolean allowUnitTestMode,
             final long targetCycleDurationMillis,
             final long minimumCycleDurationToLogNanos,
-            final int numUpdateThreads) {
-        this.name = name;
+            final int numUpdateThreads,
+            final ThreadInitializationFactory threadInitializationFactory) {
+        super(name, allowUnitTestMode, log, minimumCycleDurationToLogNanos);
         this.allowUnitTestMode = allowUnitTestMode;
         this.defaultTargetCycleDurationMillis = targetCycleDurationMillis;
         this.targetCycleDurationMillis = targetCycleDurationMillis;
-        this.minimumCycleDurationToLogNanos = minimumCycleDurationToLogNanos;
-        this.lock = UpdateGraphLock.create(this, this.allowUnitTestMode);
+        this.threadInitializationFactory = threadInitializationFactory;
 
         if (numUpdateThreads <= 0) {
             this.updateThreads = Runtime.getRuntime().availableProcessors();
@@ -320,42 +131,28 @@
             this.updateThreads = numUpdateThreads;
         }
 
-        notificationProcessor = PoisonedNotificationProcessor.INSTANCE;
-        jvmIntrospectionContext = new JvmIntrospectionContext();
-
-        refreshThread = new Thread(ThreadInitializationFactory.wrapRunnable(() -> {
-            configureRefreshThread();
+        OperationInitializer captured = ExecutionContext.getContext().getInitializer();
+        refreshThread = new Thread(threadInitializationFactory.createInitializer(() -> {
+            configureRefreshThread(captured);
             while (running) {
                 Assert.eqFalse(this.allowUnitTestMode, "allowUnitTestMode");
                 refreshTablesAndFlushNotifications();
             }
         }), "PeriodicUpdateGraph." + name + ".refreshThread");
         refreshThread.setDaemon(true);
-
-        updatePerformanceTracker = new UpdatePerformanceTracker(this);
-    }
-
-    public String getName() {
-        return name;
-    }
-
-    public UpdateGraph getUpdateGraph() {
-        return this;
+        watchdogScheduler = Executors.newSingleThreadScheduledExecutor(
+                new NamingThreadFactory(PeriodicUpdateGraph.class, "watchdogScheduler", true) {
+                    @Override
+                    public Thread newThread(@NotNull final Runnable r) {
+                        // Not a refresh thread, but should still be instrumented for debugging purposes.
+                        return super.newThread(threadInitializationFactory.createInitializer(r));
+                    }
+                });
     }
 
     @Override
     public LogOutput append(@NotNull final LogOutput logOutput) {
-        return logOutput.append("PeriodicUpdateGraph-").append(name);
-    }
-
-    @Override
-    public String toString() {
-        return new LogOutputStringImpl().append(this).toString();
-    }
-
-    @Override
-    public LogicalClock clock() {
-        return logicalClock;
+        return logOutput.append("PeriodicUpdateGraph-").append(getName());
     }
 
     @NotNull
@@ -433,69 +230,6 @@ public int parallelismFactor() {
         }
     }
 
-    // region Accessors for the shared and exclusive locks
-
-    /**
-     * <p>
-     * Get the shared lock for this {@link PeriodicUpdateGraph}.
-     * <p>
-     * Using this lock will prevent run processing from proceeding concurrently, but will allow other read-only
-     * processing to proceed.
-     * <p>
-     * The shared lock implementation is expected to support reentrance.
-     * <p>
-     * This lock does not support {@link java.util.concurrent.locks.Lock#newCondition()}. Use the exclusive
-     * lock if you need to wait on events that are driven by run processing.
-     *
-     * @return The shared lock for this {@link PeriodicUpdateGraph}
-     */
-    public AwareFunctionalLock sharedLock() {
-        return lock.sharedLock();
-    }
-
-    /**
-     * <p>
-     * Get the exclusive lock for this {@link PeriodicUpdateGraph}.
-     * <p>
-     * Using this lock will prevent run or read-only processing from proceeding concurrently.
-     * <p>
-     * The exclusive lock implementation is expected to support reentrance.
-     * <p>
-     * Note that using the exclusive lock while the shared lock is held by the current thread will result in exceptions,
-     * as lock upgrade is not supported.
-     * <p>
-     * This lock does support {@link java.util.concurrent.locks.Lock#newCondition()}.
-     *
-     * @return The exclusive lock for this {@link PeriodicUpdateGraph}
-     */
-    public AwareFunctionalLock exclusiveLock() {
-        return lock.exclusiveLock();
-    }
-
-    // endregion Accessors for the shared and exclusive locks
-
-    /**
-     * Test if this thread is part of our run thread executor service.
-     *
-     * @return whether this is one of our run threads.
-     */
-    @Override
-    public boolean currentThreadProcessesUpdates() {
-        return isUpdateThread.get();
-    }
-
-    @Override
-    public boolean serialTableOperationsSafe() {
-        return serialTableOperationsSafe.get();
-    }
-
-    @Override
-    public boolean setSerialTableOperationsSafe(final boolean newValue) {
-        final boolean old = serialTableOperationsSafe.get();
-        serialTableOperationsSafe.set(newValue);
-        return old;
-    }
-
     /**
      * Set the target duration of an update cycle, including the updating phase and the idle phase. This is also the
      * target interval between the start of one cycle and the start of the next.
@@ -520,6 +254,11 @@ public long getTargetCycleDurationMillis() {
         return targetCycleDurationMillis;
     }
 
+    @Override
+    public boolean isCycleOnBudget(long cycleTimeNanos) {
+        return cycleTimeNanos <= MILLISECONDS.toNanos(targetCycleDurationMillis);
+    }
+
     /**
      * Resets the run cycle time to the default target configured via the {@link Builder} setting.
      *
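Aside (not part of the patch): the new isCycleOnBudget override compares a cycle time measured in nanoseconds against a target configured in milliseconds, scaling the target up rather than truncating the measurement down. A worked example with illustrative values:

    long targetCycleDurationMillis = 1_000;   // one-second target cycle
    long cycleTimeNanos = 850_000_000L;       // an 850 ms cycle
    // MILLISECONDS.toNanos(1_000) == 1_000_000_000, so this cycle is on budget.
    boolean onBudget = cycleTimeNanos <= MILLISECONDS.toNanos(targetCycleDurationMillis);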
@@ -551,7 +290,7 @@ public void enableUnitTestMode() {
         if (refreshThread.isAlive()) {
             throw new IllegalStateException("PeriodicUpdateGraph.refreshThread is executing!");
         }
-        lock.reset();
+        resetLock();
         unitTestMode = true;
         unitTestRefreshThreadPool = makeUnitTestRefreshExecutor();
         updatePerformanceTracker.enableUnitTestMode();
@@ -593,15 +332,6 @@ public void setWatchDogTimeoutProcedure(LongConsumer procedure) {
         this.watchDogTimeoutProcedure = procedure;
     }
 
-    private class WatchdogJob extends TimedJob {
-        @Override
-        public void timedOut() {
-            if (watchDogTimeoutProcedure != null) {
-                watchDogTimeoutProcedure.accept(watchDogMillis);
-            }
-        }
-    }
-
     /**
      * Install a real NotificationProcessor and start the primary refresh thread.
      *
@@ -628,58 +358,32 @@ public void start() {
      * Begins the process to stop all processing threads and forces ReferenceCounted sources to a reference count of
      * zero.
      */
+    @Override
     public void stop() {
         running = false;
         notificationProcessor.shutdown();
+        // ensure that any outstanding cycle has completed
+        exclusiveLock().doLocked(() -> {
+        });
     }
 
     /**
-     * Add a table to the list of tables to run and mark it as {@link DynamicNode#setRefreshing(boolean) refreshing} if
-     * it was a {@link DynamicNode}.
+     * {@inheritDoc}
      *
      * @implNote This will do nothing in {@link #enableUnitTestMode() unit test} mode other than mark the table as
      *           refreshing.
-     * @param updateSource The table to be added to the run list
     */
    @Override
-    public void addSource(@NotNull final Runnable updateSource) {
-        if (!running) {
-            throw new IllegalStateException("PeriodicUpdateGraph is no longer running");
-        }
-
-        if (updateSource instanceof DynamicNode) {
-            ((DynamicNode) updateSource).setRefreshing(true);
-        }
-
-        if (!allowUnitTestMode) {
+    public void addSource(@NotNull Runnable updateSource) {
+        if (allowUnitTestMode) {
             // if we are in unit test mode we never want to start the UpdateGraph
-            sources.add(updateSource);
-            start();
+            if (updateSource instanceof DynamicNode) {
+                ((DynamicNode) updateSource).setRefreshing(true);
+            }
+            return;
         }
-    }
-
-    @Override
-    public void removeSource(@NotNull final Runnable updateSource) {
-        sources.remove(updateSource);
-    }
-
-    /**
-     * Remove a collection of sources from the list of refreshing sources.
-     *
-     * @implNote This will not set the sources as {@link DynamicNode#setRefreshing(boolean) non-refreshing}.
-     * @param sourcesToRemove The sources to remove from the list of refreshing sources
-     */
-    public void removeSources(final Collection<Runnable> sourcesToRemove) {
-        sources.removeAll(sourcesToRemove);
-    }
-
-    /**
-     * Return the number of valid sources.
-     *
-     * @return the number of valid sources
-     */
-    public int sourceCount() {
-        return sources.size();
+        super.addSource(updateSource);
+        start();
     }
 
     /**
@@ -696,20 +400,7 @@ public void addNotification(@NotNull final Notification notification) {
         if (notificationAdditionDelay > 0) {
             SleepUtil.sleep(notificationRandomizer.nextInt(notificationAdditionDelay));
         }
-        if (notification.isTerminal()) {
-            synchronized (terminalNotifications) {
-                terminalNotifications.offer(notification);
-            }
-        } else {
-            logDependencies().append(Thread.currentThread().getName()).append(": Adding notification ")
-                    .append(notification).endl();
-            synchronized (pendingNormalNotifications) {
-                Assert.eq(logicalClock.currentState(), "logicalClock.currentState()",
-                        LogicalClock.State.Updating, "LogicalClock.State.Updating");
-                pendingNormalNotifications.offer(notification);
-            }
-            notificationProcessor.onNotificationAdded();
-        }
+        super.addNotification(notification);
     }
 
     @Override
@@ -717,50 +408,7 @@ public boolean maybeAddNotification(@NotNull final Notification notification, fi
         if (notificationAdditionDelay > 0) {
             SleepUtil.sleep(notificationRandomizer.nextInt(notificationAdditionDelay));
         }
-        if (notification.isTerminal()) {
-            throw new IllegalArgumentException("Notification must not be terminal");
-        }
-        logDependencies().append(Thread.currentThread().getName()).append(": Adding notification ").append(notification)
-                .append(" if step is ").append(deliveryStep).endl();
-        final boolean added;
-        synchronized (pendingNormalNotifications) {
-            // Note that the clock is advanced to idle under the pendingNormalNotifications lock, after which point no
-            // further normal notifications will be processed on this cycle.
-            final long logicalClockValue = logicalClock.currentValue();
-            if (LogicalClock.getState(logicalClockValue) == LogicalClock.State.Updating
-                    && LogicalClock.getStep(logicalClockValue) == deliveryStep) {
-                pendingNormalNotifications.offer(notification);
-                added = true;
-            } else {
-                added = false;
-            }
-        }
-        if (added) {
-            notificationProcessor.onNotificationAdded();
-        }
-        return added;
-    }
-
-    @Override
-    public boolean satisfied(final long step) {
-        StepUpdater.checkForOlderStep(step, sourcesLastSatisfiedStep);
-        return sourcesLastSatisfiedStep == step;
-    }
-
-    /**
-     * Enqueue a collection of notifications to be flushed.
-     *
-     * @param notifications The notification to enqueue
-     *
-     * @see #addNotification(Notification)
-     */
-    @Override
-    public void addNotifications(@NotNull final Collection<? extends Notification> notifications) {
-        synchronized (pendingNormalNotifications) {
-            synchronized (terminalNotifications) {
-                notifications.forEach(this::addNotification);
-            }
-        }
+        return super.maybeAddNotification(notification, deliveryStep);
     }
 
     /**
@@ -769,20 +417,15 @@
      */
     @Override
     public void requestRefresh() {
+        if (!running) {
+            throw new IllegalStateException("Cannot request refresh when UpdateGraph is no longer running.");
+        }
         refreshRequested.set(true);
         synchronized (refreshRequested) {
             refreshRequested.notify();
         }
     }
 
-    /**
-     * @return Whether this UpdateGraph has a mechanism that supports refreshing
-     */
-    @Override
-    public boolean supportsRefreshing() {
-        return true;
-    }
-
     /**
      * Clear all monitored tables and enqueued notifications to support {@link #enableUnitTestMode() unit-tests}.
      *
@@ -805,6 +448,7 @@
      * @param notificationStartDelay Maximum randomized notification start delay
      * @param notificationAdditionDelay Maximum randomized notification addition delay
      */
+    @TestUseOnly
     public void resetForUnitTests(boolean after,
             final boolean randomizedNotifications, final int seed, final int maxRandomizedThreadCount,
             final int notificationStartDelay, final int notificationAdditionDelay) {
@@ -812,34 +456,15 @@
         this.notificationRandomizer = new Random(seed);
         this.notificationAdditionDelay = notificationAdditionDelay;
         Assert.assertion(unitTestMode, "unitTestMode");
-        sources.clear();
-        notificationProcessor.shutdown();
-        synchronized (pendingNormalNotifications) {
-            pendingNormalNotifications.clear();
-        }
-        isUpdateThread.remove();
+
+        resetForUnitTests(after, errors);
+
         if (randomizedNotifications) {
             notificationProcessor = makeRandomizedNotificationProcessor(notificationRandomizer,
                     maxRandomizedThreadCount, notificationStartDelay);
         } else {
             notificationProcessor = makeNotificationProcessor();
         }
-        synchronized (terminalNotifications) {
-            terminalNotifications.clear();
-        }
-        logicalClock.resetForUnitTests();
-        sourcesLastSatisfiedStep = logicalClock.currentStep();
-
-        refreshScope = null;
-        if (after) {
-            LivenessManager stackTop;
-            while ((stackTop = LivenessScopeStack.peek()) instanceof LivenessScope) {
-                LivenessScopeStack.pop((LivenessScope) stackTop);
-            }
-            CleanupReferenceProcessorInstance.resetAllForUnitTests();
-        }
-
-        ensureUnlocked("unit test reset thread", errors);
 
         if (refreshThread.isAlive()) {
             errors.add("UpdateGraph refreshThread isAlive");
@@ -869,7 +494,7 @@
             }
         }
 
-        lock.reset();
+        resetLock();
     }
 
     /**
@@ -921,10 +546,7 @@ private void startCycleForUnitTestsInternal(final boolean sourcesSatisfied) {
     @TestUseOnly
     public void markSourcesRefreshedForUnitTests() {
         Assert.assertion(unitTestMode, "unitTestMode");
-        if (sourcesLastSatisfiedStep >= logicalClock.currentStep()) {
-            throw new IllegalStateException("Already marked sources as satisfied!");
-        }
-        sourcesLastSatisfiedStep = logicalClock.currentStep();
+        updateSourcesLastSatisfiedStep(true);
     }
 
     /**
@@ -947,8 +569,9 @@ public void completeCycleForUnitTests() {
     private void completeCycleForUnitTests(boolean errorCaughtAndInFinallyBlock) {
         Assert.assertion(unitTestMode, "unitTestMode");
         if (!errorCaughtAndInFinallyBlock) {
-            Assert.eq(sourcesLastSatisfiedStep, "sourcesLastSatisfiedStep", logicalClock.currentStep(),
-                    "logicalClock.currentStep()");
+            final long currentStep = logicalClock.currentStep();
+            final boolean satisfied = satisfied(currentStep);
+            Assert.assertion(satisfied, "satisfied()", currentStep, "currentStep");
         }
         try {
             unitTestRefreshThreadPool.submit(this::completeCycleForUnitTestsInternal).get();
@@ -971,7 +594,7 @@ private void completeCycleForUnitTestsInternal() {
                 exclusiveLock().unlock();
                 isUpdateThread.remove();
             }) {
-            flushNotificationsAndCompleteCycle();
+            flushNotificationsAndCompleteCycle(false);
         }
     }
 
@@ -1129,7 +752,7 @@ public Runnable flushAllNormalNotificationsForUnitTests(@NotNull final BooleanSu
         final ControlledNotificationProcessor controlledNotificationProcessor = new ControlledNotificationProcessor();
         notificationProcessor = controlledNotificationProcessor;
         final Future<?> flushJobFuture = unitTestRefreshThreadPool.submit(() -> {
-            final long deadlineNanoTime = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis);
+            final long deadlineNanoTime = System.nanoTime() + MILLISECONDS.toNanos(timeoutMillis);
             boolean flushed;
             while ((flushed = flushOneNotificationForUnitTestsInternal(false)) || !done.getAsBoolean()) {
                 if (!flushed) {
@@ -1154,7 +777,7 @@ public Runnable flushAllNormalNotificationsForUnitTests(@NotNull final BooleanSu
     }
 
     /**
-     * If the run thread is waiting in {@link #flushNormalNotificationsAndCompleteCycle()} or
+     * If the run thread is waiting in flushNormalNotificationsAndCompleteCycle() or
      * {@link #flushAllNormalNotificationsForUnitTests(BooleanSupplier, long)}, wake it up.
     */
    @TestUseOnly
@@ -1163,210 +786,6 @@ public void wakeRefreshThreadForUnitTests() {
         notificationProcessor.onNotificationAdded();
     }
 
-    /**
-     * Flush all non-terminal notifications, complete the logical clock update cycle, then flush all terminal
-     * notifications.
-     */
-    private void flushNotificationsAndCompleteCycle() {
-        // We cannot proceed with normal notifications, nor are we satisfied, until all update source refresh
-        // notifications have been processed. Note that non-update source notifications that require dependency
-        // satisfaction are delivered first to the pendingNormalNotifications queue, and hence will not be processed
-        // until we advance to the flush* methods.
-        // TODO: If and when we properly integrate update sources into the dependency tracking system, we can
-        // discontinue this distinct phase, along with the requirement to treat the UpdateGraph itself as a Dependency.
-        // Until then, we must delay the beginning of "normal" notification processing until all update sources are
-        // done. See IDS-8039.
-        notificationProcessor.doAllWork();
-        sourcesLastSatisfiedStep = logicalClock.currentStep();
-
-        flushNormalNotificationsAndCompleteCycle();
-        flushTerminalNotifications();
-        synchronized (pendingNormalNotifications) {
-            Assert.assertion(pendingNormalNotifications.isEmpty(), "pendingNormalNotifications.isEmpty()");
-        }
-    }
-
-    /**
-     * Flush all non-terminal {@link Notification notifications} from the queue.
-     */
-    private void flushNormalNotificationsAndCompleteCycle() {
-        final IntrusiveDoublyLinkedQueue<Notification> pendingToEvaluate =
-                new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance());
-        while (true) {
-            final int outstandingCountAtStart = notificationProcessor.outstandingNotificationsCount();
-            notificationProcessor.beforeNotificationsDrained();
-            synchronized (pendingNormalNotifications) {
-                pendingToEvaluate.transferAfterTailFrom(pendingNormalNotifications);
-                if (outstandingCountAtStart == 0 && pendingToEvaluate.isEmpty()) {
-                    // We complete the cycle here before releasing the lock on pendingNotifications, so that
-                    // maybeAddNotification can detect scenarios where the notification cannot be delivered on the
-                    // desired step.
-                    logicalClock.completeUpdateCycle();
-                    break;
-                }
-            }
-            logDependencies().append(Thread.currentThread().getName())
-                    .append(": Notification queue size=").append(pendingToEvaluate.size())
-                    .append(", outstanding=").append(outstandingCountAtStart)
-                    .endl();
-
-            boolean nothingBecameSatisfied = true;
-            for (final Iterator<Notification> it = pendingToEvaluate.iterator(); it.hasNext();) {
-                final Notification notification = it.next();
-
-                Assert.eqFalse(notification.isTerminal(), "notification.isTerminal()");
-                Assert.eqFalse(notification.mustExecuteWithUpdateGraphLock(),
-                        "notification.mustExecuteWithUpdateGraphLock()");
-
-                final boolean satisfied = notification.canExecute(sourcesLastSatisfiedStep);
-                if (satisfied) {
-                    nothingBecameSatisfied = false;
-                    it.remove();
-                    logDependencies().append(Thread.currentThread().getName())
-                            .append(": Submitting to notification processor ").append(notification).endl();
-                    notificationProcessor.submit(notification);
-                } else {
-                    logDependencies().append(Thread.currentThread().getName()).append(": Unmet dependencies for ")
-                            .append(notification).endl();
-                }
-            }
-            if (outstandingCountAtStart == 0 && nothingBecameSatisfied) {
-                throw new IllegalStateException(
-                        "No outstanding notifications, yet the notification queue is not empty!");
-            }
-            if (notificationProcessor.outstandingNotificationsCount() > 0) {
-                notificationProcessor.doWork();
-            }
-        }
-        synchronized (pendingNormalNotifications) {
-            Assert.eqZero(pendingNormalNotifications.size() + pendingToEvaluate.size(),
-                    "pendingNormalNotifications.size() + pendingToEvaluate.size()");
-        }
-    }
-
-    /**
-     * Flush all {@link Notification#isTerminal() terminal} {@link Notification notifications} from the queue.
-     *
-     * @implNote Any notification that may have been queued while the clock's state is Updating must be invoked during
-     *           this cycle's Idle phase.
-     */
-    private void flushTerminalNotifications() {
-        synchronized (terminalNotifications) {
-            for (final Iterator<Notification> it = terminalNotifications.iterator(); it.hasNext();) {
-                final Notification notification = it.next();
-                Assert.assertion(notification.isTerminal(), "notification.isTerminal()");
-
-                if (!notification.mustExecuteWithUpdateGraphLock()) {
-                    it.remove();
-                    // for the single threaded queue case; this enqueues the notification;
-                    // for the executor service case, this causes the notification to be kicked off
-                    notificationProcessor.submit(notification);
-                }
-            }
-        }
-
-        // run the notifications that must be run on this thread
-        while (true) {
-            final Notification notificationForThisThread;
-            synchronized (terminalNotifications) {
-                notificationForThisThread = terminalNotifications.poll();
-            }
-            if (notificationForThisThread == null) {
-                break;
-            }
-            runNotification(notificationForThisThread);
-        }
-
-        // We can not proceed until all of the terminal notifications have executed.
-        notificationProcessor.doAllWork();
-    }
-
-    /**
-     * Abstract away the details of satisfied notification processing.
-     */
-    private interface NotificationProcessor {
-
-        /**
-         * Submit a satisfied notification for processing.
-         *
-         * @param notification The notification
-         */
-        void submit(@NotNull NotificationQueue.Notification notification);
-
-        /**
-         * Submit a queue of satisfied notification for processing.
-         *
-         * @param notifications The queue of notifications to
-         *        {@link IntrusiveDoublyLinkedQueue#transferAfterTailFrom(IntrusiveDoublyLinkedQueue) transfer} from.
-         *        Will become empty as a result of successful completion
-         */
-        void submitAll(@NotNull IntrusiveDoublyLinkedQueue<Notification> notifications);
-
-        /**
-         * Query the number of outstanding notifications submitted to this processor.
-         *
-         * @return The number of outstanding notifications
-         */
-        int outstandingNotificationsCount();
-
-        /**
-         * <p>
-         * Do work (or in the multi-threaded case, wait for some work to have happened).
-         * <p>
-         * Caller must know that work is outstanding.
-         */
-        void doWork();
-
-        /**
-         * Do all outstanding work.
-         */
-        void doAllWork();
-
-        /**
-         * Shutdown this notification processor (for unit tests).
-         */
-        void shutdown();
-
-        /**
-         * Called after a pending notification is added.
-         */
-        void onNotificationAdded();
-
-        /**
-         * Called before pending notifications are drained.
-         */
-        void beforeNotificationsDrained();
-    }
-
-    private void runNotification(@NotNull final Notification notification) {
-        logDependencies().append(Thread.currentThread().getName()).append(": Executing ").append(notification).endl();
-
-        final LivenessScope scope;
-        final boolean releaseScopeOnClose;
-        if (notification.isTerminal()) {
-            // Terminal notifications can't create new notifications, so they have no need to participate in a shared
-            // run scope.
-            scope = new LivenessScope();
-            releaseScopeOnClose = true;
-        } else {
-            // Non-terminal notifications must use a shared run scope.
-            Assert.neqNull(refreshScope, "refreshScope");
-            scope = refreshScope == LivenessScopeStack.peek() ? null : refreshScope;
-            releaseScopeOnClose = false;
-        }
-
-        try (final SafeCloseable ignored = scope == null ? null : LivenessScopeStack.open(scope, releaseScopeOnClose)) {
-            notification.run();
-            logDependencies().append(Thread.currentThread().getName()).append(": Completed ").append(notification)
-                    .endl();
-        } catch (final Exception e) {
-            log.error().append(Thread.currentThread().getName())
-                    .append(": Exception while executing PeriodicUpdateGraph notification: ").append(notification)
-                    .append(": ").append(e).endl();
-            ProcessEnvironment.getGlobalFatalErrorReporter()
-                    .report("Exception while processing PeriodicUpdateGraph notification", e);
-        }
-    }
 
     private class ConcurrentNotificationProcessor implements NotificationProcessor {
 
@@ -1508,100 +927,6 @@ int threadCount() {
         }
     }
 
-    private static final class PoisonedNotificationProcessor implements NotificationProcessor {
-
-        private static final NotificationProcessor INSTANCE = new PoisonedNotificationProcessor();
-
-        private static RuntimeException notYetStarted() {
-            return new IllegalStateException("PeriodicUpdateGraph has not been started yet");
-        }
-
-        private PoisonedNotificationProcessor() {}
-
-        @Override
-        public void submit(@NotNull Notification notification) {
-            throw notYetStarted();
-        }
-
-        @Override
-        public void submitAll(@NotNull IntrusiveDoublyLinkedQueue<Notification> notifications) {
-            throw notYetStarted();
-        }
-
-        @Override
-        public int outstandingNotificationsCount() {
-            throw notYetStarted();
-        }
-
-        @Override
-        public void doWork() {
-            throw notYetStarted();
-        }
-
-        @Override
-        public void doAllWork() {
-            throw notYetStarted();
-        }
-
-        @Override
-        public void shutdown() {}
-
-        @Override
-        public void onNotificationAdded() {
-            throw notYetStarted();
-        }
-
-        @Override
-        public void beforeNotificationsDrained() {
-            throw notYetStarted();
-        }
-    }
-
-    private class QueueNotificationProcessor implements NotificationProcessor {
-
-        final IntrusiveDoublyLinkedQueue<Notification> satisfiedNotifications =
-                new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance());
-
-        @Override
-        public void submit(@NotNull final Notification notification) {
-            satisfiedNotifications.offer(notification);
-        }
-
-        @Override
-        public void submitAll(@NotNull IntrusiveDoublyLinkedQueue<Notification> notifications) {
-            satisfiedNotifications.transferAfterTailFrom(notifications);
-        }
-
-        @Override
-        public int outstandingNotificationsCount() {
-            return satisfiedNotifications.size();
-        }
-
-        @Override
-        public void doWork() {
-            Notification satisfiedNotification;
-            while ((satisfiedNotification = satisfiedNotifications.poll()) != null) {
-                runNotification(satisfiedNotification);
-            }
-        }
-
-        @Override
-        public void doAllWork() {
-            doWork();
-        }
-
-        @Override
-        public void shutdown() {
-            satisfiedNotifications.clear();
-        }
-
-        @Override
-        public void onNotificationAdded() {}
-
-        @Override
-        public void beforeNotificationsDrained() {}
-    }
 
     @TestUseOnly
     private class ControlledNotificationProcessor implements NotificationProcessor {
 
@@ -1664,116 +989,39 @@ private boolean blockUntilNotificationAdded(final long nanosToWait) {
         }
     }
 
-    private static LogEntry appendAsMillisFromNanos(final LogEntry entry, final long nanos) {
-        if (nanos > 0) {
-            return entry.appendDouble(nanos / 1_000_000.0, 3);
-        }
-        return entry.append(0);
-    }
 
     /**
-     * Iterate over all monitored tables and run them. This method also ensures that the loop runs no faster than
-     * {@link #getTargetCycleDurationMillis() minimum cycle time}.
+     * Iterate over all monitored tables and run them.
+     *
+     * <p>
+     * This method also ensures that the loop runs no faster than {@link #getTargetCycleDurationMillis() minimum cycle
+     * time}.
+     *
     */
-    private void refreshTablesAndFlushNotifications() {
-        final Scheduler sched = CommBase.getScheduler();
-        final long startTime = sched.currentTimeMillis();
+    @Override
+    void refreshTablesAndFlushNotifications() {
         final long startTimeNanos = System.nanoTime();
-        jvmIntrospectionContext.startSample();
-
-        if (sources.isEmpty()) {
-            exclusiveLock().doLocked(this::flushTerminalNotifications);
-        } else {
-            currentCycleLockWaitTotalNanos = currentCycleYieldTotalNanos = currentCycleSleepTotalNanos = 0L;
-            WatchdogJob watchdogJob = null;
-
-            if ((watchDogMillis > 0) && (watchDogTimeoutProcedure != null)) {
-                watchdogJob = new WatchdogJob();
-                sched.installJob(watchdogJob, startTime + watchDogMillis);
-            }
+        ScheduledFuture<?> watchdogFuture = null;
 
+        final long localWatchdogMillis = watchDogMillis;
+        final LongConsumer localWatchdogTimeoutProcedure = watchDogTimeoutProcedure;
+        if ((localWatchdogMillis > 0) && (localWatchdogTimeoutProcedure != null)) {
+            watchdogFuture = watchdogScheduler.schedule(
+                    () -> localWatchdogTimeoutProcedure.accept(localWatchdogMillis),
+                    localWatchdogMillis, MILLISECONDS);
+        }
 
-            refreshAllTables();
+        super.refreshTablesAndFlushNotifications();
 
-            if (watchdogJob != null) {
-                sched.cancelJob(watchdogJob);
-            }
-            jvmIntrospectionContext.endSample();
-            final long cycleTimeNanos = System.nanoTime() - startTimeNanos;
-            computeStatsAndLogCycle(cycleTimeNanos);
+        if (watchdogFuture != null) {
+            watchdogFuture.cancel(true);
         }
 
         if (interCycleYield) {
             Thread.yield();
         }
 
-        waitForNextCycle(startTime, sched);
-    }
-
-    private void computeStatsAndLogCycle(final long cycleTimeNanos) {
-        final long safePointPauseTimeMillis = jvmIntrospectionContext.deltaSafePointPausesTimeMillis();
-        accumulatedCycleStats.accumulate(
-                getTargetCycleDurationMillis(),
-                cycleTimeNanos,
-                jvmIntrospectionContext.deltaSafePointPausesCount(),
-                safePointPauseTimeMillis);
-        if (cycleTimeNanos >= minimumCycleDurationToLogNanos) {
-            if (suppressedCycles > 0) {
-                logSuppressedCycles();
-            }
-            final double cycleTimeMillis = cycleTimeNanos / 1_000_000.0;
-            LogEntry entry = log.info()
-                    .append("Update Graph Processor cycleTime=").appendDouble(cycleTimeMillis, 3);
-            if (jvmIntrospectionContext.hasSafePointData()) {
-                final long safePointSyncTimeMillis = jvmIntrospectionContext.deltaSafePointSyncTimeMillis();
-                entry = entry
-                        .append("ms, safePointTime=")
-                        .append(safePointPauseTimeMillis)
-                        .append("ms, safePointTimePct=");
-                if (safePointPauseTimeMillis > 0 && cycleTimeMillis > 0.0) {
-                    final double safePointTimePct = 100.0 * safePointPauseTimeMillis / cycleTimeMillis;
-                    entry = entry.appendDouble(safePointTimePct, 2);
-                } else {
-                    entry = entry.append("0");
-                }
-                entry = entry.append("%, safePointSyncTime=").append(safePointSyncTimeMillis);
-            }
-            entry = entry.append("ms, lockWaitTime=");
-            entry = appendAsMillisFromNanos(entry, currentCycleLockWaitTotalNanos);
-            entry = entry.append("ms, yieldTime=");
-            entry = appendAsMillisFromNanos(entry, currentCycleSleepTotalNanos);
-            entry = entry.append("ms, sleepTime=");
-            entry = appendAsMillisFromNanos(entry, currentCycleSleepTotalNanos);
-            entry.append("ms").endl();
-            return;
-        }
-        if (cycleTimeNanos > 0) {
-            ++suppressedCycles;
-            suppressedCyclesTotalNanos += cycleTimeNanos;
-            suppressedCyclesTotalSafePointTimeMillis += safePointPauseTimeMillis;
-            if (suppressedCyclesTotalNanos >= minimumCycleDurationToLogNanos) {
-                logSuppressedCycles();
-            }
-        }
-    }
-
-    private void logSuppressedCycles() {
-        LogEntry entry = log.info()
-                .append("Minimal Update Graph Processor cycle times: ")
-                .appendDouble((double) (suppressedCyclesTotalNanos) / 1_000_000.0, 3).append("ms / ")
-                .append(suppressedCycles).append(" cycles = ")
-                .appendDouble(
-                        (double) suppressedCyclesTotalNanos / (double) suppressedCycles / 1_000_000.0, 3)
-                .append("ms/cycle average)");
-        if (jvmIntrospectionContext.hasSafePointData()) {
-            entry = entry
-                    .append(", safePointTime=")
-                    .append(suppressedCyclesTotalSafePointTimeMillis)
-                    .append("ms");
-        }
-        entry.endl();
-        suppressedCycles = suppressedCyclesTotalNanos = 0;
-        suppressedCyclesTotalSafePointTimeMillis = 0;
+        waitForNextCycle(startTimeNanos);
     }
 
     /**
@@ -1791,24 +1039,17 @@ private void logSuppressedCycles() {
      * wait the remaining period.
      *
-     * @param startTime The start time of the last run cycle
-     * @param timeSource The source of time that startTime was based on
+     * @param startTimeNanos The start time of the last run cycle as reported by {@link System#nanoTime()}
     */
-    private void waitForNextCycle(final long startTime, final Scheduler timeSource) {
-        final long now = timeSource.currentTimeMillis();
-        long expectedEndTime = startTime + targetCycleDurationMillis;
+    private void waitForNextCycle(final long startTimeNanos) {
+        final long nowNanos = System.nanoTime();
+        long expectedEndTimeNanos = startTimeNanos + MILLISECONDS.toNanos(targetCycleDurationMillis);
        if (minimumInterCycleSleep > 0) {
-            expectedEndTime = Math.max(expectedEndTime, now + minimumInterCycleSleep);
+            expectedEndTimeNanos =
+                    Math.max(expectedEndTimeNanos, nowNanos + MILLISECONDS.toNanos(minimumInterCycleSleep));
        }
-        if (expectedEndTime >= nextUpdatePerformanceTrackerFlushTime) {
-            nextUpdatePerformanceTrackerFlushTime = now + UpdatePerformanceTracker.REPORT_INTERVAL_MILLIS;
-            try {
-                updatePerformanceTracker.flush();
-            } catch (Exception err) {
-                log.error().append("Error flushing UpdatePerformanceTracker: ").append(err).endl();
-            }
-        }
-        waitForEndTime(expectedEndTime, timeSource);
+        maybeFlushUpdatePerformance(nowNanos, expectedEndTimeNanos);
+        waitForEndTime(expectedEndTimeNanos);
    }
 
     /**
@@ -1819,12 +1060,11 @@
      * If the delay is interrupted for any other {@link InterruptedException reason}, it will be logged and continue to
      * wait the remaining period.
      *
-     * @param expectedEndTime The time which we should sleep until
-     * @param timeSource The source of time that startTime was based on
+     * @param expectedEndTimeNanos The time (as reported by {@link System#nanoTime()}) which we should sleep until
    */
-    private void waitForEndTime(final long expectedEndTime, final Scheduler timeSource) {
-        long remainingMillis;
-        while ((remainingMillis = expectedEndTime - timeSource.currentTimeMillis()) > 0) {
+    private void waitForEndTime(final long expectedEndTimeNanos) {
+        long remainingNanos;
+        while ((remainingNanos = expectedEndTimeNanos - System.nanoTime()) > 0) {
            if (refreshRequested.get()) {
                return;
            }
@@ -1832,8 +1072,10 @@
            if (refreshRequested.get()) {
                return;
            }
+            final long millisToWait = remainingNanos / 1_000_000;
+            final int extraNanosToWait = (int) (remainingNanos - (millisToWait * 1_000_000));
            try {
-                refreshRequested.wait(remainingMillis);
+                refreshRequested.wait(millisToWait, extraNanosToWait);
            } catch (final InterruptedException logAndIgnore) {
                log.warn().append("Interrupted while waiting on refreshRequested. Ignoring: ").append(logAndIgnore)
                        .endl();
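Aside (not part of the patch): Object.wait takes a millisecond argument plus a separate sub-millisecond nanosecond adjustment, so the rewritten waitForEndTime splits the remaining nanoseconds into exactly that pair. A worked example of the arithmetic, with illustrative values:

    long remainingNanos = 1_234_567_890L;
    long millisToWait = remainingNanos / 1_000_000;                               // 1234 ms
    int extraNanosToWait = (int) (remainingNanos - (millisToWait * 1_000_000));   // 567890 ns
    // refreshRequested.wait(1234, 567890) then waits for the exact remainder.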
@@ -1842,98 +1084,10 @@
         }
     }
 
-    /**
-     * Refresh all the update sources within an {@link LogicalClock update cycle} after the UpdateGraph has been locked.
-     * At the end of the updates all {@link Notification notifications} will be flushed.
-     */
-    private void refreshAllTables() {
+    @Override
+    void refreshAllTables() {
         refreshRequested.set(false);
-        doRefresh(() -> sources.forEach((final UpdateSourceRefreshNotification updateSourceNotification,
-                final Runnable unused) -> notificationProcessor.submit(updateSourceNotification)));
-    }
-
-    /**
-     * Perform a run cycle, using {@code refreshFunction} to ensure the desired update sources are refreshed at the
-     * start.
-     *
-     * @param refreshFunction Function to submit one or more {@link UpdateSourceRefreshNotification update source
-     *        refresh notifications} to the {@link NotificationProcessor notification processor} or run them directly.
-     */
-    private void doRefresh(@NotNull final Runnable refreshFunction) {
-        final long lockStartTimeNanos = System.nanoTime();
-        exclusiveLock().doLocked(() -> {
-            currentCycleLockWaitTotalNanos += System.nanoTime() - lockStartTimeNanos;
-            synchronized (pendingNormalNotifications) {
-                Assert.eqZero(pendingNormalNotifications.size(), "pendingNormalNotifications.size()");
-            }
-            Assert.eqNull(refreshScope, "refreshScope");
-            refreshScope = new LivenessScope();
-            final long updatingCycleValue = logicalClock.startUpdateCycle();
-            logDependencies().append("Beginning PeriodicUpdateGraph cycle step=")
-                    .append(logicalClock.currentStep()).endl();
-            try (final SafeCloseable ignored = LivenessScopeStack.open(refreshScope, true)) {
-                refreshFunction.run();
-                flushNotificationsAndCompleteCycle();
-            } finally {
-                logicalClock.ensureUpdateCycleCompleted(updatingCycleValue);
-                refreshScope = null;
-            }
-            logDependencies().append("Completed PeriodicUpdateGraph cycle step=")
-                    .append(logicalClock.currentStep()).endl();
-        });
-    }
-
-    /**
-     * Re-usable class for adapting update sources to {@link Notification}s.
-     */
-    private static final class UpdateSourceRefreshNotification extends AbstractNotification
-            implements SimpleReference<Runnable> {
-
-        private final WeakReference<Runnable> updateSourceRef;
-
-        private UpdateSourceRefreshNotification(@NotNull final Runnable updateSource) {
-            super(false);
-            updateSourceRef = new WeakReference<>(updateSource);
-        }
-
-        @Override
-        public LogOutput append(@NotNull final LogOutput logOutput) {
-            return logOutput.append("UpdateSourceRefreshNotification{").append(System.identityHashCode(this))
-                    .append(", for UpdateSource{").append(System.identityHashCode(get())).append("}}");
-        }
-
-        @Override
-        public boolean canExecute(final long step) {
-            return true;
-        }
-
-        @Override
-        public void run() {
-            final Runnable updateSource = updateSourceRef.get();
-            if (updateSource == null) {
-                return;
-            }
-            updateSource.run();
-        }
-
-        @Override
-        public Runnable get() {
-            // NB: Arguably we should make get() and clear() synchronized.
-            return updateSourceRef.get();
-        }
-
-        @Override
-        public void clear() {
-            updateSourceRef.clear();
-        }
-    }
-
-    public LogEntry logDependencies() {
-        if (printDependencyInformation) {
-            return log.info();
-        } else {
-            return LogEntry.NULL;
-        }
+        super.refreshAllTables();
     }
 
     private class NotificationProcessorThreadFactory extends NamingThreadFactory {
@@ -1943,33 +1097,14 @@ private NotificationProcessorThreadFactory(@NotNull final ThreadGroup threadGrou
 
         @Override
         public Thread newThread(@NotNull final Runnable r) {
-            return super.newThread(ThreadInitializationFactory.wrapRunnable(() -> {
-                configureRefreshThread();
+            OperationInitializer captured = ExecutionContext.getContext().getInitializer();
+            return super.newThread(threadInitializationFactory.createInitializer(() -> {
+                configureRefreshThread(captured);
                r.run();
            }));
        }
    }
 
-    @TestUseOnly
-    private void ensureUnlocked(@NotNull final String callerDescription, @Nullable final List<String> errors) {
-        if (exclusiveLock().isHeldByCurrentThread()) {
-            if (errors != null) {
-                errors.add(callerDescription + ": UpdateGraph exclusive lock is still held");
-            }
-            while (exclusiveLock().isHeldByCurrentThread()) {
-                exclusiveLock().unlock();
-            }
-        }
-        if (sharedLock().isHeldByCurrentThread()) {
-            if (errors != null) {
-                errors.add(callerDescription + ": UpdateGraph shared lock is still held");
-            }
-            while (sharedLock().isHeldByCurrentThread()) {
-                sharedLock().unlock();
-            }
-        }
-    }
-
     private ExecutorService makeUnitTestRefreshExecutor() {
         return Executors.newFixedThreadPool(1, new UnitTestThreadFactory());
     }
@@ -1983,8 +1118,9 @@ private UnitTestThreadFactory() {
 
         @Override
         public Thread newThread(@NotNull final Runnable r) {
+            OperationInitializer captured = ExecutionContext.getContext().getInitializer();
             return super.newThread(() -> {
-                configureUnitTestRefreshThread();
+                configureUnitTestRefreshThread(captured);
                r.run();
            });
        }
    }
 
     /**
      * Configure the primary UpdateGraph thread or one of the auxiliary notification processing threads.
      */
-    private void configureRefreshThread() {
+    private void configureRefreshThread(OperationInitializer captured) {
         SystemicObjectTracker.markThreadSystemic();
         MultiChunkPool.enableDedicatedPoolForThisThread();
         isUpdateThread.set(true);
-        // Install this UpdateGraph via ExecutionContext for refresh threads
+        // Install this UpdateGraph via ExecutionContext for refresh threads, share the same operation initializer
         // noinspection resource
-        ExecutionContext.newBuilder().setUpdateGraph(this).build().open();
+        ExecutionContext.newBuilder().setUpdateGraph(this).setOperationInitializer(captured).build().open();
     }
 
     /**
      * Configure threads to be used for unit test processing.
     */
-    private void configureUnitTestRefreshThread() {
+    private void configureUnitTestRefreshThread(OperationInitializer captured) {
         final Thread currentThread = Thread.currentThread();
         final Thread.UncaughtExceptionHandler existing = currentThread.getUncaughtExceptionHandler();
         currentThread.setUncaughtExceptionHandler((final Thread errorThread, final Throwable throwable) -> {
@@ -2013,17 +1149,13 @@ private void configureUnitTestRefreshThread() {
             existing.uncaughtException(errorThread, throwable);
         });
         isUpdateThread.set(true);
-        // Install this UpdateGraph via ExecutionContext for refresh threads
+        // Install this UpdateGraph and share operation initializer pool via ExecutionContext for refresh threads
         // noinspection resource
-        ExecutionContext.newBuilder().setUpdateGraph(this).build().open();
-    }
-
-    public void takeAccumulatedCycleStats(AccumulatedCycleStats updateGraphAccumCycleStats) {
-        accumulatedCycleStats.take(updateGraphAccumCycleStats);
+        ExecutionContext.newBuilder().setUpdateGraph(this).setOperationInitializer(captured).build().open();
     }
 
     public static PeriodicUpdateGraph getInstance(final String name) {
-        return INSTANCES.get(name);
+        return BaseUpdateGraph.getInstance(name).cast();
     }
 
     public static final class Builder {
@@ -2031,11 +1163,11 @@ public static final class Builder {
                 Configuration.getInstance().getBooleanWithDefault(ALLOW_UNIT_TEST_MODE_PROP, false);
         private long targetCycleDurationMillis =
                 Configuration.getInstance().getIntegerWithDefault(DEFAULT_TARGET_CYCLE_DURATION_MILLIS_PROP, 1000);
-        private long minimumCycleDurationToLogNanos = TimeUnit.MILLISECONDS.toNanos(
-                Configuration.getInstance().getIntegerWithDefault(MINIMUM_CYCLE_DURATION_TO_LOG_MILLIS_PROP, 25));
+        private long minimumCycleDurationToLogNanos = DEFAULT_MINIMUM_CYCLE_DURATION_TO_LOG_NANOSECONDS;
 
         private String name;
         private int numUpdateThreads = -1;
+        private ThreadInitializationFactory threadInitializationFactory = runnable -> runnable;
 
         public Builder(String name) {
             this.name = name;
@@ -2078,23 +1210,26 @@ public Builder numUpdateThreads(int numUpdateThreads) {
             return this;
         }
 
+        /**
+         * Sets a functional interface that adds custom initialization for threads started by this UpdateGraph.
+         *
+         * @param threadInitializationFactory the function to invoke on any runnables that will be used to start threads
+         * @return this builder
+         */
+        public Builder threadInitializationFactory(ThreadInitializationFactory threadInitializationFactory) {
+            this.threadInitializationFactory = threadInitializationFactory;
+            return this;
+        }
+
         /**
         * Constructs and returns a PeriodicUpdateGraph. It is an error to do so if an instance already exists with the
         * name provided to this builder.
         *
         * @return the new PeriodicUpdateGraph
-         * @throws IllegalStateException if a PeriodicUpdateGraph with the provided name already exists
+         * @throws IllegalStateException if an UpdateGraph with the provided name already exists
        */
        public PeriodicUpdateGraph build() {
-            synchronized (INSTANCES) {
-                if (INSTANCES.containsKey(name)) {
-                    throw new IllegalStateException(
-                            String.format("PeriodicUpdateGraph with name %s already exists", name));
-                }
-                final PeriodicUpdateGraph newUpdateGraph = construct();
-                INSTANCES.put(name, newUpdateGraph);
-                return newUpdateGraph;
-            }
+            return BaseUpdateGraph.buildOrThrow(name, this::construct);
        }
 
        /**
@@ -2102,9 +1237,10 @@ public PeriodicUpdateGraph build() {
         * new PeriodicUpdateGraph.
         *
         * @return the PeriodicUpdateGraph
+         * @throws ClassCastException if the existing graph is not a PeriodicUpdateGraph
        */
        public PeriodicUpdateGraph existingOrBuild() {
-            return INSTANCES.putIfAbsent(name, n -> construct());
+            return BaseUpdateGraph.existingOrBuild(name, this::construct).cast();
        }
 
@@ -2113,7 +1249,8 @@ private PeriodicUpdateGraph construct() {
            return new PeriodicUpdateGraph(
                    name,
                    allowUnitTestMode,
                    targetCycleDurationMillis,
                    minimumCycleDurationToLogNanos,
-                    numUpdateThreads);
+                    numUpdateThreads,
+                    threadInitializationFactory);
        }
    }
 }
diff --git a/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PoisonedNotificationProcessor.java b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PoisonedNotificationProcessor.java
new file mode 100644
index 00000000000..e6590d9285b
--- /dev/null
+++ b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PoisonedNotificationProcessor.java
@@ -0,0 +1,58 @@
+package io.deephaven.engine.updategraph.impl;
+
+import io.deephaven.engine.updategraph.NotificationQueue;
+import io.deephaven.util.datastructures.linked.IntrusiveDoublyLinkedQueue;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * The poisoned notification processor is used when an update graph has not yet been started, throwing an
+ * IllegalStateException on all operations.
+ */
+final class PoisonedNotificationProcessor implements BaseUpdateGraph.NotificationProcessor {
+
+    static final BaseUpdateGraph.NotificationProcessor INSTANCE = new PoisonedNotificationProcessor();
+
+    private static RuntimeException notYetStarted() {
+        return new IllegalStateException("UpdateGraph has not been started yet");
+    }
+
+    private PoisonedNotificationProcessor() {}
+
+    @Override
+    public void submit(@NotNull NotificationQueue.Notification notification) {
+        throw notYetStarted();
+    }
+
+    @Override
+    public void submitAll(@NotNull IntrusiveDoublyLinkedQueue<NotificationQueue.Notification> notifications) {
+        throw notYetStarted();
+    }
+
+    @Override
+    public int outstandingNotificationsCount() {
+        throw notYetStarted();
+    }
+
+    @Override
+    public void doWork() {
+        throw notYetStarted();
+    }
+
+    @Override
+    public void doAllWork() {
+        throw notYetStarted();
+    }
+
+    @Override
+    public void shutdown() {}
+
+    @Override
+    public void onNotificationAdded() {
+        throw notYetStarted();
+    }
+
+    @Override
+    public void beforeNotificationsDrained() {
+        throw notYetStarted();
+    }
+}
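Aside (not part of the patch): the extracted PoisonedNotificationProcessor preserves the behavior of the inner class deleted above; until a graph is started, any attempt to process notifications fails fast. A sketch of the lifecycle it enforces, assuming the poisoned processor is now installed at construction by BaseUpdateGraph (as the deleted constructor line suggests):

    PeriodicUpdateGraph graph = PeriodicUpdateGraph.newBuilder("lazy-graph").build();
    // Before start(), submitting work to the graph's notification processor would
    // throw IllegalStateException("UpdateGraph has not been started yet").
    graph.start(); // installs a real NotificationProcessor and starts the refresh thread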
ThreadInitializationFactory threadInitializationFactory, ObjectTypeLookup objectTypeLookup, @Nullable Listener changeListener) { this.objectTypeLookup = objectTypeLookup; @@ -90,6 +93,7 @@ protected AbstractScriptSession( .setQueryScope(queryScope) .setQueryCompiler(compilerContext) .setUpdateGraph(updateGraph) + .setOperationInitializer(new OperationInitializationThreadPool(threadInitializationFactory)) .build(); } diff --git a/engine/table/src/main/java/io/deephaven/engine/util/GroovyDeephavenSession.java b/engine/table/src/main/java/io/deephaven/engine/util/GroovyDeephavenSession.java index 6f644571dfc..c071ce0a9c7 100644 --- a/engine/table/src/main/java/io/deephaven/engine/util/GroovyDeephavenSession.java +++ b/engine/table/src/main/java/io/deephaven/engine/util/GroovyDeephavenSession.java @@ -41,6 +41,7 @@ import io.deephaven.time.DateTimeUtils; import io.deephaven.util.QueryConstants; import io.deephaven.util.annotations.VisibleForTesting; +import io.deephaven.util.thread.ThreadInitializationFactory; import io.deephaven.util.type.ArrayTypeUtils; import io.deephaven.util.type.TypeUtils; import io.github.classgraph.ClassGraph; @@ -145,18 +146,20 @@ private String getNextScriptClassName() { public GroovyDeephavenSession( final UpdateGraph updateGraph, + final ThreadInitializationFactory threadInitializationFactory, final ObjectTypeLookup objectTypeLookup, final RunScripts runScripts) throws IOException { - this(updateGraph, objectTypeLookup, null, runScripts); + this(updateGraph, threadInitializationFactory, objectTypeLookup, null, runScripts); } public GroovyDeephavenSession( final UpdateGraph updateGraph, + final ThreadInitializationFactory threadInitializationFactory, ObjectTypeLookup objectTypeLookup, @Nullable final Listener changeListener, final RunScripts runScripts) throws IOException { - super(updateGraph, objectTypeLookup, changeListener); + super(updateGraph, threadInitializationFactory, objectTypeLookup, changeListener); addDefaultImports(consoleImports); if (INCLUDE_DEFAULT_IMPORTS_IN_LOADED_GROOVY) { diff --git a/engine/table/src/main/java/io/deephaven/engine/util/NoLanguageDeephavenSession.java b/engine/table/src/main/java/io/deephaven/engine/util/NoLanguageDeephavenSession.java index 1140aec2a1a..7c7a28c838e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/util/NoLanguageDeephavenSession.java +++ b/engine/table/src/main/java/io/deephaven/engine/util/NoLanguageDeephavenSession.java @@ -5,6 +5,7 @@ import io.deephaven.engine.context.QueryScope; import io.deephaven.engine.updategraph.UpdateGraph; +import io.deephaven.util.thread.ThreadInitializationFactory; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; @@ -24,12 +25,14 @@ public class NoLanguageDeephavenSession extends AbstractScriptSession variables; - public NoLanguageDeephavenSession(final UpdateGraph updateGraph) { - this(updateGraph, SCRIPT_TYPE); + public NoLanguageDeephavenSession(final UpdateGraph updateGraph, + final ThreadInitializationFactory threadInitializationFactory) { + this(updateGraph, threadInitializationFactory, SCRIPT_TYPE); } - public NoLanguageDeephavenSession(final UpdateGraph updateGraph, final String scriptType) { - super(updateGraph, null, null); + public NoLanguageDeephavenSession(final UpdateGraph updateGraph, + final ThreadInitializationFactory threadInitializationFactory, final String scriptType) { + super(updateGraph, threadInitializationFactory, null, null); this.scriptType = scriptType; variables = new LinkedHashMap<>(); diff --git 
a/engine/table/src/main/java/io/deephaven/engine/util/PyCallableWrapperJpyImpl.java b/engine/table/src/main/java/io/deephaven/engine/util/PyCallableWrapperJpyImpl.java index 18262f8e7f0..006bae5be5c 100644 --- a/engine/table/src/main/java/io/deephaven/engine/util/PyCallableWrapperJpyImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/util/PyCallableWrapperJpyImpl.java @@ -24,17 +24,18 @@ public class PyCallableWrapperJpyImpl implements PyCallableWrapper { private static final PyObject NUMBA_VECTORIZED_FUNC_TYPE = getNumbaVectorizedFuncType(); private static final PyObject NUMBA_GUVECTORIZED_FUNC_TYPE = getNumbaGUVectorizedFuncType(); - private static final PyModule dh_table_module = PyModule.importModule("deephaven.table"); + private static final PyModule dh_udf_module = PyModule.importModule("deephaven._udf"); private static final Map> numpyType2JavaClass = new HashMap<>(); static { + numpyType2JavaClass.put('b', byte.class); + numpyType2JavaClass.put('h', short.class); + numpyType2JavaClass.put('H', char.class); numpyType2JavaClass.put('i', int.class); numpyType2JavaClass.put('l', long.class); - numpyType2JavaClass.put('h', short.class); numpyType2JavaClass.put('f', float.class); numpyType2JavaClass.put('d', double.class); - numpyType2JavaClass.put('b', byte.class); numpyType2JavaClass.put('?', boolean.class); numpyType2JavaClass.put('U', String.class); numpyType2JavaClass.put('M', Instant.class); @@ -133,23 +134,21 @@ private void prepareSignature() { pyCallable + " has multiple signatures; this is not currently supported for numba vectorized/guvectorized functions"); } - signature = params.get(0).getStringValue(); unwrapped = pyCallable; // since vectorization doesn't support array type parameters, don't flag numba guvectorized as vectorized numbaVectorized = isNumbaVectorized; vectorized = isNumbaVectorized; } else if (pyCallable.hasAttribute("dh_vectorized")) { - signature = pyCallable.getAttribute("signature").toString(); unwrapped = pyCallable.getAttribute("callable"); numbaVectorized = false; vectorized = true; } else { - signature = dh_table_module.call("_encode_signature", pyCallable).toString(); unwrapped = pyCallable; numbaVectorized = false; vectorized = false; } - pyUdfDecoratedCallable = dh_table_module.call("_py_udf", unwrapped); + pyUdfDecoratedCallable = dh_udf_module.call("_py_udf", unwrapped); + signature = pyUdfDecoratedCallable.getAttribute("signature").toString(); } @Override @@ -199,7 +198,7 @@ public PyObject vectorizedCallable() { if (numbaVectorized || vectorized) { return pyCallable; } else { - return dh_table_module.call("dh_vectorize", unwrapped); + return dh_udf_module.call("_dh_vectorize", unwrapped); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableEnumGetter.java b/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableEnumGetter.java deleted file mode 100644 index d861e125377..00000000000 --- a/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableEnumGetter.java +++ /dev/null @@ -1,11 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.engine.util.config; - -/** - * Accessor interface for enumeration constants for an input table column. 
- */ -public interface InputTableEnumGetter { - Object[] getEnumsForColumn(String columnName); -} diff --git a/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableRowSetter.java b/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableRowSetter.java deleted file mode 100644 index 1d058ea6567..00000000000 --- a/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableRowSetter.java +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.engine.util.config; - -import io.deephaven.engine.table.Table; - -import java.util.Map; - -public interface InputTableRowSetter { - /** - * Set the values of the column specified by the input, filling in missing data using the parameter 'table' as the - * previous value source. This method will be invoked asynchronously. Users may use - * {@link #setRows(Table, int[], Map[], InputTableStatusListener)} to be notified of asynchronous results. - * - * @param table The table to use as the previous value source - * @param row The row key to set - * @param values A map of column name to value to set. - */ - default void setRow(Table table, int row, Map values) { - // noinspection unchecked - setRows(table, new int[] {row}, new Map[] {values}); - } - - /** - * Set the values of the columns specified by the input, filling in missing data using the parameter 'table' as the - * previous value source. This method will be invoked asynchronously. Users may use - * {@link #setRows(Table, int[], Map[], InputTableStatusListener)} to be notified of asynchronous results. - * - * @param table The table to use as the previous value source - * @param rowArray The row keys to update. - * @param valueArray The new values. - */ - default void setRows(Table table, int[] rowArray, Map[] valueArray) { - setRows(table, rowArray, valueArray, InputTableStatusListener.DEFAULT); - } - - /** - * Set the values of the columns specified by the input, filling in missing data using the parameter 'table' as the - * previous value source. This method will be invoked asynchronously. The input listener will be notified on - * success/failure - * - * @param table The table to use as the previous value source - * @param rowArray The row keys to update. - * @param valueArray The new values. - * @param listener The listener to notify on asynchronous results. - */ - void setRows(Table table, int[] rowArray, Map[] valueArray, InputTableStatusListener listener); - - /** - * Add the specified row to the table. Duplicate keys will be overwritten. This method will execute asynchronously. - * Users may use {@link #addRow(Map, boolean, InputTableStatusListener)} to handle the result of the asynchronous - * write. - * - * @param values The values to write. - */ - default void addRow(Map values) { - // noinspection unchecked - addRows(new Map[] {values}); - } - - /** - * Add the specified rows to the table. Duplicate keys will be overwritten. This method will execute asynchronously. - * Users may use {@link #addRows(Map[], boolean, InputTableStatusListener)} to handle the asynchronous result. - * - * @param valueArray The values to write. - */ - default void addRows(Map[] valueArray) { - addRows(valueArray, true, InputTableStatusListener.DEFAULT); - } - - /** - * Add the specified row to the table, optionally overwriting existing keys. This method will execute - * asynchronously, the input listener will be notified on success/failure. - * - * @param valueArray The value to write. 
- * @param allowEdits Should pre-existing keys be overwritten? - * @param listener The listener to report asynchronous result to. - */ - default void addRow(Map valueArray, boolean allowEdits, InputTableStatusListener listener) { - // noinspection unchecked - addRows(new Map[] {valueArray}, allowEdits, listener); - } - - /** - * Add the specified rows to the table, optionally overwriting existing keys. This method will execute - * asynchronously, the input listener will be notified on success/failure. - * - * @param valueArray The values to write. - * @param allowEdits Should pre-existing keys be overwritten? - * @param listener The listener to report asynchronous results to. - */ - void addRows(Map[] valueArray, boolean allowEdits, InputTableStatusListener listener); -} diff --git a/engine/table/src/main/java/io/deephaven/engine/util/file/TrackedFileHandleFactory.java b/engine/table/src/main/java/io/deephaven/engine/util/file/TrackedFileHandleFactory.java index 7d89b355700..18f23de97fd 100644 --- a/engine/table/src/main/java/io/deephaven/engine/util/file/TrackedFileHandleFactory.java +++ b/engine/table/src/main/java/io/deephaven/engine/util/file/TrackedFileHandleFactory.java @@ -3,13 +3,12 @@ */ package io.deephaven.engine.util.file; -import io.deephaven.net.CommBase; +import io.deephaven.UncheckedDeephavenException; import io.deephaven.base.verify.Require; import io.deephaven.configuration.Configuration; -import io.deephaven.io.logger.Logger; -import io.deephaven.io.sched.Scheduler; -import io.deephaven.io.sched.TimedJob; +import io.deephaven.util.thread.NamingThreadFactory; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.VisibleForTesting; import java.io.File; import java.io.IOException; @@ -17,8 +16,13 @@ import java.nio.channels.FileChannel; import java.nio.file.OpenOption; -import java.util.*; +import java.util.Iterator; +import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -26,7 +30,7 @@ * Simple least-recently-opened "cache" for FileHandles, to avoid running up against ulimits. Will probably not achieve * satisfactory results if the number of file handles concurrently in active use exceeds capacity. Note that returned * FileHandles may be closed asynchronously by the factory. - * + *
<p>
    * TODO: Consider adding a lookup to enable handle sharing. Not necessary for current usage. */ public class TrackedFileHandleFactory implements FileHandleFactory { @@ -38,9 +42,16 @@ public static TrackedFileHandleFactory getInstance() { synchronized (TrackedFileHandleFactory.class) { if (instance == null) { instance = new TrackedFileHandleFactory( - CommBase.singleThreadedScheduler("TrackedFileHandleFactory.CleanupScheduler", Logger.NULL) - .start(), - Configuration.getInstance().getInteger("TrackedFileHandleFactory.maxOpenFiles")); + Executors.newSingleThreadScheduledExecutor( + new NamingThreadFactory(TrackedFileHandleFactory.class, "cleanupScheduler", true)), + Configuration.getInstance().getInteger("TrackedFileHandleFactory.maxOpenFiles")) { + + @Override + public void shutdown() { + super.shutdown(); + getScheduler().shutdown(); + } + }; } } } @@ -50,11 +61,12 @@ public static TrackedFileHandleFactory getInstance() { private final static double DEFAULT_TARGET_USAGE_RATIO = 0.9; private final static long DEFAULT_CLEANUP_INTERVAL_MILLIS = 60_000; - private final Scheduler scheduler; + private final ScheduledExecutorService scheduler; private final int capacity; private final double targetUsageRatio; private final int targetUsageThreshold; + private final ScheduledFuture cleanupJobFuture; private final AtomicInteger size = new AtomicInteger(0); private final Queue handleReferences = new ConcurrentLinkedQueue<>(); @@ -70,32 +82,39 @@ public static TrackedFileHandleFactory getInstance() { /** * Full constructor. * - * @param scheduler The scheduler to use for cleanup + * @param scheduler The {@link ScheduledExecutorService} to use for cleanup * @param capacity The total number of file handles to allow outstanding * @param targetUsageRatio The target usage threshold as a ratio of capacity, in [0.1, 0.9] * @param cleanupIntervalMillis The interval for asynchronous cleanup attempts */ - public TrackedFileHandleFactory(@NotNull final Scheduler scheduler, final int capacity, - final double targetUsageRatio, final long cleanupIntervalMillis) { + @VisibleForTesting + TrackedFileHandleFactory( + @NotNull final ScheduledExecutorService scheduler, + final int capacity, + final double targetUsageRatio, + final long cleanupIntervalMillis) { this.scheduler = scheduler; this.capacity = Require.gtZero(capacity, "capacity"); this.targetUsageRatio = Require.inRange(targetUsageRatio, 0.1, 0.9, "targetUsageRatio"); targetUsageThreshold = Require.gtZero((int) (capacity * targetUsageRatio), "targetUsageThreshold"); - new CleanupJob(cleanupIntervalMillis).schedule(); + cleanupJobFuture = scheduler.scheduleAtFixedRate( + new CleanupJob(), cleanupIntervalMillis, cleanupIntervalMillis, TimeUnit.MILLISECONDS); } /** * Constructor with default target usage ratio of 0.9 (90%) and cleanup attempts every 60 seconds. 
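// Example (editor's illustrative sketch, not part of the patch): the change above replaces the
// io.deephaven.io.sched.Scheduler/TimedJob self-rescheduling pattern with a standard
// ScheduledExecutorService. The same pattern in plain java.util.concurrent terms; the class,
// thread, and interval names below are hypothetical.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

class PeriodicCleanupSketch {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(r -> {
        final Thread thread = new Thread(r, "cleanupScheduler");
        thread.setDaemon(true); // daemon thread, like NamingThreadFactory(..., true) above
        return thread;
    });
    // scheduleAtFixedRate replaces TimedJob.timedOut() re-installing itself after each run
    private final ScheduledFuture<?> cleanupFuture =
            scheduler.scheduleAtFixedRate(this::cleanup, 60_000, 60_000, TimeUnit.MILLISECONDS);

    private void cleanup() {
        // reclaim expired resources here
    }

    void shutdown() {
        cleanupFuture.cancel(true); // stop the periodic job, as shutdown() does above
        scheduler.shutdown(); // as the getInstance() override above does for the singleton
    }
}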
- * - * @param scheduler The scheduler to use for cleanup + * + * @param scheduler The {@link ScheduledExecutorService} to use for cleanup * @param capacity The total number of file handles to allow outstanding */ - public TrackedFileHandleFactory(@NotNull final Scheduler scheduler, final int capacity) { + @VisibleForTesting + TrackedFileHandleFactory(@NotNull final ScheduledExecutorService scheduler, final int capacity) { this(scheduler, capacity, DEFAULT_TARGET_USAGE_RATIO, DEFAULT_CLEANUP_INTERVAL_MILLIS); } - public Scheduler getScheduler() { + @VisibleForTesting + ScheduledExecutorService getScheduler() { return scheduler; } @@ -160,26 +179,18 @@ public void closeAll() { } } - private class CleanupJob extends TimedJob { - - private final long intervalMills; - - private CleanupJob(final long intervalMills) { - this.intervalMills = intervalMills; - } + public void shutdown() { + cleanupJobFuture.cancel(true); + } - private void schedule() { - scheduler.installJob(this, scheduler.currentTimeMillis() + intervalMills); - } + private class CleanupJob implements Runnable { - @Override - public void timedOut() { + public void run() { try { cleanup(); } catch (Exception e) { - throw new RuntimeException("TrackedFileHandleFactory.CleanupJob: Unexpected exception", e); + throw new UncheckedDeephavenException("TrackedFileHandleFactory.CleanupJob: Unexpected exception", e); } - schedule(); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableStatusListener.java b/engine/table/src/main/java/io/deephaven/engine/util/input/InputTableStatusListener.java similarity index 92% rename from engine/table/src/main/java/io/deephaven/engine/util/config/InputTableStatusListener.java rename to engine/table/src/main/java/io/deephaven/engine/util/input/InputTableStatusListener.java index 8061f253642..2676d20a11f 100644 --- a/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableStatusListener.java +++ b/engine/table/src/main/java/io/deephaven/engine/util/input/InputTableStatusListener.java @@ -1,7 +1,7 @@ /** * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending */ -package io.deephaven.engine.util.config; +package io.deephaven.engine.util.input; import io.deephaven.internal.log.LoggerFactory; import io.deephaven.io.logger.Logger; @@ -37,7 +37,7 @@ public void onSuccess() { } /** - * Handle an error that occured during an input table write. + * Handle an error that occurred during an input table write. * * @param t the error. 
*/ diff --git a/engine/table/src/main/java/io/deephaven/engine/util/config/MutableInputTable.java b/engine/table/src/main/java/io/deephaven/engine/util/input/InputTableUpdater.java similarity index 65% rename from engine/table/src/main/java/io/deephaven/engine/util/config/MutableInputTable.java rename to engine/table/src/main/java/io/deephaven/engine/util/input/InputTableUpdater.java index 202256ca7ea..271e3f312c3 100644 --- a/engine/table/src/main/java/io/deephaven/engine/util/config/MutableInputTable.java +++ b/engine/table/src/main/java/io/deephaven/engine/util/input/InputTableUpdater.java @@ -1,13 +1,12 @@ /** * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending */ -package io.deephaven.engine.util.config; +package io.deephaven.engine.util.input; import io.deephaven.engine.exceptions.ArgumentException; import io.deephaven.engine.table.ColumnDefinition; import io.deephaven.engine.table.Table; import io.deephaven.engine.table.TableDefinition; -import io.deephaven.engine.rowset.TrackingRowSet; import java.io.IOException; import java.util.List; @@ -15,12 +14,12 @@ /** * A minimal interface for mutable shared tables, providing the ability to write to the table instance this is attached - * to. MutableInputTable instances are set on the table as an attribute. + * to. InputTable instances are set on the table as an attribute. *
<p>
    * Implementations of this interface will make their own guarantees about how atomically changes will be applied and * what operations they support. */ -public interface MutableInputTable extends InputTableRowSetter, InputTableEnumGetter { +public interface InputTableUpdater { /** * Gets the names of the key columns. @@ -85,19 +84,24 @@ default void validateDelete(Table tableToDelete) { error.append("Unknown key columns: ").append(extraKeys); } if (error.length() > 0) { - throw new ArgumentException("Invalid Key Table Definition: " + error.toString()); + throw new ArgumentException("Invalid Key Table Definition: " + error); } } /** * Write {@code newData} to this table. Added rows with keys that match existing rows will instead replace those * rows, if supported. + * + *
<p>
    + This method will block until the add is "completed", where the definition of "completed" is implementation + dependent. + *
<p>
    - This method will block until the rows are added. As a result, this method is not suitable for use from a - {@link io.deephaven.engine.table.TableListener table listener} or any other + For implementations where "completed" means "visible in the next update graph cycle", this method is not suitable + for use from a {@link io.deephaven.engine.table.TableListener table listener} or any other * {@link io.deephaven.engine.updategraph.NotificationQueue.Notification notification}-dispatched callback - dispatched by this MutableInputTable's {@link io.deephaven.engine.updategraph.UpdateGraph update graph}. It may - be suitable to delete from another update graph if doing so does not introduce any cycles. + dispatched by this InputTable's {@link io.deephaven.engine.updategraph.UpdateGraph update graph}. It may be + suitable to add from another update graph if doing so does not introduce any cycles. * * @param newData The data to write to this table * @throws IOException If there is an error writing the data @@ -105,8 +109,13 @@ default void validateDelete(Table tableToDelete) { void add(Table newData) throws IOException; /** - * Write {@code newData} to this table. Added rows with keys that match existing rows will instead replace those - * rows, if supported and {@code allowEdits == true}. + * Write {@code newData} to this table. Added rows with keys that match existing rows replace those rows, if + * supported. + + *
<p>
    + The callback to {@code listener} will happen when the add has "completed", where the definition of "completed" is + implementation dependent. It's possible that the callback happens immediately on the same thread. + *
<p>
    * This method will not block, and can be safely used from a {@link io.deephaven.engine.table.TableListener * table listener} or any other {@link io.deephaven.engine.updategraph.NotificationQueue.Notification @@ -115,49 +124,39 @@ default void validateDelete(Table tableToDelete) { * cycle. * * @param newData The data to write to this table - * @param allowEdits Whether added rows with keys that match existing rows will instead replace those rows, or - * result in an error * @param listener The listener for asynchronous results */ - void addAsync(Table newData, boolean allowEdits, InputTableStatusListener listener); + void addAsync(Table newData, InputTableStatusListener listener); /** * Delete the keys contained in {@code table} from this input table. + * *
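// Example (editor's illustrative sketch, not part of the patch): calling the listener-based
// addAsync from user code. InputTableUpdater, InputTableStatusListener, and
// Table.INPUT_TABLE_ATTRIBUTE come from this diff; the latch-based wait, class, and method
// names are hypothetical.
import io.deephaven.engine.table.Table;
import io.deephaven.engine.util.input.InputTableStatusListener;
import io.deephaven.engine.util.input.InputTableUpdater;

import java.util.concurrent.CountDownLatch;

final class AddAsyncSketch {
    static void addAndWait(Table inputTable, Table newData) throws InterruptedException {
        final InputTableUpdater updater =
                (InputTableUpdater) inputTable.getAttribute(Table.INPUT_TABLE_ATTRIBUTE);
        final CountDownLatch done = new CountDownLatch(1);
        updater.addAsync(newData, new InputTableStatusListener() {
            @Override
            public void onSuccess() {
                done.countDown();
            }

            @Override
            public void onError(Throwable t) {
                t.printStackTrace(); // a real caller would propagate or log the failure
                done.countDown();
            }
        });
        done.await(); // per the javadoc above, the callback may already have run on this thread
    }
}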
<p>
    - This method will block until the rows are deleted. As a result, this method is not suitable for use from a - {@link io.deephaven.engine.table.TableListener table listener} or any other + This method will block until the delete is "completed", where the definition of "completed" is implementation + dependent. + + *
<p>
    + * For implementations where "completed" means "visible in the next update graph cycle", this method is not suitable + * for use from a {@link io.deephaven.engine.table.TableListener table listener} or any other * {@link io.deephaven.engine.updategraph.NotificationQueue.Notification notification}-dispatched callback - * dispatched by this MutableInputTable's {@link io.deephaven.engine.updategraph.UpdateGraph update graph}. It may - * be suitable to delete from another update graph if doing so does not introduce any cycles. + * dispatched by this InputTable's {@link io.deephaven.engine.updategraph.UpdateGraph update graph}. It may be + * suitable to delete from another update graph if doing so does not introduce any cycles. * * @param table The rows to delete * @throws IOException If a problem occurred while deleting the rows. * @throws UnsupportedOperationException If this table does not support deletes */ default void delete(Table table) throws IOException { - delete(table, table.getRowSet()); - } - - /** - * Delete the keys contained in {@code table.subTable(rowSet)} from this input table. - *
<p>
    - * This method will block until the rows are deleted. As a result, this method is not suitable for use from a - * {@link io.deephaven.engine.table.TableListener table listener} or any other - * {@link io.deephaven.engine.updategraph.NotificationQueue.Notification notification}-dispatched callback - * dispatched by this MutableInputTable's {@link io.deephaven.engine.updategraph.UpdateGraph update graph}. It may - * be suitable to delete from another update graph if doing so does not introduce any cycles. - * - * @param table Table containing the rows to delete - * @param rowSet The rows to delete - * @throws IOException If a problem occurred while deleting the rows - * @throws UnsupportedOperationException If this table does not support deletes - */ - default void delete(Table table, TrackingRowSet rowSet) throws IOException { throw new UnsupportedOperationException("Table does not support deletes"); } /** - * Delete the keys contained in {@code table.subTable(rowSet)} from this input table. + * Delete the keys contained in table from this input table. + * + *
<p>
    + The callback to {@code listener} will happen when the delete has "completed", where the definition of "completed" + is implementation dependent. It's possible that the callback happens immediately on the same thread. + *
<p>
    * This method will not block, and can be safely used from a {@link io.deephaven.engine.table.TableListener * table listener} or any other {@link io.deephaven.engine.updategraph.NotificationQueue.Notification @@ -166,27 +165,12 @@ default void delete(Table table, TrackingRowSet rowSet) throws IOException { * cycle. * * @param table Table containing the rows to delete - * @param rowSet The rows to delete * @throws UnsupportedOperationException If this table does not support deletes */ - default void deleteAsync(Table table, TrackingRowSet rowSet, InputTableStatusListener listener) { + default void deleteAsync(Table table, InputTableStatusListener listener) { throw new UnsupportedOperationException("Table does not support deletes"); } - /** - * Return a user-readable description of this MutableInputTable. - * - * @return a description of this input table - */ - String getDescription(); - - /** - * Returns a Deephaven table that contains the current data for this MutableInputTable. - * - * @return the current data in this MutableInputTable. - */ - Table getTable(); - /** * Returns true if the specified column is a key. * @@ -198,20 +182,12 @@ default boolean isKey(String columnName) { } /** - * Returns true if the specified column exists in this MutableInputTable. + * Returns true if the specified column exists in this InputTable. * * @param columnName the column to interrogate - * @return true if columnName exists in this MutableInputTable + * @return true if columnName exists in this InputTable */ default boolean hasColumn(String columnName) { return getTableDefinition().getColumnNames().contains(columnName); } - - /** - * Queries whether this MutableInputTable is editable in the current context. - * - * @return true if this MutableInputTable may be edited, false otherwise TODO (deephaven/deephaven-core/issues/255): - * Add AuthContext and whatever else is appropriate - */ - boolean canEdit(); } diff --git a/engine/table/src/main/java/io/deephaven/stream/TablePublisher.java b/engine/table/src/main/java/io/deephaven/stream/TablePublisher.java index 1eadf4fd88e..2eb9eb46f82 100644 --- a/engine/table/src/main/java/io/deephaven/stream/TablePublisher.java +++ b/engine/table/src/main/java/io/deephaven/stream/TablePublisher.java @@ -5,6 +5,8 @@ import io.deephaven.engine.table.TableDefinition; import io.deephaven.engine.table.impl.sources.ArrayBackedColumnSource; import io.deephaven.engine.updategraph.UpdateGraph; +import io.deephaven.engine.util.input.InputTableStatusListener; +import io.deephaven.engine.util.input.InputTableUpdater; import io.deephaven.util.annotations.TestUseOnly; import javax.annotation.Nullable; @@ -169,6 +171,29 @@ public boolean isAlive() { return adapter.isAlive(); } + /** + * Creates a new {@link Table#BLINK_TABLE_ATTRIBUTE blink table} with its {@link Table#getAttribute(String) + * attribute} {@value Table#INPUT_TABLE_ATTRIBUTE} set to an {@link InputTableUpdater} implementation based on + * {@code this}. The implementation's definition of "completed" with respect to {@link InputTableUpdater#add(Table)} + * and {@link InputTableUpdater#addAsync(Table, InputTableStatusListener)} matches the semantics provided by + * {@link #add(Table)} - that is, "completed" means that a snapshot of {@code newData} has been taken and handed + * off. The implementation does not implement {@link InputTableUpdater#delete(Table)} nor + * {@link InputTableUpdater#deleteAsync(Table, InputTableStatusListener)}. + * + *
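// Example (editor's illustrative sketch, not part of the patch): using the InputTableUpdater
// bridge that TablePublisher#inputTable() (below) attaches to its blink table. Constructing the
// TablePublisher is out of scope for this diff, so `publisher` and `newData` are assumed inputs;
// the class and method names are hypothetical.
import io.deephaven.engine.table.Table;
import io.deephaven.engine.util.input.InputTableUpdater;
import io.deephaven.stream.TablePublisher;

import java.io.IOException;

final class PublisherInputTableSketch {
    static void publishViaInputTable(TablePublisher publisher, Table newData) throws IOException {
        final Table blink = publisher.inputTable(); // may be null, per the javadoc below
        if (blink == null) {
            return;
        }
        final InputTableUpdater updater =
                (InputTableUpdater) blink.getAttribute(Table.INPUT_TABLE_ATTRIBUTE);
        // here "completed" means a snapshot of newData has been taken and handed off
        updater.add(newData);
    }
}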
<p>
    + * May return {@code null} if invoked more than once and the initial caller does not enforce strong reachability of + * the result. + * + * @return the input-table blink table + */ + public Table inputTable() { + final Table table = adapter.table(); + if (table == null) { + return null; + } + return table.withAttributes(Map.of(Table.INPUT_TABLE_ATTRIBUTE, publisher.inputTableUpdater())); + } + @TestUseOnly void runForUnitTests() { adapter.run(); diff --git a/engine/table/src/main/java/io/deephaven/stream/TableStreamPublisherImpl.java b/engine/table/src/main/java/io/deephaven/stream/TableStreamPublisherImpl.java index b987481ca23..3446b2828bb 100644 --- a/engine/table/src/main/java/io/deephaven/stream/TableStreamPublisherImpl.java +++ b/engine/table/src/main/java/io/deephaven/stream/TableStreamPublisherImpl.java @@ -17,11 +17,14 @@ import io.deephaven.engine.table.impl.remote.ConstructSnapshot.SnapshotFunction; import io.deephaven.engine.table.impl.remote.ConstructSnapshot.State; import io.deephaven.engine.table.impl.sources.ReinterpretUtils; +import io.deephaven.engine.util.input.InputTableStatusListener; +import io.deephaven.engine.util.input.InputTableUpdater; import io.deephaven.util.SafeCloseable; import io.deephaven.util.SafeCloseableArray; import org.jetbrains.annotations.NotNull; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Objects; import java.util.stream.Stream; @@ -94,6 +97,39 @@ public void shutdown() { } } + public InputTableUpdater inputTableUpdater() { + return new InputTableAdapter(); + } + + private class InputTableAdapter implements InputTableUpdater { + @Override + public TableDefinition getTableDefinition() { + return definition; + } + + @Override + public void add(Table newData) { + TableStreamPublisherImpl.this.add(newData); + } + + @Override + public void addAsync(Table newData, InputTableStatusListener listener) { + try { + TableStreamPublisherImpl.this.add(newData); + } catch (Throwable t) { + listener.onError(t); + return; + } + listener.onSuccess(); + } + + @Override + public List getKeyNames() { + return Collections.emptyList(); + } + + } + private class FillChunks implements SnapshotFunction { private final Table table; private final ColumnSource[] sources; diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/CapturingUpdateGraph.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/CapturingUpdateGraph.java index c70555e5a06..63e98610be9 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/CapturingUpdateGraph.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/CapturingUpdateGraph.java @@ -163,4 +163,9 @@ public void runWithinUnitTestCycle( final boolean satisfied) throws T { delegate.runWithinUnitTestCycle(runnable, satisfied); } + + @Override + public void stop() { + delegate.stop(); + } } diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/FuzzerTest.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/FuzzerTest.java index 5f321b33854..b179795b59d 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/FuzzerTest.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/FuzzerTest.java @@ -20,9 +20,9 @@ import io.deephaven.time.DateTimeUtils; import io.deephaven.engine.util.TableTools; import io.deephaven.engine.util.GroovyDeephavenSession; -import io.deephaven.engine.util.GroovyDeephavenSession.RunScripts; import io.deephaven.test.types.SerialTest; import 
io.deephaven.util.SafeCloseable; +import io.deephaven.util.thread.ThreadInitializationFactory; import org.jetbrains.annotations.Nullable; import org.junit.Assume; import org.junit.Rule; @@ -75,7 +75,10 @@ private GroovyDeephavenSession getGroovySession() throws IOException { private GroovyDeephavenSession getGroovySession(@Nullable Clock clock) throws IOException { final GroovyDeephavenSession session = new GroovyDeephavenSession( - ExecutionContext.getContext().getUpdateGraph(), NoOp.INSTANCE, RunScripts.serviceLoader()); + ExecutionContext.getContext().getUpdateGraph(), + ThreadInitializationFactory.NO_OP, + NoOp.INSTANCE, + GroovyDeephavenSession.RunScripts.serviceLoader()); session.getExecutionContext().open(); return session; } diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/PartitionedTableTest.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/PartitionedTableTest.java index 22c45cca47f..61c2a97f102 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/PartitionedTableTest.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/PartitionedTableTest.java @@ -555,7 +555,6 @@ public void testCrossDependencies() { .captureQueryScopeVars("pauseHelper2") .captureQueryLibrary() .captureQueryCompiler() - .captureUpdateGraph() .build(); final PartitionedTable result2 = sourceTable2.update("SlowItDown=pauseHelper.pauseValue(k)").partitionBy("USym2").transform( @@ -646,7 +645,6 @@ public void testCrossDependencies2() { .captureQueryScopeVars("pauseHelper") .captureQueryLibrary() .captureQueryCompiler() - .captureUpdateGraph() .build(); final PartitionedTable result2 = sourceTable2.partitionBy("USym2").transform(executionContext, t -> t.withAttributes(Map.of(BaseTable.TEST_SOURCE_TABLE_ATTRIBUTE, "true")) @@ -935,7 +933,6 @@ protected Table e() { .newQueryScope() .captureQueryCompiler() .captureQueryLibrary() - .captureUpdateGraph() .build().open()) { ExecutionContext.getContext().getQueryScope().putParam("queryScopeVar", "queryScopeValue"); @@ -996,7 +993,6 @@ public void testTransformDependencyCorrectness() { final ExecutionContext executionContext = ExecutionContext.newBuilder() .emptyQueryScope() .newQueryLibrary() - .captureUpdateGraph() .captureQueryCompiler() .build(); final PartitionedTable transformed = partitioned.transform(executionContext, tableIn -> { diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableTest.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableTest.java index 5648009a55f..58ef8ac5996 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableTest.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableTest.java @@ -3006,7 +3006,13 @@ public void testMemoize() { } public void testMemoizeConcurrent() { - final ExecutorService dualPool = Executors.newFixedThreadPool(2); + final ExecutorService dualPool = Executors.newFixedThreadPool(2, new ThreadFactory() { + @Override + public Thread newThread(Runnable runnable) { + ExecutionContext captured = ExecutionContext.getContext(); + return new Thread(() -> captured.apply(runnable)); + } + }); final boolean old = QueryTable.setMemoizeResults(true); try { @@ -3644,5 +3650,8 @@ public void removeSource(@NotNull Runnable updateSource) {} public void requestRefresh() { throw new UnsupportedOperationException(); } + + @Override + public void stop() {} } } diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableWhereTest.java 
b/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableWhereTest.java index 5d6621d0eb0..4d511bb2a80 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableWhereTest.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableWhereTest.java @@ -833,7 +833,7 @@ public void testInterFilterInterruption() { // we want to make sure we can push something through the thread pool and are not hogging it final CountDownLatch latch = new CountDownLatch(1); - OperationInitializationThreadPool.executorService().submit(latch::countDown); + ExecutionContext.getContext().getInitializer().submit(latch::countDown); waitForLatch(latch); assertEquals(0, fastCounter.invokes.get()); diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestConditionFilterGeneration.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestConditionFilterGeneration.java index 5c208d88db7..bc2f176fa87 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestConditionFilterGeneration.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestConditionFilterGeneration.java @@ -30,7 +30,6 @@ public void setUp() { .newQueryLibrary("DEFAULT") .captureQueryCompiler() .captureQueryScope() - .captureUpdateGraph() .build().open(); } diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestFormulaColumnGeneration.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestFormulaColumnGeneration.java index 1265f4ae1a1..bf3621f4f81 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestFormulaColumnGeneration.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestFormulaColumnGeneration.java @@ -49,7 +49,6 @@ public void setUp() { .newQueryLibrary("DEFAULT") .captureQueryCompiler() .captureQueryScope() - .captureUpdateGraph() .build().open(); } diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestFunctionGeneratedTableFactory.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestFunctionGeneratedTableFactory.java index d8cadb6b33f..b44c1d8d864 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestFunctionGeneratedTableFactory.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestFunctionGeneratedTableFactory.java @@ -20,7 +20,7 @@ import java.util.Random; -import static io.deephaven.engine.table.impl.util.TestKeyedArrayBackedMutableTable.handleDelayedRefresh; +import static io.deephaven.engine.table.impl.util.TestKeyedArrayBackedInputTable.handleDelayedRefresh; import static io.deephaven.engine.testutil.TstUtils.*; import static io.deephaven.engine.util.TableTools.*; @@ -68,13 +68,13 @@ public void testNoSources() { } public void testMultipleSources() throws Exception { - final AppendOnlyArrayBackedMutableTable source1 = AppendOnlyArrayBackedMutableTable.make(TableDefinition.of( + final AppendOnlyArrayBackedInputTable source1 = AppendOnlyArrayBackedInputTable.make(TableDefinition.of( ColumnDefinition.of("StringCol", Type.stringType()))); - final BaseArrayBackedMutableTable.ArrayBackedMutableInputTable inputTable1 = source1.makeHandler(); + final BaseArrayBackedInputTable.ArrayBackedInputTableUpdater inputTable1 = source1.makeUpdater(); - final AppendOnlyArrayBackedMutableTable source2 = AppendOnlyArrayBackedMutableTable.make(TableDefinition.of( + final AppendOnlyArrayBackedInputTable source2 = 
AppendOnlyArrayBackedInputTable.make(TableDefinition.of( ColumnDefinition.of("IntCol", Type.intType()))); - final BaseArrayBackedMutableTable.ArrayBackedMutableInputTable inputTable2 = source2.makeHandler(); + final BaseArrayBackedInputTable.ArrayBackedInputTableUpdater inputTable2 = source2.makeUpdater(); final Table functionBacked = FunctionGeneratedTableFactory.create(() -> source1.lastBy().naturalJoin(source2, ""), source1, source2); @@ -82,9 +82,9 @@ public void testMultipleSources() throws Exception { assertEquals(functionBacked.size(), 0); handleDelayedRefresh(() -> { - inputTable1.addAsync(newTable(stringCol("StringCol", "MyString")), false, t -> { + inputTable1.addAsync(newTable(stringCol("StringCol", "MyString")), t -> { }); - inputTable2.addAsync(newTable(intCol("IntCol", 12345)), false, t -> { + inputTable2.addAsync(newTable(intCol("IntCol", 12345)), t -> { }); }, source1, source2); diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestKeyedArrayBackedInputTable.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestKeyedArrayBackedInputTable.java new file mode 100644 index 00000000000..72d468c4e8b --- /dev/null +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestKeyedArrayBackedInputTable.java @@ -0,0 +1,202 @@ +/** + * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending + */ +package io.deephaven.engine.table.impl.util; + +import io.deephaven.UncheckedDeephavenException; +import io.deephaven.engine.context.ExecutionContext; +import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.impl.FailureListener; +import io.deephaven.engine.table.impl.TableUpdateValidator; +import io.deephaven.engine.testutil.ControlledUpdateGraph; +import io.deephaven.engine.testutil.junit4.EngineCleanup; +import io.deephaven.engine.util.TableTools; +import io.deephaven.engine.util.input.InputTableUpdater; +import io.deephaven.util.function.ThrowingRunnable; +import junit.framework.TestCase; +import org.junit.Rule; +import org.junit.Test; + +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.CountDownLatch; + +import static io.deephaven.engine.testutil.TstUtils.assertTableEquals; +import static io.deephaven.engine.util.TableTools.showWithRowSet; +import static io.deephaven.engine.util.TableTools.stringCol; + +public class TestKeyedArrayBackedInputTable { + + @Rule + public final EngineCleanup liveTableTestCase = new EngineCleanup(); + + @Test + public void testSimple() throws Exception { + final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), + stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso")); + + final KeyedArrayBackedInputTable kabut = KeyedArrayBackedInputTable.make(input, "Name"); + final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); + final Table validatorResult = validator.getResultTable(); + final FailureListener failureListener = new FailureListener(); + validatorResult.addUpdateListener(failureListener); + + assertTableEquals(input, kabut); + + final InputTableUpdater inputTableUpdater = (InputTableUpdater) kabut.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); + TestCase.assertNotNull(inputTableUpdater); + + final Table input2 = TableTools.newTable(stringCol("Name", "Randy"), stringCol("Employer", "USGS")); + + handleDelayedRefresh(() -> inputTableUpdater.add(input2), kabut); + assertTableEquals(TableTools.merge(input, input2), kabut); + + final Table input3 = 
TableTools.newTable(stringCol("Name", "Randy"), stringCol("Employer", "Tegridy")); + handleDelayedRefresh(() -> inputTableUpdater.add(input3), kabut); + assertTableEquals(TableTools.merge(input, input3), kabut); + + + final Table input4 = TableTools.newTable(stringCol("Name", "George"), stringCol("Employer", "Cogswell")); + handleDelayedRefresh(() -> inputTableUpdater.add(input4), kabut); + showWithRowSet(kabut); + + assertTableEquals(TableTools.merge(input, input3, input4).lastBy("Name"), kabut); + + final Table input5 = + TableTools.newTable(stringCol("Name", "George"), stringCol("Employer", "Spacely Sprockets")); + handleDelayedRefresh(() -> inputTableUpdater.add(input5), kabut); + showWithRowSet(kabut); + + assertTableEquals(TableTools.merge(input, input3, input4, input5).lastBy("Name"), kabut); + + final long sizeBeforeDelete = kabut.size(); + System.out.println("KABUT.rowSet before delete: " + kabut.getRowSet()); + final Table delete1 = TableTools.newTable(stringCol("Name", "Earl")); + handleDelayedRefresh(() -> inputTableUpdater.delete(delete1), kabut); + System.out.println("KABUT.rowSet after delete: " + kabut.getRowSet()); + final long sizeAfterDelete = kabut.size(); + TestCase.assertEquals(sizeBeforeDelete - 1, sizeAfterDelete); + + showWithRowSet(kabut); + + final Table expected = TableTools.merge( + TableTools.merge(input, input3, input4, input5).update("Deleted=false"), + delete1.update("Employer=(String)null", "Deleted=true")) + .lastBy("Name").where("Deleted=false").dropColumns("Deleted"); + showWithRowSet(expected); + + assertTableEquals(expected, kabut); + } + + @Test + public void testAppendOnly() throws Exception { + final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), + stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso")); + + final AppendOnlyArrayBackedInputTable aoabmt = AppendOnlyArrayBackedInputTable.make(input); + final TableUpdateValidator validator = TableUpdateValidator.make("aoabmt", aoabmt); + final Table validatorResult = validator.getResultTable(); + final FailureListener failureListener = new FailureListener(); + validatorResult.addUpdateListener(failureListener); + + assertTableEquals(input, aoabmt); + + final InputTableUpdater inputTableUpdater = + (InputTableUpdater) aoabmt.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); + TestCase.assertNotNull(inputTableUpdater); + + final Table input2 = + TableTools.newTable(stringCol("Name", "Randy", "George"), stringCol("Employer", "USGS", "Cogswell")); + + handleDelayedRefresh(() -> inputTableUpdater.add(input2), aoabmt); + assertTableEquals(TableTools.merge(input, input2), aoabmt); + } + + @Test + public void testFilteredAndSorted() throws Exception { + final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), + stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso")); + + final KeyedArrayBackedInputTable kabut = KeyedArrayBackedInputTable.make(input, "Name"); + final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); + final Table validatorResult = validator.getResultTable(); + final FailureListener failureListener = new FailureListener(); + validatorResult.addUpdateListener(failureListener); + + assertTableEquals(input, kabut); + + final Table fs = kabut.where("Name.length() == 4").sort("Name"); + + final InputTableUpdater inputTableUpdater = (InputTableUpdater) fs.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); + TestCase.assertNotNull(inputTableUpdater); + + final Table delete = 
TableTools.newTable(stringCol("Name", "Fred")); + + handleDelayedRefresh(() -> inputTableUpdater.delete(delete), kabut); + assertTableEquals(input.where("Name != `Fred`"), kabut); + } + + + @Test + public void testAddBack() throws Exception { + final Table input = TableTools.newTable(stringCol("Name"), stringCol("Employer")); + + final KeyedArrayBackedInputTable kabut = KeyedArrayBackedInputTable.make(input, "Name"); + final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); + final Table validatorResult = validator.getResultTable(); + final FailureListener failureListener = new FailureListener(); + validatorResult.addUpdateListener(failureListener); + + assertTableEquals(input, kabut); + + final InputTableUpdater inputTableUpdater = (InputTableUpdater) kabut.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); + TestCase.assertNotNull(inputTableUpdater); + + final Table input2 = + TableTools.newTable(stringCol("Name", "George"), stringCol("Employer", "Spacely Sprockets")); + + handleDelayedRefresh(() -> inputTableUpdater.add(input2), kabut); + assertTableEquals(input2, kabut); + + handleDelayedRefresh(() -> inputTableUpdater.delete(input2.view("Name")), kabut); + assertTableEquals(input, kabut); + + handleDelayedRefresh(() -> inputTableUpdater.add(input2), kabut); + assertTableEquals(input2, kabut); + } + + public static void handleDelayedRefresh(final ThrowingRunnable action, + final BaseArrayBackedInputTable... tables) throws Exception { + final Thread refreshThread; + final CountDownLatch gate = new CountDownLatch(tables.length); + + Arrays.stream(tables).forEach(t -> t.setOnPendingChange(gate::countDown)); + try { + final ControlledUpdateGraph updateGraph = ExecutionContext.getContext().getUpdateGraph().cast(); + refreshThread = new Thread(() -> { + // If this unexpected interruption happens, the test thread may hang in action.run() + // indefinitely. Best to hope it's already queued the pending action and proceed with run. + updateGraph.runWithinUnitTestCycle(() -> { + try { + gate.await(); + } catch (InterruptedException ignored) { + // If this unexpected interruption happens, the test thread may hang in action.run() + // indefinitely. Best to hope it's already queued the pending action and proceed with run. 
+ } + Arrays.stream(tables).forEach(BaseArrayBackedInputTable::run); + }); + }); + + refreshThread.start(); + action.run(); + } finally { + Arrays.stream(tables).forEach(t -> t.setOnPendingChange(null)); + } + try { + refreshThread.join(); + } catch (InterruptedException e) { + throw new UncheckedDeephavenException( + "Interrupted unexpectedly while waiting for run cycle to complete", e); + } + } +} diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestKeyedArrayBackedMutableTable.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestKeyedArrayBackedMutableTable.java deleted file mode 100644 index a211071cbe5..00000000000 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestKeyedArrayBackedMutableTable.java +++ /dev/null @@ -1,333 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.engine.table.impl.util; - -import io.deephaven.UncheckedDeephavenException; -import io.deephaven.base.SleepUtil; -import io.deephaven.datastructures.util.CollectionUtil; -import io.deephaven.engine.context.ExecutionContext; -import io.deephaven.engine.table.Table; -import io.deephaven.engine.table.impl.FailureListener; -import io.deephaven.engine.table.impl.TableUpdateValidator; -import io.deephaven.engine.testutil.ControlledUpdateGraph; -import io.deephaven.engine.testutil.junit4.EngineCleanup; -import io.deephaven.engine.util.TableTools; -import io.deephaven.engine.util.config.InputTableStatusListener; -import io.deephaven.engine.util.config.MutableInputTable; -import io.deephaven.util.function.ThrowingRunnable; -import junit.framework.TestCase; -import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; -import org.junit.Rule; -import org.junit.Test; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Map; -import java.util.concurrent.CountDownLatch; - -import static io.deephaven.engine.testutil.TstUtils.assertTableEquals; -import static io.deephaven.engine.util.TableTools.showWithRowSet; -import static io.deephaven.engine.util.TableTools.stringCol; - -public class TestKeyedArrayBackedMutableTable { - - @Rule - public final EngineCleanup liveTableTestCase = new EngineCleanup(); - - @Test - public void testSimple() throws Exception { - final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), - stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso")); - - final KeyedArrayBackedMutableTable kabut = KeyedArrayBackedMutableTable.make(input, "Name"); - final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); - final Table validatorResult = validator.getResultTable(); - final FailureListener failureListener = new FailureListener(); - validatorResult.addUpdateListener(failureListener); - - assertTableEquals(input, kabut); - - final MutableInputTable mutableInputTable = (MutableInputTable) kabut.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - TestCase.assertNotNull(mutableInputTable); - - final Table input2 = TableTools.newTable(stringCol("Name", "Randy"), stringCol("Employer", "USGS")); - - handleDelayedRefresh(() -> mutableInputTable.add(input2), kabut); - assertTableEquals(TableTools.merge(input, input2), kabut); - - final Table input3 = TableTools.newTable(stringCol("Name", "Randy"), stringCol("Employer", "Tegridy")); - handleDelayedRefresh(() -> mutableInputTable.add(input3), kabut); - assertTableEquals(TableTools.merge(input, input3), kabut); - - - final Table input4 = 
TableTools.newTable(stringCol("Name", "George"), stringCol("Employer", "Cogswell")); - handleDelayedRefresh(() -> mutableInputTable.add(input4), kabut); - showWithRowSet(kabut); - - assertTableEquals(TableTools.merge(input, input3, input4).lastBy("Name"), kabut); - - final Table input5 = - TableTools.newTable(stringCol("Name", "George"), stringCol("Employer", "Spacely Sprockets")); - handleDelayedRefresh(() -> mutableInputTable.add(input5), kabut); - showWithRowSet(kabut); - - assertTableEquals(TableTools.merge(input, input3, input4, input5).lastBy("Name"), kabut); - - final long sizeBeforeDelete = kabut.size(); - System.out.println("KABUT.rowSet before delete: " + kabut.getRowSet()); - final Table delete1 = TableTools.newTable(stringCol("Name", "Earl")); - handleDelayedRefresh(() -> mutableInputTable.delete(delete1), kabut); - System.out.println("KABUT.rowSet after delete: " + kabut.getRowSet()); - final long sizeAfterDelete = kabut.size(); - TestCase.assertEquals(sizeBeforeDelete - 1, sizeAfterDelete); - - showWithRowSet(kabut); - - final Table expected = TableTools.merge( - TableTools.merge(input, input3, input4, input5).update("Deleted=false"), - delete1.update("Employer=(String)null", "Deleted=true")) - .lastBy("Name").where("Deleted=false").dropColumns("Deleted"); - showWithRowSet(expected); - - assertTableEquals(expected, kabut); - } - - @Test - public void testAppendOnly() throws Exception { - final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), - stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso")); - - final AppendOnlyArrayBackedMutableTable aoabmt = AppendOnlyArrayBackedMutableTable.make(input); - final TableUpdateValidator validator = TableUpdateValidator.make("aoabmt", aoabmt); - final Table validatorResult = validator.getResultTable(); - final FailureListener failureListener = new FailureListener(); - validatorResult.addUpdateListener(failureListener); - - assertTableEquals(input, aoabmt); - - final MutableInputTable mutableInputTable = - (MutableInputTable) aoabmt.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - TestCase.assertNotNull(mutableInputTable); - - final Table input2 = - TableTools.newTable(stringCol("Name", "Randy", "George"), stringCol("Employer", "USGS", "Cogswell")); - - handleDelayedRefresh(() -> mutableInputTable.add(input2), aoabmt); - assertTableEquals(TableTools.merge(input, input2), aoabmt); - } - - @Test - public void testFilteredAndSorted() throws Exception { - final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), - stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso")); - - final KeyedArrayBackedMutableTable kabut = KeyedArrayBackedMutableTable.make(input, "Name"); - final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); - final Table validatorResult = validator.getResultTable(); - final FailureListener failureListener = new FailureListener(); - validatorResult.addUpdateListener(failureListener); - - assertTableEquals(input, kabut); - - final Table fs = kabut.where("Name.length() == 4").sort("Name"); - - final MutableInputTable mutableInputTable = (MutableInputTable) fs.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - TestCase.assertNotNull(mutableInputTable); - - final Table delete = TableTools.newTable(stringCol("Name", "Fred")); - - handleDelayedRefresh(() -> mutableInputTable.delete(delete), kabut); - assertTableEquals(input.where("Name != `Fred`"), kabut); - } - - @Test - public void testAddRows() throws Throwable { 
- final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), - stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso")); - - final KeyedArrayBackedMutableTable kabut = KeyedArrayBackedMutableTable.make(input, "Name"); - final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); - final Table validatorResult = validator.getResultTable(); - final FailureListener failureListener = new FailureListener(); - validatorResult.addUpdateListener(failureListener); - - assertTableEquals(input, kabut); - - final MutableInputTable mutableInputTable = (MutableInputTable) kabut.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - TestCase.assertNotNull(mutableInputTable); - - final Table input2 = TableTools.newTable(stringCol("Name", "Randy"), stringCol("Employer", "USGS")); - - final Map randyMap = - CollectionUtil.mapFromArray(String.class, Object.class, "Name", "Randy", "Employer", "USGS"); - final TestStatusListener listener = new TestStatusListener(); - mutableInputTable.addRow(randyMap, true, listener); - SleepUtil.sleep(100); - listener.assertIncomplete(); - final ControlledUpdateGraph updateGraph = ExecutionContext.getContext().getUpdateGraph().cast(); - updateGraph.runWithinUnitTestCycle(kabut::run); - assertTableEquals(TableTools.merge(input, input2), kabut); - listener.waitForCompletion(); - listener.assertSuccess(); - - // TODO: should we throw the exception from the initial palce, should we defer edit checking to the UGP which - // would make it consistent, but also slower to produce errors and uglier for reporting? - final TestStatusListener listener2 = new TestStatusListener(); - final Map randyMap2 = - CollectionUtil.mapFromArray(String.class, Object.class, "Name", "Randy", "Employer", "Tegridy"); - mutableInputTable.addRow(randyMap2, false, listener2); - SleepUtil.sleep(100); - listener2.assertIncomplete(); - updateGraph.runWithinUnitTestCycle(kabut::run); - assertTableEquals(TableTools.merge(input, input2), kabut); - listener2.waitForCompletion(); - listener2.assertFailure(IllegalArgumentException.class, "Can not edit keys Randy"); - } - - @Test - public void testAddBack() throws Exception { - final Table input = TableTools.newTable(stringCol("Name"), stringCol("Employer")); - - final KeyedArrayBackedMutableTable kabut = KeyedArrayBackedMutableTable.make(input, "Name"); - final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); - final Table validatorResult = validator.getResultTable(); - final FailureListener failureListener = new FailureListener(); - validatorResult.addUpdateListener(failureListener); - - assertTableEquals(input, kabut); - - final MutableInputTable mutableInputTable = (MutableInputTable) kabut.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - TestCase.assertNotNull(mutableInputTable); - - final Table input2 = - TableTools.newTable(stringCol("Name", "George"), stringCol("Employer", "Spacely Sprockets")); - - handleDelayedRefresh(() -> mutableInputTable.add(input2), kabut); - assertTableEquals(input2, kabut); - - handleDelayedRefresh(() -> mutableInputTable.delete(input2.view("Name")), kabut); - assertTableEquals(input, kabut); - - handleDelayedRefresh(() -> mutableInputTable.add(input2), kabut); - assertTableEquals(input2, kabut); - } - - @Test - public void testSetRows() { - final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), - stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso"), - stringCol("Spouse", "Wilma", "Jane", "Fran")); 
- - final KeyedArrayBackedMutableTable kabut = KeyedArrayBackedMutableTable.make(input, "Name"); - final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); - final Table validatorResult = validator.getResultTable(); - final FailureListener failureListener = new FailureListener(); - validatorResult.addUpdateListener(failureListener); - - assertTableEquals(input, kabut); - - final MutableInputTable mutableInputTable = (MutableInputTable) kabut.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - TestCase.assertNotNull(mutableInputTable); - - final Table defaultValues = input.where("Name=`George`"); - final Table ex2 = TableTools.newTable(stringCol("Name", "George"), stringCol("Employer", "Cogswell"), - stringCol("Spouse", "Jane")); - - final Map cogMap = - CollectionUtil.mapFromArray(String.class, Object.class, "Name", "George", "Employer", "Cogswell"); - mutableInputTable.setRow(defaultValues, 0, cogMap); - SleepUtil.sleep(100); - final ControlledUpdateGraph updateGraph = ExecutionContext.getContext().getUpdateGraph().cast(); - updateGraph.runWithinUnitTestCycle(kabut::run); - assertTableEquals(TableTools.merge(input, ex2).lastBy("Name"), kabut); - } - - private static class TestStatusListener implements InputTableStatusListener { - boolean success = false; - Throwable error = null; - - @Override - public synchronized void onError(Throwable t) { - if (success || error != null) { - throw new IllegalStateException("Can not complete listener twice!"); - } - error = t; - notifyAll(); - } - - @Override - public synchronized void onSuccess() { - if (success || error != null) { - throw new IllegalStateException("Can not complete listener twice!"); - } - success = true; - notifyAll(); - } - - private synchronized void assertIncomplete() { - TestCase.assertFalse(success); - TestCase.assertNull(error); - } - - private void waitForCompletion() throws InterruptedException { - synchronized (this) { - while (!success && error == null) { - wait(); - } - } - } - - private synchronized void assertSuccess() throws Throwable { - if (!success) { - throw error; - } - } - - private synchronized void assertFailure(@NotNull final Class errorClass, - @Nullable final String errorMessage) { - TestCase.assertFalse(success); - TestCase.assertNotNull(error); - TestCase.assertTrue(errorClass.isAssignableFrom(error.getClass())); - if (errorMessage != null) { - TestCase.assertEquals(errorMessage, error.getMessage()); - } - } - } - - public static void handleDelayedRefresh(final ThrowingRunnable action, - final BaseArrayBackedMutableTable... tables) throws Exception { - final Thread refreshThread; - final CountDownLatch gate = new CountDownLatch(tables.length); - - Arrays.stream(tables).forEach(t -> t.setOnPendingChange(gate::countDown)); - try { - final ControlledUpdateGraph updateGraph = ExecutionContext.getContext().getUpdateGraph().cast(); - refreshThread = new Thread(() -> { - // If this unexpected interruption happens, the test thread may hang in action.run() - // indefinitely. Best to hope it's already queued the pending action and proceed with run. - updateGraph.runWithinUnitTestCycle(() -> { - try { - gate.await(); - } catch (InterruptedException ignored) { - // If this unexpected interruption happens, the test thread may hang in action.run() - // indefinitely. Best to hope it's already queued the pending action and proceed with run. 
- } - Arrays.stream(tables).forEach(BaseArrayBackedMutableTable::run); - }); - }); - - refreshThread.start(); - action.run(); - } finally { - Arrays.stream(tables).forEach(t -> t.setOnPendingChange(null)); - } - try { - refreshThread.join(); - } catch (InterruptedException e) { - throw new UncheckedDeephavenException( - "Interrupted unexpectedly while waiting for run cycle to complete", e); - } - } -} diff --git a/engine/table/src/test/java/io/deephaven/engine/updategraph/impl/TestEventDrivenUpdateGraph.java b/engine/table/src/test/java/io/deephaven/engine/updategraph/impl/TestEventDrivenUpdateGraph.java new file mode 100644 index 00000000000..ed398ca15c3 --- /dev/null +++ b/engine/table/src/test/java/io/deephaven/engine/updategraph/impl/TestEventDrivenUpdateGraph.java @@ -0,0 +1,266 @@ +package io.deephaven.engine.updategraph.impl; + +import io.deephaven.api.agg.Aggregation; +import io.deephaven.configuration.DataDir; +import io.deephaven.engine.context.ExecutionContext; +import io.deephaven.engine.context.QueryCompiler; +import io.deephaven.engine.rowset.RowSet; +import io.deephaven.engine.rowset.RowSetFactory; +import io.deephaven.engine.rowset.TrackingRowSet; +import io.deephaven.engine.table.ColumnSource; +import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.impl.QueryTable; +import io.deephaven.engine.table.impl.perf.UpdatePerformanceTracker; +import io.deephaven.engine.table.impl.sources.LongSingleValueSource; +import io.deephaven.engine.testutil.TstUtils; +import io.deephaven.engine.updategraph.UpdateGraph; +import io.deephaven.engine.util.TableTools; +import io.deephaven.util.SafeCloseable; +import io.deephaven.util.annotations.ReflexiveUse; +import junit.framework.TestCase; +import org.junit.*; + +import java.nio.file.Path; +import java.util.Collections; + +import static io.deephaven.engine.context.TestExecutionContext.OPERATION_INITIALIZATION; +import static io.deephaven.engine.util.TableTools.*; +import static org.junit.Assert.assertEquals; + +public class TestEventDrivenUpdateGraph { + EventDrivenUpdateGraph defaultUpdateGraph; + + @Before + public void before() { + // the default update is necessary for the update performance tracker + clearUpdateGraphInstances(); + UpdatePerformanceTracker.resetForUnitTests(); + defaultUpdateGraph = EventDrivenUpdateGraph.newBuilder(PeriodicUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME).build(); + } + + @After + public void after() { + clearUpdateGraphInstances(); + UpdatePerformanceTracker.resetForUnitTests(); + } + + private static void clearUpdateGraphInstances() { + BaseUpdateGraph.removeInstance(PeriodicUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME); + BaseUpdateGraph.removeInstance("TestEDUG"); + BaseUpdateGraph.removeInstance("TestEDUG1"); + BaseUpdateGraph.removeInstance("TestEDUG2"); + } + + /** + * QueryTable that adds one row per cycle. 
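For orientation on the new test fixture above: named update graphs are held in a process-wide registry, which is why clearUpdateGraphInstances() runs before and after every test. A minimal lifecycle sketch under that assumption (the graph name is illustrative):

```java
// Sketch only: mirrors what the @Before/@After fixture above manages.
static void exampleCycle() {
    final EventDrivenUpdateGraph graph = EventDrivenUpdateGraph.newBuilder("Example").build();
    try {
        // Each requestRefresh() runs one update cycle, during which sources
        // registered via addSource(...) have their run() methods invoked.
        graph.requestRefresh();
    } finally {
        // Drop the named instance so a later builder call starts clean.
        BaseUpdateGraph.removeInstance("Example");
    }
}
```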
+ */ + final static class SourceThatRefreshes extends QueryTable implements Runnable { + public SourceThatRefreshes(UpdateGraph updateGraph) { + super(RowSetFactory.empty().toTracking(), Collections.emptyMap()); + setAttribute(Table.APPEND_ONLY_TABLE_ATTRIBUTE, Boolean.TRUE); + updateGraph.addSource(this); + } + + @Override + public void run() { + final RowSet added; + if (getRowSet().isEmpty()) { + added = RowSetFactory.fromKeys(0); + } else { + added = RowSetFactory.fromKeys(getRowSet().lastRowKey() + 1); + } + getRowSet().writableCast().insert(added); + notifyListeners(added, RowSetFactory.empty(), RowSetFactory.empty()); + } + } + + /** + * QueryTable that modifies its single row on each cycle. + */ + final static class SourceThatModifiesItself extends QueryTable implements Runnable { + final LongSingleValueSource svcs; + + public SourceThatModifiesItself(UpdateGraph updateGraph) { + super(RowSetFactory.fromKeys(42).toTracking(), Collections.singletonMap("V", new LongSingleValueSource())); + svcs = (LongSingleValueSource) getColumnSource("V", long.class); + svcs.startTrackingPrevValues(); + updateGraph.addSource(this); + svcs.set(0L); + } + + @Override + public void run() { + svcs.set(svcs.getLong(0) + 1); + notifyListeners(RowSetFactory.empty(), RowSetFactory.empty(), getRowSet().copy()); + } + } + + private QueryCompiler compilerForUnitTests() { + final Path queryCompilerDir = DataDir.get() + .resolve("io.deephaven.engine.updategraph.impl.TestEventDrivenUpdateGraph.compilerForUnitTests"); + + return QueryCompiler.create(queryCompilerDir.toFile(), getClass().getClassLoader()); + } + + @Test + public void testSimpleAdd() { + final EventDrivenUpdateGraph eventDrivenUpdateGraph = EventDrivenUpdateGraph.newBuilder("TestEDUG").build(); + + final ExecutionContext context = ExecutionContext.newBuilder() + .setUpdateGraph(eventDrivenUpdateGraph) + .emptyQueryScope() + .newQueryLibrary() + .setOperationInitializer(OPERATION_INITIALIZATION) + .setQueryCompiler(compilerForUnitTests()) + .build(); + try (final SafeCloseable ignored = context.open()) { + final SourceThatRefreshes sourceThatRefreshes = new SourceThatRefreshes(eventDrivenUpdateGraph); + final Table updated = + eventDrivenUpdateGraph.sharedLock().computeLocked(() -> sourceThatRefreshes.update("X=i")); + + int steps = 0; + do { + TestCase.assertEquals(steps, updated.size()); + eventDrivenUpdateGraph.requestRefresh(); + } while (steps++ < 100); + TestCase.assertEquals(steps, updated.size()); + } + } + + @Test + public void testSimpleModify() { + final EventDrivenUpdateGraph eventDrivenUpdateGraph = new EventDrivenUpdateGraph.Builder("TestEDUG").build(); + + final ExecutionContext context = ExecutionContext.newBuilder() + .setUpdateGraph(eventDrivenUpdateGraph) + .emptyQueryScope() + .newQueryLibrary() + .setOperationInitializer(OPERATION_INITIALIZATION) + .setQueryCompiler(compilerForUnitTests()) + .build(); + try (final SafeCloseable ignored = context.open()) { + final SourceThatModifiesItself modifySource = new SourceThatModifiesItself(eventDrivenUpdateGraph); + final Table updated = + eventDrivenUpdateGraph.sharedLock().computeLocked(() -> modifySource.update("X=2 * V")); + + final ColumnSource xcs = updated.getColumnSource("X"); + + int steps = 0; + do { + TestCase.assertEquals(1, updated.size()); + eventDrivenUpdateGraph.requestRefresh(); + + TableTools.showWithRowSet(modifySource); + + final TrackingRowSet rowSet = updated.getRowSet(); + System.out.println("Step = " + steps); + final long xv = xcs.getLong(rowSet.firstRowKey()); + 
TestCase.assertEquals(2L * (steps + 1), xv); + } while (steps++ < 100); + TestCase.assertEquals(1, updated.size()); + } + } + + @Test + public void testUpdatePerformanceTracker() { + final Table upt = UpdatePerformanceTracker.getQueryTable(); + + + final EventDrivenUpdateGraph eventDrivenUpdateGraph1 = EventDrivenUpdateGraph.newBuilder("TestEDUG1").build(); + final EventDrivenUpdateGraph eventDrivenUpdateGraph2 = EventDrivenUpdateGraph.newBuilder("TestEDUG2").build(); + + // first empty flush + eventDrivenUpdateGraph1.requestRefresh(); + eventDrivenUpdateGraph2.requestRefresh(); + + final long start = System.currentTimeMillis(); + + final int count1 = 10; + final int count2 = 20; + final int time1 = 10; + final int time2 = 5; + + // the work we care about + final Object ref1 = doWork(eventDrivenUpdateGraph1, time1, count1 - 1); + final Object ref2 = doWork(eventDrivenUpdateGraph2, time2, count2 - 1); + + // force a flush + eventDrivenUpdateGraph1.resetNextFlushTime(); + eventDrivenUpdateGraph2.resetNextFlushTime(); + eventDrivenUpdateGraph1.requestRefresh(); + eventDrivenUpdateGraph2.requestRefresh(); + + defaultUpdateGraph.requestRefresh(); + + final Table inRange; + final ExecutionContext context = ExecutionContext.newBuilder() + .setUpdateGraph(defaultUpdateGraph) + .emptyQueryScope() + .newQueryLibrary() + .setQueryCompiler(compilerForUnitTests()) + .setOperationInitializer(OPERATION_INITIALIZATION) + .build(); + try (final SafeCloseable ignored = context.open()) { + final Table uptAgged = upt.where("!isNull(EntryId)").aggBy( + Aggregation.AggSum("UsageNanos", "InvocationCount", "RowsModified"), + "UpdateGraph", "EntryId"); + assertEquals(defaultUpdateGraph, uptAgged.getUpdateGraph()); + inRange = defaultUpdateGraph.sharedLock().computeLocked(() -> uptAgged.update( + "EIUExpectedMillis = UpdateGraph==`TestEDUG1` ? 
" + time1 + " : " + time2, + "TotalExpectedTime=InvocationCount * EIUExpectedMillis * 1_000_000L", + "InRange=(UsageNanos > 0.9 * TotalExpectedTime) && (UsageNanos < 1.5 * TotalExpectedTime)")); + } + TableTools.show(inRange); + + final Table compare = + inRange.dropColumns("EntryId", "UsageNanos", "EIUExpectedMillis", "TotalExpectedTime"); + TableTools.show(compare); + + final Table expect = TableTools.newTable(stringCol("UpdateGraph", "TestEDUG1", "TestEDUG2"), + longCol("InvocationCount", count1, count2), + longCol("RowsModified", count1, count2), booleanCol("InRange", true, true)); + TstUtils.assertTableEquals(expect, compare); + } + + @ReflexiveUse(referrers = "TestEventDrivenUpdateGraph") + static public T sleepValue(long duration, T retVal) { + final Object blech = new Object(); + // noinspection SynchronizationOnLocalVariableOrMethodParameter + synchronized (blech) { + try { + final long milliSeconds = duration / 1_000_000L; + final int nanos = (int) (duration % 1_000_000L); + blech.wait(milliSeconds, nanos); + } catch (InterruptedException ignored) { + } + } + return retVal; + } + + private Object doWork(final EventDrivenUpdateGraph eventDrivenUpdateGraph, final int durationMillis, + final int steps) { + final ExecutionContext context = ExecutionContext.newBuilder() + .setUpdateGraph(eventDrivenUpdateGraph) + .emptyQueryScope() + .newQueryLibrary() + .setQueryCompiler(compilerForUnitTests()) + .setOperationInitializer(OPERATION_INITIALIZATION) + .build(); + try (final SafeCloseable ignored = context.open()) { + final SourceThatModifiesItself modifySource = new SourceThatModifiesItself(eventDrivenUpdateGraph); + final Table updated = + eventDrivenUpdateGraph.sharedLock().computeLocked(() -> modifySource.update("X=" + + getClass().getName() + ".sleepValue(" + (1000L * 1000L * durationMillis) + ", 2 * V)")); + + int step = 0; + do { + TestCase.assertEquals(1, updated.size()); + eventDrivenUpdateGraph.requestRefresh(); + } while (++step < steps); + TestCase.assertEquals(1, updated.size()); + + // so that we do not lose the reference + return updated; + } + } +} diff --git a/engine/table/src/test/java/io/deephaven/engine/util/file/TestTrackedFileHandleFactory.java b/engine/table/src/test/java/io/deephaven/engine/util/file/TestTrackedFileHandleFactory.java index 8d28daf9d7c..d1f832086c5 100644 --- a/engine/table/src/test/java/io/deephaven/engine/util/file/TestTrackedFileHandleFactory.java +++ b/engine/table/src/test/java/io/deephaven/engine/util/file/TestTrackedFileHandleFactory.java @@ -5,8 +5,6 @@ import io.deephaven.base.testing.BaseCachedJMockTestCase; import io.deephaven.base.verify.RequirementFailure; -import io.deephaven.io.sched.Scheduler; -import io.deephaven.io.sched.TimedJob; import junit.framework.TestCase; import org.junit.After; import org.junit.Before; @@ -15,6 +13,8 @@ import java.io.File; import java.io.IOException; import java.nio.file.Files; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; public class TestTrackedFileHandleFactory extends BaseCachedJMockTestCase { @@ -23,7 +23,7 @@ public class TestTrackedFileHandleFactory extends BaseCachedJMockTestCase { private static final double TARGET_USAGE_RATIO = 0.9; private static final int TARGET_USAGE_THRESHOLD = 90; - private Scheduler scheduler; + private ScheduledExecutorService scheduler; private TrackedFileHandleFactory FHCUT; @@ -33,13 +33,15 @@ public void setUp() throws Exception { FILE = Files.createTempFile(TestTrackedFileHandleFactory.class.getName(), 
".dat").toFile(); - scheduler = mock(Scheduler.class); + scheduler = mock(ScheduledExecutorService.class); checking(new Expectations() { { - one(scheduler).currentTimeMillis(); - will(returnValue(0L)); - one(scheduler).installJob(with(any(TimedJob.class)), with(equal(60000L))); + one(scheduler).scheduleAtFixedRate( + with(any(Runnable.class)), + with(equal(60000L)), + with(equal(60000L)), + with(equal(TimeUnit.MILLISECONDS))); } }); diff --git a/engine/table/src/test/java/io/deephaven/engine/util/scripts/TestGroovyDeephavenSession.java b/engine/table/src/test/java/io/deephaven/engine/util/scripts/TestGroovyDeephavenSession.java index 572d71da8c9..e1074176a6e 100644 --- a/engine/table/src/test/java/io/deephaven/engine/util/scripts/TestGroovyDeephavenSession.java +++ b/engine/table/src/test/java/io/deephaven/engine/util/scripts/TestGroovyDeephavenSession.java @@ -16,6 +16,7 @@ import io.deephaven.function.Sort; import io.deephaven.plugin.type.ObjectTypeLookup.NoOp; import io.deephaven.util.SafeCloseable; +import io.deephaven.util.thread.ThreadInitializationFactory; import org.apache.commons.lang3.mutable.MutableInt; import org.junit.After; import org.junit.Assert; @@ -48,7 +49,7 @@ public void setup() throws IOException { livenessScope = new LivenessScope(); LivenessScopeStack.push(livenessScope); session = new GroovyDeephavenSession( - ExecutionContext.getContext().getUpdateGraph(), NoOp.INSTANCE, null, + ExecutionContext.getContext().getUpdateGraph(), ThreadInitializationFactory.NO_OP, NoOp.INSTANCE, null, GroovyDeephavenSession.RunScripts.none()); executionContext = session.getExecutionContext().open(); } diff --git a/engine/test-utils/build.gradle b/engine/test-utils/build.gradle index b11d14731e2..743e05a1dc5 100644 --- a/engine/test-utils/build.gradle +++ b/engine/test-utils/build.gradle @@ -13,7 +13,6 @@ dependencies { implementation project(':engine-tuple') implementation project(':base-test-utils') implementation project(':engine-rowset-test-utils') - implementation project(':FishUtil') implementation project(':extensions-source-support') implementation depCommonsLang3 diff --git a/engine/test-utils/src/main/java/io/deephaven/engine/context/TestExecutionContext.java b/engine/test-utils/src/main/java/io/deephaven/engine/context/TestExecutionContext.java index 2c64edea940..a0ee07d8112 100644 --- a/engine/test-utils/src/main/java/io/deephaven/engine/context/TestExecutionContext.java +++ b/engine/test-utils/src/main/java/io/deephaven/engine/context/TestExecutionContext.java @@ -1,9 +1,15 @@ package io.deephaven.engine.context; import io.deephaven.auth.AuthContext; +import io.deephaven.engine.table.impl.OperationInitializationThreadPool; import io.deephaven.engine.testutil.ControlledUpdateGraph; +import io.deephaven.util.thread.ThreadInitializationFactory; public class TestExecutionContext { + public static final ControlledUpdateGraph UPDATE_GRAPH = new ControlledUpdateGraph(); + + public static final OperationInitializationThreadPool OPERATION_INITIALIZATION = + new OperationInitializationThreadPool(ThreadInitializationFactory.NO_OP); public static ExecutionContext createForUnitTests() { return new ExecutionContext.Builder(new AuthContext.SuperUser()) @@ -11,7 +17,8 @@ public static ExecutionContext createForUnitTests() { .newQueryScope() .newQueryLibrary() .setQueryCompiler(QueryCompiler.createForUnitTests()) - .setUpdateGraph(ControlledUpdateGraph.INSTANCE) + .setUpdateGraph(UPDATE_GRAPH) + .setOperationInitializer(OPERATION_INITIALIZATION) .build(); } } diff --git 
a/engine/test-utils/src/main/java/io/deephaven/engine/testutil/ControlledUpdateGraph.java b/engine/test-utils/src/main/java/io/deephaven/engine/testutil/ControlledUpdateGraph.java index a24c1778486..0ca0d815015 100644 --- a/engine/test-utils/src/main/java/io/deephaven/engine/testutil/ControlledUpdateGraph.java +++ b/engine/test-utils/src/main/java/io/deephaven/engine/testutil/ControlledUpdateGraph.java @@ -1,13 +1,11 @@ package io.deephaven.engine.testutil; import io.deephaven.engine.updategraph.impl.PeriodicUpdateGraph; +import io.deephaven.util.thread.ThreadInitializationFactory; // TODO (deephaven-core#3886): Extract test functionality from PeriodicUpdateGraph public class ControlledUpdateGraph extends PeriodicUpdateGraph { - - public static final ControlledUpdateGraph INSTANCE = new ControlledUpdateGraph(); - - private ControlledUpdateGraph() { - super("TEST", true, 1000, 25, -1); + public ControlledUpdateGraph() { + super("TEST", true, 1000, 25, -1, ThreadInitializationFactory.NO_OP); } } diff --git a/engine/time/build.gradle b/engine/time/build.gradle index 697af28e26f..3dac58f277c 100644 --- a/engine/time/build.gradle +++ b/engine/time/build.gradle @@ -16,7 +16,6 @@ dependencies { implementation project(':engine-function') implementation project(':Configuration') implementation project(':log-factory') - implementation project(':FishUtil') implementation depJdom2 testImplementation TestTools.projectDependency(project, 'Base') diff --git a/engine/updategraph/build.gradle b/engine/updategraph/build.gradle index 1194f5ed0a9..69063b30bd0 100644 --- a/engine/updategraph/build.gradle +++ b/engine/updategraph/build.gradle @@ -12,8 +12,6 @@ dependencies { implementation project(':hotspot') implementation project(':log-factory') implementation project(':Configuration') - implementation project(':Net') - implementation project(':FishUtil') implementation depCommonsLang3 compileOnly 'com.google.code.findbugs:jsr305:3.0.2' diff --git a/engine/updategraph/src/main/java/io/deephaven/engine/liveness/Liveness.java b/engine/updategraph/src/main/java/io/deephaven/engine/liveness/Liveness.java index 9b6476ff4e8..f00655d5e14 100644 --- a/engine/updategraph/src/main/java/io/deephaven/engine/liveness/Liveness.java +++ b/engine/updategraph/src/main/java/io/deephaven/engine/liveness/Liveness.java @@ -5,14 +5,15 @@ import io.deephaven.configuration.Configuration; import io.deephaven.io.logger.Logger; -import io.deephaven.io.sched.Scheduler; -import io.deephaven.io.sched.TimedJob; import io.deephaven.engine.updategraph.DynamicNode; import io.deephaven.util.HeapDump; import io.deephaven.internal.log.LoggerFactory; import org.jetbrains.annotations.NotNull; import java.io.IOException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; /** * Utility class for liveness-related instrumentation. @@ -77,14 +78,18 @@ private static void maybeLogOutstandingCount() { intervalLastOutstandingCount = intervalMinOutstandingCount = intervalMaxOutstandingCount = outstandingCount; } - public static void scheduleCountReport(@NotNull final Scheduler scheduler) { - scheduler.installJob(new TimedJob() { - @Override - public final void timedOut() { - maybeLogOutstandingCount(); - scheduler.installJob(this, scheduler.currentTimeMillis() + OUTSTANDING_COUNT_LOG_INTERVAL_MILLIS); - } - }, 0L); + /** + * Schedule a job to log the count of known outstanding {@link LivenessReferent LivenessReferents}. 
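The Scheduler-based job removed above rescheduled itself internally; the replacement signature just below hands lifecycle control to the caller. A hedged usage sketch, with the executor choice and variable names being illustrative:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;

// Sketch only: the caller now owns both the executor and the returned future.
static void exampleCountReport() {
    final ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();
    final ScheduledFuture<?> report = Liveness.scheduleCountReport(exec);
    // ... later, on shutdown, stop the periodic logging and the executor:
    report.cancel(false);
    exec.shutdownNow();
}
```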
+ * + * @param scheduler The {@link ScheduledExecutorService} to use + * @return The {@link ScheduledFuture} for the scheduled job + */ + public static ScheduledFuture scheduleCountReport(@NotNull final ScheduledExecutorService scheduler) { + return scheduler.scheduleAtFixedRate( + Liveness::maybeLogOutstandingCount, + 0L, + OUTSTANDING_COUNT_LOG_INTERVAL_MILLIS, + TimeUnit.MILLISECONDS); } private Liveness() {} diff --git a/engine/updategraph/src/main/java/io/deephaven/engine/updategraph/OperationInitializer.java b/engine/updategraph/src/main/java/io/deephaven/engine/updategraph/OperationInitializer.java new file mode 100644 index 00000000000..d5d4337e292 --- /dev/null +++ b/engine/updategraph/src/main/java/io/deephaven/engine/updategraph/OperationInitializer.java @@ -0,0 +1,42 @@ +package io.deephaven.engine.updategraph; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Future; + +/** + * Provides guidance for initialization operations on how they can parallelize. + */ +public interface OperationInitializer { + OperationInitializer NON_PARALLELIZABLE = new OperationInitializer() { + @Override + public boolean canParallelize() { + return false; + } + + @Override + public Future submit(Runnable runnable) { + runnable.run(); + return CompletableFuture.completedFuture(null); + } + + @Override + public int parallelismFactor() { + return 1; + } + }; + + /** + * Whether the current thread can parallelize operations using this OperationInitialization. + */ + boolean canParallelize(); + + /** + * Submits a task to run in this thread pool. + */ + Future submit(Runnable runnable); + + /** + * Number of threads that are potentially available. + */ + int parallelismFactor(); +} diff --git a/engine/updategraph/src/main/java/io/deephaven/engine/updategraph/UpdateGraph.java b/engine/updategraph/src/main/java/io/deephaven/engine/updategraph/UpdateGraph.java index e0227c50a8c..67cd6bf3add 100644 --- a/engine/updategraph/src/main/java/io/deephaven/engine/updategraph/UpdateGraph.java +++ b/engine/updategraph/src/main/java/io/deephaven/engine/updategraph/UpdateGraph.java @@ -179,12 +179,18 @@ default void checkInitiateSerialTableOperation() { return; } throw new IllegalStateException(String.format( - "May not initiate serial table operations: exclusiveLockHeld=%s, sharedLockHeld=%s, currentThreadProcessesUpdates=%s", + "May not initiate serial table operations for update graph %s: exclusiveLockHeld=%s, sharedLockHeld=%s, currentThreadProcessesUpdates=%s", + getName(), exclusiveLock().isHeldByCurrentThread(), sharedLock().isHeldByCurrentThread(), currentThreadProcessesUpdates())); } + /** + * Attempt to stop this update graph, and cease processing further notifications. 
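Only the NON_PARALLELIZABLE implementation of OperationInitializer ships inline above; the pool-backed OperationInitializationThreadPool referenced elsewhere in this diff is the production counterpart. As a sketch only, a simplified pool-backed implementation might look like the following (this is not the engine's implementation, which is also careful about re-entrant submissions from its own worker threads):

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Sketch only: delegates to a fixed pool sized by available processors.
final class PooledOperationInitializer implements OperationInitializer {
    private final int threads = Runtime.getRuntime().availableProcessors();
    private final ExecutorService pool = Executors.newFixedThreadPool(threads);

    @Override
    public boolean canParallelize() {
        // Simplification: should also return false on this pool's own threads.
        return threads > 1;
    }

    @Override
    public Future<?> submit(final Runnable runnable) {
        return pool.submit(runnable);
    }

    @Override
    public int parallelismFactor() {
        return threads;
    }
}
```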
+ */ + void stop(); + // endregion thread control // region refresh control diff --git a/extensions/barrage/src/main/java/io/deephaven/extensions/barrage/util/BarrageUtil.java b/extensions/barrage/src/main/java/io/deephaven/extensions/barrage/util/BarrageUtil.java index 298e97f988f..b246c4fa6b4 100755 --- a/extensions/barrage/src/main/java/io/deephaven/extensions/barrage/util/BarrageUtil.java +++ b/extensions/barrage/src/main/java/io/deephaven/extensions/barrage/util/BarrageUtil.java @@ -36,7 +36,7 @@ import io.deephaven.proto.util.Exceptions; import io.deephaven.api.util.NameValidator; import io.deephaven.engine.util.ColumnFormatting; -import io.deephaven.engine.util.config.MutableInputTable; +import io.deephaven.engine.util.input.InputTableUpdater; import io.deephaven.chunk.ChunkType; import io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse; import io.deephaven.util.type.TypeUtils; @@ -148,9 +148,10 @@ public static int makeTableSchemaPayload( final Map schemaMetadata = attributesToMetadata(attributes); final Map descriptions = GridAttributes.getColumnDescriptions(attributes); - final MutableInputTable inputTable = (MutableInputTable) attributes.get(Table.INPUT_TABLE_ATTRIBUTE); + final InputTableUpdater inputTableUpdater = (InputTableUpdater) attributes.get(Table.INPUT_TABLE_ATTRIBUTE); final List fields = columnDefinitionsToFields( - descriptions, inputTable, tableDefinition, tableDefinition.getColumns(), ignored -> new HashMap<>(), + descriptions, inputTableUpdater, tableDefinition, tableDefinition.getColumns(), + ignored -> new HashMap<>(), attributes, options.columnsAsList()) .collect(Collectors.toList()); @@ -180,12 +181,12 @@ public static Map attributesToMetadata(@NotNull final Map columnDefinitionsToFields( @NotNull final Map columnDescriptions, - @Nullable final MutableInputTable inputTable, + @Nullable final InputTableUpdater inputTableUpdater, @NotNull final TableDefinition tableDefinition, @NotNull final Collection> columnDefinitions, @NotNull final Function> fieldMetadataFactory, @NotNull final Map attributes) { - return columnDefinitionsToFields(columnDescriptions, inputTable, tableDefinition, columnDefinitions, + return columnDefinitionsToFields(columnDescriptions, inputTableUpdater, tableDefinition, columnDefinitions, fieldMetadataFactory, attributes, false); @@ -197,7 +198,7 @@ private static boolean isDataTypeSortable(final Class dataType) { public static Stream columnDefinitionsToFields( @NotNull final Map columnDescriptions, - @Nullable final MutableInputTable inputTable, + @Nullable final InputTableUpdater inputTableUpdater, @NotNull final TableDefinition tableDefinition, @NotNull final Collection> columnDefinitions, @NotNull final Function> fieldMetadataFactory, @@ -274,8 +275,8 @@ public static Stream columnDefinitionsToFields( if (columnDescription != null) { putMetadata(metadata, "description", columnDescription); } - if (inputTable != null) { - putMetadata(metadata, "inputtable.isKey", inputTable.getKeyNames().contains(name) + ""); + if (inputTableUpdater != null) { + putMetadata(metadata, "inputtable.isKey", inputTableUpdater.getKeyNames().contains(name) + ""); } if (columnsAsList) { @@ -422,6 +423,10 @@ private static void setConversionFactor(final ConvertedArrowSchema result, final result.conversionFactors[i] = factor; } + public static TableDefinition convertTableDefinition(final ExportedTableCreationResponse response) { + return convertArrowSchema(SchemaHelper.flatbufSchema(response)).tableDef; + } + public static ConvertedArrowSchema 
convertArrowSchema(final ExportedTableCreationResponse response) { return convertArrowSchema(SchemaHelper.flatbufSchema(response)); } diff --git a/gradle.properties b/gradle.properties index 982e8a1aee9..218d6376648 100644 --- a/gradle.properties +++ b/gradle.properties @@ -9,7 +9,7 @@ # Re-builders who want to inherit the base version, but have their own qualifier can set -PdeephavenBaseQualifier="customQualifier": "X.Y.Z-customQualifier". # # Re-builders who want a fully custom version can set -PdeephavenBaseVersion="customVersion" -PdeephavenBaseQualifier="": "customVersion". -deephavenBaseVersion=0.31.0 +deephavenBaseVersion=0.32.0 deephavenBaseQualifier=SNAPSHOT #org.gradle.debug diff --git a/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSession.java b/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSession.java index ec6ba5a7485..fbd8e6c5c25 100644 --- a/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSession.java +++ b/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSession.java @@ -38,7 +38,7 @@ public BarrageSubscription subscribe(final TableSpec tableSpec, final BarrageSub @Override public BarrageSubscription subscribe(final TableHandle tableHandle, final BarrageSubscriptionOptions options) { - return new BarrageSubscriptionImpl(this, session.executor(), tableHandle.newRef(), options); + return BarrageSubscription.make(this, session.executor(), tableHandle.newRef(), options); } @Override diff --git a/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSnapshot.java b/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSnapshot.java index 0d5a6e9ea7e..9003f7517e0 100644 --- a/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSnapshot.java +++ b/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSnapshot.java @@ -5,17 +5,35 @@ import io.deephaven.engine.rowset.RowSet; import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.TableDefinition; import io.deephaven.extensions.barrage.BarrageSnapshotOptions; import io.deephaven.qst.table.TableSpec; +import org.jetbrains.annotations.Nullable; import java.util.BitSet; import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; /** * A {@code BarrageSnapshot} represents a snapshot of a table that may or may not be filtered to a viewport of the * remote source table. */ public interface BarrageSnapshot { + /** + * Create a {@code BarrageSnapshot} from a {@link TableHandle}. + * + * @param session the Deephaven session that this export belongs to + * @param executorService an executor service used to flush metrics when enabled + * @param tableHandle the tableHandle to snapshot (ownership is transferred to the snapshot) + * @param options the transport level options for this snapshot + * @return a {@code BarrageSnapshot} + */ + static BarrageSnapshot make( + final BarrageSession session, @Nullable final ScheduledExecutorService executorService, + final TableHandle tableHandle, final BarrageSnapshotOptions options) { + return new BarrageSnapshotImpl(session, executorService, tableHandle, options); + } + interface Factory { /** * Sources a barrage snapshot from a {@link TableSpec}. 
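A hedged usage sketch of the new BarrageSnapshot.make factory above, assuming the standard options builder; variable names are illustrative:

```java
import java.util.concurrent.ScheduledExecutorService;

// Sketch only: callers go through the factory; the Impl constructor in the
// next hunk is now package-private.
static BarrageSnapshot exampleSnapshot(final BarrageSession session,
        final ScheduledExecutorService executor, final TableHandle handle) {
    // Ownership of the handle transfers to the snapshot; the executor may be
    // null and is only used to flush metrics when enabled.
    return BarrageSnapshot.make(session, executor, handle,
            BarrageSnapshotOptions.builder().build());
}
```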
diff --git a/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSnapshotImpl.java b/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSnapshotImpl.java index 60f37d9c2fb..3965540ad01 100644 --- a/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSnapshotImpl.java +++ b/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSnapshotImpl.java @@ -69,13 +69,15 @@ public class BarrageSnapshotImpl extends ReferenceCountedLivenessNode implements /** * Represents a BarrageSnapshot. + *
<p>
    + * See {@link BarrageSnapshot#make}. * * @param session the Deephaven session that this export belongs to * @param executorService an executor service used to flush metrics when enabled * @param tableHandle the tableHandle to snapshot (ownership is transferred to the snapshot) * @param options the transport level options for this snapshot */ - public BarrageSnapshotImpl( + BarrageSnapshotImpl( final BarrageSession session, @Nullable final ScheduledExecutorService executorService, final TableHandle tableHandle, final BarrageSnapshotOptions options) { super(false); @@ -360,6 +362,10 @@ public void onError(@NotNull final Throwable t) { } } + + /** + * The Completable Future is used to encapsulate the concept that the table is filled with requested data. + */ private class SnapshotCompletableFuture extends CompletableFuture
<Table>
{ @Override public boolean cancel(boolean mayInterruptIfRunning) { diff --git a/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSubscription.java b/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSubscription.java index 5268ce77627..7fca15aa115 100644 --- a/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSubscription.java +++ b/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSubscription.java @@ -3,19 +3,42 @@ */ package io.deephaven.client.impl; +import io.deephaven.engine.liveness.LivenessScope; +import io.deephaven.engine.liveness.LivenessScopeStack; import io.deephaven.engine.rowset.RowSet; import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.TableDefinition; import io.deephaven.extensions.barrage.BarrageSubscriptionOptions; import io.deephaven.qst.table.TableSpec; +import io.deephaven.util.SafeCloseable; import java.util.BitSet; import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; /** * A {@code BarrageSubscription} represents a subscription over a table that may or may not be filtered to a viewport of * the remote source table. */ public interface BarrageSubscription { + /** + * Create a {@code BarrageSubscription} from a {@link TableHandle}. + * + * @param session the Deephaven session that this export belongs to + * @param executorService an executor service used to flush stats + * @param tableHandle the tableHandle to subscribe to (ownership is transferred to the subscription) + * @param options the transport level options for this subscription + * @return a {@code BarrageSubscription} from a {@link TableHandle} + */ + static BarrageSubscription make( + final BarrageSession session, final ScheduledExecutorService executorService, + final TableHandle tableHandle, final BarrageSubscriptionOptions options) { + final LivenessScope scope = new LivenessScope(); + try (final SafeCloseable ignored = LivenessScopeStack.open(scope, false)) { + return new BarrageSubscriptionImpl(session, executorService, tableHandle, options, scope); + } + } + interface Factory { /** * Sources a barrage subscription from a {@link TableSpec}. 
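A hedged usage sketch of BarrageSubscription.make above, together with the liveness hand-off documented in the next hunk. Names are illustrative, and entireTable() is assumed to be the existing asynchronous entry point on BarrageSubscription:

```java
import java.util.concurrent.ScheduledExecutorService;

// Sketch only: make(...) builds the subscription inside its own LivenessScope,
// which is released once the result is handed off (get(), cancel, or error).
static Table exampleSubscribe(final BarrageSession session,
        final ScheduledExecutorService executor, final TableHandle handle,
        final BarrageSubscriptionOptions options) throws Exception {
    final BarrageSubscription subscription =
            BarrageSubscription.make(session, executor, handle.newRef(), options);
    // get() blocks for completion and manages the result table with the
    // caller's current liveness scope, per the FutureAdapter.doGet contract.
    return subscription.entireTable().get();
}
```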
diff --git a/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSubscriptionImpl.java b/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSubscriptionImpl.java index 5d1a0006e43..69d05206b16 100644 --- a/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSubscriptionImpl.java +++ b/java-client/barrage/src/main/java/io/deephaven/client/impl/BarrageSubscriptionImpl.java @@ -12,12 +12,13 @@ import io.deephaven.base.log.LogOutput; import io.deephaven.chunk.ChunkType; import io.deephaven.engine.exceptions.RequestCancelledException; -import io.deephaven.engine.liveness.ReferenceCountedLivenessNode; +import io.deephaven.engine.liveness.*; import io.deephaven.engine.rowset.RowSet; import io.deephaven.engine.rowset.WritableRowSet; import io.deephaven.engine.table.Table; import io.deephaven.engine.table.TableDefinition; import io.deephaven.engine.table.impl.util.BarrageMessage; +import io.deephaven.engine.updategraph.DynamicNode; import io.deephaven.engine.updategraph.UpdateGraph; import io.deephaven.engine.updategraph.UpdateGraphAwareCompletableFuture; import io.deephaven.extensions.barrage.BarrageSubscriptionOptions; @@ -25,6 +26,7 @@ import io.deephaven.extensions.barrage.util.*; import io.deephaven.internal.log.LoggerFactory; import io.deephaven.io.logger.Logger; +import io.deephaven.util.annotations.FinalDefault; import io.deephaven.util.annotations.VisibleForTesting; import io.grpc.CallOptions; import io.grpc.ClientCall; @@ -42,10 +44,8 @@ import java.io.InputStream; import java.nio.ByteBuffer; import java.util.BitSet; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.Future; +import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; -import java.util.concurrent.ScheduledExecutorService; /** * This class is an intermediary helper class that uses a {@code DoExchange} to populate a {@link BarrageTable} using @@ -64,31 +64,36 @@ public class BarrageSubscriptionImpl extends ReferenceCountedLivenessNode implem private final CheckForCompletion checkForCompletion; private final BarrageTable resultTable; + private LivenessScope constructionScope; private volatile FutureAdapter future; private boolean subscribed; private boolean isSnapshot; - private volatile int connected = 1; private static final AtomicIntegerFieldUpdater CONNECTED_UPDATER = AtomicIntegerFieldUpdater.newUpdater(BarrageSubscriptionImpl.class, "connected"); /** * Represents a BarrageSubscription. + *
<p>
+ * See {@link BarrageSubscription#make}. * * @param session the Deephaven session that this export belongs to * @param executorService an executor service used to flush stats * @param tableHandle the tableHandle to subscribe to (ownership is transferred to the subscription) * @param options the transport level options for this subscription + * @param constructionScope the scope used for constructing this */ - public BarrageSubscriptionImpl( + BarrageSubscriptionImpl( final BarrageSession session, final ScheduledExecutorService executorService, - final TableHandle tableHandle, final BarrageSubscriptionOptions options) { + final TableHandle tableHandle, final BarrageSubscriptionOptions options, + final LivenessScope constructionScope) { super(false); this.logName = tableHandle.exportId().toString(); this.tableHandle = tableHandle; this.options = options; + this.constructionScope = constructionScope; final BarrageUtil.ConvertedArrowSchema schema = BarrageUtil.convertArrowSchema(tableHandle.response()); final TableDefinition tableDefinition = schema.tableDef; @@ -451,35 +456,162 @@ public void onError(@NotNull final Throwable t) { } private interface FutureAdapter extends Future
<Table>
{ + boolean completeExceptionally(Throwable ex); + boolean complete(Table value); - boolean completeExceptionally(Throwable ex); + /** + * Called when the hand-off from the future is complete to release the construction scope. + */ + void maybeRelease(); + + @FunctionalInterface + interface Supplier { + Table get() throws InterruptedException, ExecutionException, TimeoutException; + } + + @FinalDefault + default Table doGet(final Supplier supplier) throws InterruptedException, ExecutionException, TimeoutException { + boolean throwingTimeout = false; + try { + final Table result = supplier.get(); + + if (result instanceof LivenessArtifact && DynamicNode.notDynamicOrIsRefreshing(result)) { + ((LivenessArtifact) result).manageWithCurrentScope(); + } + + return result; + } catch (final TimeoutException toe) { + throwingTimeout = true; + throw toe; + } finally { + if (!throwingTimeout) { + maybeRelease(); + } + } + } } + private static final AtomicIntegerFieldUpdater CF_WAS_RELEASED = + AtomicIntegerFieldUpdater.newUpdater(CompletableFutureAdapter.class, "wasReleased"); + + /** + * The Completable Future is used when this thread is not blocking the update graph progression. + *
<p>
+ * We will keep the result table alive until the user calls {@link Future#get get()} on the future. Note that this + * only protects the getters on {@link Future} not the entire {@link CompletionStage} interface. + *
<p>
+ * Subsequent calls to {@link Future#get get()} will only succeed if the result is still alive and will increase the + * reference count of the result table. + */ private class CompletableFutureAdapter extends CompletableFuture
<Table>
implements FutureAdapter { + + volatile int wasReleased; + @Override public boolean cancel(boolean mayInterruptIfRunning) { - if (super.cancel(mayInterruptIfRunning)) { - BarrageSubscriptionImpl.this.cancel("cancelled by user"); - return true; + try { + if (super.cancel(mayInterruptIfRunning)) { + BarrageSubscriptionImpl.this.cancel("cancelled by user"); + return true; + } + } finally { + maybeRelease(); } return false; } + + @Override + public boolean completeExceptionally(Throwable ex) { + maybeRelease(); + return super.completeExceptionally(ex); + } + + @Override + public Table get(final long timeout, @NotNull final TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + return doGet(() -> super.get(timeout, unit)); + } + + @Override + public Table get() throws InterruptedException, ExecutionException { + try { + return doGet(super::get); + } catch (TimeoutException toe) { + throw new IllegalStateException("Unexpected TimeoutException", toe); + } + } + + @Override + public void maybeRelease() { + if (CF_WAS_RELEASED.compareAndSet(this, 0, 1)) { + constructionScope.release(); + constructionScope = null; + } + } } + private static final AtomicIntegerFieldUpdater UG_WAS_RELEASED = + AtomicIntegerFieldUpdater.newUpdater(UpdateGraphAwareFutureAdapter.class, "wasReleased"); + + /** + * The Update Graph Aware Future is used when waiting directly on this thread would otherwise be blocking update + * graph progression. + *
<p>
+ * We will keep the result table alive until the user calls {@link Future#get get()} on the future. + *
<p>
+ * Subsequent calls to {@link Future#get get()} will only succeed if the result is still alive and will increase the + * reference count of the result table. + */ private class UpdateGraphAwareFutureAdapter extends UpdateGraphAwareCompletableFuture
<Table>
implements FutureAdapter { + + volatile int wasReleased; + public UpdateGraphAwareFutureAdapter(@NotNull final UpdateGraph updateGraph) { super(updateGraph); } @Override public boolean cancel(boolean mayInterruptIfRunning) { - if (super.cancel(mayInterruptIfRunning)) { - BarrageSubscriptionImpl.this.cancel("cancelled by user"); - return true; + try { + if (super.cancel(mayInterruptIfRunning)) { + BarrageSubscriptionImpl.this.cancel("cancelled by user"); + return true; + } + } finally { + maybeRelease(); } return false; } + + @Override + public boolean completeExceptionally(Throwable ex) { + maybeRelease(); + return super.completeExceptionally(ex); + } + + @Override + public Table get(final long timeout, @NotNull final TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + return doGet(() -> super.get(timeout, unit)); + } + + @Override + public Table get() throws InterruptedException, ExecutionException { + try { + return doGet(super::get); + } catch (TimeoutException toe) { + throw new IllegalStateException("Unexpected TimeoutException", toe); + } + } + + @Override + public void maybeRelease() { + if (UG_WAS_RELEASED.compareAndSet(this, 0, 1)) { + constructionScope.release(); + constructionScope = null; + } + } } } diff --git a/java-client/session/src/main/java/io/deephaven/client/impl/BatchTableRequestBuilder.java b/java-client/session/src/main/java/io/deephaven/client/impl/BatchTableRequestBuilder.java index b200b5ab37a..7f92e29850a 100644 --- a/java-client/session/src/main/java/io/deephaven/client/impl/BatchTableRequestBuilder.java +++ b/java-client/session/src/main/java/io/deephaven/client/impl/BatchTableRequestBuilder.java @@ -40,6 +40,7 @@ import io.deephaven.proto.backplane.grpc.Condition; import io.deephaven.proto.backplane.grpc.CreateInputTableRequest; import io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind; +import io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.Blink; import io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.InMemoryAppendOnly; import io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.InMemoryKeyBacked; import io.deephaven.proto.backplane.grpc.CrossJoinTablesRequest; @@ -76,6 +77,7 @@ import io.deephaven.qst.table.AggregateAllTable; import io.deephaven.qst.table.AggregateTable; import io.deephaven.qst.table.AsOfJoinTable; +import io.deephaven.qst.table.BlinkInputTable; import io.deephaven.qst.table.Clock.Visitor; import io.deephaven.qst.table.ClockSystem; import io.deephaven.qst.table.DropColumnsTable; @@ -535,6 +537,11 @@ public InputTableKind visit(InMemoryKeyBackedInputTable inMemoryKeyBacked) { return InputTableKind.newBuilder().setInMemoryKeyBacked( InMemoryKeyBacked.newBuilder().addAllKeyColumns(inMemoryKeyBacked.keys())).build(); } + + @Override + public InputTableKind visit(BlinkInputTable blinkInputTable) { + return InputTableKind.newBuilder().setBlink(Blink.getDefaultInstance()).build(); + } })); return op(Builder::setCreateInputTable, builder); } diff --git a/plugin/src/main/java/io/deephaven/plugin/Plugin.java b/plugin/src/main/java/io/deephaven/plugin/Plugin.java index 0acff4743f3..3dbfcd2409a 100644 --- a/plugin/src/main/java/io/deephaven/plugin/Plugin.java +++ b/plugin/src/main/java/io/deephaven/plugin/Plugin.java @@ -10,6 +10,7 @@ * A plugin is a structured extension point for user-definable behavior. 
* * @see ObjectType + * @see JsPlugin */ public interface Plugin extends Registration { diff --git a/plugin/src/main/java/io/deephaven/plugin/js/JsPlugin.java b/plugin/src/main/java/io/deephaven/plugin/js/JsPlugin.java index be4a5a1a86f..278ce7f079e 100644 --- a/plugin/src/main/java/io/deephaven/plugin/js/JsPlugin.java +++ b/plugin/src/main/java/io/deephaven/plugin/js/JsPlugin.java @@ -3,16 +3,145 @@ */ package io.deephaven.plugin.js; +import io.deephaven.annotations.BuildableStyle; import io.deephaven.plugin.Plugin; +import io.deephaven.plugin.PluginBase; +import org.immutables.value.Value.Check; +import org.immutables.value.Value.Default; +import org.immutables.value.Value.Immutable; + +import java.nio.file.Files; +import java.nio.file.Path; /** - * A js plugin is a {@link Plugin} that allows adding javascript code under the server's URL path "js-plugins/". See - * deephaven-plugins#js-plugins for more details - * about the underlying construction for js plugins. + * A JS plugin is a {@link Plugin} that allows for custom javascript and related content to be served, see + * {@link io.deephaven.plugin.js}. + * + *
<p>
+ * For example, if the following JS plugin was the only JS plugin installed
+ *
+ * <pre>
+ * JsPlugin.builder()
+ *         .name("foo")
+ *         .version("1.0.0")
+ *         .main(Path.of("dist/index.js"))
+ *         .path(Path.of("/path-to/my-plugin"))
+ *         .build()
+ * </pre>
+ *
+ * the manifest served at "js-plugins/manifest.json" would be equivalent to
+ *
+ * <pre>
+ * {
+ *   "plugins": [
+ *     {
+ *       "name": "foo",
+ *       "version": "1.0.0",
+ *       "main": "dist/index.js"
+ *     }
+ *   ]
+ * }
+ * </pre>
* - * @see JsPluginPackagePath - * @see JsPluginManifestPath + * and the file "/path-to/my-plugin/dist/index.js" would be served at "js-plugins/foo/dist/index.js". All other files of + * the form "/path-to/my-plugin/{somePath}" will be served at "js-plugins/foo/{somePath}". */ -public interface JsPlugin extends Plugin { +@Immutable +@BuildableStyle +public abstract class JsPlugin extends PluginBase { + + public static Builder builder() { + return ImmutableJsPlugin.builder(); + } + + /** + * The JS plugin name. The JS plugin contents will be served via the URL path "js-plugins/{name}/", as well as + * included as the "name" field for the manifest entry in "js-plugins/manifest.json". + * + * @return the name + */ + public abstract String name(); + + /** + * The JS plugin version. Will be included as the "version" field for the manifest entry in + * "js-plugins/manifest.json". + * + * @return the version + */ + public abstract String version(); + + /** + * The main JS file path, specified relative to {@link #path()}. The main JS file must exist + * ({@code Files.isRegularFile(root().resolve(main()))}) and must be included in {@link #paths()}. Will be included + * as the "main" field for the manifest entry in "js-plugins/manifest.json". + * + * @return the main JS file path + */ + public abstract Path main(); + + /** + * The directory path of the resources to serve. The resources will be served via the URL path + * "js-plugins/{name}/{relativeToPath}". The path must exist ({@code Files.isDirectory(path())}). + * + * @return the path + */ + public abstract Path path(); + + /** + * The subset of resources from {@link #path()} to serve. Production installations should preferably be packaged + * with the exact resources necessary (and thus served with {@link Paths#all()}). During development, other subsets + * may be useful if {@link #path()} contains content unrelated to the JS content. By default, is + * {@link Paths#all()}. 
+ * + * @return the paths + */ + @Default + public Paths paths() { + return Paths.all(); + } + + @Override + public final > T walk(V visitor) { + return visitor.visit(this); + } + + @Check + final void checkPath() { + if (!Files.isDirectory(path())) { + throw new IllegalArgumentException(String.format("path ('%s') must exist and be a directory", path())); + } + } + + @Check + final void checkMain() { + final Path mainPath = path().resolve(main()); + if (!Files.isRegularFile(mainPath)) { + throw new IllegalArgumentException(String.format("main ('%s') must exist and be a regular file", mainPath)); + } + } + + @Check + final void checkPaths() { + if (!(paths() instanceof PathsInternal)) { + throw new IllegalArgumentException("Must construct one of the approved Paths"); + } + final Path relativeMain = path().relativize(path().resolve(main())); + if (!((PathsInternal) paths()).matches(relativeMain)) { + throw new IllegalArgumentException(String.format("main ('%s') is not in paths", relativeMain)); + } + } + + public interface Builder { + Builder name(String name); + + Builder version(String version); + + Builder main(Path main); + + Builder path(Path path); + + Builder paths(Paths paths); + JsPlugin build(); + } } diff --git a/plugin/src/main/java/io/deephaven/plugin/js/JsPluginBase.java b/plugin/src/main/java/io/deephaven/plugin/js/JsPluginBase.java deleted file mode 100644 index 93c0ef604c3..00000000000 --- a/plugin/src/main/java/io/deephaven/plugin/js/JsPluginBase.java +++ /dev/null @@ -1,15 +0,0 @@ -/** - * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.plugin.js; - -import io.deephaven.plugin.Plugin; -import io.deephaven.plugin.PluginBase; - -public abstract class JsPluginBase extends PluginBase implements JsPlugin { - - @Override - public final > T walk(V visitor) { - return visitor.visit(this); - } -} diff --git a/plugin/src/main/java/io/deephaven/plugin/js/JsPluginManifestPath.java b/plugin/src/main/java/io/deephaven/plugin/js/JsPluginManifestPath.java deleted file mode 100644 index 78673a94e51..00000000000 --- a/plugin/src/main/java/io/deephaven/plugin/js/JsPluginManifestPath.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.plugin.js; - -import io.deephaven.annotations.SimpleStyle; -import org.immutables.value.Value.Immutable; -import org.immutables.value.Value.Parameter; - -import java.nio.file.Path; - -/** - * A manifest-based js plugin sourced from a {@value MANIFEST_JSON} file. - */ -@Immutable -@SimpleStyle -public abstract class JsPluginManifestPath extends JsPluginBase { - - public static final String MANIFEST_JSON = "manifest.json"; - - /** - * Creates a manifest-based js plugin from {@code manifestRoot}. - * - * @param manifestRoot the manifest root directory path - * @return the manifest-based js plugin - */ - public static JsPluginManifestPath of(Path manifestRoot) { - return ImmutableJsPluginManifestPath.of(manifestRoot); - } - - /** - * The manifest root path directory path. - * - * @return the manifest root directory path - */ - @Parameter - public abstract Path path(); - - /** - * The {@value MANIFEST_JSON} file path, relative to {@link #path()}. Equivalent to - * {@code path().resolve(MANIFEST_JSON)}. - * - * @return the manifest json file path - */ - public final Path manifestJson() { - return path().resolve(MANIFEST_JSON); - } - - /** - * Equivalent to {@code JsPluginPackagePath.of(path().resolve(name))}. 
- * - * @param name the package name - * @return the package path - */ - public final JsPluginPackagePath packagePath(String name) { - return JsPluginPackagePath.of(path().resolve(name)); - } -} diff --git a/plugin/src/main/java/io/deephaven/plugin/js/JsPluginPackagePath.java b/plugin/src/main/java/io/deephaven/plugin/js/JsPluginPackagePath.java deleted file mode 100644 index 079f186d76b..00000000000 --- a/plugin/src/main/java/io/deephaven/plugin/js/JsPluginPackagePath.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.plugin.js; - -import io.deephaven.annotations.SimpleStyle; -import org.immutables.value.Value.Immutable; -import org.immutables.value.Value.Parameter; - -import java.nio.file.Path; - -/** - * A package-based js plugin sourced from a {@value PACKAGE_JSON} file. - */ -@Immutable -@SimpleStyle -public abstract class JsPluginPackagePath extends JsPluginBase { - public static final String PACKAGE_JSON = "package.json"; - - /** - * Creates a package-based js plugin from {@code packageRoot}. - * - * @param packageRoot the package root directory path - * @return the package-based js plugin - */ - public static JsPluginPackagePath of(Path packageRoot) { - return ImmutableJsPluginPackagePath.of(packageRoot); - } - - /** - * The package root directory path. - * - * @return the package root directory path - */ - @Parameter - public abstract Path path(); - - /** - * The {@value PACKAGE_JSON} file path. Equivalent to {@code path().resolve(PACKAGE_JSON)}. - * - * @return the package json file path - */ - public final Path packageJson() { - return path().resolve(PACKAGE_JSON); - } -} diff --git a/plugin/src/main/java/io/deephaven/plugin/js/Paths.java b/plugin/src/main/java/io/deephaven/plugin/js/Paths.java new file mode 100644 index 00000000000..9bee0545ce6 --- /dev/null +++ b/plugin/src/main/java/io/deephaven/plugin/js/Paths.java @@ -0,0 +1,53 @@ +/** + * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending + */ +package io.deephaven.plugin.js; + +import java.nio.file.Path; + +/** + * The subset of paths to serve, see {@link JsPlugin#paths()}. + */ +public interface Paths { + + /** + * Includes all paths. + * + * @return the paths + */ + static Paths all() { + return PathsAll.ALL; + } + + /** + * Includes only the paths that are prefixed by {@code prefix}. + * + * @param prefix the prefix + * @return the paths + */ + static Paths ofPrefixes(Path prefix) { + // Note: we have specific overload for single element to explicitly differentiate from Iterable overload since + // Path extends Iterable. + return PathsPrefixes.builder().addPrefixes(prefix).build(); + } + + /** + * Includes only the paths that are prefixed by one of {@code prefixes}. + * + * @param prefixes the prefixes + * @return the paths + */ + static Paths ofPrefixes(Path... prefixes) { + return PathsPrefixes.builder().addPrefixes(prefixes).build(); + } + + /** + * Includes only the paths that are prefixed by one of {@code prefixes}. 
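Tying Paths back to JsPlugin: a sketch of a development-time plugin that serves only its dist/ output from a checkout containing unrelated files, reusing the illustrative locations from the JsPlugin javadoc above. Note that path() and main() must actually exist at build() time, per the @Check validations:

```java
import java.nio.file.Path;

// Sketch only: paths() restricts serving to the dist/ prefix; main() must be
// matched by paths(), which checkPaths() enforces.
static JsPlugin exampleDevPlugin() {
    return JsPlugin.builder()
            .name("foo")
            .version("1.0.0")
            .main(Path.of("dist/index.js"))
            .path(Path.of("/path-to/my-plugin"))
            .paths(Paths.ofPrefixes(Path.of("dist")))
            .build();
}
```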
+ * + * @param prefixes the prefixes + * @return the paths + */ + static Paths ofPrefixes(Iterable prefixes) { + return PathsPrefixes.builder().addAllPrefixes(prefixes).build(); + } +} diff --git a/plugin/src/main/java/io/deephaven/plugin/js/PathsAll.java b/plugin/src/main/java/io/deephaven/plugin/js/PathsAll.java new file mode 100644 index 00000000000..dc9909923b3 --- /dev/null +++ b/plugin/src/main/java/io/deephaven/plugin/js/PathsAll.java @@ -0,0 +1,15 @@ +/** + * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending + */ +package io.deephaven.plugin.js; + +import java.nio.file.Path; + +enum PathsAll implements PathsInternal { + ALL; + + @Override + public boolean matches(Path path) { + return true; + } +} diff --git a/plugin/src/main/java/io/deephaven/plugin/js/PathsInternal.java b/plugin/src/main/java/io/deephaven/plugin/js/PathsInternal.java new file mode 100644 index 00000000000..70b777aef01 --- /dev/null +++ b/plugin/src/main/java/io/deephaven/plugin/js/PathsInternal.java @@ -0,0 +1,10 @@ +/** + * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending + */ +package io.deephaven.plugin.js; + +import java.nio.file.PathMatcher; + +interface PathsInternal extends Paths, PathMatcher { + +} diff --git a/plugin/src/main/java/io/deephaven/plugin/js/PathsPrefixes.java b/plugin/src/main/java/io/deephaven/plugin/js/PathsPrefixes.java new file mode 100644 index 00000000000..affb0331054 --- /dev/null +++ b/plugin/src/main/java/io/deephaven/plugin/js/PathsPrefixes.java @@ -0,0 +1,53 @@ +/** + * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending + */ +package io.deephaven.plugin.js; + +import io.deephaven.annotations.BuildableStyle; +import org.immutables.value.Value.Check; +import org.immutables.value.Value.Immutable; + +import java.nio.file.Path; +import java.util.Set; + +@Immutable +@BuildableStyle +abstract class PathsPrefixes implements PathsInternal { + + public static Builder builder() { + return ImmutablePathsPrefixes.builder(); + } + + public abstract Set prefixes(); + + @Override + public final boolean matches(Path path) { + if (prefixes().contains(path)) { + return true; + } + // Note: we could make a more efficient impl w/ a tree-based approach based on the names + for (Path prefix : prefixes()) { + if (path.startsWith(prefix)) { + return true; + } + } + return false; + } + + @Check + final void checkPrefixesNonEmpty() { + if (prefixes().isEmpty()) { + throw new IllegalArgumentException("prefixes must be non-empty"); + } + } + + interface Builder { + Builder addPrefixes(Path element); + + Builder addPrefixes(Path... elements); + + Builder addAllPrefixes(Iterable elements); + + PathsPrefixes build(); + } +} diff --git a/plugin/src/main/java/io/deephaven/plugin/js/package-info.java b/plugin/src/main/java/io/deephaven/plugin/js/package-info.java new file mode 100644 index 00000000000..3f3ee3f3101 --- /dev/null +++ b/plugin/src/main/java/io/deephaven/plugin/js/package-info.java @@ -0,0 +1,18 @@ +/** + * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending + */ + +/** + * The Deephaven server supports {@link io.deephaven.plugin.js.JsPlugin JS plugins} which allow custom javascript (and + * related content) to be served under the HTTP path "js-plugins/". + * + *
diff --git a/plugin/src/main/java/io/deephaven/plugin/js/package-info.java b/plugin/src/main/java/io/deephaven/plugin/js/package-info.java
new file mode 100644
index 00000000000..3f3ee3f3101
--- /dev/null
+++ b/plugin/src/main/java/io/deephaven/plugin/js/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending
+ */
+
+/**
+ * The Deephaven server supports {@link io.deephaven.plugin.js.JsPlugin JS plugins} which allow custom javascript (and
+ * related content) to be served under the HTTP path "js-plugins/".
+ *
+ * <p>
+ * A "js-plugins/manifest.json" is served that allows clients to discover what JS plugins are installed. This will be a
+ * JSON object, and will have a "plugins" array, with object elements that have a "name", "version", and "main". All
+ * files served via a specific plugin will be accessed under "js-plugins/{name}/". The main entry file for a plugin will
+ * be accessed at "js-plugins/{name}/{main}". The "version" is currently for informational purposes only.
+ *
+ * @see <a href="https://github.com/deephaven/deephaven-plugins">deephaven-plugins</a> for Deephaven-maintained JS
+ *      plugins
+ */
+package io.deephaven.plugin.js;
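
To make the manifest shape described in the package javadoc concrete, a hypothetical js-plugins/manifest.json with one installed plugin (all values illustrative, not taken from the patch):

    {
      "plugins": [
        { "name": "my-plugin", "version": "0.1.0", "main": "dist/index.js" }
      ]
    }

Under such a manifest, the plugin's files would be served beneath "js-plugins/my-plugin/" and its main entry at "js-plugins/my-plugin/dist/index.js".
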
+ * A "js-plugins/manifest.json" is served that allows clients to discover what JS plugins are installed. This will be a + * JSON object, and will have a "plugins" array, with object elements that have a "name", "version", and "main". All + * files served via a specific plugin will be accessed under "js-plugins/{name}/". The main entry file for a plugin will + * be accessed at "js-plugins/{name}/{main}". The "version" is currently for informational purposes only. + * + * @see deephaven-plugins for Deephaven-maintained JS + * plugins + */ +package io.deephaven.plugin.js; diff --git a/props/configs/src/main/resources/dh-defaults.prop b/props/configs/src/main/resources/dh-defaults.prop index 99ff56f26cb..be2dcba5eb9 100644 --- a/props/configs/src/main/resources/dh-defaults.prop +++ b/props/configs/src/main/resources/dh-defaults.prop @@ -60,7 +60,3 @@ client.configuration.list=java.version,deephaven.version,barrage.version,http.se # jar, and a class that is found in that jar. Any such keys will be made available to the client.configuration.list # as .version. client.version.list=deephaven=io.deephaven.engine.table.Table,barrage=io.deephaven.barrage.flatbuf.BarrageMessageWrapper - - -# Specifies additional setup to run on threads that can perform table operations with user code. Comma-separated list, instances must be of type io.deephaven.util.thread.ThreadInitializationFactory -thread.initialization=io.deephaven.server.console.python.DebuggingInitializer diff --git a/props/test-configs/src/main/resources/dh-tests.prop b/props/test-configs/src/main/resources/dh-tests.prop index 64dfddae10a..f7d2503aa35 100644 --- a/props/test-configs/src/main/resources/dh-tests.prop +++ b/props/test-configs/src/main/resources/dh-tests.prop @@ -102,4 +102,3 @@ client.version.list= authentication.anonymous.warn=false deephaven.console.type=none -thread.initialization= diff --git a/proto/proto-backplane-grpc/src/main/proto/deephaven/proto/table.proto b/proto/proto-backplane-grpc/src/main/proto/deephaven/proto/table.proto index 441732cda9c..2dcdd486f5b 100644 --- a/proto/proto-backplane-grpc/src/main/proto/deephaven/proto/table.proto +++ b/proto/proto-backplane-grpc/src/main/proto/deephaven/proto/table.proto @@ -1210,9 +1210,12 @@ message CreateInputTableRequest { message InMemoryKeyBacked { repeated string key_columns = 1; } + message Blink { + } oneof kind { InMemoryAppendOnly in_memory_append_only = 1; InMemoryKeyBacked in_memory_key_backed = 2; + Blink blink = 3; } } diff --git a/py/client/pydeephaven/proto/table_pb2.py b/py/client/pydeephaven/proto/table_pb2.py index 11a6db4c8ba..ccf1ea47559 100644 --- a/py/client/pydeephaven/proto/table_pb2.py +++ b/py/client/pydeephaven/proto/table_pb2.py @@ -14,7 +14,7 @@ from pydeephaven.proto import ticket_pb2 as deephaven_dot_proto_dot_ticket__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1b\x64\x65\x65phaven/proto/table.proto\x12!io.deephaven.proto.backplane.grpc\x1a\x1c\x64\x65\x65phaven/proto/ticket.proto\"l\n\x0eTableReference\x12;\n\x06ticket\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.TicketH\x00\x12\x16\n\x0c\x62\x61tch_offset\x18\x02 \x01(\x11H\x00\x42\x05\n\x03ref\"\xc6\x01\n\x1d\x45xportedTableCreationResponse\x12\x44\n\tresult_id\x18\x01 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\nerror_info\x18\x03 \x01(\t\x12\x15\n\rschema_header\x18\x04 \x01(\x0c\x12\x11\n\tis_static\x18\x05 \x01(\x08\x12\x10\n\x04size\x18\x06 
\x01(\x12\x42\x02\x30\x01\"\x97\x01\n\x11\x46\x65tchTableRequest\x12\x44\n\tsource_id\x18\x01 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12<\n\tresult_id\x18\x02 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\"\xa0\x01\n\x1a\x41pplyPreviewColumnsRequest\x12\x44\n\tsource_id\x18\x01 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12<\n\tresult_id\x18\x02 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\"\x1d\n\x1b\x45xportedTableUpdatesRequest\"\x8c\x01\n\x1a\x45xportedTableUpdateMessage\x12<\n\texport_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x10\n\x04size\x18\x02 \x01(\x12\x42\x02\x30\x01\x12\x1e\n\x16update_failure_message\x18\x03 \x01(\t\"c\n\x11\x45mptyTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x10\n\x04size\x18\x02 \x01(\x12\x42\x02\x30\x01\"\xef\x01\n\x10TimeTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x1e\n\x10start_time_nanos\x18\x02 \x01(\x12\x42\x02\x30\x01H\x00\x12\x1b\n\x11start_time_string\x18\x05 \x01(\tH\x00\x12\x1a\n\x0cperiod_nanos\x18\x03 \x01(\x12\x42\x02\x30\x01H\x01\x12\x17\n\rperiod_string\x18\x06 \x01(\tH\x01\x12\x13\n\x0b\x62link_table\x18\x04 \x01(\x08\x42\x0c\n\nstart_timeB\x08\n\x06period\"\xb1\x01\n\x15SelectOrUpdateRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x14\n\x0c\x63olumn_specs\x18\x03 \x03(\t\"\x8c\x02\n\x0bMathContext\x12\x11\n\tprecision\x18\x01 \x01(\x11\x12R\n\rrounding_mode\x18\x02 \x01(\x0e\x32;.io.deephaven.proto.backplane.grpc.MathContext.RoundingMode\"\x95\x01\n\x0cRoundingMode\x12\x1f\n\x1bROUNDING_MODE_NOT_SPECIFIED\x10\x00\x12\x06\n\x02UP\x10\x01\x12\x08\n\x04\x44OWN\x10\x02\x12\x0b\n\x07\x43\x45ILING\x10\x03\x12\t\n\x05\x46LOOR\x10\x04\x12\x0b\n\x07HALF_UP\x10\x05\x12\r\n\tHALF_DOWN\x10\x06\x12\r\n\tHALF_EVEN\x10\x07\x12\x0f\n\x0bUNNECESSARY\x10\x08\"\xdb\x02\n\x13UpdateByWindowScale\x12[\n\x05ticks\x18\x01 \x01(\x0b\x32J.io.deephaven.proto.backplane.grpc.UpdateByWindowScale.UpdateByWindowTicksH\x00\x12Y\n\x04time\x18\x02 \x01(\x0b\x32I.io.deephaven.proto.backplane.grpc.UpdateByWindowScale.UpdateByWindowTimeH\x00\x1a$\n\x13UpdateByWindowTicks\x12\r\n\x05ticks\x18\x01 \x01(\x01\x1a^\n\x12UpdateByWindowTime\x12\x0e\n\x06\x63olumn\x18\x01 \x01(\t\x12\x13\n\x05nanos\x18\x02 \x01(\x12\x42\x02\x30\x01H\x00\x12\x19\n\x0f\x64uration_string\x18\x03 \x01(\tH\x00\x42\x08\n\x06windowB\x06\n\x04type\"\xe1\x03\n\x11UpdateByEmOptions\x12I\n\ron_null_value\x18\x01 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.BadDataBehavior\x12H\n\x0con_nan_value\x18\x02 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.BadDataBehavior\x12H\n\x0con_null_time\x18\x03 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.BadDataBehavior\x12R\n\x16on_negative_delta_time\x18\x04 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.BadDataBehavior\x12N\n\x12on_zero_delta_time\x18\x05 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.BadDataBehavior\x12I\n\x11\x62ig_value_context\x18\x06 \x01(\x0b\x32..io.deephaven.proto.backplane.grpc.MathContext\"f\n\x14UpdateByDeltaOptions\x12N\n\rnull_behavior\x18\x01 \x01(\x0e\x32\x37.io.deephaven.proto.backplane.grpc.UpdateByNullBehavior\"\x99\x34\n\x0fUpdateByRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 
\x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12S\n\x07options\x18\x03 \x01(\x0b\x32\x42.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOptions\x12X\n\noperations\x18\x04 \x03(\x0b\x32\x44.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation\x12\x18\n\x10group_by_columns\x18\x05 \x03(\t\x1a\xc3\x03\n\x0fUpdateByOptions\x12\x1c\n\x0fuse_redirection\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x1b\n\x0e\x63hunk_capacity\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12.\n!max_static_sparse_memory_overhead\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12$\n\x17initial_hash_table_size\x18\x04 \x01(\x05H\x03\x88\x01\x01\x12 \n\x13maximum_load_factor\x18\x05 \x01(\x01H\x04\x88\x01\x01\x12\x1f\n\x12target_load_factor\x18\x06 \x01(\x01H\x05\x88\x01\x01\x12\x44\n\x0cmath_context\x18\x07 \x01(\x0b\x32..io.deephaven.proto.backplane.grpc.MathContextB\x12\n\x10_use_redirectionB\x11\n\x0f_chunk_capacityB$\n\"_max_static_sparse_memory_overheadB\x1a\n\x18_initial_hash_table_sizeB\x16\n\x14_maximum_load_factorB\x15\n\x13_target_load_factor\x1a\xf2-\n\x11UpdateByOperation\x12\x65\n\x06\x63olumn\x18\x01 \x01(\x0b\x32S.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumnH\x00\x1a\xed,\n\x0eUpdateByColumn\x12n\n\x04spec\x18\x01 \x01(\x0b\x32`.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec\x12\x13\n\x0bmatch_pairs\x18\x02 \x03(\t\x1a\xd5+\n\x0cUpdateBySpec\x12\x85\x01\n\x03sum\x18\x01 \x01(\x0b\x32v.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByCumulativeSumH\x00\x12\x85\x01\n\x03min\x18\x02 \x01(\x0b\x32v.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByCumulativeMinH\x00\x12\x85\x01\n\x03max\x18\x03 \x01(\x0b\x32v.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByCumulativeMaxH\x00\x12\x8d\x01\n\x07product\x18\x04 \x01(\x0b\x32z.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByCumulativeProductH\x00\x12}\n\x04\x66ill\x18\x05 \x01(\x0b\x32m.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByFillH\x00\x12{\n\x03\x65ma\x18\x06 \x01(\x0b\x32l.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByEmaH\x00\x12\x8a\x01\n\x0brolling_sum\x18\x07 \x01(\x0b\x32s.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingSumH\x00\x12\x8e\x01\n\rrolling_group\x18\x08 \x01(\x0b\x32u.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingGroupH\x00\x12\x8a\x01\n\x0brolling_avg\x18\t \x01(\x0b\x32s.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingAvgH\x00\x12\x8a\x01\n\x0brolling_min\x18\n \x01(\x0b\x32s.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingMinH\x00\x12\x8a\x01\n\x0brolling_max\x18\x0b \x01(\x0b\x32s.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingMaxH\x00\x12\x92\x01\n\x0frolling_product\x18\x0c \x01(\x0b\x32w.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingProductH\x00\x12\x7f\n\x05\x64\x65lta\x18\r 
\x01(\x0b\x32n.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByDeltaH\x00\x12{\n\x03\x65ms\x18\x0e \x01(\x0b\x32l.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByEmsH\x00\x12\x80\x01\n\x06\x65m_min\x18\x0f \x01(\x0b\x32n.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByEmMinH\x00\x12\x80\x01\n\x06\x65m_max\x18\x10 \x01(\x0b\x32n.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByEmMaxH\x00\x12\x80\x01\n\x06\x65m_std\x18\x11 \x01(\x0b\x32n.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByEmStdH\x00\x12\x8e\x01\n\rrolling_count\x18\x12 \x01(\x0b\x32u.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingCountH\x00\x12\x8a\x01\n\x0brolling_std\x18\x13 \x01(\x0b\x32s.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingStdH\x00\x12\x8c\x01\n\x0crolling_wavg\x18\x14 \x01(\x0b\x32t.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingWAvgH\x00\x1a\x17\n\x15UpdateByCumulativeSum\x1a\x17\n\x15UpdateByCumulativeMin\x1a\x17\n\x15UpdateByCumulativeMax\x1a\x1b\n\x19UpdateByCumulativeProduct\x1a\x0e\n\x0cUpdateByFill\x1a\xa2\x01\n\x0bUpdateByEma\x12\x45\n\x07options\x18\x01 \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.UpdateByEmOptions\x12L\n\x0cwindow_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xa2\x01\n\x0bUpdateByEms\x12\x45\n\x07options\x18\x01 \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.UpdateByEmOptions\x12L\n\x0cwindow_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xa4\x01\n\rUpdateByEmMin\x12\x45\n\x07options\x18\x01 \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.UpdateByEmOptions\x12L\n\x0cwindow_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xa4\x01\n\rUpdateByEmMax\x12\x45\n\x07options\x18\x01 \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.UpdateByEmOptions\x12L\n\x0cwindow_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xa4\x01\n\rUpdateByEmStd\x12\x45\n\x07options\x18\x01 \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.UpdateByEmOptions\x12L\n\x0cwindow_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1aY\n\rUpdateByDelta\x12H\n\x07options\x18\x01 \x01(\x0b\x32\x37.io.deephaven.proto.backplane.grpc.UpdateByDeltaOptions\x1a\xc0\x01\n\x12UpdateByRollingSum\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xc2\x01\n\x14UpdateByRollingGroup\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xc0\x01\n\x12UpdateByRollingAvg\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 
\x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xc0\x01\n\x12UpdateByRollingMin\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xc0\x01\n\x12UpdateByRollingMax\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xc4\x01\n\x16UpdateByRollingProduct\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xc2\x01\n\x14UpdateByRollingCount\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xc0\x01\n\x12UpdateByRollingStd\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xd8\x01\n\x13UpdateByRollingWAvg\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12\x15\n\rweight_column\x18\x03 \x01(\tB\x06\n\x04typeB\x06\n\x04type\"\xb1\x01\n\x15SelectDistinctRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x14\n\x0c\x63olumn_names\x18\x03 \x03(\t\"\xae\x01\n\x12\x44ropColumnsRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x14\n\x0c\x63olumn_names\x18\x03 \x03(\t\"\xb5\x01\n\x1eUnstructuredFilterTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x0f\n\x07\x66ilters\x18\x03 \x03(\t\"\xad\x01\n\x11HeadOrTailRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x14\n\x08num_rows\x18\x03 \x01(\x12\x42\x02\x30\x01\"\xce\x01\n\x13HeadOrTailByRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x14\n\x08num_rows\x18\x03 \x01(\x12\x42\x02\x30\x01\x12\x1d\n\x15group_by_column_specs\x18\x04 \x03(\t\"\xc3\x01\n\x0eUngroupRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x11\n\tnull_fill\x18\x03 \x01(\x08\x12\x1a\n\x12\x63olumns_to_ungroup\x18\x04 \x03(\t\"\xad\x01\n\x12MergeTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x45\n\nsource_ids\x18\x02 
\x03(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x12\n\nkey_column\x18\x03 \x01(\t\"\x9a\x01\n\x14SnapshotTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\"\xb1\x02\n\x18SnapshotWhenTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07\x62\x61se_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x45\n\ntrigger_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x0f\n\x07initial\x18\x04 \x01(\x08\x12\x13\n\x0bincremental\x18\x05 \x01(\x08\x12\x0f\n\x07history\x18\x06 \x01(\x08\x12\x15\n\rstamp_columns\x18\x07 \x03(\t\"\xa7\x02\n\x16\x43rossJoinTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x18\n\x10\x63olumns_to_match\x18\x04 \x03(\t\x12\x16\n\x0e\x63olumns_to_add\x18\x05 \x03(\t\x12\x14\n\x0creserve_bits\x18\x06 \x01(\x05\"\x93\x02\n\x18NaturalJoinTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x18\n\x10\x63olumns_to_match\x18\x04 \x03(\t\x12\x16\n\x0e\x63olumns_to_add\x18\x05 \x03(\t\"\x91\x02\n\x16\x45xactJoinTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x18\n\x10\x63olumns_to_match\x18\x04 \x03(\t\x12\x16\n\x0e\x63olumns_to_add\x18\x05 \x03(\t\"\x90\x02\n\x15LeftJoinTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x18\n\x10\x63olumns_to_match\x18\x04 \x03(\t\x12\x16\n\x0e\x63olumns_to_add\x18\x05 \x03(\t\"\xd1\x03\n\x15\x41sOfJoinTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x18\n\x10\x63olumns_to_match\x18\x04 \x03(\t\x12\x16\n\x0e\x63olumns_to_add\x18\x05 \x03(\t\x12\\\n\x10\x61s_of_match_rule\x18\x07 \x01(\x0e\x32\x42.io.deephaven.proto.backplane.grpc.AsOfJoinTablesRequest.MatchRule\"]\n\tMatchRule\x12\x13\n\x0fLESS_THAN_EQUAL\x10\x00\x12\r\n\tLESS_THAN\x10\x01\x12\x16\n\x12GREATER_THAN_EQUAL\x10\x02\x12\x10\n\x0cGREATER_THAN\x10\x03\x1a\x02\x18\x01:\x02\x18\x01\"\xa6\x02\n\x12\x41jRajTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 
\x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x1b\n\x13\x65xact_match_columns\x18\x04 \x03(\t\x12\x14\n\x0c\x61s_of_column\x18\x05 \x01(\t\x12\x16\n\x0e\x63olumns_to_add\x18\x06 \x03(\t\"\xcb\x06\n\x16RangeJoinTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x1b\n\x13\x65xact_match_columns\x18\x04 \x03(\t\x12\x19\n\x11left_start_column\x18\x05 \x01(\t\x12\x62\n\x10range_start_rule\x18\x06 \x01(\x0e\x32H.io.deephaven.proto.backplane.grpc.RangeJoinTablesRequest.RangeStartRule\x12\x1a\n\x12right_range_column\x18\x07 \x01(\t\x12^\n\x0erange_end_rule\x18\x08 \x01(\x0e\x32\x46.io.deephaven.proto.backplane.grpc.RangeJoinTablesRequest.RangeEndRule\x12\x17\n\x0fleft_end_column\x18\t \x01(\t\x12\x44\n\x0c\x61ggregations\x18\n \x03(\x0b\x32..io.deephaven.proto.backplane.grpc.Aggregation\"v\n\x0eRangeStartRule\x12\x15\n\x11START_UNSPECIFIED\x10\x00\x12\r\n\tLESS_THAN\x10\x01\x12\x16\n\x12LESS_THAN_OR_EQUAL\x10\x02\x12&\n\"LESS_THAN_OR_EQUAL_ALLOW_PRECEDING\x10\x03\"{\n\x0cRangeEndRule\x12\x13\n\x0f\x45ND_UNSPECIFIED\x10\x00\x12\x10\n\x0cGREATER_THAN\x10\x01\x12\x19\n\x15GREATER_THAN_OR_EQUAL\x10\x02\x12)\n%GREATER_THAN_OR_EQUAL_ALLOW_FOLLOWING\x10\x03\"\xfe\x04\n\x15\x43omboAggregateRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12V\n\naggregates\x18\x03 \x03(\x0b\x32\x42.io.deephaven.proto.backplane.grpc.ComboAggregateRequest.Aggregate\x12\x18\n\x10group_by_columns\x18\x04 \x03(\t\x12\x13\n\x0b\x66orce_combo\x18\x05 \x01(\x08\x1a\xad\x01\n\tAggregate\x12N\n\x04type\x18\x01 \x01(\x0e\x32@.io.deephaven.proto.backplane.grpc.ComboAggregateRequest.AggType\x12\x13\n\x0bmatch_pairs\x18\x02 \x03(\t\x12\x13\n\x0b\x63olumn_name\x18\x03 \x01(\t\x12\x12\n\npercentile\x18\x04 \x01(\x01\x12\x12\n\navg_median\x18\x05 \x01(\x08\"\xa5\x01\n\x07\x41ggType\x12\x07\n\x03SUM\x10\x00\x12\x0b\n\x07\x41\x42S_SUM\x10\x01\x12\t\n\x05GROUP\x10\x02\x12\x07\n\x03\x41VG\x10\x03\x12\t\n\x05\x43OUNT\x10\x04\x12\t\n\x05\x46IRST\x10\x05\x12\x08\n\x04LAST\x10\x06\x12\x07\n\x03MIN\x10\x07\x12\x07\n\x03MAX\x10\x08\x12\n\n\x06MEDIAN\x10\t\x12\x0e\n\nPERCENTILE\x10\n\x12\x07\n\x03STD\x10\x0b\x12\x07\n\x03VAR\x10\x0c\x12\x10\n\x0cWEIGHTED_AVG\x10\r:\x02\x18\x01\"\xed\x01\n\x13\x41ggregateAllRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x38\n\x04spec\x18\x03 \x01(\x0b\x32*.io.deephaven.proto.backplane.grpc.AggSpec\x12\x18\n\x10group_by_columns\x18\x04 \x03(\t\"\xd7\x17\n\x07\x41ggSpec\x12K\n\x07\x61\x62s_sum\x18\x01 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecAbsSumH\x00\x12i\n\x16\x61pproximate_percentile\x18\x02 \x01(\x0b\x32G.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecApproximatePercentileH\x00\x12\x44\n\x03\x61vg\x18\x03 \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecAvgH\x00\x12Y\n\x0e\x63ount_distinct\x18\x04 \x01(\x0b\x32?.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecCountDistinctH\x00\x12N\n\x08\x64istinct\x18\x05 \x01(\x0b\x32:.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecDistinctH\x00\x12H\n\x05\x66irst\x18\x06 
\x01(\x0b\x32\x37.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecFirstH\x00\x12L\n\x07\x66ormula\x18\x07 \x01(\x0b\x32\x39.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecFormulaH\x00\x12J\n\x06\x66reeze\x18\x08 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecFreezeH\x00\x12H\n\x05group\x18\t \x01(\x0b\x32\x37.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecGroupH\x00\x12\x46\n\x04last\x18\n \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecLastH\x00\x12\x44\n\x03max\x18\x0b \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecMaxH\x00\x12J\n\x06median\x18\x0c \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecMedianH\x00\x12\x44\n\x03min\x18\r \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecMinH\x00\x12R\n\npercentile\x18\x0e \x01(\x0b\x32<.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecPercentileH\x00\x12P\n\x0csorted_first\x18\x0f \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecSortedH\x00\x12O\n\x0bsorted_last\x18\x10 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecSortedH\x00\x12\x44\n\x03std\x18\x11 \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecStdH\x00\x12\x44\n\x03sum\x18\x12 \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecSumH\x00\x12M\n\x08t_digest\x18\x13 \x01(\x0b\x32\x39.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecTDigestH\x00\x12J\n\x06unique\x18\x14 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecUniqueH\x00\x12R\n\x0cweighted_avg\x18\x15 \x01(\x0b\x32:.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecWeightedH\x00\x12R\n\x0cweighted_sum\x18\x16 \x01(\x0b\x32:.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecWeightedH\x00\x12\x44\n\x03var\x18\x17 \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecVarH\x00\x1a\\\n\x1c\x41ggSpecApproximatePercentile\x12\x12\n\npercentile\x18\x01 \x01(\x01\x12\x18\n\x0b\x63ompression\x18\x02 \x01(\x01H\x00\x88\x01\x01\x42\x0e\n\x0c_compression\x1a+\n\x14\x41ggSpecCountDistinct\x12\x13\n\x0b\x63ount_nulls\x18\x01 \x01(\x08\x1a(\n\x0f\x41ggSpecDistinct\x12\x15\n\rinclude_nulls\x18\x01 \x01(\x08\x1a\x36\n\x0e\x41ggSpecFormula\x12\x0f\n\x07\x66ormula\x18\x01 \x01(\t\x12\x13\n\x0bparam_token\x18\x02 \x01(\t\x1a/\n\rAggSpecMedian\x12\x1e\n\x16\x61verage_evenly_divided\x18\x01 \x01(\x08\x1aG\n\x11\x41ggSpecPercentile\x12\x12\n\npercentile\x18\x01 \x01(\x01\x12\x1e\n\x16\x61verage_evenly_divided\x18\x02 \x01(\x08\x1a`\n\rAggSpecSorted\x12O\n\x07\x63olumns\x18\x01 \x03(\x0b\x32>.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecSortedColumn\x1a*\n\x13\x41ggSpecSortedColumn\x12\x13\n\x0b\x63olumn_name\x18\x01 \x01(\t\x1a:\n\x0e\x41ggSpecTDigest\x12\x18\n\x0b\x63ompression\x18\x01 \x01(\x01H\x00\x88\x01\x01\x42\x0e\n\x0c_compression\x1a\x88\x01\n\rAggSpecUnique\x12\x15\n\rinclude_nulls\x18\x01 \x01(\x08\x12`\n\x13non_unique_sentinel\x18\x02 \x01(\x0b\x32\x43.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecNonUniqueSentinel\x1a\xb5\x02\n\x18\x41ggSpecNonUniqueSentinel\x12\x42\n\nnull_value\x18\x01 \x01(\x0e\x32,.io.deephaven.proto.backplane.grpc.NullValueH\x00\x12\x16\n\x0cstring_value\x18\x02 \x01(\tH\x00\x12\x13\n\tint_value\x18\x03 \x01(\x11H\x00\x12\x18\n\nlong_value\x18\x04 \x01(\x12\x42\x02\x30\x01H\x00\x12\x15\n\x0b\x66loat_value\x18\x05 \x01(\x02H\x00\x12\x16\n\x0c\x64ouble_value\x18\x06 \x01(\x01H\x00\x12\x14\n\nbool_value\x18\x07 \x01(\x08H\x00\x12\x14\n\nbyte_value\x18\x08 \x01(\x11H\x00\x12\x15\n\x0bshort_value\x18\t 
\x01(\x11H\x00\x12\x14\n\nchar_value\x18\n \x01(\x11H\x00\x42\x06\n\x04type\x1a(\n\x0f\x41ggSpecWeighted\x12\x15\n\rweight_column\x18\x01 \x01(\t\x1a\x0f\n\rAggSpecAbsSum\x1a\x0c\n\nAggSpecAvg\x1a\x0e\n\x0c\x41ggSpecFirst\x1a\x0f\n\rAggSpecFreeze\x1a\x0e\n\x0c\x41ggSpecGroup\x1a\r\n\x0b\x41ggSpecLast\x1a\x0c\n\nAggSpecMax\x1a\x0c\n\nAggSpecMin\x1a\x0c\n\nAggSpecStd\x1a\x0c\n\nAggSpecSum\x1a\x0c\n\nAggSpecVarB\x06\n\x04type\"\xdc\x02\n\x10\x41ggregateRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12L\n\x11initial_groups_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x16\n\x0epreserve_empty\x18\x04 \x01(\x08\x12\x44\n\x0c\x61ggregations\x18\x05 \x03(\x0b\x32..io.deephaven.proto.backplane.grpc.Aggregation\x12\x18\n\x10group_by_columns\x18\x06 \x03(\t\"\xd3\x05\n\x0b\x41ggregation\x12T\n\x07\x63olumns\x18\x01 \x01(\x0b\x32\x41.io.deephaven.proto.backplane.grpc.Aggregation.AggregationColumnsH\x00\x12P\n\x05\x63ount\x18\x02 \x01(\x0b\x32?.io.deephaven.proto.backplane.grpc.Aggregation.AggregationCountH\x00\x12Y\n\rfirst_row_key\x18\x03 \x01(\x0b\x32@.io.deephaven.proto.backplane.grpc.Aggregation.AggregationRowKeyH\x00\x12X\n\x0clast_row_key\x18\x04 \x01(\x0b\x32@.io.deephaven.proto.backplane.grpc.Aggregation.AggregationRowKeyH\x00\x12X\n\tpartition\x18\x05 \x01(\x0b\x32\x43.io.deephaven.proto.backplane.grpc.Aggregation.AggregationPartitionH\x00\x1a\x63\n\x12\x41ggregationColumns\x12\x38\n\x04spec\x18\x01 \x01(\x0b\x32*.io.deephaven.proto.backplane.grpc.AggSpec\x12\x13\n\x0bmatch_pairs\x18\x02 \x03(\t\x1a\'\n\x10\x41ggregationCount\x12\x13\n\x0b\x63olumn_name\x18\x01 \x01(\t\x1a(\n\x11\x41ggregationRowKey\x12\x13\n\x0b\x63olumn_name\x18\x01 \x01(\t\x1aM\n\x14\x41ggregationPartition\x12\x13\n\x0b\x63olumn_name\x18\x01 \x01(\t\x12 \n\x18include_group_by_columns\x18\x02 \x01(\x08\x42\x06\n\x04type\"\xe1\x01\n\x0eSortDescriptor\x12\x13\n\x0b\x63olumn_name\x18\x01 \x01(\t\x12\x13\n\x0bis_absolute\x18\x02 \x01(\x08\x12R\n\tdirection\x18\x03 \x01(\x0e\x32?.io.deephaven.proto.backplane.grpc.SortDescriptor.SortDirection\"Q\n\rSortDirection\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x17\n\nDESCENDING\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x12\r\n\tASCENDING\x10\x01\x12\x0b\n\x07REVERSE\x10\x02\"\xd8\x01\n\x10SortTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12@\n\x05sorts\x18\x03 \x03(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.SortDescriptor\"\xd7\x01\n\x12\x46ilterTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12=\n\x07\x66ilters\x18\x03 \x03(\x0b\x32,.io.deephaven.proto.backplane.grpc.Condition\"\xf9\x01\n\x0eSeekRowRequest\x12<\n\tsource_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x18\n\x0cstarting_row\x18\x02 \x01(\x12\x42\x02\x30\x01\x12\x13\n\x0b\x63olumn_name\x18\x03 \x01(\t\x12>\n\nseek_value\x18\x04 \x01(\x0b\x32*.io.deephaven.proto.backplane.grpc.Literal\x12\x13\n\x0binsensitive\x18\x05 \x01(\x08\x12\x10\n\x08\x63ontains\x18\x06 \x01(\x08\x12\x13\n\x0bis_backward\x18\x07 \x01(\x08\")\n\x0fSeekRowResponse\x12\x16\n\nresult_row\x18\x01 \x01(\x12\x42\x02\x30\x01\" \n\tReference\x12\x13\n\x0b\x63olumn_name\x18\x01 
\x01(\t\"\x91\x01\n\x07Literal\x12\x16\n\x0cstring_value\x18\x01 \x01(\tH\x00\x12\x16\n\x0c\x64ouble_value\x18\x02 \x01(\x01H\x00\x12\x14\n\nbool_value\x18\x03 \x01(\x08H\x00\x12\x18\n\nlong_value\x18\x04 \x01(\x12\x42\x02\x30\x01H\x00\x12\x1d\n\x0fnano_time_value\x18\x05 \x01(\x12\x42\x02\x30\x01H\x00\x42\x07\n\x05value\"\x91\x01\n\x05Value\x12\x41\n\treference\x18\x01 \x01(\x0b\x32,.io.deephaven.proto.backplane.grpc.ReferenceH\x00\x12=\n\x07literal\x18\x02 \x01(\x0b\x32*.io.deephaven.proto.backplane.grpc.LiteralH\x00\x42\x06\n\x04\x64\x61ta\"\xbc\x05\n\tCondition\x12>\n\x03\x61nd\x18\x01 \x01(\x0b\x32/.io.deephaven.proto.backplane.grpc.AndConditionH\x00\x12<\n\x02or\x18\x02 \x01(\x0b\x32..io.deephaven.proto.backplane.grpc.OrConditionH\x00\x12>\n\x03not\x18\x03 \x01(\x0b\x32/.io.deephaven.proto.backplane.grpc.NotConditionH\x00\x12\x46\n\x07\x63ompare\x18\x04 \x01(\x0b\x32\x33.io.deephaven.proto.backplane.grpc.CompareConditionH\x00\x12<\n\x02in\x18\x05 \x01(\x0b\x32..io.deephaven.proto.backplane.grpc.InConditionH\x00\x12\x44\n\x06invoke\x18\x06 \x01(\x0b\x32\x32.io.deephaven.proto.backplane.grpc.InvokeConditionH\x00\x12\x45\n\x07is_null\x18\x07 \x01(\x0b\x32\x32.io.deephaven.proto.backplane.grpc.IsNullConditionH\x00\x12\x46\n\x07matches\x18\x08 \x01(\x0b\x32\x33.io.deephaven.proto.backplane.grpc.MatchesConditionH\x00\x12H\n\x08\x63ontains\x18\t \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.ContainsConditionH\x00\x12\x44\n\x06search\x18\n \x01(\x0b\x32\x32.io.deephaven.proto.backplane.grpc.SearchConditionH\x00\x42\x06\n\x04\x64\x61ta\"M\n\x0c\x41ndCondition\x12=\n\x07\x66ilters\x18\x01 \x03(\x0b\x32,.io.deephaven.proto.backplane.grpc.Condition\"L\n\x0bOrCondition\x12=\n\x07\x66ilters\x18\x01 \x03(\x0b\x32,.io.deephaven.proto.backplane.grpc.Condition\"L\n\x0cNotCondition\x12<\n\x06\x66ilter\x18\x01 \x01(\x0b\x32,.io.deephaven.proto.backplane.grpc.Condition\"\xac\x03\n\x10\x43ompareCondition\x12W\n\toperation\x18\x01 \x01(\x0e\x32\x44.io.deephaven.proto.backplane.grpc.CompareCondition.CompareOperation\x12L\n\x10\x63\x61se_sensitivity\x18\x02 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.CaseSensitivity\x12\x35\n\x03lhs\x18\x03 \x01(\x0b\x32(.io.deephaven.proto.backplane.grpc.Value\x12\x35\n\x03rhs\x18\x04 \x01(\x0b\x32(.io.deephaven.proto.backplane.grpc.Value\"\x82\x01\n\x10\x43ompareOperation\x12\r\n\tLESS_THAN\x10\x00\x12\x16\n\x12LESS_THAN_OR_EQUAL\x10\x01\x12\x10\n\x0cGREATER_THAN\x10\x02\x12\x19\n\x15GREATER_THAN_OR_EQUAL\x10\x03\x12\n\n\x06\x45QUALS\x10\x04\x12\x0e\n\nNOT_EQUALS\x10\x05\"\x95\x02\n\x0bInCondition\x12\x38\n\x06target\x18\x01 \x01(\x0b\x32(.io.deephaven.proto.backplane.grpc.Value\x12<\n\ncandidates\x18\x02 \x03(\x0b\x32(.io.deephaven.proto.backplane.grpc.Value\x12L\n\x10\x63\x61se_sensitivity\x18\x03 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.CaseSensitivity\x12@\n\nmatch_type\x18\x04 \x01(\x0e\x32,.io.deephaven.proto.backplane.grpc.MatchType\"\x98\x01\n\x0fInvokeCondition\x12\x0e\n\x06method\x18\x01 \x01(\t\x12\x38\n\x06target\x18\x02 \x01(\x0b\x32(.io.deephaven.proto.backplane.grpc.Value\x12;\n\targuments\x18\x03 \x03(\x0b\x32(.io.deephaven.proto.backplane.grpc.Value\"R\n\x0fIsNullCondition\x12?\n\treference\x18\x01 \x01(\x0b\x32,.io.deephaven.proto.backplane.grpc.Reference\"\xf2\x01\n\x10MatchesCondition\x12?\n\treference\x18\x01 \x01(\x0b\x32,.io.deephaven.proto.backplane.grpc.Reference\x12\r\n\x05regex\x18\x02 \x01(\t\x12L\n\x10\x63\x61se_sensitivity\x18\x03 
\x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.CaseSensitivity\x12@\n\nmatch_type\x18\x04 \x01(\x0e\x32,.io.deephaven.proto.backplane.grpc.MatchType\"\xfb\x01\n\x11\x43ontainsCondition\x12?\n\treference\x18\x01 \x01(\x0b\x32,.io.deephaven.proto.backplane.grpc.Reference\x12\x15\n\rsearch_string\x18\x02 \x01(\t\x12L\n\x10\x63\x61se_sensitivity\x18\x03 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.CaseSensitivity\x12@\n\nmatch_type\x18\x04 \x01(\x0e\x32,.io.deephaven.proto.backplane.grpc.MatchType\"s\n\x0fSearchCondition\x12\x15\n\rsearch_string\x18\x01 \x01(\t\x12I\n\x13optional_references\x18\x02 \x03(\x0b\x32,.io.deephaven.proto.backplane.grpc.Reference\"\x94\x01\n\x0e\x46lattenRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\"\x96\x01\n\x10MetaTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\"\xb4\x03\n\x19RunChartDownsampleRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x13\n\x0bpixel_count\x18\x03 \x01(\x05\x12Z\n\nzoom_range\x18\x04 \x01(\x0b\x32\x46.io.deephaven.proto.backplane.grpc.RunChartDownsampleRequest.ZoomRange\x12\x15\n\rx_column_name\x18\x05 \x01(\t\x12\x16\n\x0ey_column_names\x18\x06 \x03(\t\x1as\n\tZoomRange\x12\x1f\n\x0emin_date_nanos\x18\x01 \x01(\x03\x42\x02\x30\x01H\x00\x88\x01\x01\x12\x1f\n\x0emax_date_nanos\x18\x02 \x01(\x03\x42\x02\x30\x01H\x01\x88\x01\x01\x42\x11\n\x0f_min_date_nanosB\x11\n\x0f_max_date_nanos\"\xf5\x04\n\x17\x43reateInputTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12L\n\x0fsource_table_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReferenceH\x00\x12\x10\n\x06schema\x18\x03 \x01(\x0cH\x00\x12W\n\x04kind\x18\x04 \x01(\x0b\x32I.io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind\x1a\xd4\x02\n\x0eInputTableKind\x12}\n\x15in_memory_append_only\x18\x01 \x01(\x0b\x32\\.io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.InMemoryAppendOnlyH\x00\x12{\n\x14in_memory_key_backed\x18\x02 \x01(\x0b\x32[.io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.InMemoryKeyBackedH\x00\x1a\x14\n\x12InMemoryAppendOnly\x1a(\n\x11InMemoryKeyBacked\x12\x13\n\x0bkey_columns\x18\x01 \x03(\tB\x06\n\x04kindB\x0c\n\ndefinition\"\x83\x02\n\x0eWhereInRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x10\n\x08inverted\x18\x04 \x01(\x08\x12\x18\n\x10\x63olumns_to_match\x18\x05 \x03(\t\"\xea\x01\n\x17\x43olumnStatisticsRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x13\n\x0b\x63olumn_name\x18\x03 \x01(\t\x12\x1f\n\x12unique_value_limit\x18\x04 \x01(\x05H\x00\x88\x01\x01\x42\x15\n\x13_unique_value_limit\"\xc8\x19\n\x11\x42\x61tchTableRequest\x12K\n\x03ops\x18\x01 
\x03(\x0b\x32>.io.deephaven.proto.backplane.grpc.BatchTableRequest.Operation\x1a\xe5\x18\n\tOperation\x12K\n\x0b\x65mpty_table\x18\x01 \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.EmptyTableRequestH\x00\x12I\n\ntime_table\x18\x02 \x01(\x0b\x32\x33.io.deephaven.proto.backplane.grpc.TimeTableRequestH\x00\x12M\n\x0c\x64rop_columns\x18\x03 \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.DropColumnsRequestH\x00\x12J\n\x06update\x18\x04 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequestH\x00\x12O\n\x0blazy_update\x18\x05 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequestH\x00\x12H\n\x04view\x18\x06 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequestH\x00\x12O\n\x0bupdate_view\x18\x07 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequestH\x00\x12J\n\x06select\x18\x08 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequestH\x00\x12S\n\x0fselect_distinct\x18\t \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.SelectDistinctRequestH\x00\x12G\n\x06\x66ilter\x18\n \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.FilterTableRequestH\x00\x12`\n\x13unstructured_filter\x18\x0b \x01(\x0b\x32\x41.io.deephaven.proto.backplane.grpc.UnstructuredFilterTableRequestH\x00\x12\x43\n\x04sort\x18\x0c \x01(\x0b\x32\x33.io.deephaven.proto.backplane.grpc.SortTableRequestH\x00\x12\x44\n\x04head\x18\r \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.HeadOrTailRequestH\x00\x12\x44\n\x04tail\x18\x0e \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.HeadOrTailRequestH\x00\x12I\n\x07head_by\x18\x0f \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.HeadOrTailByRequestH\x00\x12I\n\x07tail_by\x18\x10 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.HeadOrTailByRequestH\x00\x12\x44\n\x07ungroup\x18\x11 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.UngroupRequestH\x00\x12\x46\n\x05merge\x18\x12 \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.MergeTablesRequestH\x00\x12S\n\x0f\x63ombo_aggregate\x18\x13 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.ComboAggregateRequestH\x00\x12\x44\n\x07\x66latten\x18\x15 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.FlattenRequestH\x00\x12\\\n\x14run_chart_downsample\x18\x16 \x01(\x0b\x32<.io.deephaven.proto.backplane.grpc.RunChartDownsampleRequestH\x00\x12O\n\ncross_join\x18\x17 \x01(\x0b\x32\x39.io.deephaven.proto.backplane.grpc.CrossJoinTablesRequestH\x00\x12S\n\x0cnatural_join\x18\x18 \x01(\x0b\x32;.io.deephaven.proto.backplane.grpc.NaturalJoinTablesRequestH\x00\x12O\n\nexact_join\x18\x19 \x01(\x0b\x32\x39.io.deephaven.proto.backplane.grpc.ExactJoinTablesRequestH\x00\x12M\n\tleft_join\x18\x1a \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.LeftJoinTablesRequestH\x00\x12R\n\nas_of_join\x18\x1b \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.AsOfJoinTablesRequestB\x02\x18\x01H\x00\x12K\n\x0b\x66\x65tch_table\x18\x1c \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.FetchTableRequestH\x00\x12^\n\x15\x61pply_preview_columns\x18\x1e \x01(\x0b\x32=.io.deephaven.proto.backplane.grpc.ApplyPreviewColumnsRequestH\x00\x12X\n\x12\x63reate_input_table\x18\x1f \x01(\x0b\x32:.io.deephaven.proto.backplane.grpc.CreateInputTableRequestH\x00\x12G\n\tupdate_by\x18 \x01(\x0b\x32\x32.io.deephaven.proto.backplane.grpc.UpdateByRequestH\x00\x12\x45\n\x08where_in\x18! 
\x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.WhereInRequestH\x00\x12O\n\raggregate_all\x18\" \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.AggregateAllRequestH\x00\x12H\n\taggregate\x18# \x01(\x0b\x32\x33.io.deephaven.proto.backplane.grpc.AggregateRequestH\x00\x12K\n\x08snapshot\x18$ \x01(\x0b\x32\x37.io.deephaven.proto.backplane.grpc.SnapshotTableRequestH\x00\x12T\n\rsnapshot_when\x18% \x01(\x0b\x32;.io.deephaven.proto.backplane.grpc.SnapshotWhenTableRequestH\x00\x12I\n\nmeta_table\x18& \x01(\x0b\x32\x33.io.deephaven.proto.backplane.grpc.MetaTableRequestH\x00\x12O\n\nrange_join\x18\' \x01(\x0b\x32\x39.io.deephaven.proto.backplane.grpc.RangeJoinTablesRequestH\x00\x12\x43\n\x02\x61j\x18( \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AjRajTablesRequestH\x00\x12\x44\n\x03raj\x18) \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AjRajTablesRequestH\x00\x12W\n\x11\x63olumn_statistics\x18* \x01(\x0b\x32:.io.deephaven.proto.backplane.grpc.ColumnStatisticsRequestH\x00\x42\x04\n\x02opJ\x04\x08\x14\x10\x15J\x04\x08\x1d\x10\x1e*b\n\x0f\x42\x61\x64\x44\x61taBehavior\x12#\n\x1f\x42\x41\x44_DATA_BEHAVIOR_NOT_SPECIFIED\x10\x00\x12\t\n\x05THROW\x10\x01\x12\t\n\x05RESET\x10\x02\x12\x08\n\x04SKIP\x10\x03\x12\n\n\x06POISON\x10\x04*t\n\x14UpdateByNullBehavior\x12\x1f\n\x1bNULL_BEHAVIOR_NOT_SPECIFIED\x10\x00\x12\x12\n\x0eNULL_DOMINATES\x10\x01\x12\x13\n\x0fVALUE_DOMINATES\x10\x02\x12\x12\n\x0eZERO_DOMINATES\x10\x03*\x1b\n\tNullValue\x12\x0e\n\nNULL_VALUE\x10\x00*2\n\x0f\x43\x61seSensitivity\x12\x0e\n\nMATCH_CASE\x10\x00\x12\x0f\n\x0bIGNORE_CASE\x10\x01*&\n\tMatchType\x12\x0b\n\x07REGULAR\x10\x00\x12\x0c\n\x08INVERTED\x10\x01\x32\xa8\x30\n\x0cTableService\x12\x91\x01\n GetExportedTableCreationResponse\x12).io.deephaven.proto.backplane.grpc.Ticket\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x86\x01\n\nFetchTable\x12\x34.io.deephaven.proto.backplane.grpc.FetchTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x98\x01\n\x13\x41pplyPreviewColumns\x12=.io.deephaven.proto.backplane.grpc.ApplyPreviewColumnsRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x86\x01\n\nEmptyTable\x12\x34.io.deephaven.proto.backplane.grpc.EmptyTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x84\x01\n\tTimeTable\x12\x33.io.deephaven.proto.backplane.grpc.TimeTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x88\x01\n\x0b\x44ropColumns\x12\x35.io.deephaven.proto.backplane.grpc.DropColumnsRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x86\x01\n\x06Update\x12\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x8a\x01\n\nLazyUpdate\x12\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x84\x01\n\x04View\x12\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x8a\x01\n\nUpdateView\x12\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x86\x01\n\x06Select\x12\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x82\x01\n\x08UpdateBy\x12\x32.io.deephaven.proto.bac
kplane.grpc.UpdateByRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x8e\x01\n\x0eSelectDistinct\x12\x38.io.deephaven.proto.backplane.grpc.SelectDistinctRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x83\x01\n\x06\x46ilter\x12\x35.io.deephaven.proto.backplane.grpc.FilterTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x9b\x01\n\x12UnstructuredFilter\x12\x41.io.deephaven.proto.backplane.grpc.UnstructuredFilterTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x7f\n\x04Sort\x12\x33.io.deephaven.proto.backplane.grpc.SortTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x80\x01\n\x04Head\x12\x34.io.deephaven.proto.backplane.grpc.HeadOrTailRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x80\x01\n\x04Tail\x12\x34.io.deephaven.proto.backplane.grpc.HeadOrTailRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x84\x01\n\x06HeadBy\x12\x36.io.deephaven.proto.backplane.grpc.HeadOrTailByRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x84\x01\n\x06TailBy\x12\x36.io.deephaven.proto.backplane.grpc.HeadOrTailByRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x80\x01\n\x07Ungroup\x12\x31.io.deephaven.proto.backplane.grpc.UngroupRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x88\x01\n\x0bMergeTables\x12\x35.io.deephaven.proto.backplane.grpc.MergeTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x90\x01\n\x0f\x43rossJoinTables\x12\x39.io.deephaven.proto.backplane.grpc.CrossJoinTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x94\x01\n\x11NaturalJoinTables\x12;.io.deephaven.proto.backplane.grpc.NaturalJoinTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x90\x01\n\x0f\x45xactJoinTables\x12\x39.io.deephaven.proto.backplane.grpc.ExactJoinTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x8e\x01\n\x0eLeftJoinTables\x12\x38.io.deephaven.proto.backplane.grpc.LeftJoinTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x91\x01\n\x0e\x41sOfJoinTables\x12\x38.io.deephaven.proto.backplane.grpc.AsOfJoinTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x03\x88\x02\x01\x12\x85\x01\n\x08\x41jTables\x12\x35.io.deephaven.proto.backplane.grpc.AjRajTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x86\x01\n\tRajTables\x12\x35.io.deephaven.proto.backplane.grpc.AjRajTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x90\x01\n\x0fRangeJoinTables\x12\x39.io.deephaven.proto.backplane.grpc.RangeJoinTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x91\x01\n\x0e\x43omboAggregate\x12\x38.io.deephaven.proto.backplane.grpc.ComboAggregateRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x03\x88\x02\x01\x12\x8a\x01\n\x0c\x41ggregateAll\x12\x36.io.deephaven.proto.backplane.grpc.AggregateAllRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x84\x01\n\tAggregate\x12\x33.io.deephaven.proto.backplane.grpc.AggregateRequest\x1
a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x87\x01\n\x08Snapshot\x12\x37.io.deephaven.proto.backplane.grpc.SnapshotTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x8f\x01\n\x0cSnapshotWhen\x12;.io.deephaven.proto.backplane.grpc.SnapshotWhenTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x80\x01\n\x07\x46latten\x12\x31.io.deephaven.proto.backplane.grpc.FlattenRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x96\x01\n\x12RunChartDownsample\x12<.io.deephaven.proto.backplane.grpc.RunChartDownsampleRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x92\x01\n\x10\x43reateInputTable\x12:.io.deephaven.proto.backplane.grpc.CreateInputTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x80\x01\n\x07WhereIn\x12\x31.io.deephaven.proto.backplane.grpc.WhereInRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x83\x01\n\x05\x42\x61tch\x12\x34.io.deephaven.proto.backplane.grpc.BatchTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x30\x01\x12\x99\x01\n\x14\x45xportedTableUpdates\x12>.io.deephaven.proto.backplane.grpc.ExportedTableUpdatesRequest\x1a=.io.deephaven.proto.backplane.grpc.ExportedTableUpdateMessage\"\x00\x30\x01\x12r\n\x07SeekRow\x12\x31.io.deephaven.proto.backplane.grpc.SeekRowRequest\x1a\x32.io.deephaven.proto.backplane.grpc.SeekRowResponse\"\x00\x12\x84\x01\n\tMetaTable\x12\x33.io.deephaven.proto.backplane.grpc.MetaTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x99\x01\n\x17\x43omputeColumnStatistics\x12:.io.deephaven.proto.backplane.grpc.ColumnStatisticsRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x42\x41H\x01P\x01Z;github.com/deephaven/deephaven-core/go/internal/proto/tableb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1b\x64\x65\x65phaven/proto/table.proto\x12!io.deephaven.proto.backplane.grpc\x1a\x1c\x64\x65\x65phaven/proto/ticket.proto\"l\n\x0eTableReference\x12;\n\x06ticket\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.TicketH\x00\x12\x16\n\x0c\x62\x61tch_offset\x18\x02 \x01(\x11H\x00\x42\x05\n\x03ref\"\xc6\x01\n\x1d\x45xportedTableCreationResponse\x12\x44\n\tresult_id\x18\x01 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\nerror_info\x18\x03 \x01(\t\x12\x15\n\rschema_header\x18\x04 \x01(\x0c\x12\x11\n\tis_static\x18\x05 \x01(\x08\x12\x10\n\x04size\x18\x06 \x01(\x12\x42\x02\x30\x01\"\x97\x01\n\x11\x46\x65tchTableRequest\x12\x44\n\tsource_id\x18\x01 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12<\n\tresult_id\x18\x02 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\"\xa0\x01\n\x1a\x41pplyPreviewColumnsRequest\x12\x44\n\tsource_id\x18\x01 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12<\n\tresult_id\x18\x02 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\"\x1d\n\x1b\x45xportedTableUpdatesRequest\"\x8c\x01\n\x1a\x45xportedTableUpdateMessage\x12<\n\texport_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x10\n\x04size\x18\x02 \x01(\x12\x42\x02\x30\x01\x12\x1e\n\x16update_failure_message\x18\x03 \x01(\t\"c\n\x11\x45mptyTableRequest\x12<\n\tresult_id\x18\x01 
\x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x10\n\x04size\x18\x02 \x01(\x12\x42\x02\x30\x01\"\xef\x01\n\x10TimeTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x1e\n\x10start_time_nanos\x18\x02 \x01(\x12\x42\x02\x30\x01H\x00\x12\x1b\n\x11start_time_string\x18\x05 \x01(\tH\x00\x12\x1a\n\x0cperiod_nanos\x18\x03 \x01(\x12\x42\x02\x30\x01H\x01\x12\x17\n\rperiod_string\x18\x06 \x01(\tH\x01\x12\x13\n\x0b\x62link_table\x18\x04 \x01(\x08\x42\x0c\n\nstart_timeB\x08\n\x06period\"\xb1\x01\n\x15SelectOrUpdateRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x14\n\x0c\x63olumn_specs\x18\x03 \x03(\t\"\x8c\x02\n\x0bMathContext\x12\x11\n\tprecision\x18\x01 \x01(\x11\x12R\n\rrounding_mode\x18\x02 \x01(\x0e\x32;.io.deephaven.proto.backplane.grpc.MathContext.RoundingMode\"\x95\x01\n\x0cRoundingMode\x12\x1f\n\x1bROUNDING_MODE_NOT_SPECIFIED\x10\x00\x12\x06\n\x02UP\x10\x01\x12\x08\n\x04\x44OWN\x10\x02\x12\x0b\n\x07\x43\x45ILING\x10\x03\x12\t\n\x05\x46LOOR\x10\x04\x12\x0b\n\x07HALF_UP\x10\x05\x12\r\n\tHALF_DOWN\x10\x06\x12\r\n\tHALF_EVEN\x10\x07\x12\x0f\n\x0bUNNECESSARY\x10\x08\"\xdb\x02\n\x13UpdateByWindowScale\x12[\n\x05ticks\x18\x01 \x01(\x0b\x32J.io.deephaven.proto.backplane.grpc.UpdateByWindowScale.UpdateByWindowTicksH\x00\x12Y\n\x04time\x18\x02 \x01(\x0b\x32I.io.deephaven.proto.backplane.grpc.UpdateByWindowScale.UpdateByWindowTimeH\x00\x1a$\n\x13UpdateByWindowTicks\x12\r\n\x05ticks\x18\x01 \x01(\x01\x1a^\n\x12UpdateByWindowTime\x12\x0e\n\x06\x63olumn\x18\x01 \x01(\t\x12\x13\n\x05nanos\x18\x02 \x01(\x12\x42\x02\x30\x01H\x00\x12\x19\n\x0f\x64uration_string\x18\x03 \x01(\tH\x00\x42\x08\n\x06windowB\x06\n\x04type\"\xe1\x03\n\x11UpdateByEmOptions\x12I\n\ron_null_value\x18\x01 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.BadDataBehavior\x12H\n\x0con_nan_value\x18\x02 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.BadDataBehavior\x12H\n\x0con_null_time\x18\x03 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.BadDataBehavior\x12R\n\x16on_negative_delta_time\x18\x04 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.BadDataBehavior\x12N\n\x12on_zero_delta_time\x18\x05 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.BadDataBehavior\x12I\n\x11\x62ig_value_context\x18\x06 \x01(\x0b\x32..io.deephaven.proto.backplane.grpc.MathContext\"f\n\x14UpdateByDeltaOptions\x12N\n\rnull_behavior\x18\x01 \x01(\x0e\x32\x37.io.deephaven.proto.backplane.grpc.UpdateByNullBehavior\"\x99\x34\n\x0fUpdateByRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12S\n\x07options\x18\x03 \x01(\x0b\x32\x42.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOptions\x12X\n\noperations\x18\x04 \x03(\x0b\x32\x44.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation\x12\x18\n\x10group_by_columns\x18\x05 \x03(\t\x1a\xc3\x03\n\x0fUpdateByOptions\x12\x1c\n\x0fuse_redirection\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x1b\n\x0e\x63hunk_capacity\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12.\n!max_static_sparse_memory_overhead\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12$\n\x17initial_hash_table_size\x18\x04 \x01(\x05H\x03\x88\x01\x01\x12 \n\x13maximum_load_factor\x18\x05 \x01(\x01H\x04\x88\x01\x01\x12\x1f\n\x12target_load_factor\x18\x06 \x01(\x01H\x05\x88\x01\x01\x12\x44\n\x0cmath_context\x18\x07 
\x01(\x0b\x32..io.deephaven.proto.backplane.grpc.MathContextB\x12\n\x10_use_redirectionB\x11\n\x0f_chunk_capacityB$\n\"_max_static_sparse_memory_overheadB\x1a\n\x18_initial_hash_table_sizeB\x16\n\x14_maximum_load_factorB\x15\n\x13_target_load_factor\x1a\xf2-\n\x11UpdateByOperation\x12\x65\n\x06\x63olumn\x18\x01 \x01(\x0b\x32S.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumnH\x00\x1a\xed,\n\x0eUpdateByColumn\x12n\n\x04spec\x18\x01 \x01(\x0b\x32`.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec\x12\x13\n\x0bmatch_pairs\x18\x02 \x03(\t\x1a\xd5+\n\x0cUpdateBySpec\x12\x85\x01\n\x03sum\x18\x01 \x01(\x0b\x32v.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByCumulativeSumH\x00\x12\x85\x01\n\x03min\x18\x02 \x01(\x0b\x32v.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByCumulativeMinH\x00\x12\x85\x01\n\x03max\x18\x03 \x01(\x0b\x32v.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByCumulativeMaxH\x00\x12\x8d\x01\n\x07product\x18\x04 \x01(\x0b\x32z.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByCumulativeProductH\x00\x12}\n\x04\x66ill\x18\x05 \x01(\x0b\x32m.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByFillH\x00\x12{\n\x03\x65ma\x18\x06 \x01(\x0b\x32l.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByEmaH\x00\x12\x8a\x01\n\x0brolling_sum\x18\x07 \x01(\x0b\x32s.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingSumH\x00\x12\x8e\x01\n\rrolling_group\x18\x08 \x01(\x0b\x32u.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingGroupH\x00\x12\x8a\x01\n\x0brolling_avg\x18\t \x01(\x0b\x32s.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingAvgH\x00\x12\x8a\x01\n\x0brolling_min\x18\n \x01(\x0b\x32s.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingMinH\x00\x12\x8a\x01\n\x0brolling_max\x18\x0b \x01(\x0b\x32s.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingMaxH\x00\x12\x92\x01\n\x0frolling_product\x18\x0c \x01(\x0b\x32w.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingProductH\x00\x12\x7f\n\x05\x64\x65lta\x18\r \x01(\x0b\x32n.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByDeltaH\x00\x12{\n\x03\x65ms\x18\x0e \x01(\x0b\x32l.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByEmsH\x00\x12\x80\x01\n\x06\x65m_min\x18\x0f \x01(\x0b\x32n.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByEmMinH\x00\x12\x80\x01\n\x06\x65m_max\x18\x10 \x01(\x0b\x32n.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByEmMaxH\x00\x12\x80\x01\n\x06\x65m_std\x18\x11 \x01(\x0b\x32n.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByEmStdH\x00\x12\x8e\x01\n\rrolling_count\x18\x12 
\x01(\x0b\x32u.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingCountH\x00\x12\x8a\x01\n\x0brolling_std\x18\x13 \x01(\x0b\x32s.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingStdH\x00\x12\x8c\x01\n\x0crolling_wavg\x18\x14 \x01(\x0b\x32t.io.deephaven.proto.backplane.grpc.UpdateByRequest.UpdateByOperation.UpdateByColumn.UpdateBySpec.UpdateByRollingWAvgH\x00\x1a\x17\n\x15UpdateByCumulativeSum\x1a\x17\n\x15UpdateByCumulativeMin\x1a\x17\n\x15UpdateByCumulativeMax\x1a\x1b\n\x19UpdateByCumulativeProduct\x1a\x0e\n\x0cUpdateByFill\x1a\xa2\x01\n\x0bUpdateByEma\x12\x45\n\x07options\x18\x01 \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.UpdateByEmOptions\x12L\n\x0cwindow_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xa2\x01\n\x0bUpdateByEms\x12\x45\n\x07options\x18\x01 \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.UpdateByEmOptions\x12L\n\x0cwindow_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xa4\x01\n\rUpdateByEmMin\x12\x45\n\x07options\x18\x01 \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.UpdateByEmOptions\x12L\n\x0cwindow_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xa4\x01\n\rUpdateByEmMax\x12\x45\n\x07options\x18\x01 \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.UpdateByEmOptions\x12L\n\x0cwindow_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xa4\x01\n\rUpdateByEmStd\x12\x45\n\x07options\x18\x01 \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.UpdateByEmOptions\x12L\n\x0cwindow_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1aY\n\rUpdateByDelta\x12H\n\x07options\x18\x01 \x01(\x0b\x32\x37.io.deephaven.proto.backplane.grpc.UpdateByDeltaOptions\x1a\xc0\x01\n\x12UpdateByRollingSum\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xc2\x01\n\x14UpdateByRollingGroup\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xc0\x01\n\x12UpdateByRollingAvg\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xc0\x01\n\x12UpdateByRollingMin\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xc0\x01\n\x12UpdateByRollingMax\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xc4\x01\n\x16UpdateByRollingProduct\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 
\x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xc2\x01\n\x14UpdateByRollingCount\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xc0\x01\n\x12UpdateByRollingStd\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x1a\xd8\x01\n\x13UpdateByRollingWAvg\x12T\n\x14reverse_window_scale\x18\x01 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12T\n\x14\x66orward_window_scale\x18\x02 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.UpdateByWindowScale\x12\x15\n\rweight_column\x18\x03 \x01(\tB\x06\n\x04typeB\x06\n\x04type\"\xb1\x01\n\x15SelectDistinctRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x14\n\x0c\x63olumn_names\x18\x03 \x03(\t\"\xae\x01\n\x12\x44ropColumnsRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x14\n\x0c\x63olumn_names\x18\x03 \x03(\t\"\xb5\x01\n\x1eUnstructuredFilterTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x0f\n\x07\x66ilters\x18\x03 \x03(\t\"\xad\x01\n\x11HeadOrTailRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x14\n\x08num_rows\x18\x03 \x01(\x12\x42\x02\x30\x01\"\xce\x01\n\x13HeadOrTailByRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x14\n\x08num_rows\x18\x03 \x01(\x12\x42\x02\x30\x01\x12\x1d\n\x15group_by_column_specs\x18\x04 \x03(\t\"\xc3\x01\n\x0eUngroupRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x11\n\tnull_fill\x18\x03 \x01(\x08\x12\x1a\n\x12\x63olumns_to_ungroup\x18\x04 \x03(\t\"\xad\x01\n\x12MergeTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x45\n\nsource_ids\x18\x02 \x03(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x12\n\nkey_column\x18\x03 \x01(\t\"\x9a\x01\n\x14SnapshotTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\"\xb1\x02\n\x18SnapshotWhenTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07\x62\x61se_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x45\n\ntrigger_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x0f\n\x07initial\x18\x04 \x01(\x08\x12\x13\n\x0bincremental\x18\x05 \x01(\x08\x12\x0f\n\x07history\x18\x06 \x01(\x08\x12\x15\n\rstamp_columns\x18\x07 
\x03(\t\"\xa7\x02\n\x16\x43rossJoinTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x18\n\x10\x63olumns_to_match\x18\x04 \x03(\t\x12\x16\n\x0e\x63olumns_to_add\x18\x05 \x03(\t\x12\x14\n\x0creserve_bits\x18\x06 \x01(\x05\"\x93\x02\n\x18NaturalJoinTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x18\n\x10\x63olumns_to_match\x18\x04 \x03(\t\x12\x16\n\x0e\x63olumns_to_add\x18\x05 \x03(\t\"\x91\x02\n\x16\x45xactJoinTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x18\n\x10\x63olumns_to_match\x18\x04 \x03(\t\x12\x16\n\x0e\x63olumns_to_add\x18\x05 \x03(\t\"\x90\x02\n\x15LeftJoinTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x18\n\x10\x63olumns_to_match\x18\x04 \x03(\t\x12\x16\n\x0e\x63olumns_to_add\x18\x05 \x03(\t\"\xd1\x03\n\x15\x41sOfJoinTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x18\n\x10\x63olumns_to_match\x18\x04 \x03(\t\x12\x16\n\x0e\x63olumns_to_add\x18\x05 \x03(\t\x12\\\n\x10\x61s_of_match_rule\x18\x07 \x01(\x0e\x32\x42.io.deephaven.proto.backplane.grpc.AsOfJoinTablesRequest.MatchRule\"]\n\tMatchRule\x12\x13\n\x0fLESS_THAN_EQUAL\x10\x00\x12\r\n\tLESS_THAN\x10\x01\x12\x16\n\x12GREATER_THAN_EQUAL\x10\x02\x12\x10\n\x0cGREATER_THAN\x10\x03\x1a\x02\x18\x01:\x02\x18\x01\"\xa6\x02\n\x12\x41jRajTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x1b\n\x13\x65xact_match_columns\x18\x04 \x03(\t\x12\x14\n\x0c\x61s_of_column\x18\x05 \x01(\t\x12\x16\n\x0e\x63olumns_to_add\x18\x06 \x03(\t\"\xcb\x06\n\x16RangeJoinTablesRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x1b\n\x13\x65xact_match_columns\x18\x04 \x03(\t\x12\x19\n\x11left_start_column\x18\x05 \x01(\t\x12\x62\n\x10range_start_rule\x18\x06 \x01(\x0e\x32H.io.deephaven.proto.backplane.grpc.RangeJoinTablesRequest.RangeStartRule\x12\x1a\n\x12right_range_column\x18\x07 \x01(\t\x12^\n\x0erange_end_rule\x18\x08 
\x01(\x0e\x32\x46.io.deephaven.proto.backplane.grpc.RangeJoinTablesRequest.RangeEndRule\x12\x17\n\x0fleft_end_column\x18\t \x01(\t\x12\x44\n\x0c\x61ggregations\x18\n \x03(\x0b\x32..io.deephaven.proto.backplane.grpc.Aggregation\"v\n\x0eRangeStartRule\x12\x15\n\x11START_UNSPECIFIED\x10\x00\x12\r\n\tLESS_THAN\x10\x01\x12\x16\n\x12LESS_THAN_OR_EQUAL\x10\x02\x12&\n\"LESS_THAN_OR_EQUAL_ALLOW_PRECEDING\x10\x03\"{\n\x0cRangeEndRule\x12\x13\n\x0f\x45ND_UNSPECIFIED\x10\x00\x12\x10\n\x0cGREATER_THAN\x10\x01\x12\x19\n\x15GREATER_THAN_OR_EQUAL\x10\x02\x12)\n%GREATER_THAN_OR_EQUAL_ALLOW_FOLLOWING\x10\x03\"\xfe\x04\n\x15\x43omboAggregateRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12V\n\naggregates\x18\x03 \x03(\x0b\x32\x42.io.deephaven.proto.backplane.grpc.ComboAggregateRequest.Aggregate\x12\x18\n\x10group_by_columns\x18\x04 \x03(\t\x12\x13\n\x0b\x66orce_combo\x18\x05 \x01(\x08\x1a\xad\x01\n\tAggregate\x12N\n\x04type\x18\x01 \x01(\x0e\x32@.io.deephaven.proto.backplane.grpc.ComboAggregateRequest.AggType\x12\x13\n\x0bmatch_pairs\x18\x02 \x03(\t\x12\x13\n\x0b\x63olumn_name\x18\x03 \x01(\t\x12\x12\n\npercentile\x18\x04 \x01(\x01\x12\x12\n\navg_median\x18\x05 \x01(\x08\"\xa5\x01\n\x07\x41ggType\x12\x07\n\x03SUM\x10\x00\x12\x0b\n\x07\x41\x42S_SUM\x10\x01\x12\t\n\x05GROUP\x10\x02\x12\x07\n\x03\x41VG\x10\x03\x12\t\n\x05\x43OUNT\x10\x04\x12\t\n\x05\x46IRST\x10\x05\x12\x08\n\x04LAST\x10\x06\x12\x07\n\x03MIN\x10\x07\x12\x07\n\x03MAX\x10\x08\x12\n\n\x06MEDIAN\x10\t\x12\x0e\n\nPERCENTILE\x10\n\x12\x07\n\x03STD\x10\x0b\x12\x07\n\x03VAR\x10\x0c\x12\x10\n\x0cWEIGHTED_AVG\x10\r:\x02\x18\x01\"\xed\x01\n\x13\x41ggregateAllRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x38\n\x04spec\x18\x03 \x01(\x0b\x32*.io.deephaven.proto.backplane.grpc.AggSpec\x12\x18\n\x10group_by_columns\x18\x04 \x03(\t\"\xd7\x17\n\x07\x41ggSpec\x12K\n\x07\x61\x62s_sum\x18\x01 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecAbsSumH\x00\x12i\n\x16\x61pproximate_percentile\x18\x02 \x01(\x0b\x32G.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecApproximatePercentileH\x00\x12\x44\n\x03\x61vg\x18\x03 \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecAvgH\x00\x12Y\n\x0e\x63ount_distinct\x18\x04 \x01(\x0b\x32?.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecCountDistinctH\x00\x12N\n\x08\x64istinct\x18\x05 \x01(\x0b\x32:.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecDistinctH\x00\x12H\n\x05\x66irst\x18\x06 \x01(\x0b\x32\x37.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecFirstH\x00\x12L\n\x07\x66ormula\x18\x07 \x01(\x0b\x32\x39.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecFormulaH\x00\x12J\n\x06\x66reeze\x18\x08 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecFreezeH\x00\x12H\n\x05group\x18\t \x01(\x0b\x32\x37.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecGroupH\x00\x12\x46\n\x04last\x18\n \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecLastH\x00\x12\x44\n\x03max\x18\x0b \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecMaxH\x00\x12J\n\x06median\x18\x0c \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecMedianH\x00\x12\x44\n\x03min\x18\r \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecMinH\x00\x12R\n\npercentile\x18\x0e 
\x01(\x0b\x32<.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecPercentileH\x00\x12P\n\x0csorted_first\x18\x0f \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecSortedH\x00\x12O\n\x0bsorted_last\x18\x10 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecSortedH\x00\x12\x44\n\x03std\x18\x11 \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecStdH\x00\x12\x44\n\x03sum\x18\x12 \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecSumH\x00\x12M\n\x08t_digest\x18\x13 \x01(\x0b\x32\x39.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecTDigestH\x00\x12J\n\x06unique\x18\x14 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecUniqueH\x00\x12R\n\x0cweighted_avg\x18\x15 \x01(\x0b\x32:.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecWeightedH\x00\x12R\n\x0cweighted_sum\x18\x16 \x01(\x0b\x32:.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecWeightedH\x00\x12\x44\n\x03var\x18\x17 \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecVarH\x00\x1a\\\n\x1c\x41ggSpecApproximatePercentile\x12\x12\n\npercentile\x18\x01 \x01(\x01\x12\x18\n\x0b\x63ompression\x18\x02 \x01(\x01H\x00\x88\x01\x01\x42\x0e\n\x0c_compression\x1a+\n\x14\x41ggSpecCountDistinct\x12\x13\n\x0b\x63ount_nulls\x18\x01 \x01(\x08\x1a(\n\x0f\x41ggSpecDistinct\x12\x15\n\rinclude_nulls\x18\x01 \x01(\x08\x1a\x36\n\x0e\x41ggSpecFormula\x12\x0f\n\x07\x66ormula\x18\x01 \x01(\t\x12\x13\n\x0bparam_token\x18\x02 \x01(\t\x1a/\n\rAggSpecMedian\x12\x1e\n\x16\x61verage_evenly_divided\x18\x01 \x01(\x08\x1aG\n\x11\x41ggSpecPercentile\x12\x12\n\npercentile\x18\x01 \x01(\x01\x12\x1e\n\x16\x61verage_evenly_divided\x18\x02 \x01(\x08\x1a`\n\rAggSpecSorted\x12O\n\x07\x63olumns\x18\x01 \x03(\x0b\x32>.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecSortedColumn\x1a*\n\x13\x41ggSpecSortedColumn\x12\x13\n\x0b\x63olumn_name\x18\x01 \x01(\t\x1a:\n\x0e\x41ggSpecTDigest\x12\x18\n\x0b\x63ompression\x18\x01 \x01(\x01H\x00\x88\x01\x01\x42\x0e\n\x0c_compression\x1a\x88\x01\n\rAggSpecUnique\x12\x15\n\rinclude_nulls\x18\x01 \x01(\x08\x12`\n\x13non_unique_sentinel\x18\x02 \x01(\x0b\x32\x43.io.deephaven.proto.backplane.grpc.AggSpec.AggSpecNonUniqueSentinel\x1a\xb5\x02\n\x18\x41ggSpecNonUniqueSentinel\x12\x42\n\nnull_value\x18\x01 \x01(\x0e\x32,.io.deephaven.proto.backplane.grpc.NullValueH\x00\x12\x16\n\x0cstring_value\x18\x02 \x01(\tH\x00\x12\x13\n\tint_value\x18\x03 \x01(\x11H\x00\x12\x18\n\nlong_value\x18\x04 \x01(\x12\x42\x02\x30\x01H\x00\x12\x15\n\x0b\x66loat_value\x18\x05 \x01(\x02H\x00\x12\x16\n\x0c\x64ouble_value\x18\x06 \x01(\x01H\x00\x12\x14\n\nbool_value\x18\x07 \x01(\x08H\x00\x12\x14\n\nbyte_value\x18\x08 \x01(\x11H\x00\x12\x15\n\x0bshort_value\x18\t \x01(\x11H\x00\x12\x14\n\nchar_value\x18\n \x01(\x11H\x00\x42\x06\n\x04type\x1a(\n\x0f\x41ggSpecWeighted\x12\x15\n\rweight_column\x18\x01 \x01(\t\x1a\x0f\n\rAggSpecAbsSum\x1a\x0c\n\nAggSpecAvg\x1a\x0e\n\x0c\x41ggSpecFirst\x1a\x0f\n\rAggSpecFreeze\x1a\x0e\n\x0c\x41ggSpecGroup\x1a\r\n\x0b\x41ggSpecLast\x1a\x0c\n\nAggSpecMax\x1a\x0c\n\nAggSpecMin\x1a\x0c\n\nAggSpecStd\x1a\x0c\n\nAggSpecSum\x1a\x0c\n\nAggSpecVarB\x06\n\x04type\"\xdc\x02\n\x10\x41ggregateRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12L\n\x11initial_groups_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x16\n\x0epreserve_empty\x18\x04 \x01(\x08\x12\x44\n\x0c\x61ggregations\x18\x05 
\x03(\x0b\x32..io.deephaven.proto.backplane.grpc.Aggregation\x12\x18\n\x10group_by_columns\x18\x06 \x03(\t\"\xd3\x05\n\x0b\x41ggregation\x12T\n\x07\x63olumns\x18\x01 \x01(\x0b\x32\x41.io.deephaven.proto.backplane.grpc.Aggregation.AggregationColumnsH\x00\x12P\n\x05\x63ount\x18\x02 \x01(\x0b\x32?.io.deephaven.proto.backplane.grpc.Aggregation.AggregationCountH\x00\x12Y\n\rfirst_row_key\x18\x03 \x01(\x0b\x32@.io.deephaven.proto.backplane.grpc.Aggregation.AggregationRowKeyH\x00\x12X\n\x0clast_row_key\x18\x04 \x01(\x0b\x32@.io.deephaven.proto.backplane.grpc.Aggregation.AggregationRowKeyH\x00\x12X\n\tpartition\x18\x05 \x01(\x0b\x32\x43.io.deephaven.proto.backplane.grpc.Aggregation.AggregationPartitionH\x00\x1a\x63\n\x12\x41ggregationColumns\x12\x38\n\x04spec\x18\x01 \x01(\x0b\x32*.io.deephaven.proto.backplane.grpc.AggSpec\x12\x13\n\x0bmatch_pairs\x18\x02 \x03(\t\x1a\'\n\x10\x41ggregationCount\x12\x13\n\x0b\x63olumn_name\x18\x01 \x01(\t\x1a(\n\x11\x41ggregationRowKey\x12\x13\n\x0b\x63olumn_name\x18\x01 \x01(\t\x1aM\n\x14\x41ggregationPartition\x12\x13\n\x0b\x63olumn_name\x18\x01 \x01(\t\x12 \n\x18include_group_by_columns\x18\x02 \x01(\x08\x42\x06\n\x04type\"\xe1\x01\n\x0eSortDescriptor\x12\x13\n\x0b\x63olumn_name\x18\x01 \x01(\t\x12\x13\n\x0bis_absolute\x18\x02 \x01(\x08\x12R\n\tdirection\x18\x03 \x01(\x0e\x32?.io.deephaven.proto.backplane.grpc.SortDescriptor.SortDirection\"Q\n\rSortDirection\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x17\n\nDESCENDING\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x12\r\n\tASCENDING\x10\x01\x12\x0b\n\x07REVERSE\x10\x02\"\xd8\x01\n\x10SortTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12@\n\x05sorts\x18\x03 \x03(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.SortDescriptor\"\xd7\x01\n\x12\x46ilterTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12=\n\x07\x66ilters\x18\x03 \x03(\x0b\x32,.io.deephaven.proto.backplane.grpc.Condition\"\xf9\x01\n\x0eSeekRowRequest\x12<\n\tsource_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x18\n\x0cstarting_row\x18\x02 \x01(\x12\x42\x02\x30\x01\x12\x13\n\x0b\x63olumn_name\x18\x03 \x01(\t\x12>\n\nseek_value\x18\x04 \x01(\x0b\x32*.io.deephaven.proto.backplane.grpc.Literal\x12\x13\n\x0binsensitive\x18\x05 \x01(\x08\x12\x10\n\x08\x63ontains\x18\x06 \x01(\x08\x12\x13\n\x0bis_backward\x18\x07 \x01(\x08\")\n\x0fSeekRowResponse\x12\x16\n\nresult_row\x18\x01 \x01(\x12\x42\x02\x30\x01\" \n\tReference\x12\x13\n\x0b\x63olumn_name\x18\x01 \x01(\t\"\x91\x01\n\x07Literal\x12\x16\n\x0cstring_value\x18\x01 \x01(\tH\x00\x12\x16\n\x0c\x64ouble_value\x18\x02 \x01(\x01H\x00\x12\x14\n\nbool_value\x18\x03 \x01(\x08H\x00\x12\x18\n\nlong_value\x18\x04 \x01(\x12\x42\x02\x30\x01H\x00\x12\x1d\n\x0fnano_time_value\x18\x05 \x01(\x12\x42\x02\x30\x01H\x00\x42\x07\n\x05value\"\x91\x01\n\x05Value\x12\x41\n\treference\x18\x01 \x01(\x0b\x32,.io.deephaven.proto.backplane.grpc.ReferenceH\x00\x12=\n\x07literal\x18\x02 \x01(\x0b\x32*.io.deephaven.proto.backplane.grpc.LiteralH\x00\x42\x06\n\x04\x64\x61ta\"\xbc\x05\n\tCondition\x12>\n\x03\x61nd\x18\x01 \x01(\x0b\x32/.io.deephaven.proto.backplane.grpc.AndConditionH\x00\x12<\n\x02or\x18\x02 \x01(\x0b\x32..io.deephaven.proto.backplane.grpc.OrConditionH\x00\x12>\n\x03not\x18\x03 
\x01(\x0b\x32/.io.deephaven.proto.backplane.grpc.NotConditionH\x00\x12\x46\n\x07\x63ompare\x18\x04 \x01(\x0b\x32\x33.io.deephaven.proto.backplane.grpc.CompareConditionH\x00\x12<\n\x02in\x18\x05 \x01(\x0b\x32..io.deephaven.proto.backplane.grpc.InConditionH\x00\x12\x44\n\x06invoke\x18\x06 \x01(\x0b\x32\x32.io.deephaven.proto.backplane.grpc.InvokeConditionH\x00\x12\x45\n\x07is_null\x18\x07 \x01(\x0b\x32\x32.io.deephaven.proto.backplane.grpc.IsNullConditionH\x00\x12\x46\n\x07matches\x18\x08 \x01(\x0b\x32\x33.io.deephaven.proto.backplane.grpc.MatchesConditionH\x00\x12H\n\x08\x63ontains\x18\t \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.ContainsConditionH\x00\x12\x44\n\x06search\x18\n \x01(\x0b\x32\x32.io.deephaven.proto.backplane.grpc.SearchConditionH\x00\x42\x06\n\x04\x64\x61ta\"M\n\x0c\x41ndCondition\x12=\n\x07\x66ilters\x18\x01 \x03(\x0b\x32,.io.deephaven.proto.backplane.grpc.Condition\"L\n\x0bOrCondition\x12=\n\x07\x66ilters\x18\x01 \x03(\x0b\x32,.io.deephaven.proto.backplane.grpc.Condition\"L\n\x0cNotCondition\x12<\n\x06\x66ilter\x18\x01 \x01(\x0b\x32,.io.deephaven.proto.backplane.grpc.Condition\"\xac\x03\n\x10\x43ompareCondition\x12W\n\toperation\x18\x01 \x01(\x0e\x32\x44.io.deephaven.proto.backplane.grpc.CompareCondition.CompareOperation\x12L\n\x10\x63\x61se_sensitivity\x18\x02 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.CaseSensitivity\x12\x35\n\x03lhs\x18\x03 \x01(\x0b\x32(.io.deephaven.proto.backplane.grpc.Value\x12\x35\n\x03rhs\x18\x04 \x01(\x0b\x32(.io.deephaven.proto.backplane.grpc.Value\"\x82\x01\n\x10\x43ompareOperation\x12\r\n\tLESS_THAN\x10\x00\x12\x16\n\x12LESS_THAN_OR_EQUAL\x10\x01\x12\x10\n\x0cGREATER_THAN\x10\x02\x12\x19\n\x15GREATER_THAN_OR_EQUAL\x10\x03\x12\n\n\x06\x45QUALS\x10\x04\x12\x0e\n\nNOT_EQUALS\x10\x05\"\x95\x02\n\x0bInCondition\x12\x38\n\x06target\x18\x01 \x01(\x0b\x32(.io.deephaven.proto.backplane.grpc.Value\x12<\n\ncandidates\x18\x02 \x03(\x0b\x32(.io.deephaven.proto.backplane.grpc.Value\x12L\n\x10\x63\x61se_sensitivity\x18\x03 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.CaseSensitivity\x12@\n\nmatch_type\x18\x04 \x01(\x0e\x32,.io.deephaven.proto.backplane.grpc.MatchType\"\x98\x01\n\x0fInvokeCondition\x12\x0e\n\x06method\x18\x01 \x01(\t\x12\x38\n\x06target\x18\x02 \x01(\x0b\x32(.io.deephaven.proto.backplane.grpc.Value\x12;\n\targuments\x18\x03 \x03(\x0b\x32(.io.deephaven.proto.backplane.grpc.Value\"R\n\x0fIsNullCondition\x12?\n\treference\x18\x01 \x01(\x0b\x32,.io.deephaven.proto.backplane.grpc.Reference\"\xf2\x01\n\x10MatchesCondition\x12?\n\treference\x18\x01 \x01(\x0b\x32,.io.deephaven.proto.backplane.grpc.Reference\x12\r\n\x05regex\x18\x02 \x01(\t\x12L\n\x10\x63\x61se_sensitivity\x18\x03 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.CaseSensitivity\x12@\n\nmatch_type\x18\x04 \x01(\x0e\x32,.io.deephaven.proto.backplane.grpc.MatchType\"\xfb\x01\n\x11\x43ontainsCondition\x12?\n\treference\x18\x01 \x01(\x0b\x32,.io.deephaven.proto.backplane.grpc.Reference\x12\x15\n\rsearch_string\x18\x02 \x01(\t\x12L\n\x10\x63\x61se_sensitivity\x18\x03 \x01(\x0e\x32\x32.io.deephaven.proto.backplane.grpc.CaseSensitivity\x12@\n\nmatch_type\x18\x04 \x01(\x0e\x32,.io.deephaven.proto.backplane.grpc.MatchType\"s\n\x0fSearchCondition\x12\x15\n\rsearch_string\x18\x01 \x01(\t\x12I\n\x13optional_references\x18\x02 \x03(\x0b\x32,.io.deephaven.proto.backplane.grpc.Reference\"\x94\x01\n\x0e\x46lattenRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 
\x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\"\x96\x01\n\x10MetaTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\"\xb4\x03\n\x19RunChartDownsampleRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x13\n\x0bpixel_count\x18\x03 \x01(\x05\x12Z\n\nzoom_range\x18\x04 \x01(\x0b\x32\x46.io.deephaven.proto.backplane.grpc.RunChartDownsampleRequest.ZoomRange\x12\x15\n\rx_column_name\x18\x05 \x01(\t\x12\x16\n\x0ey_column_names\x18\x06 \x03(\t\x1as\n\tZoomRange\x12\x1f\n\x0emin_date_nanos\x18\x01 \x01(\x03\x42\x02\x30\x01H\x00\x88\x01\x01\x12\x1f\n\x0emax_date_nanos\x18\x02 \x01(\x03\x42\x02\x30\x01H\x01\x88\x01\x01\x42\x11\n\x0f_min_date_nanosB\x11\n\x0f_max_date_nanos\"\xe0\x05\n\x17\x43reateInputTableRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12L\n\x0fsource_table_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReferenceH\x00\x12\x10\n\x06schema\x18\x03 \x01(\x0cH\x00\x12W\n\x04kind\x18\x04 \x01(\x0b\x32I.io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind\x1a\xbf\x03\n\x0eInputTableKind\x12}\n\x15in_memory_append_only\x18\x01 \x01(\x0b\x32\\.io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.InMemoryAppendOnlyH\x00\x12{\n\x14in_memory_key_backed\x18\x02 \x01(\x0b\x32[.io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.InMemoryKeyBackedH\x00\x12`\n\x05\x62link\x18\x03 \x01(\x0b\x32O.io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.BlinkH\x00\x1a\x14\n\x12InMemoryAppendOnly\x1a(\n\x11InMemoryKeyBacked\x12\x13\n\x0bkey_columns\x18\x01 \x03(\t\x1a\x07\n\x05\x42linkB\x06\n\x04kindB\x0c\n\ndefinition\"\x83\x02\n\x0eWhereInRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x42\n\x07left_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x43\n\x08right_id\x18\x03 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x10\n\x08inverted\x18\x04 \x01(\x08\x12\x18\n\x10\x63olumns_to_match\x18\x05 \x03(\t\"\xea\x01\n\x17\x43olumnStatisticsRequest\x12<\n\tresult_id\x18\x01 \x01(\x0b\x32).io.deephaven.proto.backplane.grpc.Ticket\x12\x44\n\tsource_id\x18\x02 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.TableReference\x12\x13\n\x0b\x63olumn_name\x18\x03 \x01(\t\x12\x1f\n\x12unique_value_limit\x18\x04 \x01(\x05H\x00\x88\x01\x01\x42\x15\n\x13_unique_value_limit\"\xc8\x19\n\x11\x42\x61tchTableRequest\x12K\n\x03ops\x18\x01 \x03(\x0b\x32>.io.deephaven.proto.backplane.grpc.BatchTableRequest.Operation\x1a\xe5\x18\n\tOperation\x12K\n\x0b\x65mpty_table\x18\x01 \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.EmptyTableRequestH\x00\x12I\n\ntime_table\x18\x02 \x01(\x0b\x32\x33.io.deephaven.proto.backplane.grpc.TimeTableRequestH\x00\x12M\n\x0c\x64rop_columns\x18\x03 \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.DropColumnsRequestH\x00\x12J\n\x06update\x18\x04 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequestH\x00\x12O\n\x0blazy_update\x18\x05 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequestH\x00\x12H\n\x04view\x18\x06 
\x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequestH\x00\x12O\n\x0bupdate_view\x18\x07 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequestH\x00\x12J\n\x06select\x18\x08 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequestH\x00\x12S\n\x0fselect_distinct\x18\t \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.SelectDistinctRequestH\x00\x12G\n\x06\x66ilter\x18\n \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.FilterTableRequestH\x00\x12`\n\x13unstructured_filter\x18\x0b \x01(\x0b\x32\x41.io.deephaven.proto.backplane.grpc.UnstructuredFilterTableRequestH\x00\x12\x43\n\x04sort\x18\x0c \x01(\x0b\x32\x33.io.deephaven.proto.backplane.grpc.SortTableRequestH\x00\x12\x44\n\x04head\x18\r \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.HeadOrTailRequestH\x00\x12\x44\n\x04tail\x18\x0e \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.HeadOrTailRequestH\x00\x12I\n\x07head_by\x18\x0f \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.HeadOrTailByRequestH\x00\x12I\n\x07tail_by\x18\x10 \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.HeadOrTailByRequestH\x00\x12\x44\n\x07ungroup\x18\x11 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.UngroupRequestH\x00\x12\x46\n\x05merge\x18\x12 \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.MergeTablesRequestH\x00\x12S\n\x0f\x63ombo_aggregate\x18\x13 \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.ComboAggregateRequestH\x00\x12\x44\n\x07\x66latten\x18\x15 \x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.FlattenRequestH\x00\x12\\\n\x14run_chart_downsample\x18\x16 \x01(\x0b\x32<.io.deephaven.proto.backplane.grpc.RunChartDownsampleRequestH\x00\x12O\n\ncross_join\x18\x17 \x01(\x0b\x32\x39.io.deephaven.proto.backplane.grpc.CrossJoinTablesRequestH\x00\x12S\n\x0cnatural_join\x18\x18 \x01(\x0b\x32;.io.deephaven.proto.backplane.grpc.NaturalJoinTablesRequestH\x00\x12O\n\nexact_join\x18\x19 \x01(\x0b\x32\x39.io.deephaven.proto.backplane.grpc.ExactJoinTablesRequestH\x00\x12M\n\tleft_join\x18\x1a \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.LeftJoinTablesRequestH\x00\x12R\n\nas_of_join\x18\x1b \x01(\x0b\x32\x38.io.deephaven.proto.backplane.grpc.AsOfJoinTablesRequestB\x02\x18\x01H\x00\x12K\n\x0b\x66\x65tch_table\x18\x1c \x01(\x0b\x32\x34.io.deephaven.proto.backplane.grpc.FetchTableRequestH\x00\x12^\n\x15\x61pply_preview_columns\x18\x1e \x01(\x0b\x32=.io.deephaven.proto.backplane.grpc.ApplyPreviewColumnsRequestH\x00\x12X\n\x12\x63reate_input_table\x18\x1f \x01(\x0b\x32:.io.deephaven.proto.backplane.grpc.CreateInputTableRequestH\x00\x12G\n\tupdate_by\x18 \x01(\x0b\x32\x32.io.deephaven.proto.backplane.grpc.UpdateByRequestH\x00\x12\x45\n\x08where_in\x18! 
\x01(\x0b\x32\x31.io.deephaven.proto.backplane.grpc.WhereInRequestH\x00\x12O\n\raggregate_all\x18\" \x01(\x0b\x32\x36.io.deephaven.proto.backplane.grpc.AggregateAllRequestH\x00\x12H\n\taggregate\x18# \x01(\x0b\x32\x33.io.deephaven.proto.backplane.grpc.AggregateRequestH\x00\x12K\n\x08snapshot\x18$ \x01(\x0b\x32\x37.io.deephaven.proto.backplane.grpc.SnapshotTableRequestH\x00\x12T\n\rsnapshot_when\x18% \x01(\x0b\x32;.io.deephaven.proto.backplane.grpc.SnapshotWhenTableRequestH\x00\x12I\n\nmeta_table\x18& \x01(\x0b\x32\x33.io.deephaven.proto.backplane.grpc.MetaTableRequestH\x00\x12O\n\nrange_join\x18\' \x01(\x0b\x32\x39.io.deephaven.proto.backplane.grpc.RangeJoinTablesRequestH\x00\x12\x43\n\x02\x61j\x18( \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AjRajTablesRequestH\x00\x12\x44\n\x03raj\x18) \x01(\x0b\x32\x35.io.deephaven.proto.backplane.grpc.AjRajTablesRequestH\x00\x12W\n\x11\x63olumn_statistics\x18* \x01(\x0b\x32:.io.deephaven.proto.backplane.grpc.ColumnStatisticsRequestH\x00\x42\x04\n\x02opJ\x04\x08\x14\x10\x15J\x04\x08\x1d\x10\x1e*b\n\x0f\x42\x61\x64\x44\x61taBehavior\x12#\n\x1f\x42\x41\x44_DATA_BEHAVIOR_NOT_SPECIFIED\x10\x00\x12\t\n\x05THROW\x10\x01\x12\t\n\x05RESET\x10\x02\x12\x08\n\x04SKIP\x10\x03\x12\n\n\x06POISON\x10\x04*t\n\x14UpdateByNullBehavior\x12\x1f\n\x1bNULL_BEHAVIOR_NOT_SPECIFIED\x10\x00\x12\x12\n\x0eNULL_DOMINATES\x10\x01\x12\x13\n\x0fVALUE_DOMINATES\x10\x02\x12\x12\n\x0eZERO_DOMINATES\x10\x03*\x1b\n\tNullValue\x12\x0e\n\nNULL_VALUE\x10\x00*2\n\x0f\x43\x61seSensitivity\x12\x0e\n\nMATCH_CASE\x10\x00\x12\x0f\n\x0bIGNORE_CASE\x10\x01*&\n\tMatchType\x12\x0b\n\x07REGULAR\x10\x00\x12\x0c\n\x08INVERTED\x10\x01\x32\xa8\x30\n\x0cTableService\x12\x91\x01\n GetExportedTableCreationResponse\x12).io.deephaven.proto.backplane.grpc.Ticket\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x86\x01\n\nFetchTable\x12\x34.io.deephaven.proto.backplane.grpc.FetchTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x98\x01\n\x13\x41pplyPreviewColumns\x12=.io.deephaven.proto.backplane.grpc.ApplyPreviewColumnsRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x86\x01\n\nEmptyTable\x12\x34.io.deephaven.proto.backplane.grpc.EmptyTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x84\x01\n\tTimeTable\x12\x33.io.deephaven.proto.backplane.grpc.TimeTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x88\x01\n\x0b\x44ropColumns\x12\x35.io.deephaven.proto.backplane.grpc.DropColumnsRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x86\x01\n\x06Update\x12\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x8a\x01\n\nLazyUpdate\x12\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x84\x01\n\x04View\x12\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x8a\x01\n\nUpdateView\x12\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x86\x01\n\x06Select\x12\x38.io.deephaven.proto.backplane.grpc.SelectOrUpdateRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x82\x01\n\x08UpdateBy\x12\x32.io.deephaven.proto.bac
kplane.grpc.UpdateByRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x8e\x01\n\x0eSelectDistinct\x12\x38.io.deephaven.proto.backplane.grpc.SelectDistinctRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x83\x01\n\x06\x46ilter\x12\x35.io.deephaven.proto.backplane.grpc.FilterTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x9b\x01\n\x12UnstructuredFilter\x12\x41.io.deephaven.proto.backplane.grpc.UnstructuredFilterTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x7f\n\x04Sort\x12\x33.io.deephaven.proto.backplane.grpc.SortTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x80\x01\n\x04Head\x12\x34.io.deephaven.proto.backplane.grpc.HeadOrTailRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x80\x01\n\x04Tail\x12\x34.io.deephaven.proto.backplane.grpc.HeadOrTailRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x84\x01\n\x06HeadBy\x12\x36.io.deephaven.proto.backplane.grpc.HeadOrTailByRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x84\x01\n\x06TailBy\x12\x36.io.deephaven.proto.backplane.grpc.HeadOrTailByRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x80\x01\n\x07Ungroup\x12\x31.io.deephaven.proto.backplane.grpc.UngroupRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x88\x01\n\x0bMergeTables\x12\x35.io.deephaven.proto.backplane.grpc.MergeTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x90\x01\n\x0f\x43rossJoinTables\x12\x39.io.deephaven.proto.backplane.grpc.CrossJoinTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x94\x01\n\x11NaturalJoinTables\x12;.io.deephaven.proto.backplane.grpc.NaturalJoinTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x90\x01\n\x0f\x45xactJoinTables\x12\x39.io.deephaven.proto.backplane.grpc.ExactJoinTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x8e\x01\n\x0eLeftJoinTables\x12\x38.io.deephaven.proto.backplane.grpc.LeftJoinTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x91\x01\n\x0e\x41sOfJoinTables\x12\x38.io.deephaven.proto.backplane.grpc.AsOfJoinTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x03\x88\x02\x01\x12\x85\x01\n\x08\x41jTables\x12\x35.io.deephaven.proto.backplane.grpc.AjRajTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x86\x01\n\tRajTables\x12\x35.io.deephaven.proto.backplane.grpc.AjRajTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x90\x01\n\x0fRangeJoinTables\x12\x39.io.deephaven.proto.backplane.grpc.RangeJoinTablesRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x91\x01\n\x0e\x43omboAggregate\x12\x38.io.deephaven.proto.backplane.grpc.ComboAggregateRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x03\x88\x02\x01\x12\x8a\x01\n\x0c\x41ggregateAll\x12\x36.io.deephaven.proto.backplane.grpc.AggregateAllRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x84\x01\n\tAggregate\x12\x33.io.deephaven.proto.backplane.grpc.AggregateRequest\x1
a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x87\x01\n\x08Snapshot\x12\x37.io.deephaven.proto.backplane.grpc.SnapshotTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x8f\x01\n\x0cSnapshotWhen\x12;.io.deephaven.proto.backplane.grpc.SnapshotWhenTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x80\x01\n\x07\x46latten\x12\x31.io.deephaven.proto.backplane.grpc.FlattenRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x96\x01\n\x12RunChartDownsample\x12<.io.deephaven.proto.backplane.grpc.RunChartDownsampleRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x92\x01\n\x10\x43reateInputTable\x12:.io.deephaven.proto.backplane.grpc.CreateInputTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x80\x01\n\x07WhereIn\x12\x31.io.deephaven.proto.backplane.grpc.WhereInRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x83\x01\n\x05\x42\x61tch\x12\x34.io.deephaven.proto.backplane.grpc.BatchTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x30\x01\x12\x99\x01\n\x14\x45xportedTableUpdates\x12>.io.deephaven.proto.backplane.grpc.ExportedTableUpdatesRequest\x1a=.io.deephaven.proto.backplane.grpc.ExportedTableUpdateMessage\"\x00\x30\x01\x12r\n\x07SeekRow\x12\x31.io.deephaven.proto.backplane.grpc.SeekRowRequest\x1a\x32.io.deephaven.proto.backplane.grpc.SeekRowResponse\"\x00\x12\x84\x01\n\tMetaTable\x12\x33.io.deephaven.proto.backplane.grpc.MetaTableRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x12\x99\x01\n\x17\x43omputeColumnStatistics\x12:.io.deephaven.proto.backplane.grpc.ColumnStatisticsRequest\x1a@.io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse\"\x00\x42\x41H\x01P\x01Z;github.com/deephaven/deephaven-core/go/internal/proto/tableb\x06proto3') _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'deephaven.proto.table_pb2', globals()) @@ -64,16 +64,16 @@ _TABLESERVICE.methods_by_name['AsOfJoinTables']._serialized_options = b'\210\002\001' _TABLESERVICE.methods_by_name['ComboAggregate']._options = None _TABLESERVICE.methods_by_name['ComboAggregate']._serialized_options = b'\210\002\001' - _BADDATABEHAVIOR._serialized_start=27752 - _BADDATABEHAVIOR._serialized_end=27850 - _UPDATEBYNULLBEHAVIOR._serialized_start=27852 - _UPDATEBYNULLBEHAVIOR._serialized_end=27968 - _NULLVALUE._serialized_start=27970 - _NULLVALUE._serialized_end=27997 - _CASESENSITIVITY._serialized_start=27999 - _CASESENSITIVITY._serialized_end=28049 - _MATCHTYPE._serialized_start=28051 - _MATCHTYPE._serialized_end=28089 + _BADDATABEHAVIOR._serialized_start=27859 + _BADDATABEHAVIOR._serialized_end=27957 + _UPDATEBYNULLBEHAVIOR._serialized_start=27959 + _UPDATEBYNULLBEHAVIOR._serialized_end=28075 + _NULLVALUE._serialized_start=28077 + _NULLVALUE._serialized_end=28104 + _CASESENSITIVITY._serialized_start=28106 + _CASESENSITIVITY._serialized_end=28156 + _MATCHTYPE._serialized_start=28158 + _MATCHTYPE._serialized_end=28196 _TABLEREFERENCE._serialized_start=96 _TABLEREFERENCE._serialized_end=204 _EXPORTEDTABLECREATIONRESPONSE._serialized_start=207 @@ -313,21 +313,23 @@ _RUNCHARTDOWNSAMPLEREQUEST_ZOOMRANGE._serialized_start=23229 _RUNCHARTDOWNSAMPLEREQUEST_ZOOMRANGE._serialized_end=23344 _CREATEINPUTTABLEREQUEST._serialized_start=23347 - 
_CREATEINPUTTABLEREQUEST._serialized_end=23976
+  _CREATEINPUTTABLEREQUEST._serialized_end=24083
   _CREATEINPUTTABLEREQUEST_INPUTTABLEKIND._serialized_start=23622
-  _CREATEINPUTTABLEREQUEST_INPUTTABLEKIND._serialized_end=23962
-  _CREATEINPUTTABLEREQUEST_INPUTTABLEKIND_INMEMORYAPPENDONLY._serialized_start=23892
-  _CREATEINPUTTABLEREQUEST_INPUTTABLEKIND_INMEMORYAPPENDONLY._serialized_end=23912
-  _CREATEINPUTTABLEREQUEST_INPUTTABLEKIND_INMEMORYKEYBACKED._serialized_start=23914
-  _CREATEINPUTTABLEREQUEST_INPUTTABLEKIND_INMEMORYKEYBACKED._serialized_end=23954
-  _WHEREINREQUEST._serialized_start=23979
-  _WHEREINREQUEST._serialized_end=24238
-  _COLUMNSTATISTICSREQUEST._serialized_start=24241
-  _COLUMNSTATISTICSREQUEST._serialized_end=24475
-  _BATCHTABLEREQUEST._serialized_start=24478
-  _BATCHTABLEREQUEST._serialized_end=27750
-  _BATCHTABLEREQUEST_OPERATION._serialized_start=24577
-  _BATCHTABLEREQUEST_OPERATION._serialized_end=27750
-  _TABLESERVICE._serialized_start=28092
-  _TABLESERVICE._serialized_end=34276
+  _CREATEINPUTTABLEREQUEST_INPUTTABLEKIND._serialized_end=24069
+  _CREATEINPUTTABLEREQUEST_INPUTTABLEKIND_INMEMORYAPPENDONLY._serialized_start=23990
+  _CREATEINPUTTABLEREQUEST_INPUTTABLEKIND_INMEMORYAPPENDONLY._serialized_end=24010
+  _CREATEINPUTTABLEREQUEST_INPUTTABLEKIND_INMEMORYKEYBACKED._serialized_start=24012
+  _CREATEINPUTTABLEREQUEST_INPUTTABLEKIND_INMEMORYKEYBACKED._serialized_end=24052
+  _CREATEINPUTTABLEREQUEST_INPUTTABLEKIND_BLINK._serialized_start=24054
+  _CREATEINPUTTABLEREQUEST_INPUTTABLEKIND_BLINK._serialized_end=24061
+  _WHEREINREQUEST._serialized_start=24086
+  _WHEREINREQUEST._serialized_end=24345
+  _COLUMNSTATISTICSREQUEST._serialized_start=24348
+  _COLUMNSTATISTICSREQUEST._serialized_end=24582
+  _BATCHTABLEREQUEST._serialized_start=24585
+  _BATCHTABLEREQUEST._serialized_end=27857
+  _BATCHTABLEREQUEST_OPERATION._serialized_start=24684
+  _BATCHTABLEREQUEST_OPERATION._serialized_end=27857
+  _TABLESERVICE._serialized_start=28199
+  _TABLESERVICE._serialized_end=34383
 # @@protoc_insertion_point(module_scope)
diff --git a/py/client/pydeephaven/session.py b/py/client/pydeephaven/session.py
index 217743ea5b7..df4d4107f79 100644
--- a/py/client/pydeephaven/session.py
+++ b/py/client/pydeephaven/session.py
@@ -334,12 +334,11 @@ def _keep_alive(self):
         self._keep_alive_timer.start()
 
     def _refresh_token(self):
-        with self._r_lock:
-            try:
-                self._flight_client.authenticate(self._auth_handler)
-            except Exception as e:
-                self.is_connected = False
-                raise DHError("failed to refresh auth token") from e
+        try:
+            self._flight_client.authenticate(self._auth_handler)
+        except Exception as e:
+            self.is_connected = False
+            raise DHError("failed to refresh auth token") from e
 
     @property
     def is_alive(self) -> bool:
@@ -385,10 +384,9 @@ def run_script(self, script: str) -> None:
         Raises:
             DHError
         """
-        with self._r_lock:
-            response = self.console_service.run_script(script)
-            if response.error_message != '':
-                raise DHError("could not run script: " + response.error_message)
+        response = self.console_service.run_script(script)
+        if response.error_message != '':
+            raise DHError("could not run script: " + response.error_message)
 
     def open_table(self, name: str) -> Table:
         """Opens a table in the global scope with the given name on the server.
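The two hunks above drop the re-entrant session lock around `_refresh_token` and `run_script`; the error handling is unchanged (a failed authentication still flips `is_connected` and re-raises as `DHError`). A minimal usage sketch of the affected `run_script`/`open_table` path, assuming a Deephaven server reachable on localhost:10000 (the script body and table name are hypothetical):

from pydeephaven import Session, DHError

session = Session(host="localhost", port=10000)
try:
    # run_script now calls the console service directly, without taking the session-wide lock
    session.run_script("from deephaven import empty_table\nt = empty_table(5)")
    t = session.open_table("t")  # fetch the table the script created in the global scope
except DHError as e:
    print(f"request failed: {e}")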
diff --git a/py/client/tests/test_session.py b/py/client/tests/test_session.py
index fe75d170ff3..b5b71bc83d5 100644
--- a/py/client/tests/test_session.py
+++ b/py/client/tests/test_session.py
@@ -56,6 +56,10 @@ def test_time_table(self):
         session.bind_table("t", t)
         session.run_script("""
 from deephaven import empty_table
+try:
+    del t1
+except NameError:
+    pass
 t1 = empty_table(0) if t.is_blink else None
 """)
         self.assertNotIn("t1", session.tables)
@@ -64,6 +68,10 @@ def test_time_table(self):
         session.bind_table("t", t)
         session.run_script("""
 from deephaven import empty_table
+try:
+    del t1
+except NameError:
+    pass
 t1 = empty_table(0) if t.is_blink else None
 """)
         self.assertIn("t1", session.tables)
diff --git a/py/server/deephaven/_udf.py b/py/server/deephaven/_udf.py
new file mode 100644
index 00000000000..fba76b3472a
--- /dev/null
+++ b/py/server/deephaven/_udf.py
@@ -0,0 +1,420 @@
+#
+# Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending
+#
+
+from __future__ import annotations
+
+import inspect
+import re
+from dataclasses import dataclass, field
+from functools import wraps
+from typing import Callable, List, Any, Union, Tuple, _GenericAlias
+
+import numba
+import numpy
+import numpy as np
+
+from deephaven import DHError, dtypes
+from deephaven.dtypes import _np_ndarray_component_type, _np_dtype_char, _NUMPY_INT_TYPE_CODES, \
+    _NUMPY_FLOATING_TYPE_CODES, _component_np_dtype_char, _J_ARRAY_NP_TYPE_MAP, _PRIMITIVE_DTYPE_NULL_MAP, _scalar, \
+    _BUILDABLE_ARRAY_DTYPE_MAP
+from deephaven.jcompat import _j_array_to_numpy_array
+from deephaven.time import to_np_datetime64
+
+# For unittest vectorization
+test_vectorization = False
+vectorized_count = 0
+
+
+_SUPPORTED_NP_TYPE_CODES = {"b", "h", "H", "i", "l", "f", "d", "?", "U", "M", "O"}
+
+
+@dataclass
+class _ParsedParamAnnotation:
+    orig_types: set[type] = field(default_factory=set)
+    encoded_types: set[str] = field(default_factory=set)
+    none_allowed: bool = False
+    has_array: bool = False
+    int_char: str = None
+    floating_char: str = None
+
+
+@dataclass
+class _ParsedReturnAnnotation:
+    orig_type: type = None
+    encoded_type: str = None
+    none_allowed: bool = False
+    has_array: bool = False
+
+
+@dataclass
+class _ParsedSignature:
+    fn: Callable = None
+    params: List[_ParsedParamAnnotation] = field(default_factory=list)
+    ret_annotation: _ParsedReturnAnnotation = None
+
+    @property
+    def encoded(self) -> str:
+        """Encode the signature of a Python function by mapping the annotations of the parameter types and the
+        return type to numpy dtype chars (i,l,h,f,d,b,?,U,M,O), with '[' for array and 'N' for NoneType, and pack
+        them into a string: the parameter type chars first, in their original order, followed by the delimiter
+        string '->', then the return type char. If a parameter or the return of the function is not annotated,
+        the default 'O' (object type) will be used.
+        """
+        param_str = ",".join(["".join(p.encoded_types) for p in self.params])
+        # ret_annotation has only one parsed annotation, and it might be Optional which means it contains 'N' in the
+        # encoded type. We need to remove it.
+        return_type_code = re.sub(r"[N]", "", self.ret_annotation.encoded_type)
+        return param_str + "->" + return_type_code
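+
+# For example (an illustrative sketch, assuming _np_dtype_char maps Python int to 'l'
+# and float to 'd', as on a typical 64-bit Linux numpy build):
+#
+#     def f(x: int, y: float) -> str: ...
+#
+# encodes to "l,d->U"; annotating y as Optional[float] instead would add 'N' to that
+# parameter's encoded types and mark it as accepting None.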
+
+
+def _encode_param_type(t: type) -> str:
+    """Returns the numpy based char codes for the given type.
+    If the type is a numpy ndarray, prefix the numpy dtype char with '[' using the Java convention.
+    If the type is a NoneType (as in Optional, or as None in a Union), return 'N'.
+    """
+    if t is type(None):
+        return "N"
+
+    # find the component type if it is a numpy ndarray
+    component_type = _np_ndarray_component_type(t)
+    if component_type:
+        t = component_type
+
+    tc = _np_dtype_char(t)
+    tc = tc if tc in _SUPPORTED_NP_TYPE_CODES else "O"
+
+    if component_type:
+        tc = "[" + tc
+    return tc
+
+
+def _parse_param_annotation(annotation: Any) -> _ParsedParamAnnotation:
+    """ Parse a parameter annotation in a function's signature """
+    p_annotation = _ParsedParamAnnotation()
+
+    if annotation is inspect._empty:
+        p_annotation.encoded_types.add("O")
+        p_annotation.none_allowed = True
+    elif isinstance(annotation, _GenericAlias) and annotation.__origin__ == Union:
+        for t in annotation.__args__:
+            _parse_type_no_nested(annotation, p_annotation, t)
+    else:
+        _parse_type_no_nested(annotation, p_annotation, annotation)
+    return p_annotation
+
+
+def _parse_type_no_nested(annotation: Any, p_annotation: _ParsedParamAnnotation, t: type) -> None:
+    """ Parse a specific type (top level or nested in a top-level Union annotation) without handling nested types
+    (e.g. a nested Union). The result is stored in the given _ParsedParamAnnotation object.
+    """
+    p_annotation.orig_types.add(t)
+    tc = _encode_param_type(t)
+    if "[" in tc:
+        p_annotation.has_array = True
+    if tc in {"N", "O"}:
+        p_annotation.none_allowed = True
+    if tc in _NUMPY_INT_TYPE_CODES:
+        if p_annotation.int_char and p_annotation.int_char != tc:
+            raise DHError(message=f"multiple integer types in annotation: {annotation}, "
+                                  f"types: {p_annotation.int_char}, {tc}. This is not supported because it is not "
+                                  f"clear which Deephaven null value to use when checking for nulls in the argument.")
+        p_annotation.int_char = tc
+    if tc in _NUMPY_FLOATING_TYPE_CODES:
+        if p_annotation.floating_char and p_annotation.floating_char != tc:
+            raise DHError(message=f"multiple floating types in annotation: {annotation}, "
+                                  f"types: {p_annotation.floating_char}, {tc}. This is not supported because it is not "
+                                  f"clear which Deephaven null value to use when checking for nulls in the argument.")
+        p_annotation.floating_char = tc
+    p_annotation.encoded_types.add(tc)
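+
+# For instance (illustrative, assuming Python int encodes to 'l'):
+# _parse_param_annotation(Optional[int]) yields encoded_types == {"l", "N"} with
+# none_allowed=True and int_char='l', while an annotation such as
+# Union[np.int32, np.int64] raises DHError, because two different integer widths
+# would imply two different Deephaven null sentinels.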
+ """ + + pra = _ParsedReturnAnnotation() + + t = annotation + pra.orig_type = t + if isinstance(annotation, _GenericAlias) and annotation.__origin__ == Union and len(annotation.__args__) == 2: + # if the annotation is a Union of two types, we'll use the non-None type + if annotation.__args__[1] == type(None): # noqa: E721 + t = annotation.__args__[0] + elif annotation.__args__[0] == type(None): # noqa: E721 + t = annotation.__args__[1] + component_char = _component_np_dtype_char(t) + if component_char: + pra.encoded_type = "[" + component_char + pra.has_array = True + else: + pra.encoded_type = _np_dtype_char(t) + return pra + + +def _parse_numba_signature(fn: Union[numba.np.ufunc.gufunc.GUFunc, numba.np.ufunc.dufunc.DUFunc]) -> _ParsedSignature: + """ Parse a numba function's signature""" + sigs = fn.types # in the format of ll->l, ff->f,dd->d,OO->O, etc. + if sigs: + p_sig = _ParsedSignature(fn) + + # for now, we only support one signature for a numba function because the query engine is not ready to handle + # multiple signatures for vectorization https://github.com/deephaven/deephaven-core/issues/4762 + sig = sigs[0] + params, rt_char = sig.split("->") + + p_sig.params = [] + p_sig.ret_annotation = _ParsedReturnAnnotation() + p_sig.ret_annotation.encoded_type = rt_char + + if isinstance(fn, numba.np.ufunc.dufunc.DUFunc): + for p in params: + pa = _ParsedParamAnnotation() + pa.encoded_types.add(p) + if p in _NUMPY_INT_TYPE_CODES: + pa.int_char = p + if p in _NUMPY_FLOATING_TYPE_CODES: + pa.floating_char = p + p_sig.params.append(pa) + else: # GUFunc + # An example: @guvectorize([(int64[:], int64[:], int64[:])], "(m),(n)->(n)" + input_output_decl = fn.signature # "(m),(n)->(n)" in the above example + input_decl, output_decl = input_output_decl.split("->") + # remove the parentheses so that empty string indicates no array, non-empty string indicates array + input_decl = re.sub("[()]", "", input_decl).split(",") + output_decl = re.sub("[()]", "", output_decl) + + for p, d in zip(params, input_decl): + pa = _ParsedParamAnnotation() + if d: + pa.encoded_types.add("[" + p) + pa.has_array = True + else: + pa.encoded_types.add(p) + if p in _NUMPY_INT_TYPE_CODES: + pa.int_char = p + if p in _NUMPY_FLOATING_TYPE_CODES: + pa.floating_char = p + p_sig.params.append(pa) + + if output_decl: + p_sig.ret_annotation.has_array = True + return p_sig + else: + raise DHError(message=f"numba decorated functions must have an explicitly defined signature: {fn}") + + +def _parse_np_ufunc_signature(fn: numpy.ufunc) -> _ParsedSignature: + """ Parse the signature of a numpy ufunc """ + + # numpy ufuncs actually have signature encoded in their 'types' attribute, we want to better support + # them in the future (https://github.com/deephaven/deephaven-core/issues/4762) + p_sig = _ParsedSignature(fn) + if fn.nin > 0: + pa = _ParsedParamAnnotation() + pa.encoded_types.add("O") + p_sig.params = [pa] * fn.nin + p_sig.ret_annotation = _ParsedReturnAnnotation() + p_sig.ret_annotation.encoded_type = "O" + return p_sig + + +def _parse_signature(fn: Callable) -> _ParsedSignature: + """ Parse the signature of a function """ + + if isinstance(fn, (numba.np.ufunc.gufunc.GUFunc, numba.np.ufunc.dufunc.DUFunc)): + return _parse_numba_signature(fn) + elif isinstance(fn, numpy.ufunc): + return _parse_np_ufunc_signature(fn) + else: + p_sig = _ParsedSignature(fn=fn) + sig = inspect.signature(fn) + for n, p in sig.parameters.items(): + p_sig.params.append(_parse_param_annotation(p.annotation)) + + p_sig.ret_annotation = 
+
+
+def _parse_signature(fn: Callable) -> _ParsedSignature:
+    """ Parse the signature of a function """
+
+    if isinstance(fn, (numba.np.ufunc.gufunc.GUFunc, numba.np.ufunc.dufunc.DUFunc)):
+        return _parse_numba_signature(fn)
+    elif isinstance(fn, numpy.ufunc):
+        return _parse_np_ufunc_signature(fn)
+    else:
+        p_sig = _ParsedSignature(fn=fn)
+        sig = inspect.signature(fn)
+        for n, p in sig.parameters.items():
+            p_sig.params.append(_parse_param_annotation(p.annotation))
+        p_sig.ret_annotation = _parse_return_annotation(sig.return_annotation)
+        return p_sig
+
+
+def _convert_arg(param: _ParsedParamAnnotation, arg: Any) -> Any:
+    """ Convert a single argument to the type specified by the annotation """
+    if arg is None:
+        if not param.none_allowed:
+            raise TypeError(f"Argument {arg} is not compatible with annotation {param.orig_types}")
+        else:
+            return None
+
+    # if the arg is a Java array
+    if np_dtype := _J_ARRAY_NP_TYPE_MAP.get(type(arg)):
+        encoded_type = "[" + np_dtype.char
+        # if it matches one of the encoded types, convert it
+        if encoded_type in param.encoded_types:
+            dtype = dtypes.from_np_dtype(np_dtype)
+            return _j_array_to_numpy_array(dtype, arg, conv_null=True, type_promotion=False)
+        # if the annotation is missing, or it is a generic object type, return the arg
+        elif "O" in param.encoded_types:
+            return arg
+        else:
+            raise TypeError(f"Argument {arg} is not compatible with annotation {param.encoded_types}")
+    else:  # if the arg is not a Java array
+        specific_types = param.encoded_types - {"N", "O"}  # remove NoneType and object type
+        if specific_types:
+            for t in specific_types:
+                if t.startswith("["):
+                    if isinstance(arg, np.ndarray) and arg.dtype.char == t[1]:
+                        return arg
+                    continue
+
+                dtype = dtypes.from_np_dtype(np.dtype(t))
+                dh_null = _PRIMITIVE_DTYPE_NULL_MAP.get(dtype)
+
+                if param.int_char and isinstance(arg, int):
+                    if arg == dh_null:
+                        if param.none_allowed:
+                            return None
+                        else:
+                            raise DHError(f"Argument {arg} is not compatible with annotation {param.orig_types}")
+                    else:
+                        return np.dtype(param.int_char).type(arg)
+                elif param.floating_char and isinstance(arg, float):
+                    if arg == dh_null:
+                        return np.nan if "N" not in param.encoded_types else None
+                    else:
+                        return np.dtype(param.floating_char).type(arg)
+                elif t == "?" and isinstance(arg, bool):
+                    return arg
+                elif t == "M":
+                    try:
+                        return to_np_datetime64(arg)
+                    except Exception:
+                        # don't raise an error here; if this is the only annotation, the else block of the for loop
+                        # will catch it and raise a TypeError
+                        pass
+                elif t == "U" and isinstance(arg, str):
+                    return arg
+            else:  # didn't return from inside the for loop
+                if "O" in param.encoded_types:
+                    return arg
+                else:
+                    raise TypeError(f"Argument {arg} is not compatible with annotation {param.orig_types}")
+        else:  # if no annotation or generic object type, return the arg
+            return arg
+
+
+def _convert_args(p_sig: _ParsedSignature, args: Tuple[Any, ...]) -> List[Any]:
+    """ Convert all arguments to the types specified by the annotations.
+    Given that the number of arguments and the number of parameters may not match (in the presence of keyword,
+    var-positional, or var-keyword parameters), we have the following rules:
+    If the number of arguments is less than the number of parameters, the remaining parameters are left as is.
+    If the number of arguments is greater than the number of parameters, the extra arguments are left as is.
+
+    Python's function call mechanism will raise an exception if it can't resolve the parameters with the arguments.
+    """
+    converted_args = [_convert_arg(param, arg) for param, arg in zip(p_sig.params, args)]
+    converted_args.extend(args[len(converted_args):])
+    return converted_args
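+
+# A quick illustration (hypothetical values): for a parameter annotated Optional[int],
+# passing the matching Deephaven integer null sentinel converts to None, while the same
+# sentinel passed to a plain int parameter raises DHError; arguments beyond the
+# annotated parameters are passed through unchanged by _convert_args.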
convert Python function return values to Java values. + For properly annotated functions, including numba vectorized and guvectorized ones, this decorator inspects the + signature of the function and determines its return type, including supported primitive types and arrays of + the supported primitive types. It then converts the return value of the function to the corresponding Java value + of the same type. For unsupported types, the decorator returns the original Python value which appears as + org.jpy.PyObject in Java. + 2. convert Java function arguments to Python values based on the signature of the function. + """ + if hasattr(fn, "return_type"): + return fn + p_sig = _parse_signature(fn) + # build a signature string for vectorization by removing NoneType, array char '[', and comma from the encoded types + # since vectorization only supports UDFs with a single signature and enforces an exact match, any non-compliant + # signature (e.g. Union with more than 1 non-NoneType) will be rejected by the vectorizer. + sig_str_vectorization = re.sub(r"[\[N,]", "", p_sig.encoded) + return_array = p_sig.ret_annotation.has_array + ret_dtype = dtypes.from_np_dtype(np.dtype(p_sig.ret_annotation.encoded_type[-1])) + + @wraps(fn) + def wrapper(*args, **kwargs): + converted_args = _convert_args(p_sig, args) + # kwargs are not converted because they are not used in the UDFs + ret = fn(*converted_args, **kwargs) + if return_array: + return dtypes.array(ret_dtype, ret) + elif ret_dtype == dtypes.PyObject: + return ret + else: + return _scalar(ret, ret_dtype) + + wrapper.j_name = ret_dtype.j_name + real_ret_dtype = _BUILDABLE_ARRAY_DTYPE_MAP.get(ret_dtype, dtypes.PyObject) if return_array else ret_dtype + + if hasattr(ret_dtype.j_type, 'jclass'): + j_class = real_ret_dtype.j_type.jclass + else: + j_class = real_ret_dtype.qst_type.clazz() + + wrapper.return_type = j_class + wrapper.signature = sig_str_vectorization + + return wrapper + + +def _dh_vectorize(fn): + """A decorator to vectorize a Python function used in Deephaven query formulas and invoked on a row basis. + + If this annotation is not used on a query function, the Deephaven query engine will make an effort to vectorize + the function. If vectorization is not possible, the query engine will use the original, non-vectorized function. + If this annotation is used on a function, the Deephaven query engine will use the vectorized function in a query, + or an error will result if the function can not be vectorized. + + When this decorator is used on a function, the number and type of input and output arguments are changed. + These changes are only intended for use by the Deephaven query engine. Users are discouraged from using + vectorized functions in non-query code, since the function signature may change in future versions. + + The current vectorized function signature includes (1) the size of the input arrays, (2) the output array, + and (3) the input arrays. + """ + p_sig = _parse_signature(fn) + ret_dtype = dtypes.from_np_dtype(np.dtype(p_sig.ret_annotation.encoded_type[-1])) + + @wraps(fn) + def wrapper(*args): + if len(args) != len(p_sig.params) + 2: + raise ValueError( + f"The number of arguments doesn't match the function signature. {len(args) - 2}, {p_sig.encoded}") + if args[0] <= 0: + raise ValueError(f"The chunk size argument must be a positive integer. 
{args[0]}") + + chunk_size = args[0] + chunk_result = args[1] + if args[2:]: + vectorized_args = zip(*args[2:]) + for i in range(chunk_size): + scalar_args = next(vectorized_args) + converted_args = _convert_args(p_sig, scalar_args) + chunk_result[i] = _scalar(fn(*converted_args), ret_dtype) + else: + for i in range(chunk_size): + chunk_result[i] = _scalar(fn(), ret_dtype) + + return chunk_result + + wrapper.callable = fn + wrapper.dh_vectorized = True + + if test_vectorization: + global vectorized_count + vectorized_count += 1 + + return wrapper \ No newline at end of file diff --git a/py/server/deephaven/dtypes.py b/py/server/deephaven/dtypes.py index 5f5857ffdbe..56d2f25ca0d 100644 --- a/py/server/deephaven/dtypes.py +++ b/py/server/deephaven/dtypes.py @@ -102,6 +102,8 @@ def __call__(self, *args, **kwargs): """Double-precision floating-point number type""" string = DType(j_name="java.lang.String", qst_type=_JQstType.stringType(), np_type=np.str_) """String type""" +Character = DType(j_name="java.lang.Character") +"""Character type""" BigDecimal = DType(j_name="java.math.BigDecimal") """Java BigDecimal type""" StringSet = DType(j_name="io.deephaven.stringset.StringSet") @@ -188,6 +190,20 @@ def __call__(self, *args, **kwargs): } +_J_ARRAY_NP_TYPE_MAP = { + boolean_array.j_type: np.dtype("?"), + byte_array.j_type: np.dtype("b"), + char_array.j_type: np.dtype("uint16"), + short_array.j_type: np.dtype("h"), + int32_array.j_type: np.dtype("i"), + long_array.j_type: np.dtype("l"), + float32_array.j_type: np.dtype("f"), + double_array.j_type: np.dtype("d"), + string_array.j_type: np.dtype("U"), + instant_array.j_type: np.dtype("datetime64[ns]"), +} + + def null_remap(dtype: DType) -> Callable[[Any], Any]: """ Creates a null value remap function for the provided DType. @@ -325,8 +341,19 @@ def from_np_dtype(np_dtype: Union[np.dtype, pd.api.extensions.ExtensionDtype]) - return PyObject -_NUMPY_INT_TYPE_CODES = ["i", "l", "h", "b"] -_NUMPY_FLOATING_TYPE_CODES = ["f", "d"] +_NUMPY_INT_TYPE_CODES = {"b", "h", "H", "i", "l"} +_NUMPY_FLOATING_TYPE_CODES = {"f", "d"} + + +def _is_py_null(x: Any) -> bool: + """Checks if the value is a Python null value, i.e. None or NaN, or Pandas.NA.""" + if x is None: + return True + + try: + return bool(pd.isna(x)) + except (TypeError, ValueError): + return False def _scalar(x: Any, dtype: DType) -> Any: @@ -336,12 +363,14 @@ def _scalar(x: Any, dtype: DType) -> Any: # NULL_BOOL will appear in Java as a byte value which causes a cast error. We just let JPY converts it to Java null # and the engine has casting logic to handle it. 
- if x is None and dtype != bool_ and _PRIMITIVE_DTYPE_NULL_MAP.get(dtype): - return _PRIMITIVE_DTYPE_NULL_MAP[dtype] + if (dt := _PRIMITIVE_DTYPE_NULL_MAP.get(dtype)) and _is_py_null(x) and dtype not in (bool_, char): + return dt try: if hasattr(x, "dtype"): - if x.dtype.char in _NUMPY_INT_TYPE_CODES: + if x.dtype.char == 'H': # np.uint16 maps to Java char + return Character(int(x)) + elif x.dtype.char in _NUMPY_INT_TYPE_CODES: return int(x) elif x.dtype.char in _NUMPY_FLOATING_TYPE_CODES: return float(x) @@ -382,20 +411,32 @@ def _component_np_dtype_char(t: type) -> Optional[str]: if isinstance(t, _GenericAlias) and issubclass(t.__origin__, Sequence): component_type = t.__args__[0] + if not component_type: + component_type = _np_ndarray_component_type(t) + + if component_type: + return _np_dtype_char(component_type) + else: + return None + + +def _np_ndarray_component_type(t: type) -> Optional[type]: + """Returns the numpy ndarray component type if the type is a numpy ndarray, otherwise return None.""" + # Py3.8: npt.NDArray can be used in Py 3.8 as a generic alias, but a specific alias (e.g. npt.NDArray[np.int64]) # is an instance of a private class of np, yet we don't have a choice but to use it. And when npt.NDArray is used, # the 1st argument is typing.Any, the 2nd argument is another generic alias of which the 1st argument is the # component type - if not component_type and sys.version_info.minor == 8: + component_type = None + if sys.version_info.major == 3 and sys.version_info.minor == 8: if isinstance(t, np._typing._generic_alias._GenericAlias) and t.__origin__ == np.ndarray: component_type = t.__args__[1].__args__[0] - # Py3.9+, np.ndarray as a generic alias is only supported in Python 3.9+, also npt.NDArray is still available but a # specific alias (e.g. npt.NDArray[np.int64]) now is an instance of typing.GenericAlias. # when npt.NDArray is used, the 1st argument is typing.Any, the 2nd argument is another generic alias of which # the 1st argument is the component type # when np.ndarray is used, the 1st argument is the component type - if not component_type and sys.version_info.minor > 8: + if not component_type and sys.version_info.major == 3 and sys.version_info.minor > 8: import types if isinstance(t, types.GenericAlias) and (issubclass(t.__origin__, Sequence) or t.__origin__ == np.ndarray): nargs = len(t.__args__) @@ -406,8 +447,4 @@ def _component_np_dtype_char(t: type) -> Optional[str]: a1 = t.__args__[1] if a0 == typing.Any and isinstance(a1, types.GenericAlias): component_type = a1.__args__[0] - - if component_type: - return _np_dtype_char(component_type) - else: - return None + return component_type diff --git a/py/server/deephaven/jcompat.py b/py/server/deephaven/jcompat.py index d12f0d01f64..c1d54a2f443 100644 --- a/py/server/deephaven/jcompat.py +++ b/py/server/deephaven/jcompat.py @@ -5,12 +5,29 @@ """ This module provides Java compatibility support including convenience functions to create some widely used Java data structures from corresponding Python ones in order to be able to call Java methods. 
""" -from typing import Any, Callable, Dict, Iterable, List, Sequence, Set, TypeVar, Union +from typing import Any, Callable, Dict, Iterable, List, Sequence, Set, TypeVar, Union, Tuple, Literal import jpy +import numpy as np +import pandas as pd +from deephaven import dtypes, DHError from deephaven._wrapper import unwrap, wrap_j_object -from deephaven.dtypes import DType +from deephaven.dtypes import DType, _PRIMITIVE_DTYPE_NULL_MAP, _J_ARRAY_NP_TYPE_MAP + +_NULL_BOOLEAN_AS_BYTE = jpy.get_type("io.deephaven.util.BooleanUtils").NULL_BOOLEAN_AS_BYTE +_JPrimitiveArrayConversionUtility = jpy.get_type("io.deephaven.integrations.common.PrimitiveArrayConversionUtility") + +_DH_PANDAS_NULLABLE_TYPE_MAP: Dict[DType, pd.api.extensions.ExtensionDtype] = { + dtypes.bool_: pd.BooleanDtype, + dtypes.byte: pd.Int8Dtype, + dtypes.short: pd.Int16Dtype, + dtypes.char: pd.UInt16Dtype, + dtypes.int32: pd.Int32Dtype, + dtypes.int64: pd.Int64Dtype, + dtypes.float32: pd.Float32Dtype, + dtypes.float64: pd.Float64Dtype, +} def is_java_type(obj: Any) -> bool: @@ -181,11 +198,109 @@ def to_sequence(v: Union[T, Sequence[T]] = None, wrapped: bool = False) -> Seque return () if wrapped: if not isinstance(v, Sequence) or isinstance(v, str): - return (v, ) + return (v,) else: return tuple(v) if not isinstance(v, Sequence) or isinstance(v, str): - return (unwrap(v), ) + return (unwrap(v),) else: return tuple((unwrap(o) for o in v)) + + +def _j_array_to_numpy_array(dtype: DType, j_array: jpy.JType, conv_null: bool, type_promotion: bool = False) -> \ + np.ndarray: + """ Produces a numpy array from the DType and given Java array. + + Args: + dtype (DType): The dtype of the Java array + j_array (jpy.JType): The Java array to convert + conv_null (bool): If True, convert nulls to the null value for the dtype + type_promotion (bool): Ignored when conv_null is False. When type_promotion is False, (1) input Java integer, + boolean, or character arrays containing Deephaven nulls yield an exception, (2) input Java float or double + arrays containing Deephaven nulls have null values converted to np.nan, and (3) input Java arrays without + Deephaven nulls are converted to the target type. When type_promotion is True, (1) input Java integer, + boolean, or character arrays containing Deephaven nulls are converted to np.float64 arrays and Deephaven + null values are converted to np.nan, (2) input Java float or double arrays containing Deephaven nulls have + null values converted to np.nan, and (3) input Java arrays without Deephaven nulls are converted to the + target type. Defaults to False. 
+ + Returns: + np.ndarray: The numpy array + + Raises: + DHError + """ + if dtype.is_primitive: + np_array = np.frombuffer(j_array, dtype.np_type) + elif dtype == dtypes.Instant: + longs = _JPrimitiveArrayConversionUtility.translateArrayInstantToLong(j_array) + np_long_array = np.frombuffer(longs, np.int64) + np_array = np_long_array.view(dtype.np_type) + elif dtype == dtypes.bool_: + # DH nulls will be preserved and show up as True because the underlying byte array isn't modified + bytes_ = _JPrimitiveArrayConversionUtility.translateArrayBooleanToByte(j_array) + np_array = np.frombuffer(bytes_, dtype.np_type) + elif dtype == dtypes.string: + np_array = np.array(j_array, dtypes.string.np_type) + elif dtype.np_type is not np.object_: + try: + np_array = np.frombuffer(j_array, dtype.np_type) + except Exception: + np_array = np.array(j_array, np.object_) + else: + np_array = np.array(j_array, np.object_) + + if conv_null: + if dh_null := _PRIMITIVE_DTYPE_NULL_MAP.get(dtype): + if dtype in (dtypes.float32, dtypes.float64): + np_array = np.copy(np_array) + np_array[np_array == dh_null] = np.nan + else: + if dtype is dtypes.bool_: # needs to change its type to byte for DH null detection + np_array = np.frombuffer(np_array, np.byte) + + if (np_array == dh_null).any(): + if not type_promotion: + raise DHError(f"Problem creating numpy array. Java {dtype} array contains Deephaven null values, but numpy {np_array.dtype} array does not support null values") + np_array = np_array.astype(np.float64) + np_array[np_array == dh_null] = np.nan + else: + if dtype is dtypes.bool_: # needs to change its type back to bool + np_array = np.frombuffer(np_array, np.bool_) + return np_array + + return np_array + + +def _j_array_to_series(dtype: DType, j_array: jpy.JType, conv_null: bool) -> pd.Series: + """Produce a copy of the specified Java array as a pandas.Series object. + + Args: + dtype (DType): the dtype of the Java array + j_array (jpy.JType): the Java array + conv_null (bool): whether to check for Deephaven nulls in the data and automatically replace them with + pd.NA. 
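The pandas path below applies the same masking idea with nullable extension dtypes; a self-contained sketch of that step (values illustrative, no Java array involved):

import numpy as np
import pandas as pd

NULL_INT = -2147483648  # Deephaven's NULL_INT sentinel (Integer.MIN_VALUE)

np_array = np.array([1, NULL_INT, 3], dtype=np.int32)
s = pd.Series(data=np_array, dtype=pd.Int32Dtype(), copy=False)
s = s.mask(s == NULL_INT)  # sentinel values become pd.NA
# s is now [1, <NA>, 3] with dtype Int32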
+ + Returns: + a pandas Series + + Raises: + DHError + """ + if conv_null and dtype == dtypes.bool_: + j_array = _JPrimitiveArrayConversionUtility.translateArrayBooleanToByte(j_array) + np_array = np.frombuffer(j_array, dtype=np.byte) + s = pd.Series(data=np_array, dtype=pd.Int8Dtype(), copy=False) + s.mask(s == _NULL_BOOLEAN_AS_BYTE, inplace=True) + return s.astype(pd.BooleanDtype(), copy=False) + + np_array = _j_array_to_numpy_array(dtype, j_array, conv_null=False) + if conv_null and (nv := _PRIMITIVE_DTYPE_NULL_MAP.get(dtype)) is not None: + pd_ex_dtype = _DH_PANDAS_NULLABLE_TYPE_MAP.get(dtype) + s = pd.Series(data=np_array, dtype=pd_ex_dtype(), copy=False) + s.mask(s == nv, inplace=True) + else: + s = pd.Series(data=np_array, copy=False) + + return s diff --git a/py/server/deephaven/numpy.py b/py/server/deephaven/numpy.py index 412b6e8b5ac..3cc898271b3 100644 --- a/py/server/deephaven/numpy.py +++ b/py/server/deephaven/numpy.py @@ -8,13 +8,13 @@ import jpy import numpy as np -from deephaven.dtypes import DType -from deephaven import DHError, dtypes, empty_table, new_table +from deephaven import DHError, dtypes, new_table from deephaven.column import Column, InputColumn +from deephaven.dtypes import DType +from deephaven.jcompat import _j_array_to_numpy_array from deephaven.table import Table -_JPrimitiveArrayConversionUtility = jpy.get_type("io.deephaven.integrations.common.PrimitiveArrayConversionUtility") _JDataAccessHelpers = jpy.get_type("io.deephaven.engine.table.impl.DataAccessHelpers") @@ -25,28 +25,9 @@ def _to_column_name(name: str) -> str: def column_to_numpy_array(col_def: Column, j_array: jpy.JType) -> np.ndarray: - """ Produces a numpy array from the given Java array and the Table column definition. """ + """ Produces a numpy array from the given Java array and the Table column definition.""" try: - if col_def.data_type.is_primitive: - np_array = np.frombuffer(j_array, col_def.data_type.np_type) - elif col_def.data_type == dtypes.Instant: - longs = _JPrimitiveArrayConversionUtility.translateArrayInstantToLong(j_array) - np_long_array = np.frombuffer(longs, np.int64) - np_array = np_long_array.view(col_def.data_type.np_type) - elif col_def.data_type == dtypes.bool_: - bytes_ = _JPrimitiveArrayConversionUtility.translateArrayBooleanToByte(j_array) - np_array = np.frombuffer(bytes_, col_def.data_type.np_type) - elif col_def.data_type == dtypes.string: - np_array = np.array([s for s in j_array], dtypes.string.np_type) - elif col_def.data_type.np_type is not np.object_: - try: - np_array = np.frombuffer(j_array, col_def.data_type.np_type) - except: - np_array = np.array(j_array, np.object_) - else: - np_array = np.array(j_array, np.object_) - - return np_array + return _j_array_to_numpy_array(col_def.data_type, j_array, conv_null=False, type_promotion=False) except DHError: raise except Exception as e: diff --git a/py/server/deephaven/pandas.py b/py/server/deephaven/pandas.py index 883622ce27b..8626b999e11 100644 --- a/py/server/deephaven/pandas.py +++ b/py/server/deephaven/pandas.py @@ -3,7 +3,7 @@ # """ This module supports the conversion between Deephaven tables and pandas DataFrames. 
""" -from typing import List, Dict, Tuple, Literal +from typing import List, Literal import jpy import numpy as np @@ -13,26 +13,14 @@ from deephaven import DHError, new_table, dtypes, arrow from deephaven.column import Column from deephaven.constants import NULL_BYTE, NULL_SHORT, NULL_INT, NULL_LONG, NULL_FLOAT, NULL_DOUBLE, NULL_CHAR -from deephaven.dtypes import DType -from deephaven.numpy import column_to_numpy_array, _make_input_column +from deephaven.jcompat import _j_array_to_series +from deephaven.numpy import _make_input_column from deephaven.table import Table _NULL_BOOLEAN_AS_BYTE = jpy.get_type("io.deephaven.util.BooleanUtils").NULL_BOOLEAN_AS_BYTE -_JPrimitiveArrayConversionUtility = jpy.get_type("io.deephaven.integrations.common.PrimitiveArrayConversionUtility") _JDataAccessHelpers = jpy.get_type("io.deephaven.engine.table.impl.DataAccessHelpers") _is_dtype_backend_supported = pd.__version__ >= "2.0.0" -_DTYPE_NULL_MAPPING: Dict[DType, Tuple] = { - dtypes.bool_: (_NULL_BOOLEAN_AS_BYTE, pd.BooleanDtype), - dtypes.byte: (NULL_BYTE, pd.Int8Dtype), - dtypes.short: (NULL_SHORT, pd.Int16Dtype), - dtypes.char: (NULL_CHAR, pd.UInt16Dtype), - dtypes.int32: (NULL_INT, pd.Int32Dtype), - dtypes.int64: (NULL_LONG, pd.Int64Dtype), - dtypes.float32: (NULL_FLOAT, pd.Float32Dtype), - dtypes.float64: (NULL_DOUBLE, pd.Float64Dtype), -} - def _column_to_series(table: Table, col_def: Column, conv_null: bool) -> pd.Series: """Produce a copy of the specified column as a pandas.Series object. @@ -51,29 +39,15 @@ def _column_to_series(table: Table, col_def: Column, conv_null: bool) -> pd.Seri """ try: data_col = _JDataAccessHelpers.getColumn(table.j_table, col_def.name) - if conv_null and col_def.data_type == dtypes.bool_: - j_array = _JPrimitiveArrayConversionUtility.translateArrayBooleanToByte(data_col.getDirect()) - np_array = np.frombuffer(j_array, dtype=np.byte) - s = pd.Series(data=np_array, dtype=pd.Int8Dtype(), copy=False) - s.mask(s == _NULL_BOOLEAN_AS_BYTE, inplace=True) - return s.astype(pd.BooleanDtype(), copy=False) - - np_array = column_to_numpy_array(col_def, data_col.getDirect()) - if conv_null and (null_pair := _DTYPE_NULL_MAPPING.get(col_def.data_type)) is not None: - nv = null_pair[0] - pd_ex_dtype = null_pair[1] - s = pd.Series(data=np_array, dtype=pd_ex_dtype(), copy=False) - s.mask(s == nv, inplace=True) - else: - s = pd.Series(data=np_array, copy=False) - return s + j_array = data_col.getDirect() + return _j_array_to_series(col_def.data_type, j_array, conv_null) except DHError: raise except Exception as e: raise DHError(e, message="failed to create a pandas Series for {col}") from e -_DTYPE_MAPPING_PYARROW = { +_PANDAS_ARROW_TYPE_MAP = { pa.int8(): pd.ArrowDtype(pa.int8()), pa.int16(): pd.ArrowDtype(pa.int16()), pa.int32(): pd.ArrowDtype(pa.int32()), @@ -90,7 +64,7 @@ def _column_to_series(table: Table, col_def: Column, conv_null: bool) -> pd.Seri pa.timestamp('ns', tz='UTC'): pd.ArrowDtype(pa.timestamp('ns', tz='UTC')), } -_DTYPE_MAPPING_NUMPY_NULLABLE = { +_PANDAS_NULLABLE_TYPE_MAP = { pa.int8(): pd.Int8Dtype(), pa.int16(): pd.Int16Dtype(), pa.uint16(): pd.UInt16Dtype(), @@ -107,8 +81,8 @@ def _column_to_series(table: Table, col_def: Column, conv_null: bool) -> pd.Seri } _PYARROW_TO_PANDAS_TYPE_MAPPERS = { - "pyarrow": _DTYPE_MAPPING_PYARROW.get, - "numpy_nullable": _DTYPE_MAPPING_NUMPY_NULLABLE.get, + "pyarrow": _PANDAS_ARROW_TYPE_MAP.get, + "numpy_nullable": _PANDAS_NULLABLE_TYPE_MAP.get, } @@ -180,7 +154,7 @@ def to_pandas(table: Table, cols: List[str] = None, raise 
DHError(e, "failed to create a pandas DataFrame from table.") from e -_EX_DTYPE_NULL_MAP = { +_PANDAS_EXTYPE_DH_NULL_MAP = { # This reflects the fact that in the server we use NULL_BOOLEAN_AS_BYTE - the byte encoding of null boolean to # translate boxed Boolean to/from primitive bytes pd.BooleanDtype: _NULL_BOOLEAN_AS_BYTE, @@ -209,7 +183,7 @@ def _map_na(array: [np.ndarray, pd.api.extensions.ExtensionArray]): if not isinstance(pd_dtype, pd.api.extensions.ExtensionDtype): return array - dh_null = _EX_DTYPE_NULL_MAP.get(type(pd_dtype)) or _EX_DTYPE_NULL_MAP.get(pd_dtype) + dh_null = _PANDAS_EXTYPE_DH_NULL_MAP.get(type(pd_dtype)) or _PANDAS_EXTYPE_DH_NULL_MAP.get(pd_dtype) # To preserve NaNs in floating point arrays, Pandas doesn't distinguish NaN/Null as far as NA testing is # concerned, thus its fillna() method will replace both NaN/Null in the data. if isinstance(pd_dtype, (pd.Float32Dtype, pd.Float64Dtype)) and isinstance(getattr(array, "_data"), np.ndarray): @@ -276,3 +250,4 @@ def to_table(df: pd.DataFrame, cols: List[str] = None) -> Table: raise except Exception as e: raise DHError(e, "failed to create a Deephaven Table from a pandas DataFrame.") from e + diff --git a/py/server/deephaven/server/__init__.py b/py/server/deephaven/server/__init__.py index b5af5621f70..cb8603f5850 100644 --- a/py/server/deephaven/server/__init__.py +++ b/py/server/deephaven/server/__init__.py @@ -1,6 +1,3 @@ # # Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending # - -# Packages under the deephaven.server heading are not meant to be called externally - it exists as a convenient place -# for the server to execute implementation logic via python diff --git a/py/server/deephaven/server/executors.py b/py/server/deephaven/server/executors.py new file mode 100644 index 00000000000..b655621468a --- /dev/null +++ b/py/server/deephaven/server/executors.py @@ -0,0 +1,63 @@ +# +# Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending +# +""" +Support for running operations on JVM server threads, so that they can be given work from python. Initially, there +are two executors, "serial" and "concurrent". Any task that will take an exclusive UGP lock should use the serial +executor, otherwise the concurrent executor should be used. In the future there may be a "fast" executor, for use +when there is no chance of using either lock. +""" + +from typing import Callable, Dict, List +import jpy +from deephaven.jcompat import j_runnable +from deephaven import DHError + + +_executors: Dict[str, Callable[[Callable[[], None]], None]] = {} + + +def has_executor(executor_name: str) -> bool: + """ + Returns True if an executor exists with that name. + """ + return executor_name in executor_names() + + +def executor_names() -> List[str]: + """ + Returns: the List of known executor names + """ + return list(_executors.keys()) + + +def submit_task(executor_name: str, task: Callable[[], None]) -> None: + """ + Submits a task to run on a named executor. If no such executor exists, raises KeyError. + + Typically, tasks should not block on other threads. Ensure tasks never block on other tasks submitted to the same executor. + + Args: + executor_name (str): the name of the executor to submit the task to + task (Callable[[], None]): the function to run on the named executor + + Raises: + KeyError if the executor name + """ + _executors[executor_name](task) + + +def _register_named_java_executor(executor_name: str, java_executor: jpy.JType) -> None: + """ + Provides a Java executor for user code to submit tasks to. 
Called during server startup. + + Args: + executor_name (str): the name of the executor to register + java_executor (jpy.JType): a Java Consumer instance + + Raises: + DHError + """ + if executor_name in executor_names(): + raise DHError(f"Executor with name {executor_name} already registered") + _executors[executor_name] = lambda task: java_executor.accept(j_runnable(task)) diff --git a/py/server/deephaven/table.py b/py/server/deephaven/table.py index 89fa8df9c19..922e6b3dcd1 100644 --- a/py/server/deephaven/table.py +++ b/py/server/deephaven/table.py @@ -11,13 +11,10 @@ import inspect from enum import Enum from enum import auto -from functools import wraps -from typing import Any, Optional, Callable, Dict, _GenericAlias +from typing import Any, Optional, Callable, Dict from typing import Sequence, List, Union, Protocol import jpy -import numba -import numpy as np from deephaven import DHError from deephaven import dtypes @@ -31,8 +28,6 @@ from deephaven.jcompat import to_sequence, j_array_list from deephaven.update_graph import auto_locking_ctx, UpdateGraph from deephaven.updateby import UpdateByOperation -from deephaven.dtypes import _BUILDABLE_ARRAY_DTYPE_MAP, _scalar, _np_dtype_char, \ - _component_np_dtype_char # Table _J_Table = jpy.get_type("io.deephaven.engine.table.Table") @@ -80,10 +75,6 @@ _JMultiJoinTable = jpy.get_type("io.deephaven.engine.table.MultiJoinTable") _JMultiJoinFactory = jpy.get_type("io.deephaven.engine.table.MultiJoinFactory") -# For unittest vectorization -_test_vectorization = False -_vectorized_count = 0 - class NodeType(Enum): """An enum of node types for RollupTable""" @@ -363,178 +354,6 @@ def _j_py_script_session() -> _JPythonScriptSession: return None -_SUPPORTED_NP_TYPE_CODES = ["i", "l", "h", "f", "d", "b", "?", "U", "M", "O"] - - -def _parse_annotation(annotation: Any) -> Any: - """Parse a Python annotation, for now mostly to extract the non-None type from an Optional(Union) annotation, - otherwise return the original annotation. """ - if isinstance(annotation, _GenericAlias) and annotation.__origin__ == Union and len(annotation.__args__) == 2: - if annotation.__args__[1] == type(None): # noqa: E721 - return annotation.__args__[0] - elif annotation.__args__[0] == type(None): # noqa: E721 - return annotation.__args__[1] - else: - return annotation - else: - return annotation - - -def _encode_signature(fn: Callable) -> str: - """Encode the signature of a Python function by mapping the annotations of the parameter types and the return - type to numpy dtype chars (i,l,h,f,d,b,?,U,M,O), and pack them into a string with parameter type chars first, - in their original order, followed by the delimiter string '->', then the return type_char. - - If a parameter or the return of the function is not annotated, the default 'O' - object type, will be used. - """ - try: - sig = inspect.signature(fn) - except: - # in case inspect.signature() fails, we'll just use the default 'O' - object type. 
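Returning to the executors module added above, a hedged usage sketch (assumes a running Deephaven server, which registers the "serial" and "concurrent" executors at startup):

from deephaven.server.executors import executor_names, submit_task

def report():
    # Tasks run on a JVM thread; anything taking an exclusive UGP lock belongs on "serial".
    print("running on a server executor")

# executor_names()               # e.g. ['serial', 'concurrent'] once the server is up
# submit_task("serial", report)  # raises KeyError if no such executor is registered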
- # numpy ufuncs actually have signature encoded in their 'types' attribute, we want to better support - # them in the future (https://github.com/deephaven/deephaven-core/issues/4762) - if type(fn) == np.ufunc: - return "O"*fn.nin + "->" + "O" - return "->O" - - np_type_codes = [] - for n, p in sig.parameters.items(): - p_annotation = _parse_annotation(p.annotation) - np_type_codes.append(_np_dtype_char(p_annotation)) - - return_annotation = _parse_annotation(sig.return_annotation) - return_type_code = _np_dtype_char(return_annotation) - np_type_codes = [c if c in _SUPPORTED_NP_TYPE_CODES else "O" for c in np_type_codes] - return_type_code = return_type_code if return_type_code in _SUPPORTED_NP_TYPE_CODES else "O" - - np_type_codes.extend(["-", ">", return_type_code]) - return "".join(np_type_codes) - - -def _udf_return_dtype(fn): - if isinstance(fn, (numba.np.ufunc.dufunc.DUFunc, numba.np.ufunc.gufunc.GUFunc)) and hasattr(fn, "types"): - return dtypes.from_np_dtype(np.dtype(fn.types[0][-1])) - else: - return dtypes.from_np_dtype(np.dtype(_encode_signature(fn)[-1])) - - -def _py_udf(fn: Callable): - """A decorator that acts as a transparent translator for Python UDFs used in Deephaven query formulas between - Python and Java. This decorator is intended for use by the Deephaven query engine and should not be used by - users. - - For now, this decorator is only capable of converting Python function return values to Java values. It - does not yet convert Java values in arguments to usable Python object (e.g. numpy arrays) or properly translate - Deephaven primitive null values. - - For properly annotated functions, including numba vectorized and guvectorized ones, this decorator inspects the - signature of the function and determines its return type, including supported primitive types and arrays of - the supported primitive types. It then converts the return value of the function to the corresponding Java value - of the same type. For unsupported types, the decorator returns the original Python value which appears as - org.jpy.PyObject in Java. - """ - - if hasattr(fn, "return_type"): - return fn - ret_dtype = _udf_return_dtype(fn) - - return_array = False - # If the function is a numba guvectorized function, examine the signature of the function to determine if it - # returns an array. - if isinstance(fn, numba.np.ufunc.gufunc.GUFunc): - sig = fn.signature - rtype = sig.split("->")[-1].strip("()") - if rtype: - return_array = True - else: - try: - return_annotation = _parse_annotation(inspect.signature(fn).return_annotation) - except ValueError: - # the function has no return annotation, and since we can't know what the exact type is, the return type - # defaults to the generic object type therefore it is not an array of a specific type, - # but see (https://github.com/deephaven/deephaven-core/issues/4762) for future imporvement to better support - # numpy ufuncs. 
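For reference, the kind of numba function the new signature parser consumes, and the two attributes it reads (a sketch; exact type chars such as 'l' are platform-dependent):

from numba import guvectorize, int64

@guvectorize([(int64[:], int64, int64[:])], "(m),()->(m)", nopython=True)
def add_scalar(x, y, res):
    # res is the gufunc's output array, matching the "(m)" on the right of "->"
    for i in range(len(x)):
        res[i] = x[i] + y

# add_scalar.types     -> entries like 'll->l' (split into parameter chars and return char)
# add_scalar.signature -> "(m),()->(m)"       (used to decide which parameters are arrays)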
- pass - else: - component_type = _component_np_dtype_char(return_annotation) - if component_type: - ret_dtype = dtypes.from_np_dtype(np.dtype(component_type)) - if ret_dtype in _BUILDABLE_ARRAY_DTYPE_MAP: - return_array = True - - @wraps(fn) - def wrapper(*args, **kwargs): - ret = fn(*args, **kwargs) - if return_array: - return dtypes.array(ret_dtype, ret) - elif ret_dtype == dtypes.PyObject: - return ret - else: - return _scalar(ret, ret_dtype) - - wrapper.j_name = ret_dtype.j_name - real_ret_dtype = _BUILDABLE_ARRAY_DTYPE_MAP.get(ret_dtype) if return_array else ret_dtype - - if hasattr(ret_dtype.j_type, 'jclass'): - j_class = real_ret_dtype.j_type.jclass - else: - j_class = real_ret_dtype.qst_type.clazz() - - wrapper.return_type = j_class - - return wrapper - - -def dh_vectorize(fn): - """A decorator to vectorize a Python function used in Deephaven query formulas and invoked on a row basis. - - If this annotation is not used on a query function, the Deephaven query engine will make an effort to vectorize - the function. If vectorization is not possible, the query engine will use the original, non-vectorized function. - If this annotation is used on a function, the Deephaven query engine will use the vectorized function in a query, - or an error will result if the function can not be vectorized. - - When this decorator is used on a function, the number and type of input and output arguments are changed. - These changes are only intended for use by the Deephaven query engine. Users are discouraged from using - vectorized functions in non-query code, since the function signature may change in future versions. - - The current vectorized function signature includes (1) the size of the input arrays, (2) the output array, - and (3) the input arrays. - """ - signature = _encode_signature(fn) - ret_dtype = _udf_return_dtype(fn) - - @wraps(fn) - def wrapper(*args): - if len(args) != len(signature) - len("->?") + 2: - raise ValueError( - f"The number of arguments doesn't match the function signature. {len(args) - 2}, {signature}") - if args[0] <= 0: - raise ValueError(f"The chunk size argument must be a positive integer. {args[0]}") - - chunk_size = args[0] - chunk_result = args[1] - if args[2:]: - vectorized_args = zip(*args[2:]) - for i in range(chunk_size): - scalar_args = next(vectorized_args) - chunk_result[i] = _scalar(fn(*scalar_args), ret_dtype) - else: - for i in range(chunk_size): - chunk_result[i] = _scalar(fn(), ret_dtype) - - return chunk_result - - wrapper.callable = fn - wrapper.signature = signature - wrapper.dh_vectorized = True - - if _test_vectorization: - global _vectorized_count - _vectorized_count += 1 - - return wrapper - - @contextlib.contextmanager def _query_scope_ctx(): """A context manager to set/unset query scope based on the scope of the most immediate caller code that invokes @@ -3712,6 +3531,7 @@ def update_by(self, ops: Union[UpdateByOperation, List[UpdateByOperation]], except Exception as e: raise DHError(e, "update-by operation on the PartitionedTableProxy failed.") from e + class MultiJoinInput(JObjectWrapper): """A MultiJoinInput represents the input tables, key columns and additional columns to be used in the multi-table natural join. 
""" @@ -3779,7 +3599,8 @@ def __init__(self, input: Union[Table, Sequence[Table], MultiJoinInput, Sequence with auto_locking_ctx(*tables): j_tables = to_sequence(input) self.j_multijointable = _JMultiJoinFactory.of(on, *j_tables) - elif isinstance(input, MultiJoinInput) or (isinstance(input, Sequence) and all(isinstance(ji, MultiJoinInput) for ji in input)): + elif isinstance(input, MultiJoinInput) or ( + isinstance(input, Sequence) and all(isinstance(ji, MultiJoinInput) for ji in input)): if on is not None: raise DHError(message="on parameter is not permitted when MultiJoinInput objects are provided.") wrapped_input = to_sequence(input, wrapped=True) @@ -3788,13 +3609,13 @@ def __init__(self, input: Union[Table, Sequence[Table], MultiJoinInput, Sequence input = to_sequence(input) self.j_multijointable = _JMultiJoinFactory.of(*input) else: - raise DHError(message="input must be a Table, a sequence of Tables, a MultiJoinInput, or a sequence of MultiJoinInputs.") + raise DHError( + message="input must be a Table, a sequence of Tables, a MultiJoinInput, or a sequence of MultiJoinInputs.") except Exception as e: raise DHError(e, "failed to build a MultiJoinTable object.") from e - def multi_join(input: Union[Table, Sequence[Table], MultiJoinInput, Sequence[MultiJoinInput]], on: Union[str, Sequence[str]] = None) -> MultiJoinTable: """ The multi_join method creates a new table by performing a multi-table natural join on the input tables. The result @@ -3812,4 +3633,4 @@ def multi_join(input: Union[Table, Sequence[Table], MultiJoinInput, Sequence[Mul MultiJoinTable: the result of the multi-table natural join operation. To access the underlying Table, use the table() method. """ - return MultiJoinTable(input, on) \ No newline at end of file + return MultiJoinTable(input, on) diff --git a/py/server/deephaven/table_factory.py b/py/server/deephaven/table_factory.py index 033d4c7aec2..5dc5e934f17 100644 --- a/py/server/deephaven/table_factory.py +++ b/py/server/deephaven/table_factory.py @@ -24,10 +24,9 @@ _JTableFactory = jpy.get_type("io.deephaven.engine.table.TableFactory") _JTableTools = jpy.get_type("io.deephaven.engine.util.TableTools") _JDynamicTableWriter = jpy.get_type("io.deephaven.engine.table.impl.util.DynamicTableWriter") -_JMutableInputTable = jpy.get_type("io.deephaven.engine.util.config.MutableInputTable") -_JAppendOnlyArrayBackedMutableTable = jpy.get_type( - "io.deephaven.engine.table.impl.util.AppendOnlyArrayBackedMutableTable") -_JKeyedArrayBackedMutableTable = jpy.get_type("io.deephaven.engine.table.impl.util.KeyedArrayBackedMutableTable") +_JAppendOnlyArrayBackedInputTable = jpy.get_type( + "io.deephaven.engine.table.impl.util.AppendOnlyArrayBackedInputTable") +_JKeyedArrayBackedInputTable = jpy.get_type("io.deephaven.engine.table.impl.util.KeyedArrayBackedInputTable") _JTableDefinition = jpy.get_type("io.deephaven.engine.table.TableDefinition") _JTable = jpy.get_type("io.deephaven.engine.table.Table") _J_INPUT_TABLE_ATTRIBUTE = _JTable.INPUT_TABLE_ATTRIBUTE @@ -257,9 +256,9 @@ def __init__(self, col_defs: Dict[str, DType] = None, init_table: Table = None, key_cols = to_sequence(key_cols) if key_cols: - super().__init__(_JKeyedArrayBackedMutableTable.make(j_arg_1, key_cols)) + super().__init__(_JKeyedArrayBackedInputTable.make(j_arg_1, key_cols)) else: - super().__init__(_JAppendOnlyArrayBackedMutableTable.make(j_arg_1)) + super().__init__(_JAppendOnlyArrayBackedInputTable.make(j_arg_1)) self.j_input_table = self.j_table.getAttribute(_J_INPUT_TABLE_ATTRIBUTE) self.key_columns = 
key_cols except Exception as e: diff --git a/py/server/deephaven_internal/plugin/js/__init__.py b/py/server/deephaven_internal/plugin/js/__init__.py new file mode 100644 index 00000000000..b0f18f3a8da --- /dev/null +++ b/py/server/deephaven_internal/plugin/js/__init__.py @@ -0,0 +1,29 @@ +# +# Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending +# + +import jpy +import pathlib + +from deephaven.plugin.js import JsPlugin + +_JJsPlugin = jpy.get_type("io.deephaven.plugin.js.JsPlugin") +_JPath = jpy.get_type("java.nio.file.Path") + + +def to_j_js_plugin(js_plugin: JsPlugin) -> jpy.JType: + path = js_plugin.path() + if not isinstance(path, pathlib.Path): + # Adding a little bit of extra safety for this version of the server. + # There's potential that the return type of JsPlugin.path expands in the future. + raise Exception( + f"Expecting pathlib.Path, is type(js_plugin.path())={type(path)}, js_plugin={js_plugin}" + ) + j_path = _JPath.of(str(path)) + main_path = j_path.relativize(j_path.resolve(js_plugin.main)) + builder = _JJsPlugin.builder() + builder.name(js_plugin.name) + builder.version(js_plugin.version) + builder.main(main_path) + builder.path(j_path) + return builder.build() diff --git a/py/server/deephaven_internal/plugin/register.py b/py/server/deephaven_internal/plugin/register.py index a35d152c91c..91ec3c20e4e 100644 --- a/py/server/deephaven_internal/plugin/register.py +++ b/py/server/deephaven_internal/plugin/register.py @@ -8,9 +8,11 @@ from typing import Union, Type from deephaven.plugin import Plugin, Registration, Callback from deephaven.plugin.object_type import ObjectType +from deephaven.plugin.js import JsPlugin from .object import ObjectTypeAdapter +from .js import to_j_js_plugin -_JCallbackAdapter = jpy.get_type('io.deephaven.server.plugin.python.CallbackAdapter') +_JCallbackAdapter = jpy.get_type("io.deephaven.server.plugin.python.CallbackAdapter") def initialize_all_and_register_into(callback: _JCallbackAdapter): @@ -20,6 +22,7 @@ def initialize_all_and_register_into(callback: _JCallbackAdapter): class RegistrationAdapter(Callback): """Python implementation of Callback that delegates to its Java counterpart.""" + def __init__(self, callback: _JCallbackAdapter): self._callback = callback @@ -29,8 +32,10 @@ def register(self, plugin: Union[Plugin, Type[Plugin]]): plugin = plugin() if isinstance(plugin, ObjectType): self._callback.registerObjectType(plugin.name, ObjectTypeAdapter(plugin)) + elif isinstance(plugin, JsPlugin): + self._callback.registerJsPlugin(to_j_js_plugin(plugin)) else: - raise NotImplementedError + raise NotImplementedError(f"Unexpected type: {type(plugin)}") def __str__(self): return str(self._callback) diff --git a/py/server/setup.py b/py/server/setup.py index df83ea7e498..4161000e0d4 100644 --- a/py/server/setup.py +++ b/py/server/setup.py @@ -56,7 +56,7 @@ def _compute_version(): python_requires='>=3.8', install_requires=[ 'jpy>=0.14.0', - 'deephaven-plugin==0.5.0', + 'deephaven-plugin>=0.6.0', 'numpy', 'pandas>=1.5.0', 'pyarrow', diff --git a/py/server/test_helper/__init__.py b/py/server/test_helper/__init__.py index 4cc93ba1eaa..1dc3eaaca55 100644 --- a/py/server/test_helper/__init__.py +++ b/py/server/test_helper/__init__.py @@ -71,8 +71,9 @@ def start_jvm_for_tests(jvm_props: Dict[str, str] = None): global py_dh_session _JPeriodicUpdateGraph = jpy.get_type("io.deephaven.engine.updategraph.impl.PeriodicUpdateGraph") _j_test_update_graph = 
_JPeriodicUpdateGraph.newBuilder(_JPeriodicUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME).existingOrBuild() + no_op_operation_initializer = jpy.get_type("io.deephaven.util.thread.ThreadInitializationFactory").NO_OP _JPythonScriptSession = jpy.get_type("io.deephaven.integrations.python.PythonDeephavenSession") - py_dh_session = _JPythonScriptSession(_j_test_update_graph, py_scope_jpy) + py_dh_session = _JPythonScriptSession(_j_test_update_graph, no_op_operation_initializer, py_scope_jpy) def _expand_wildcards_in_list(elements): diff --git a/py/server/tests/test_numba_guvectorize.py b/py/server/tests/test_numba_guvectorize.py index c82b92296e3..79d9f87241f 100644 --- a/py/server/tests/test_numba_guvectorize.py +++ b/py/server/tests/test_numba_guvectorize.py @@ -5,7 +5,7 @@ import unittest import numpy as np -from numba import guvectorize, int64 +from numba import guvectorize, int64, int32 from deephaven import empty_table, dtypes from tests.testbase import BaseTestCase @@ -22,13 +22,13 @@ def g(x, res): for xi in x: res[0] += xi - t = empty_table(10).update(["X=i%3", "Y=i"]).group_by("X").update("Z=g(Y)") + t = empty_table(10).update(["X=i%3", "Y=ii"]).group_by("X").update("Z=g(Y)") m = t.meta_table self.assertEqual(t.columns[2].data_type, dtypes.int64) def test_vector_return(self): # vector and scalar input to vector ouput function - @guvectorize([(int64[:], int64, int64[:])], "(m),()->(m)", nopython=True) + @guvectorize([(int32[:], int32, int64[:])], "(m),()->(m)", nopython=True) def g(x, y, res): for i in range(len(x)): res[i] = x[i] + y @@ -61,7 +61,7 @@ def test_fixed_length_vector_return(self): dummy = np.array([0, 0], dtype=np.int64) # vector input to fixed-length vector ouput function -- second arg is a dummy just to get a fixed size output - @guvectorize([(int64[:], int64[:], int64[:])], "(m),(n)->(n)", nopython=True) + @guvectorize([(int32[:], int64[:], int64[:])], "(m),(n)->(n)", nopython=True) def g(x, dummy, res): res[0] = min(x) res[1] = max(x) @@ -78,7 +78,7 @@ def g(x, dummy, res): res[0] = np.min(x) res[1] = np.max(x) - t = empty_table(10).update(["X=i%3", "Y=i"]).group_by("X").update("Z=g(Y,dummy)") + t = empty_table(10).update(["X=i%3", "Y=ii"]).group_by("X").update("Z=g(Y,dummy)") self.assertEqual(t.columns[2].data_type, dtypes.long_array) def test_np_on_java_array2(self): @@ -86,7 +86,7 @@ def test_np_on_java_array2(self): def g(x, res): res[:] = x + 5 - t = empty_table(10).update(["X=i%3", "Y=i"]).group_by("X").update("Z=g(Y)") + t = empty_table(10).update(["X=i%3", "Y=ii"]).group_by("X").update("Z=g(Y)") self.assertEqual(t.columns[2].data_type, dtypes.long_array) diff --git a/py/server/tests/test_udf_numpy_args.py b/py/server/tests/test_udf_numpy_args.py new file mode 100644 index 00000000000..ba698a4b21c --- /dev/null +++ b/py/server/tests/test_udf_numpy_args.py @@ -0,0 +1,397 @@ +# +# Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending +# +import typing +from typing import Optional, Union, Any +import unittest + +import numpy as np +import numpy.typing as npt + +from deephaven import empty_table, DHError, dtypes +from deephaven.dtypes import double_array, int32_array, long_array, int16_array, char_array, int8_array, \ + float32_array +from tests.testbase import BaseTestCase + +_J_TYPE_NULL_MAP = { + "byte": "NULL_BYTE", + "short": "NULL_SHORT", + "char": "NULL_CHAR", + "int": "NULL_INT", + "long": "NULL_LONG", + "float": "NULL_FLOAT", + "double": "NULL_DOUBLE", +} + +_J_TYPE_NP_DTYPE_MAP = { + "byte": "np.int8", + "short": "np.int16", + "char": "np.uint16", + 
"int": "np.int32", + "long": "np.int64", + "float": "np.float32", + "double": "np.float64", +} + +_J_TYPE_J_ARRAY_TYPE_MAP = { + "byte": int8_array, + "short": int16_array, + "char": char_array, + "int": int32_array, + "long": long_array, + "float": float32_array, + "double": double_array, +} + + +class UDFNumpyTest(BaseTestCase): + def test_j_to_py_no_annotation_no_null(self): + col1_formula = "Col1 = i % 10" + for j_dtype, np_dtype in _J_TYPE_NP_DTYPE_MAP.items(): + col2_formula = f"Col2 = ({j_dtype})i" + with self.subTest(j_dtype): + tbl = empty_table(100).update([col1_formula, col2_formula]).group_by("Col1") + + func_str = f""" +def test_udf(col1, col2) -> bool: + j_array_type = _J_TYPE_J_ARRAY_TYPE_MAP[{j_dtype!r}].j_type + return isinstance(col1, int) and isinstance(col2, j_array_type) + """ + exec(func_str, globals()) + res = tbl.update("Col3 = test_udf(Col1, Col2)") + self.assertEqual(10, res.to_string().count("true")) + + def test_j_to_py_no_annotation_null(self): + col1_formula = "Col1 = i % 10" + for j_dtype, null_name in _J_TYPE_NULL_MAP.items(): + col2_formula = f"Col2 = i % 3 == 0? {null_name} : ({j_dtype})i" + with self.subTest(j_dtype): + tbl = empty_table(100).update([col1_formula, col2_formula]).group_by("Col1") + + func_str = f""" +def test_udf(col1, col2) -> bool: + j_array_type = _J_TYPE_J_ARRAY_TYPE_MAP[{j_dtype!r}].j_type + return (isinstance(col1, int) and isinstance(col2, j_array_type) and np.any(np.array(col2) == {null_name})) + """ + exec(f"from deephaven.constants import {null_name}", globals()) + exec(func_str, globals()) + res = tbl.update("Col3 = test_udf(Col1, Col2)") + self.assertEqual(10, res.to_string().count("true")) + exec(f"del {null_name}", globals()) + + def test_jarray_to_np_array_no_null(self): + col1_formula = "Col1 = i % 10" + for j_dtype, np_dtype in _J_TYPE_NP_DTYPE_MAP.items(): + col2_formula = f"Col2 = ({j_dtype})i" + with self.subTest(j_dtype): + tbl = empty_table(100).update([col1_formula, col2_formula]).group_by("Col1") + + func_str = f""" +def test_udf(col1, col2: np.ndarray[{np_dtype}]) -> bool: + return (isinstance(col1, int) and isinstance(col2, np.ndarray) and col2.dtype.type == {np_dtype} and np.nanmean( + col2) == np.mean( col2)) + """ + exec(func_str, globals()) + res = tbl.update("Col3 = test_udf(Col1, Col2)") + self.assertEqual(10, res.to_string().count("true")) + + def test_jarray_to_np_array_null(self): + col1_formula = "Col1 = i % 10" + for j_dtype, null_name in _J_TYPE_NULL_MAP.items(): + col2_formula = f"Col2 = i % 3 == 0? 
{null_name} : ({j_dtype})i" + with self.subTest(j_dtype): + tbl = empty_table(100).update([col1_formula, col2_formula]).group_by("Col1") + + func_str = f""" +def test_udf(col1, col2: np.ndarray[{_J_TYPE_NP_DTYPE_MAP[j_dtype]}]) -> bool: + return (isinstance(col1, int) and isinstance(col2, np.ndarray) and col2.dtype.type == + {_J_TYPE_NP_DTYPE_MAP[j_dtype]} and np.nanmean(col2) != np.mean( col2)) + """ + exec(func_str, globals()) + + # for floating point types, DH nulls are auto converted to np.nan + # for integer types, DH nulls in the array raise exceptions + if j_dtype in ("float", "double"): + res = tbl.update("Col3 = test_udf(Col1, Col2)") + self.assertEqual(10, res.to_string().count("true")) + else: + with self.assertRaises(DHError) as cm: + tbl.update("Col3 = test_udf(Col1, Col2)") + self.assertRegex(str(cm.exception), "Java .* array contains Deephaven null values, but numpy .* " + "array does not support ") + + def test_j_scalar_to_py_no_null(self): + col1_formula = "Col1 = i % 10" + for j_dtype, null_name in _J_TYPE_NULL_MAP.items(): + col2_formula = f"Col2 = ({j_dtype})i" + with self.subTest(j_dtype): + np_type = _J_TYPE_NP_DTYPE_MAP[j_dtype] + func = f""" +def test_udf(col: {np_type}) -> bool: + if not isinstance(col, {np_type}): + return False + if np.isnan(col): + return False + else: + return True + """ + exec(func, globals()) + with self.subTest(j_dtype): + tbl = empty_table(100).update([col1_formula, col2_formula]) + res = tbl.update("Col3 = test_udf(Col2)") + self.assertEqual(10, res.to_string().count("true")) + + func = f""" +def test_udf(col: Optional[{np_type}]) -> bool: + if not isinstance(col, {np_type}): + return False + if col is None: + return False + else: + return True + """ + exec(func, globals()) + with self.subTest(j_dtype): + tbl = empty_table(100).update([col1_formula, col2_formula]) + res = tbl.update("Col3 = test_udf(Col2)") + self.assertEqual(10, res.to_string().count("true")) + + def test_j_scalar_to_py_null(self): + col1_formula = "Col1 = i % 10" + for data_type, null_name in _J_TYPE_NULL_MAP.items(): + col2_formula = f"Col2 = i % 3 == 0? 
{null_name} : ({data_type})i" + with self.subTest(data_type): + np_type = _J_TYPE_NP_DTYPE_MAP[data_type] + func = f""" +def test_udf(col: {np_type}) -> bool: + if np.isnan(col): + return True + else: + if not isinstance(col, {np_type}): + return True + return False +""" + exec(func, globals()) + with self.subTest(data_type): + tbl = empty_table(100).update([col1_formula, col2_formula]) + # for floating point types, DH nulls are auto converted to np.nan + # for integer types, DH nulls in the array raise exceptions + if data_type in ("float", "double"): + res = tbl.update("Col3 = test_udf(Col2)") + self.assertEqual(4, res.to_string().count("true")) + else: + with self.assertRaises(DHError) as cm: + res = tbl.update("Col3 = test_udf(Col2)") + self.assertRegex(str(cm.exception), "Argument .* is not compatible with annotation*") + + func = f""" +def test_udf(col: Optional[{np_type}]) -> bool: + if col is None: + return True + else: + if not isinstance(col, {np_type}): + return True + return False +""" + exec(func, globals()) + with self.subTest(data_type): + tbl = empty_table(100).update([col1_formula, col2_formula]) + res = tbl.update("Col3 = test_udf(Col2)") + self.assertEqual(4, res.to_string().count("true")) + + def test_weird_cases(self): + def f(p1: Union[np.ndarray[typing.Any], None]) -> bool: + return bool(p1) + + with self.assertRaises(DHError) as cm: + t = empty_table(10).update(["X1 = f(i)"]) + + def f1(p1: Union[np.int16, np.int32]) -> bool: + return bool(p1) + + with self.assertRaises(DHError) as cm: + t = empty_table(10).update(["X1 = f1(i)"]) + + def f11(p1: Union[float, np.float32]) -> bool: + return bool(p1) + + with self.assertRaises(DHError) as cm: + t = empty_table(10).update(["X1 = f11(i)"]) + + def f2(p1: Union[np.int16, np.float64]) -> Union[Optional[bool]]: + return bool(p1) + + t = empty_table(10).update(["X1 = f2(i)"]) + self.assertEqual(t.columns[0].data_type, dtypes.bool_) + self.assertEqual(9, t.to_string().count("true")) + + def f21(p1: Union[np.int16, np.float64]) -> Union[Optional[bool], int]: + return bool(p1) + + with self.assertRaises(DHError) as cm: + t = empty_table(10).update(["X1 = f21(i)"]) + + def f3(p1: Union[np.int16, np.float64], p2=None) -> bool: + return bool(p1) + + t = empty_table(10).update(["X1 = f3(i)"]) + self.assertEqual(t.columns[0].data_type, dtypes.bool_) + + def f4(p1: Union[np.int16, np.float64], p2=None) -> bool: + return bool(p1) + + t = empty_table(10).update(["X1 = f4((double)i)"]) + self.assertEqual(t.columns[0].data_type, dtypes.bool_) + with self.assertRaises(DHError) as cm: + t = empty_table(10).update(["X1 = f4(now())"]) + self.assertRegex(str(cm.exception), "Argument .* is not compatible with annotation*") + + def f41(p1: Union[np.int16, np.float64, Union[Any]], p2=None) -> bool: + return bool(p1) + + t = empty_table(10).update(["X1 = f41(now())"]) + self.assertEqual(t.columns[0].data_type, dtypes.bool_) + + def f42(p1: Union[np.int16, np.float64, np.datetime64], p2=None) -> bool: + return p1.dtype.char == "M" + + t = empty_table(10).update(["X1 = f42(now())"]) + self.assertEqual(t.columns[0].data_type, dtypes.bool_) + self.assertEqual(10, t.to_string().count("true")) + + def f5(col1, col2: np.ndarray[np.int32]) -> bool: + return np.nanmean(col2) == np.mean(col2) + + t = empty_table(10).update(["X = i % 3", "Y = i"]).group_by("X") + t = t.update(["X1 = f5(X, Y)"]) + with self.assertRaises(DHError) as cm: + t = t.update(["X1 = f5(X, null)"]) + self.assertRegex(str(cm.exception), "Argument .* is not compatible with annotation*") 
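Distilling the pattern these cases exercise into user-facing terms (a sketch; the None guard in the second variant is our addition):

import numpy as np
from typing import Optional

def mean_check(col: np.ndarray[np.int32]) -> bool:
    # Strict annotation: a Deephaven null argument is rejected before the body runs.
    return np.nanmean(col) == np.mean(col)

def mean_check_opt(col: Optional[np.ndarray[np.int32]]) -> bool:
    # Optional lets null through as None, so the body must handle it explicitly.
    return False if col is None else np.nanmean(col) == np.mean(col)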
+ + def f51(col1, col2: Optional[np.ndarray[np.int32]]) -> bool: + return np.nanmean(col2) == np.mean(col2) + + t = empty_table(10).update(["X = i % 3", "Y = i"]).group_by("X") + t = t.update(["X1 = f51(X, Y)"]) + with self.assertRaises(DHError) as cm: + t = t.update(["X1 = f51(X, null)"]) + self.assertRegex(str(cm.exception), "unsupported operand type.*NoneType") + + t = empty_table(10).update(["X = i % 3", "Y = i"]).group_by("X") + + def f6(*args: np.int32, col2: np.ndarray[np.int32]) -> bool: + return np.nanmean(col2) == np.mean(col2) + with self.assertRaises(DHError) as cm: + t1 = t.update(["X1 = f6(X, Y)"]) + self.assertIn("missing 1 required keyword-only argument", str(cm.exception)) + + with self.assertRaises(DHError) as cm: + t1 = t.update(["X1 = f6(X, Y=null)"]) + self.assertIn("not compatible with annotation", str(cm.exception)) + + def test_str_bool_datetime_array(self): + with self.subTest("str"): + def f1(p1: np.ndarray[str], p2=None) -> bool: + return bool(len(p1)) + + t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? `deephaven`: null"]).group_by("X") + t1 = t.update(["X1 = f1(Y)"]) + self.assertEqual(t1.columns[2].data_type, dtypes.bool_) + with self.assertRaises(DHError) as cm: + t2 = t.update(["X1 = f1(null, Y )"]) + self.assertRegex(str(cm.exception), "Argument .* is not compatible with annotation*") + + def f11(p1: Union[np.ndarray[str], None], p2=None) -> bool: + return bool(len(p1)) if p1 is not None else False + t2 = t.update(["X1 = f11(null, Y)"]) + self.assertEqual(3, t2.to_string().count("false")) + + with self.subTest("datetime"): + def f2(p1: np.ndarray[np.datetime64], p2=None) -> bool: + return bool(len(p1)) + + t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? now() : null"]).group_by("X") + t1 = t.update(["X1 = f2(Y)"]) + self.assertEqual(t1.columns[2].data_type, dtypes.bool_) + with self.assertRaises(DHError) as cm: + t2 = t.update(["X1 = f2(null, Y )"]) + self.assertRegex(str(cm.exception), "Argument .* is not compatible with annotation*") + + def f21(p1: Union[np.ndarray[np.datetime64], None], p2=None) -> bool: + return bool(len(p1)) if p1 is not None else False + t2 = t.update(["X1 = f21(null, Y)"]) + self.assertEqual(3, t2.to_string().count("false")) + + with self.subTest("boolean"): + def f3(p1: np.ndarray[np.bool_], p2=None) -> bool: + return bool(len(p1)) + + t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? true : null"]).group_by("X") + with self.assertRaises(DHError) as cm: + t1 = t.update(["X1 = f3(Y)"]) + self.assertRegex(str(cm.exception), "Java .* array contains Deephaven null values, but numpy .* " + "array does not support ") + + t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? true : false"]).group_by("X") + t1 = t.update(["X1 = f3(Y)"]) + self.assertEqual(t1.columns[2].data_type, dtypes.bool_) + with self.assertRaises(DHError) as cm: + t2 = t.update(["X1 = f3(null, Y )"]) + self.assertRegex(str(cm.exception), "Argument None is not compatible with annotation") + + def f31(p1: Optional[np.ndarray[bool]], p2=None) -> bool: + return bool(len(p1)) if p1 is not None else False + t2 = t.update(["X1 = f31(null, Y)"]) + self.assertEqual(3, t2.to_string("X1").count("false")) + + def test_str_bool_datetime_scalar(self): + with self.subTest("str"): + def f1(p1: str, p2=None) -> bool: + return p1 is None + + t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? 
`deephaven`: null"])
+        with self.assertRaises(DHError) as cm:
+            t1 = t.update(["X1 = f1(Y)"])
+        self.assertRegex(str(cm.exception), "Argument None is not compatible with annotation")
+
+        def f11(p1: Union[str, None], p2=None) -> bool:
+            return p1 is None
+        t2 = t.update(["X1 = f11(Y)"])
+        self.assertEqual(5, t2.to_string().count("false"))
+
+        with self.subTest("datetime"):
+            def f2(p1: np.datetime64, p2=None) -> bool:
+                return p1 is None
+
+            t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? now() : null"])
+            with self.assertRaises(DHError) as cm:
+                t1 = t.update(["X1 = f2(Y)"])
+            self.assertRegex(str(cm.exception), "Argument None is not compatible with annotation")
+
+            def f21(p1: Union[np.datetime64, None], p2=None) -> bool:
+                return p1 is None
+            t2 = t.update(["X1 = f21(Y)"])
+            self.assertEqual(5, t2.to_string().count("false"))
+
+        with self.subTest("boolean"):
+            def f3(p1: np.bool_, p2=None) -> bool:
+                return p1 is None
+
+            t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? true : null"])
+            with self.assertRaises(DHError) as cm:
+                t1 = t.update(["X1 = f3(Y)"])
+            self.assertRegex(str(cm.exception), "Argument None is not compatible with annotation")
+
+            t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? true : false"])
+            t1 = t.update(["X1 = f3(Y)"])
+            self.assertEqual(t1.columns[2].data_type, dtypes.bool_)
+            self.assertEqual(0, t1.to_string("X1").count("true"))
+
+            def f31(p1: Optional[np.bool_], p2=None) -> bool:
+                return p1 is None
+            t2 = t.update(["X1 = f31(null, Y)"])
+            self.assertEqual(10, t2.to_string("X1").count("true"))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/py/server/tests/test_pyfunc_return_java_values.py b/py/server/tests/test_udf_return_java_values.py
similarity index 96%
rename from py/server/tests/test_pyfunc_return_java_values.py
rename to py/server/tests/test_udf_return_java_values.py
index aef0d44cb93..1a7c78e3aa9 100644
--- a/py/server/tests/test_pyfunc_return_java_values.py
+++ b/py/server/tests/test_udf_return_java_values.py
@@ -23,7 +23,7 @@
     dtypes.byte: "np.int8",
     dtypes.bool_: "np.bool_",
     dtypes.string: "np.str_",
-    # dtypes.char: "np.uint16",
+    dtypes.char: "np.uint16",
 }
@@ -52,7 +52,7 @@ def test_array_return(self):
             "np.float64": dtypes.double_array,
             "bool": dtypes.boolean_array,
             "np.str_": dtypes.string_array,
-            # "np.uint16": dtypes.char_array,
+            "np.uint16": dtypes.char_array,
         }
         container_types = ["List", "Tuple", "list", "tuple", "Sequence", "np.ndarray"]
         for component_type, dh_dtype in component_types.items():
@@ -189,7 +189,7 @@ def f4557_1(x, y) -> np.ndarray[np.int64]:
         return np.array(x) + y

     # Testing https://github.com/deephaven/deephaven-core/issues/4562
-    @nb.guvectorize([(nb.int64[:], nb.int64, nb.int64[:])], "(m),()->(m)", nopython=True)
+    @nb.guvectorize([(nb.int32[:], nb.int32, nb.int32[:])], "(m),()->(m)", nopython=True)
     def f4562_1(x, y, res):
         res[:] = x + y
@@ -198,11 +198,11 @@ def f4562_1(x, y, res):
             "Y = f4562_1(B,3)"
         ])
         self.assertEqual(t2.columns[2].data_type, dtypes.long_array)
-        self.assertEqual(t2.columns[3].data_type, dtypes.long_array)
+        self.assertEqual(t2.columns[3].data_type, dtypes.int32_array)

         t3 = t2.ungroup()
         self.assertEqual(t3.columns[2].data_type, dtypes.int64)
-        self.assertEqual(t3.columns[3].data_type, dtypes.int64)
+        self.assertEqual(t3.columns[3].data_type, dtypes.int32)

     def test_ndim_nparray_return_type(self):
         def f() -> np.ndarray[np.int64]:
@@ -222,28 +222,29 @@ def f() -> npt.NDArray[np.int64]:
     def test_ndarray_weird_cases(self):
         def f() -> np.ndarray[typing.Any]:
             return np.array([1, 2], dtype=np.int64)
-
         t = empty_table(10).update(["X1 = f()"])
         self.assertEqual(t.columns[0].data_type, dtypes.PyObject)

         def f1() -> npt.NDArray[typing.Any]:
             return np.array([1, 2], dtype=np.int64)
-
         t = empty_table(10).update(["X1 = f1()"])
         self.assertEqual(t.columns[0].data_type, dtypes.PyObject)

         def f2() -> np.ndarray[typing.Any, np.int64]:
             return np.array([1, 2], dtype=np.int64)
-
         t = empty_table(10).update(["X1 = f2()"])
         self.assertEqual(t.columns[0].data_type, dtypes.PyObject)

         def f3() -> Union[None, None]:
             return np.array([1, 2], dtype=np.int64)
-
         t = empty_table(10).update(["X1 = f3()"])
         self.assertEqual(t.columns[0].data_type, dtypes.PyObject)

+        def f4() -> None:
+            return np.array([1, 2], dtype=np.int64)
+        t = empty_table(10).update(["X1 = f4()"])
+        self.assertEqual(t.columns[0].data_type, dtypes.PyObject)
+
     def test_optional_scalar_return(self):
         for dh_dtype, np_dtype in _J_TYPE_NP_DTYPE_MAP.items():
             with self.subTest(dh_dtype=dh_dtype, np_dtype=np_dtype):
diff --git a/py/server/tests/test_vectorization.py b/py/server/tests/test_vectorization.py
index 82b9dccbe2c..8eb28e65cda 100644
--- a/py/server/tests/test_vectorization.py
+++ b/py/server/tests/test_vectorization.py
@@ -7,24 +7,24 @@
 from typing import Optional

 import numpy as np
-import deephaven
 from deephaven import DHError, empty_table, dtypes
 from deephaven import new_table
 from deephaven.column import int_col
 from deephaven.filters import Filter, and_
-from deephaven.table import dh_vectorize
+import deephaven._udf as _udf
+from deephaven._udf import _dh_vectorize as dh_vectorize

 from tests.testbase import BaseTestCase


 class VectorizationTestCase(BaseTestCase):
     def setUp(self):
         super().setUp()
-        deephaven.table._test_vectorization = True
-        deephaven.table._vectorized_count = 0
+        _udf.test_vectorization = True
+        _udf.vectorized_count = 0

     def tearDown(self) -> None:
-        deephaven.table._test_vectorization = False
-        deephaven.table._vectorized_count = 0
+        _udf.test_vectorization = False
+        _udf.vectorized_count = 0
         super().tearDown()

     def test_vectorization_exceptions(self):
@@ -66,7 +66,7 @@ def py_plus(p1, p2) -> int:

         t = empty_table(1).update("X = py_plus(ii, ii)")

-        self.assertEqual(deephaven.table._vectorized_count, 1)
+        self.assertEqual(_udf.vectorized_count, 1)

     def test_vectorized_no_arg(self):
         def py_random() -> int:
@@ -74,7 +74,7 @@ def py_random() -> int:

         t = empty_table(1).update("X = py_random()")

-        self.assertEqual(deephaven.table._vectorized_count, 1)
+        self.assertEqual(_udf.vectorized_count, 1)

     def test_vectorized_const_arg(self):
         def py_const(seed) -> int:
@@ -84,27 +84,27 @@ def py_const(seed) -> int:
         expected_count = 0
         t = empty_table(10).update("X = py_const(3)")
         expected_count += 1
-        self.assertEqual(deephaven.table._vectorized_count, expected_count)
+        self.assertEqual(_udf.vectorized_count, expected_count)

         seed = 10
         t = empty_table(10).update("X = py_const(seed)")
         expected_count += 1
-        self.assertEqual(deephaven.table._vectorized_count, expected_count)
+        self.assertEqual(_udf.vectorized_count, expected_count)

         t = empty_table(10).update("X = py_const(30*1024*1024*1024)")
-        self.assertEqual(deephaven.table._vectorized_count, expected_count)
+        self.assertEqual(_udf.vectorized_count, expected_count)

         t = empty_table(10).update("X = py_const(30000000000L)")
         expected_count += 1
-        self.assertEqual(deephaven.table._vectorized_count, expected_count)
+        self.assertEqual(_udf.vectorized_count, expected_count)

         t = empty_table(10).update("X = py_const(100.01)")
         expected_count += 1
-        self.assertEqual(deephaven.table._vectorized_count, expected_count)
+        self.assertEqual(_udf.vectorized_count, expected_count)

         t = empty_table(10).update("X = py_const(100.01f)")
         expected_count += 1
-        self.assertEqual(deephaven.table._vectorized_count, expected_count)
+        self.assertEqual(_udf.vectorized_count, expected_count)

         with self.assertRaises(DHError) as cm:
             t = empty_table(1).update("X = py_const(NULL_INT)")
@@ -115,26 +115,26 @@ def py_const_str(s) -> str:

         t = empty_table(10).update("X = py_const_str(`Deephaven`)")
         expected_count += 1
-        self.assertEqual(deephaven.table._vectorized_count, expected_count)
+        self.assertEqual(_udf.vectorized_count, expected_count)

         t = empty_table(10).update("X = py_const_str(null)")
         expected_count += 1
-        self.assertEqual(deephaven.table._vectorized_count, expected_count)
+        self.assertEqual(_udf.vectorized_count, expected_count)

         t = empty_table(10).update("X = py_const_str(true)")
         expected_count += 1
-        self.assertEqual(deephaven.table._vectorized_count, expected_count)
+        self.assertEqual(_udf.vectorized_count, expected_count)

         t = t.update("Y = py_const_str(X)")
         expected_count += 1
-        self.assertEqual(deephaven.table._vectorized_count, expected_count)
+        self.assertEqual(_udf.vectorized_count, expected_count)

     def test_multiple_formulas(self):
         def pyfunc(p1, p2, p3) -> int:
             return p1 + p2 + p3

         t = empty_table(1).update("X = i").update(["Y = pyfunc(X, i, 33)", "Z = pyfunc(X, ii, 66)"])
-        self.assertEqual(deephaven.table._vectorized_count, 2)
+        self.assertEqual(_udf.vectorized_count, 2)
         self.assertIn("33", t.to_string(cols=["Y"]))
         self.assertIn("66", t.to_string(cols=["Z"]))
@@ -144,7 +144,7 @@ def pyfunc(p1, p2, p3) -> int:
             return p1 + p2 + p3

         t = empty_table(1).update("X = i").update(["Y = pyfunc(X, i, 33)", "Z = pyfunc(X, ii, 66)"])
-        self.assertEqual(deephaven.table._vectorized_count, 1)
+        self.assertEqual(_udf.vectorized_count, 1)
         self.assertIn("33", t.to_string(cols=["Y"]))
         self.assertIn("66", t.to_string(cols=["Z"]))
@@ -157,11 +157,11 @@ def pyfunc_bool(p1, p2, p3) -> bool:

         with self.assertRaises(DHError) as cm:
             t = empty_table(10).view(formulas=["I=ii", "J=(ii * 2)"]).where("pyfunc_int(I, 3, J)")
-            self.assertEqual(deephaven.table._vectorized_count, 0)
+            self.assertEqual(_udf.vectorized_count, 0)
         self.assertIn("boolean required", str(cm.exception))

         t = empty_table(10).view(formulas=["I=ii", "J=(ii * 2)"]).where("pyfunc_bool(I, 3, J)")
-        self.assertEqual(deephaven.table._vectorized_count, 1)
+        self.assertEqual(_udf.vectorized_count, 1)
         self.assertGreater(t.size, 1)

     def test_multiple_filters(self):
@@ -171,11 +171,11 @@ def pyfunc_bool(p1, p2, p3) -> bool:
         conditions = ["pyfunc_bool(I, 3, J)", "pyfunc_bool(i, 10, ii)"]
         filters = Filter.from_(conditions)
         t = empty_table(10).view(formulas=["I=ii", "J=(ii * 2)"]).where(filters)
-        self.assertEqual(2, deephaven.table._vectorized_count)
+        self.assertEqual(2, _udf.vectorized_count)

         filter_and = and_(filters)
         t1 = empty_table(10).view(formulas=["I=ii", "J=(ii * 2)"]).where(filter_and)
-        self.assertEqual(4, deephaven.table._vectorized_count)
+        self.assertEqual(4, _udf.vectorized_count)
         self.assertEqual(t1.size, t.size)
         self.assertEqual(9, t.size)
@@ -187,11 +187,11 @@ def pyfunc_bool(p1, p2, p3) -> bool:
         conditions = ["pyfunc_bool(I, 3, J)", "pyfunc_bool(i, 10, ii)"]
         filters = Filter.from_(conditions)
         t = empty_table(10).view(formulas=["I=ii", "J=(ii * 2)"]).where(filters)
-        self.assertEqual(1, deephaven.table._vectorized_count)
+        self.assertEqual(1, _udf.vectorized_count)

         filter_and = and_(filters)
         t1 = empty_table(10).view(formulas=["I=ii", "J=(ii * 2)"]).where(filter_and)
-        self.assertEqual(1, deephaven.table._vectorized_count)
+        self.assertEqual(1, _udf.vectorized_count)
         self.assertEqual(t1.size, t.size)
         self.assertEqual(9, t.size)
@@ -258,7 +258,7 @@ def sinc(x) -> np.double:

         t = empty_table(100).update(["X = 0.1 * i", "SincXS=((sinc(X)))"])
         self.assertEqual(t.columns[1].data_type, dtypes.double)
-        self.assertEqual(deephaven.table._vectorized_count, 1)
+        self.assertEqual(_udf.vectorized_count, 1)

         def sinc2(x):
             return np.sinc(x)
@@ -272,7 +272,7 @@ def pyfunc(p1: np.int32, p2: np.int32, p3: Optional[np.int32]) -> Optional[int]:
             return None if total % 3 == 0 else total

         t = empty_table(10).update("X = i").update(["Y = pyfunc(X, i, 13)", "Z = pyfunc(X, ii, 66)"])
-        self.assertEqual(deephaven.table._vectorized_count, 2)
+        self.assertEqual(_udf.vectorized_count, 2)
         self.assertIn("13", t.to_string(cols=["Y"]))
         self.assertIn("null", t.to_string())
         self.assertEqual(t.columns[1].data_type, dtypes.long)
diff --git a/python-engine-test/src/test/java/io/deephaven/engine/table/impl/select/TestConditionFilter.java b/python-engine-test/src/test/java/io/deephaven/engine/table/impl/select/TestConditionFilter.java
index a9b683a8630..9c3c1f38ab9 100644
--- a/python-engine-test/src/test/java/io/deephaven/engine/table/impl/select/TestConditionFilter.java
+++ b/python-engine-test/src/test/java/io/deephaven/engine/table/impl/select/TestConditionFilter.java
@@ -21,6 +21,7 @@
 import io.deephaven.engine.util.PythonScopeJpyImpl;
 import io.deephaven.engine.table.ColumnSource;
 import io.deephaven.jpy.PythonTest;
+import io.deephaven.util.thread.ThreadInitializationFactory;
 import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.jpy.PyInputMode;
 import org.jpy.PyModule;
@@ -376,6 +377,7 @@ private void check(String expression, Predicate<Map<String, Object>> testPredicate,
         if (pythonScope == null) {
             final ExecutionContext context = new PythonDeephavenSession(
                     ExecutionContext.getDefaultContext().getUpdateGraph(),
+                    ThreadInitializationFactory.NO_OP,
                     new PythonScopeJpyImpl(getMainGlobals().asDict())).getExecutionContext();
             pythonScope = context.getQueryScope();
             context.open();
diff --git a/qst/src/main/java/io/deephaven/qst/table/BlinkInputTable.java b/qst/src/main/java/io/deephaven/qst/table/BlinkInputTable.java
new file mode 100644
index 00000000000..1b8f74ba7fc
--- /dev/null
+++ b/qst/src/main/java/io/deephaven/qst/table/BlinkInputTable.java
@@ -0,0 +1,33 @@
+/**
+ * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
+ */
+package io.deephaven.qst.table;
+
+import io.deephaven.annotations.NodeStyle;
+import org.immutables.value.Value.Immutable;
+import org.immutables.value.Value.Parameter;
+
+import java.util.UUID;
+
+/**
+ * Creates a blink input-table.
+ */
+@Immutable
+@NodeStyle
+public abstract class BlinkInputTable extends InputTableBase {
+
+    public static BlinkInputTable of(TableSchema schema) {
+        return ImmutableBlinkInputTable.of(schema, UUID.randomUUID());
+    }
+
+    @Parameter
+    public abstract TableSchema schema();
+
+    @Parameter
+    abstract UUID id();
+
+    @Override
+    public final <R> R walk(InputTable.Visitor<R> visitor) {
+        return visitor.visit(this);
+    }
+}
diff --git a/qst/src/main/java/io/deephaven/qst/table/InputTable.java b/qst/src/main/java/io/deephaven/qst/table/InputTable.java
index fd4e0ae6794..43caa995f6d 100644
--- a/qst/src/main/java/io/deephaven/qst/table/InputTable.java
+++ b/qst/src/main/java/io/deephaven/qst/table/InputTable.java
@@ -25,5 +25,7 @@ interface Visitor<R> {
         R visit(InMemoryAppendOnlyInputTable inMemoryAppendOnly);

         R visit(InMemoryKeyBackedInputTable inMemoryKeyBacked);
+
+        R visit(BlinkInputTable blinkInputTable);
     }
 }
diff --git a/qst/src/main/java/io/deephaven/qst/table/TableLabelVisitor.java b/qst/src/main/java/io/deephaven/qst/table/TableLabelVisitor.java
index a37a236879f..ec263e5cb0a 100644
--- a/qst/src/main/java/io/deephaven/qst/table/TableLabelVisitor.java
+++ b/qst/src/main/java/io/deephaven/qst/table/TableLabelVisitor.java
@@ -156,6 +156,11 @@ public String visit(InMemoryAppendOnlyInputTable inMemoryAppendOnly) {
             public String visit(InMemoryKeyBackedInputTable inMemoryKeyBacked) {
                 return "InMemoryKeyBackedInputTable(...)";
             }
+
+            @Override
+            public String visit(BlinkInputTable blinkInputTable) {
+                return "BlinkInputTable(...)";
+            }
         });
     }
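Downstream code that branches on input-table flavor now has a third visitor case to implement. A minimal sketch of an exhaustive visitor against the extended `InputTable.Visitor<R>` interface (the class name and returned labels here are illustrative only, not part of this change):

```java
import io.deephaven.qst.table.BlinkInputTable;
import io.deephaven.qst.table.InMemoryAppendOnlyInputTable;
import io.deephaven.qst.table.InMemoryKeyBackedInputTable;
import io.deephaven.qst.table.InputTable;

// Illustrative only: an exhaustive visitor over the three input-table flavors.
public final class InputTableKind implements InputTable.Visitor<String> {
    @Override
    public String visit(InMemoryAppendOnlyInputTable inMemoryAppendOnly) {
        return "append-only";
    }

    @Override
    public String visit(InMemoryKeyBackedInputTable inMemoryKeyBacked) {
        return "key-backed";
    }

    @Override
    public String visit(BlinkInputTable blinkInputTable) {
        return "blink"; // the case added by this change
    }
}
```

Usage would be `String kind = inputTable.walk(new InputTableKind());`; existing visitors that predate the blink case now fail to compile until they handle it, which is the point of extending the interface rather than adding a default.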
diff --git a/server/build.gradle b/server/build.gradle
index 2996be8a521..2ab80c615d6 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -14,7 +14,6 @@ dependencies {
     implementation project(':extensions-jdbc')
     implementation project(':Util');
     implementation project(':Integrations')
-    implementation project(':FishUtil')
     implementation depCommonsLang3
     Classpaths.inheritCommonsText(project, 'implementation')
diff --git a/server/jetty/src/main/java/io/deephaven/server/jetty/CopyHelper.java b/server/jetty/src/main/java/io/deephaven/server/jetty/CopyHelper.java
index 8b13de748a6..ccd6ab2f802 100644
--- a/server/jetty/src/main/java/io/deephaven/server/jetty/CopyHelper.java
+++ b/server/jetty/src/main/java/io/deephaven/server/jetty/CopyHelper.java
@@ -4,40 +4,77 @@
 package io.deephaven.server.jetty;

 import java.io.IOException;
+import java.nio.file.DirectoryNotEmptyException;
 import java.nio.file.FileVisitResult;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.PathMatcher;
 import java.nio.file.SimpleFileVisitor;
 import java.nio.file.StandardCopyOption;
 import java.nio.file.attribute.BasicFileAttributes;
 import java.util.Objects;

 class CopyHelper {

-    static void copyRecursive(Path src, Path dst) throws IOException {
+    static void copyRecursive(Path src, Path dst, PathMatcher pathMatcher) throws IOException {
+        copyRecursive(src, dst, pathMatcher, d -> true);
+    }
+
+    static void copyRecursive(Path src, Path dst, PathMatcher pathMatcher, PathMatcher dirMatcher) throws IOException {
         Files.createDirectories(dst.getParent());
-        Files.walkFileTree(src, new CopyRecursiveVisitor(src, dst));
+        Files.walkFileTree(src, new CopyRecursiveVisitor(src, dst, pathMatcher, dirMatcher));
     }

     private static class CopyRecursiveVisitor extends SimpleFileVisitor<Path> {
         private final Path src;
         private final Path dst;
+        private final PathMatcher pathMatcher;
+        private final PathMatcher dirMatcher;

-        public CopyRecursiveVisitor(Path src, Path dst) {
+        public CopyRecursiveVisitor(Path src, Path dst, PathMatcher pathMatcher, PathMatcher dirMatcher) {
             this.src = Objects.requireNonNull(src);
             this.dst = Objects.requireNonNull(dst);
+            this.pathMatcher = Objects.requireNonNull(pathMatcher);
+            this.dirMatcher = Objects.requireNonNull(dirMatcher);
         }

         @Override
         public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
-            // Note: toString() necessary for src/dst that don't share the same root FS
-            Files.copy(dir, dst.resolve(src.relativize(dir).toString()), StandardCopyOption.COPY_ATTRIBUTES);
-            return FileVisitResult.CONTINUE;
+            final Path relativeDir = src.relativize(dir);
+            if (dirMatcher.matches(relativeDir) || pathMatcher.matches(relativeDir)) {
+                // Note: toString() necessary for src/dst that don't share the same root FS
+                Files.copy(dir, dst.resolve(relativeDir.toString()), StandardCopyOption.COPY_ATTRIBUTES);
+                return FileVisitResult.CONTINUE;
+            }
+            return FileVisitResult.SKIP_SUBTREE;
         }

         @Override
         public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
-            // Note: toString() necessary for src/dst that don't share the same root FS
-            Files.copy(file, dst.resolve(src.relativize(file).toString()), StandardCopyOption.COPY_ATTRIBUTES);
+            final Path relativeFile = src.relativize(file);
+            if (pathMatcher.matches(relativeFile)) {
+                // Note: toString() necessary for src/dst that don't share the same root FS
+                Files.copy(file, dst.resolve(relativeFile.toString()), StandardCopyOption.COPY_ATTRIBUTES);
+            }
             return FileVisitResult.CONTINUE;
         }
+
+        @Override
+        public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
+            if (exc != null) {
+                throw exc;
+            }
+            final Path relativeDir = src.relativize(dir);
+            if (!pathMatcher.matches(relativeDir)) {
+                // If the specific dir does not match as a path (even if it _did_ match as a directory), we
+                // "optimistically" try to delete the copied directory; if it is not empty (because some subpath
+                // matched and was copied), the delete will fail. (An alternative impl could keep track, with a
+                // stack, of whether any subpaths matched.)
+                try {
+                    Files.delete(dst.resolve(relativeDir.toString()));
+                } catch (DirectoryNotEmptyException e) {
+                    // ignore
+                }
+            }
+            return FileVisitResult.CONTINUE;
+        }
     }
 }
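CopyHelper is package-private, so the filtering behavior is easiest to see in a standalone sketch. The following self-contained program mirrors the visitor logic above under invented inputs (the temp directories, file names, and glob matcher are assumptions for illustration): files copy only when their relative path matches, every directory is copied optimistically, and non-matching directories that remain empty are pruned on the way back up.

```java
import java.io.IOException;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.PathMatcher;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.BasicFileAttributes;

public class FilteredCopyDemo {
    public static void main(String[] args) throws IOException {
        // Hypothetical layout: a "dist" payload plus development clutter.
        Path src = Files.createTempDirectory("plugin-src");
        Files.createDirectories(src.resolve("dist"));
        Files.writeString(src.resolve("dist/index.js"), "// payload");
        Files.createDirectories(src.resolve("node_modules"));
        Files.writeString(src.resolve("node_modules/x.js"), "// dev clutter");

        Path dst = Files.createTempDirectory("plugin-dst").resolve("out");
        PathMatcher matcher = src.getFileSystem().getPathMatcher("glob:dist/**");

        Files.createDirectories(dst.getParent());
        Files.walkFileTree(src, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
                // Copy every directory optimistically; empty ones are pruned below.
                Files.copy(dir, dst.resolve(src.relativize(dir).toString()), StandardCopyOption.COPY_ATTRIBUTES);
                return FileVisitResult.CONTINUE;
            }

            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                Path rel = src.relativize(file);
                if (matcher.matches(rel)) {
                    Files.copy(file, dst.resolve(rel.toString()), StandardCopyOption.COPY_ATTRIBUTES);
                }
                return FileVisitResult.CONTINUE;
            }

            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                Path rel = src.relativize(dir);
                if (!matcher.matches(rel)) {
                    try {
                        Files.delete(dst.resolve(rel.toString())); // succeeds only if nothing inside matched
                    } catch (DirectoryNotEmptyException ignored) {
                    }
                }
                return FileVisitResult.CONTINUE;
            }
        });

        System.out.println(Files.exists(dst.resolve("dist/index.js")));     // true
        System.out.println(Files.exists(dst.resolve("node_modules/x.js"))); // false
    }
}
```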
diff --git a/server/jetty/src/main/java/io/deephaven/server/jetty/JettyBackedGrpcServer.java b/server/jetty/src/main/java/io/deephaven/server/jetty/JettyBackedGrpcServer.java
index 7b921233312..97e869601c8 100644
--- a/server/jetty/src/main/java/io/deephaven/server/jetty/JettyBackedGrpcServer.java
+++ b/server/jetty/src/main/java/io/deephaven/server/jetty/JettyBackedGrpcServer.java
@@ -106,7 +106,7 @@ public JettyBackedGrpcServer(
         context.setInitParameter(DefaultServlet.CONTEXT_INIT + "dirAllowed", "false");

         // Cache all of the appropriate assets folders
-        for (String appRoot : List.of("/ide/", "/iframe/table/", "/iframe/chart/")) {
+        for (String appRoot : List.of("/ide/", "/iframe/table/", "/iframe/chart/", "/iframe/widget/")) {
             context.addFilter(NoCacheFilter.class, appRoot + "*", EnumSet.noneOf(DispatcherType.class));
             context.addFilter(CacheFilter.class, appRoot + "assets/*", EnumSet.noneOf(DispatcherType.class));
         }
diff --git a/server/jetty/src/main/java/io/deephaven/server/jetty/JsPluginManifest.java b/server/jetty/src/main/java/io/deephaven/server/jetty/JsPluginManifest.java
deleted file mode 100644
index 6b7f162834e..00000000000
--- a/server/jetty/src/main/java/io/deephaven/server/jetty/JsPluginManifest.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending
- */
-package io.deephaven.server.jetty;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import io.deephaven.annotations.SimpleStyle;
-import org.immutables.value.Value.Immutable;
-import org.immutables.value.Value.Parameter;
-
-import java.util.List;
-
-@Immutable
-@SimpleStyle
-abstract class JsPluginManifest {
-    public static final String PLUGINS = "plugins";
-
-    @JsonCreator
-    public static JsPluginManifest of(
-            @JsonProperty(value = PLUGINS, required = true) List<JsPluginManifestEntry> plugins) {
-        return ImmutableJsPluginManifest.of(plugins);
-    }
-
-    @Parameter
-    @JsonProperty(PLUGINS)
-    public abstract List<JsPluginManifestEntry> plugins();
-}
diff --git a/server/jetty/src/main/java/io/deephaven/server/jetty/JsPlugins.java b/server/jetty/src/main/java/io/deephaven/server/jetty/JsPlugins.java
index ffebf7b1700..41f1527c52a 100644
--- a/server/jetty/src/main/java/io/deephaven/server/jetty/JsPlugins.java
+++ b/server/jetty/src/main/java/io/deephaven/server/jetty/JsPlugins.java
@@ -4,19 +4,12 @@
 package io.deephaven.server.jetty;

 import io.deephaven.plugin.js.JsPlugin;
-import io.deephaven.plugin.js.JsPluginManifestPath;
-import io.deephaven.plugin.js.JsPluginPackagePath;
 import io.deephaven.plugin.js.JsPluginRegistration;

 import java.io.IOException;
-import java.io.InputStream;
 import java.io.UncheckedIOException;
 import java.net.URI;
-import java.nio.file.Files;
 import java.util.Objects;
-import java.util.function.Consumer;
-
-import static io.deephaven.server.jetty.Json.OBJECT_MAPPER;

 /**
  * Jetty-specific implementation of {@link JsPluginRegistration} to collect plugins and advertise their contents to
@@ -42,56 +35,9 @@ public URI filesystem() {
     @Override
     public void register(JsPlugin jsPlugin) {
         try {
-            if (jsPlugin instanceof JsPluginPackagePath) {
-                copy((JsPluginPackagePath) jsPlugin, zipFs);
-                return;
-            }
-            if (jsPlugin instanceof JsPluginManifestPath) {
-                copyAll((JsPluginManifestPath) jsPlugin, zipFs);
-                return;
-            }
+            zipFs.add(jsPlugin);
         } catch (IOException e) {
             throw new UncheckedIOException(e);
         }
-        throw new IllegalStateException("Unexpected JsPlugin class: " + jsPlugin.getClass());
-    }
-
-    private static void copy(JsPluginPackagePath srcPackagePath, JsPluginsZipFilesystem dest)
-            throws IOException {
-        copy(srcPackagePath, dest, null);
-    }
-
-    private static void copy(JsPluginPackagePath srcPackagePath, JsPluginsZipFilesystem dest,
-            JsPluginManifestEntry expected)
-            throws IOException {
-        final JsPluginManifestEntry srcEntry = entry(srcPackagePath);
-        if (expected != null && !expected.equals(srcEntry)) {
-            throw new IllegalStateException(String.format(
-                    "Inconsistency between manifest.json and package.json, expected=%s, actual=%s", expected,
-                    srcEntry));
-        }
-        dest.copyFrom(srcPackagePath, srcEntry);
-    }
-
-    private static void copyAll(JsPluginManifestPath srcManifestPath, JsPluginsZipFilesystem dest) throws IOException {
-        final JsPluginManifest manifestInfo = manifest(srcManifestPath);
-        for (JsPluginManifestEntry manifestEntry : manifestInfo.plugins()) {
-            final JsPluginPackagePath packagePath = srcManifestPath.packagePath(manifestEntry.name());
-            copy(packagePath, dest, manifestEntry);
-        }
-    }
-
-    private static JsPluginManifest manifest(JsPluginManifestPath manifest) throws IOException {
-        // jackson impl does buffering internally
-        try (final InputStream in = Files.newInputStream(manifest.manifestJson())) {
-            return OBJECT_MAPPER.readValue(in, JsPluginManifest.class);
-        }
-    }
-
-    private static JsPluginManifestEntry entry(JsPluginPackagePath packagePath) throws IOException {
-        // jackson impl does buffering internally
-        try (final InputStream in = Files.newInputStream(packagePath.packageJson())) {
-            return OBJECT_MAPPER.readValue(in, JsPluginManifestEntry.class);
-        }
     }
 }
diff --git a/server/jetty/src/main/java/io/deephaven/server/jetty/JsPluginsZipFilesystem.java b/server/jetty/src/main/java/io/deephaven/server/jetty/JsPluginsZipFilesystem.java
index 554ac81098b..7250a8e71b0 100644
--- a/server/jetty/src/main/java/io/deephaven/server/jetty/JsPluginsZipFilesystem.java
+++ b/server/jetty/src/main/java/io/deephaven/server/jetty/JsPluginsZipFilesystem.java
@@ -4,8 +4,8 @@
 package io.deephaven.server.jetty;

 import io.deephaven.configuration.CacheDir;
-import io.deephaven.plugin.js.JsPluginManifestPath;
-import io.deephaven.plugin.js.JsPluginPackagePath;
+import io.deephaven.plugin.js.JsPlugin;
+import io.deephaven.server.plugin.js.JsPluginManifest;

 import java.io.IOException;
 import java.io.OutputStream;
@@ -14,6 +14,7 @@
 import java.nio.file.FileSystems;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.PathMatcher;
 import java.nio.file.StandardCopyOption;
 import java.util.ArrayList;
 import java.util.List;
@@ -21,6 +22,7 @@
 import java.util.Objects;

 import static io.deephaven.server.jetty.Json.OBJECT_MAPPER;
+import static io.deephaven.server.plugin.js.JsPluginManifest.MANIFEST_JSON;

 class JsPluginsZipFilesystem {
     private static final String ZIP_ROOT = "/";
@@ -44,37 +46,38 @@ public static JsPluginsZipFilesystem create() throws IOException {
     }

     private final URI filesystem;
-    private final List<JsPluginManifestEntry> entries;
+    private final List<JsPlugin> plugins;

     private JsPluginsZipFilesystem(URI filesystem) {
         this.filesystem = Objects.requireNonNull(filesystem);
-        this.entries = new ArrayList<>();
+        this.plugins = new ArrayList<>();
     }

     public URI filesystem() {
         return filesystem;
     }

-    public synchronized void copyFrom(JsPluginPackagePath srcPackagePath, JsPluginManifestEntry srcEntry)
-            throws IOException {
-        checkExisting(srcEntry);
+    public synchronized void add(JsPlugin plugin) throws IOException {
+        checkExisting(plugin.name());
         // TODO(deephaven-core#3005): js-plugins checksum-based caching
         // Note: FileSystem#close is necessary to write out contents for ZipFileSystem
         try (final FileSystem fs = FileSystems.newFileSystem(filesystem, Map.of())) {
-            final JsPluginManifestPath manifest = manifest(fs);
-            copyRecursive(srcPackagePath, manifest.packagePath(srcEntry.name()));
-            entries.add(srcEntry);
+            final Path manifestRoot = manifestRoot(fs);
+            final Path dstPath = manifestRoot.resolve(plugin.name());
+            // This relies on internal knowledge that paths() must be PathsInternal, which extends PathMatcher.
+            final PathMatcher pathMatcher = (PathMatcher) plugin.paths();
+            // If listing and traversing the contents of development directories (and skipping the copy) becomes
+            // too expensive, we can add logic here wrt PathsInternal/PathsPrefix to specify a dirMatcher. Or,
+            // properly route directly from the filesystem via Jetty.
+            CopyHelper.copyRecursive(plugin.path(), dstPath, pathMatcher);
+            plugins.add(plugin);
             writeManifest(fs);
         }
     }

-    private static void copyRecursive(JsPluginPackagePath src, JsPluginPackagePath dst) throws IOException {
-        CopyHelper.copyRecursive(src.path(), dst.path());
-    }
-
-    private void checkExisting(JsPluginManifestEntry info) {
-        for (JsPluginManifestEntry existing : entries) {
-            if (info.name().equals(existing.name())) {
+    private void checkExisting(String name) {
+        for (JsPlugin existing : plugins) {
+            if (name.equals(existing.name())) {
                 // TODO(deephaven-core#3048): Improve JS plugin support around plugins with conflicting names
                 throw new IllegalArgumentException(String.format(
                         "js plugin with name '%s' already exists. See https://github.com/deephaven/deephaven-core/issues/3048",
@@ -91,11 +94,11 @@ private synchronized void init() throws IOException {
     }

     private void writeManifest(FileSystem fs) throws IOException {
-        final Path manifestJson = manifest(fs).manifestJson();
+        final Path manifestJson = manifestRoot(fs).resolve(MANIFEST_JSON);
         final Path manifestJsonTmp = manifestJson.resolveSibling(manifestJson.getFileName().toString() + ".tmp");
         // jackson impl does buffering internally
         try (final OutputStream out = Files.newOutputStream(manifestJsonTmp)) {
-            OBJECT_MAPPER.writeValue(out, JsPluginManifest.of(entries));
+            OBJECT_MAPPER.writeValue(out, manifest());
             out.flush();
         }
         Files.move(manifestJsonTmp, manifestJson,
@@ -104,7 +107,11 @@ private void writeManifest(FileSystem fs) throws IOException {
                 StandardCopyOption.ATOMIC_MOVE);
     }

-    private static JsPluginManifestPath manifest(FileSystem fs) {
-        return JsPluginManifestPath.of(fs.getPath(ZIP_ROOT));
+    private JsPluginManifest manifest() {
+        return JsPluginManifest.from(plugins);
+    }
+
+    private static Path manifestRoot(FileSystem fs) {
+        return fs.getPath(ZIP_ROOT);
     }
 }
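The `(PathMatcher) plugin.paths()` cast above leans on the stated internal contract that every `Paths` implementation also implements `java.nio.file.PathMatcher` over plugin-relative paths. A rough usage sketch; the `expected` results are inferred from how `Paths.ofPrefixes` and `Paths.all` are used elsewhere in this change, and are assumptions rather than asserted behavior:

```java
import java.nio.file.Path;
import java.nio.file.PathMatcher;

import io.deephaven.plugin.js.Paths;

public class PathsAsMatcher {
    public static void main(String[] args) {
        // PathsInternal implementations double as PathMatchers over plugin-relative paths.
        PathMatcher distOnly = (PathMatcher) Paths.ofPrefixes(Path.of("dist"));
        System.out.println(distOnly.matches(Path.of("dist/index.js")));   // expected: true
        System.out.println(distOnly.matches(Path.of("node_modules/x"))); // expected: false

        PathMatcher everything = (PathMatcher) Paths.all();
        System.out.println(everything.matches(Path.of("anything/at/all"))); // expected: true
    }
}
```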
diff --git a/server/jetty/src/test/java/io/deephaven/server/jetty/JettyFlightRoundTripTest.java b/server/jetty/src/test/java/io/deephaven/server/jetty/JettyFlightRoundTripTest.java
index 9d00dfd06b9..7278b18ee3f 100644
--- a/server/jetty/src/test/java/io/deephaven/server/jetty/JettyFlightRoundTripTest.java
+++ b/server/jetty/src/test/java/io/deephaven/server/jetty/JettyFlightRoundTripTest.java
@@ -6,20 +6,27 @@
 import dagger.Component;
 import dagger.Module;
 import dagger.Provides;
-import io.deephaven.server.arrow.ArrowModule;
-import io.deephaven.server.config.ConfigServiceModule;
-import io.deephaven.server.console.ConsoleModule;
-import io.deephaven.server.log.LogModule;
+import io.deephaven.server.jetty.js.Example123Registration;
+import io.deephaven.server.jetty.js.Sentinel;
+import io.deephaven.server.plugin.js.JsPluginsManifestRegistration;
+import io.deephaven.server.plugin.js.JsPluginsNpmPackageRegistration;
 import io.deephaven.server.runner.ExecutionContextUnitTestModule;
-import io.deephaven.server.session.ObfuscatingErrorTransformerModule;
-import io.deephaven.server.session.SessionModule;
-import io.deephaven.server.table.TableModule;
-import io.deephaven.server.test.TestAuthModule;
 import io.deephaven.server.test.FlightMessageRoundTripTest;
+import org.eclipse.jetty.client.HttpClient;
+import org.eclipse.jetty.client.api.ContentResponse;
+import org.eclipse.jetty.http.HttpFields;
+import org.eclipse.jetty.http.HttpMethod;
+import org.eclipse.jetty.http.HttpStatus;
+import org.junit.Test;

 import javax.inject.Singleton;
+import java.nio.file.Path;
 import java.time.Duration;
 import java.time.temporal.ChronoUnit;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
+
+import static org.assertj.core.api.Assertions.assertThat;

 public class JettyFlightRoundTripTest extends FlightMessageRoundTripTest {
@@ -36,18 +43,10 @@ static JettyConfig providesJettyConfig() {

     @Singleton
     @Component(modules = {
-            ArrowModule.class,
-            ConfigServiceModule.class,
-            ConsoleModule.class,
             ExecutionContextUnitTestModule.class,
             FlightTestModule.class,
             JettyServerModule.class,
             JettyTestConfig.class,
-            LogModule.class,
-            SessionModule.class,
-            TableModule.class,
-            TestAuthModule.class,
-            ObfuscatingErrorTransformerModule.class,
     })
     public interface JettyTestComponent extends TestComponent {
     }
@@ -56,4 +55,142 @@ public interface JettyTestComponent extends TestComponent {
     protected TestComponent component() {
         return DaggerJettyFlightRoundTripTest_JettyTestComponent.create();
     }
+
+    @Test
+    public void jsPlugins() throws Exception {
+        // Note: JettyFlightRoundTripTest is not the most minimal / appropriate bootstrapping for this test, but it
+        // is the most convenient since it has all of the necessary prerequisites.
+        new Example123Registration().registerInto(component.registration());
+        testJsPluginExamples(false, true, true);
+    }
+
+    @Test
+    public void jsPluginsFromManifest() throws Exception {
+        // Note: JettyFlightRoundTripTest is not the most minimal / appropriate bootstrapping for this test, but it
+        // is the most convenient since it has all of the necessary prerequisites.
+        final Path manifestRoot = Path.of(Sentinel.class.getResource("examples").toURI());
+        new JsPluginsManifestRegistration(manifestRoot)
+                .registerInto(component.registration());
+        testJsPluginExamples(false, false, true);
+    }
+
+    @Test
+    public void jsPluginsFromNpmPackages() throws Exception {
+        // Note: JettyFlightRoundTripTest is not the most minimal / appropriate bootstrapping for this test, but it
+        // is the most convenient since it has all of the necessary prerequisites.
+        final Path example1Root = Path.of(Sentinel.class.getResource("examples/@deephaven_test/example1").toURI());
+        final Path example2Root = Path.of(Sentinel.class.getResource("examples/@deephaven_test/example2").toURI());
+        // example3 is *not* an npm package; it has no package.json.
+        new JsPluginsNpmPackageRegistration(example1Root)
+                .registerInto(component.registration());
+        new JsPluginsNpmPackageRegistration(example2Root)
+                .registerInto(component.registration());
+        testJsPluginExamples(true, true, false);
+    }
+
+    private void testJsPluginExamples(boolean example1IsLimited, boolean example2IsLimited, boolean hasExample3)
+            throws Exception {
+        final HttpClient client = new HttpClient();
+        client.start();
+        try {
+            if (hasExample3) {
+                manifestTest123(client);
+            } else {
+                manifestTest12(client);
+            }
+            example1Tests(client, example1IsLimited);
+            example2Tests(client, example2IsLimited);
+            if (hasExample3) {
+                example3Tests(client);
+            }
+        } finally {
+            client.stop();
+        }
+    }
+
+    private void manifestTest12(HttpClient client) throws InterruptedException, TimeoutException, ExecutionException {
+        final ContentResponse manifestResponse = get(client, "js-plugins/manifest.json");
+        assertOk(manifestResponse, "application/json",
+                "{\"plugins\":[{\"name\":\"@deephaven_test/example1\",\"version\":\"0.1.0\",\"main\":\"dist/index.js\"},{\"name\":\"@deephaven_test/example2\",\"version\":\"0.2.0\",\"main\":\"dist/index.js\"}]}");
+    }
+
+    private void manifestTest123(HttpClient client) throws InterruptedException, TimeoutException, ExecutionException {
+        final ContentResponse manifestResponse = get(client, "js-plugins/manifest.json");
+        assertOk(manifestResponse, "application/json",
+                "{\"plugins\":[{\"name\":\"@deephaven_test/example1\",\"version\":\"0.1.0\",\"main\":\"dist/index.js\"},{\"name\":\"@deephaven_test/example2\",\"version\":\"0.2.0\",\"main\":\"dist/index.js\"},{\"name\":\"@deephaven_test/example3\",\"version\":\"0.3.0\",\"main\":\"index.js\"}]}");
+    }
+
+    private void example1Tests(HttpClient client, boolean isLimited)
+            throws InterruptedException, TimeoutException, ExecutionException {
+        if (isLimited) {
+            assertThat(get(client, "js-plugins/@deephaven_test/example1/package.json").getStatus())
+                    .isEqualTo(HttpStatus.NOT_FOUND_404);
+        } else {
+            assertOk(get(client, "js-plugins/@deephaven_test/example1/package.json"),
+                    "application/json",
+                    "{\"name\":\"@deephaven_test/example1\",\"version\":\"0.1.0\",\"main\":\"dist/index.js\",\"files\":[\"dist\"]}");
+        }
+
+        assertOk(
+                get(client, "js-plugins/@deephaven_test/example1/dist/index.js"),
+                "text/javascript",
+                "// example1/dist/index.js");
+
+        assertOk(
+                get(client, "js-plugins/@deephaven_test/example1/dist/index2.js"),
+                "text/javascript",
+                "// example1/dist/index2.js");
+    }
+
+    private void example2Tests(HttpClient client, boolean isLimited)
+            throws InterruptedException, TimeoutException, ExecutionException {
+        if (isLimited) {
+            assertThat(get(client, "js-plugins/@deephaven_test/example2/package.json").getStatus())
+                    .isEqualTo(HttpStatus.NOT_FOUND_404);
+        } else {
+            assertOk(get(client, "js-plugins/@deephaven_test/example2/package.json"),
+                    "application/json",
+                    "{\"name\":\"@deephaven_test/example2\",\"version\":\"0.2.0\",\"main\":\"dist/index.js\",\"files\":[\"dist\"]}");
+        }
+
+        assertOk(
+                get(client, "js-plugins/@deephaven_test/example2/dist/index.js"),
+                "text/javascript",
+                "// example2/dist/index.js");
+
+        assertOk(
+                get(client, "js-plugins/@deephaven_test/example2/dist/index2.js"),
+                "text/javascript",
+                "// example2/dist/index2.js");
+    }
+
+    private void example3Tests(HttpClient client) throws InterruptedException, TimeoutException, ExecutionException {
+        assertOk(
+                get(client, "js-plugins/@deephaven_test/example3/index.js"),
+                "text/javascript",
+                "// example3/index.js");
+    }
+
+    private ContentResponse get(HttpClient client, String path)
+            throws InterruptedException, TimeoutException, ExecutionException {
+        return client
+                .newRequest("localhost", localPort)
+                .path(path)
+                .method(HttpMethod.GET)
+                .send();
+    }
+
+    private static void assertOk(ContentResponse response, String contentType, String expected) {
+        assertThat(response.getStatus()).isEqualTo(HttpStatus.OK_200);
+        assertThat(response.getMediaType()).isEqualTo(contentType);
+        assertThat(response.getContentAsString()).isEqualTo(expected);
+        assertNoCache(response);
+    }
+
+    private static void assertNoCache(ContentResponse response) {
+        final HttpFields headers = response.getHeaders();
+        assertThat(headers.getDateField("Expires")).isEqualTo(0);
+        assertThat(headers.get("Pragma")).isEqualTo("no-cache");
+        assertThat(headers.get("Cache-control")).isEqualTo("no-cache, must-revalidate, pre-check=0, post-check=0");
+    }
 }
diff --git a/server/jetty/src/test/java/io/deephaven/server/jetty/js/Example123Registration.java b/server/jetty/src/test/java/io/deephaven/server/jetty/js/Example123Registration.java
new file mode 100644
index 00000000000..9fd0e7300fb
--- /dev/null
+++ b/server/jetty/src/test/java/io/deephaven/server/jetty/js/Example123Registration.java
@@ -0,0 +1,68 @@
+/**
+ * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending
+ */
+package io.deephaven.server.jetty.js;
+
+import io.deephaven.plugin.Registration;
+import io.deephaven.plugin.js.JsPlugin;
+import io.deephaven.plugin.js.Paths;
+
+import java.net.URISyntaxException;
+import java.nio.file.Path;
+
+public final class Example123Registration implements Registration {
+
+    public Example123Registration() {}
+
+    @Override
+    public void registerInto(Callback callback) {
+        final JsPlugin example1;
+        final JsPlugin example2;
+        final JsPlugin example3;
+        try {
+            example1 = example1();
+            example2 = example2();
+            example3 = example3();
+        } catch (URISyntaxException e) {
+            throw new RuntimeException(e);
+        }
+        callback.register(example1);
+        callback.register(example2);
+        callback.register(example3);
+    }
+
+    private static JsPlugin example1() throws URISyntaxException {
+        final Path resourcePath = Path.of(Sentinel.class.getResource("examples/@deephaven_test/example1").toURI());
+        final Path main = resourcePath.relativize(resourcePath.resolve("dist/index.js"));
+        return JsPlugin.builder()
+                .name("@deephaven_test/example1")
+                .version("0.1.0")
+                .main(main)
+                .path(resourcePath)
+                .build();
+    }
+
+    private static JsPlugin example2() throws URISyntaxException {
+        final Path resourcePath = Path.of(Sentinel.class.getResource("examples/@deephaven_test/example2").toURI());
+        final Path dist = resourcePath.relativize(resourcePath.resolve("dist"));
+        final Path main = dist.resolve("index.js");
+        return JsPlugin.builder()
+                .name("@deephaven_test/example2")
+                .version("0.2.0")
+                .main(main)
+                .path(resourcePath)
+                .paths(Paths.ofPrefixes(dist))
+                .build();
+    }
+
+    private static JsPlugin example3() throws URISyntaxException {
+        final Path resourcePath = Path.of(Sentinel.class.getResource("examples/@deephaven_test/example3").toURI());
+        final Path main = resourcePath.relativize(resourcePath.resolve("index.js"));
+        return JsPlugin.builder()
+                .name("@deephaven_test/example3")
+                .version("0.3.0")
+                .main(main)
+                .path(resourcePath)
+                .build();
+    }
+}
diff --git a/server/jetty/src/test/java/io/deephaven/server/jetty/js/Sentinel.java b/server/jetty/src/test/java/io/deephaven/server/jetty/js/Sentinel.java
new file mode 100644
index 00000000000..ff61539b94e
--- /dev/null
+++ b/server/jetty/src/test/java/io/deephaven/server/jetty/js/Sentinel.java
@@ -0,0 +1,8 @@
+/**
+ * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending
+ */
+package io.deephaven.server.jetty.js;
+
+public class Sentinel {
+    // just for the class
+}
diff --git a/server/jetty/src/test/java/io/deephaven/server/plugin/js/JsPluginsManifestRegistration.java b/server/jetty/src/test/java/io/deephaven/server/plugin/js/JsPluginsManifestRegistration.java
new file mode 100644
index 00000000000..508eae4521e
--- /dev/null
+++ b/server/jetty/src/test/java/io/deephaven/server/plugin/js/JsPluginsManifestRegistration.java
@@ -0,0 +1,35 @@
+/**
+ * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending
+ */
+package io.deephaven.server.plugin.js;
+
+import io.deephaven.plugin.Registration;
+import io.deephaven.plugin.js.JsPlugin;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.Objects;
+
+public class JsPluginsManifestRegistration implements Registration {
+
+    private final Path path;
+
+    public JsPluginsManifestRegistration(Path path) {
+        this.path = Objects.requireNonNull(path);
+    }
+
+    @Override
+    public void registerInto(Callback callback) {
+        final List<JsPlugin> plugins;
+        try {
+            plugins = JsPluginsFromManifest.of(path);
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
+        for (JsPlugin plugin : plugins) {
+            callback.register(plugin);
+        }
+    }
+}
diff --git a/server/jetty/src/test/java/io/deephaven/server/plugin/js/JsPluginsNpmPackageRegistration.java b/server/jetty/src/test/java/io/deephaven/server/plugin/js/JsPluginsNpmPackageRegistration.java
new file mode 100644
index 00000000000..ded46d30b7d
--- /dev/null
+++ b/server/jetty/src/test/java/io/deephaven/server/plugin/js/JsPluginsNpmPackageRegistration.java
@@ -0,0 +1,32 @@
+/**
+ * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending
+ */
+package io.deephaven.server.plugin.js;
+
+import io.deephaven.plugin.Registration;
+import io.deephaven.plugin.js.JsPlugin;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.nio.file.Path;
+import java.util.Objects;
+
+public class JsPluginsNpmPackageRegistration implements Registration {
+
+    private final Path path;
+
+    public JsPluginsNpmPackageRegistration(Path path) {
+        this.path = Objects.requireNonNull(path);
+    }
+
+    @Override
+    public void registerInto(Callback callback) {
+        final JsPlugin plugin;
+        try {
+            plugin = JsPluginFromNpmPackage.of(path);
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
+        callback.register(plugin);
+    }
+}
diff --git a/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example1/dist/index.js b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example1/dist/index.js
new file mode 100644
index 00000000000..de46952cc24
--- /dev/null
+++ b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example1/dist/index.js
@@ -0,0 +1 @@
+// example1/dist/index.js
\ No newline at end of file
diff --git a/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example1/dist/index2.js b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example1/dist/index2.js
new file mode 100644
index 00000000000..ef31df67846
--- /dev/null
+++ b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example1/dist/index2.js
@@ -0,0 +1 @@
+// example1/dist/index2.js
\ No newline at end of file
diff --git a/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example1/package.json b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example1/package.json
new file mode 100644
index 00000000000..b3733e4fe6d
--- /dev/null
+++ b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example1/package.json
@@ -0,0 +1 @@
+{"name":"@deephaven_test/example1","version":"0.1.0","main":"dist/index.js","files":["dist"]}
\ No newline at end of file
diff --git a/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example2/dist/index.js b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example2/dist/index.js
new file mode 100644
index 00000000000..f84594080ee
--- /dev/null
+++ b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example2/dist/index.js
@@ -0,0 +1 @@
+// example2/dist/index.js
\ No newline at end of file
diff --git a/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example2/dist/index2.js b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example2/dist/index2.js
new file mode 100644
index 00000000000..536a8edceee
--- /dev/null
+++ b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example2/dist/index2.js
@@ -0,0 +1 @@
+// example2/dist/index2.js
\ No newline at end of file
diff --git a/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example2/package.json b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example2/package.json
new file mode 100644
index 00000000000..64ca82a446b
--- /dev/null
+++ b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example2/package.json
@@ -0,0 +1 @@
+{"name":"@deephaven_test/example2","version":"0.2.0","main":"dist/index.js","files":["dist"]}
\ No newline at end of file
diff --git a/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example3/index.js b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example3/index.js
new file mode 100644
index 00000000000..62c773b6ad4
--- /dev/null
+++ b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/@deephaven_test/example3/index.js
@@ -0,0 +1 @@
+// example3/index.js
\ No newline at end of file
diff --git a/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/manifest.json b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/manifest.json
new file mode 100644
index 00000000000..eaa745d1b6b
--- /dev/null
+++ b/server/jetty/src/test/resources/io/deephaven/server/jetty/js/examples/manifest.json
@@ -0,0 +1,19 @@
+{
+  "plugins": [
+    {
+      "name": "@deephaven_test/example1",
+      "main": "dist/index.js",
+      "version": "0.1.0"
+    },
+    {
+      "name": "@deephaven_test/example2",
+      "main": "dist/index.js",
+      "version": "0.2.0"
+    },
+    {
+      "name": "@deephaven_test/example3",
+      "main": "index.js",
+      "version": "0.3.0"
+    }
+  ]
+}
diff --git a/server/netty/src/test/java/io/deephaven/server/netty/NettyFlightRoundTripTest.java b/server/netty/src/test/java/io/deephaven/server/netty/NettyFlightRoundTripTest.java
index 1876b0924c2..89cc9833d26 100644
--- a/server/netty/src/test/java/io/deephaven/server/netty/NettyFlightRoundTripTest.java
+++ b/server/netty/src/test/java/io/deephaven/server/netty/NettyFlightRoundTripTest.java
@@ -6,15 +6,7 @@
 import dagger.Component;
 import dagger.Module;
 import dagger.Provides;
-import io.deephaven.server.arrow.ArrowModule;
-import io.deephaven.server.config.ConfigServiceModule;
-import io.deephaven.server.console.ConsoleModule;
-import io.deephaven.server.log.LogModule;
 import io.deephaven.server.runner.ExecutionContextUnitTestModule;
-import io.deephaven.server.session.ObfuscatingErrorTransformerModule;
-import io.deephaven.server.session.SessionModule;
-import io.deephaven.server.table.TableModule;
-import io.deephaven.server.test.TestAuthModule;
 import io.deephaven.server.test.FlightMessageRoundTripTest;

 import javax.inject.Singleton;
@@ -36,18 +28,10 @@ static NettyConfig providesNettyConfig() {

     @Singleton
     @Component(modules = {
-            ArrowModule.class,
-            ConfigServiceModule.class,
-            ConsoleModule.class,
             ExecutionContextUnitTestModule.class,
             FlightTestModule.class,
-            LogModule.class,
             NettyServerModule.class,
             NettyTestConfig.class,
-            SessionModule.class,
-            TableModule.class,
-            TestAuthModule.class,
-            ObfuscatingErrorTransformerModule.class,
     })
     public interface NettyTestComponent extends TestComponent {
     }
diff --git a/server/src/main/java/io/deephaven/server/console/NoConsoleSessionModule.java b/server/src/main/java/io/deephaven/server/console/NoConsoleSessionModule.java
index eef5255c7e4..a460bc6f1f7 100644
--- a/server/src/main/java/io/deephaven/server/console/NoConsoleSessionModule.java
+++ b/server/src/main/java/io/deephaven/server/console/NoConsoleSessionModule.java
@@ -12,6 +12,7 @@
 import io.deephaven.engine.util.NoLanguageDeephavenSession;
 import io.deephaven.engine.util.ScriptSession;
 import io.deephaven.server.console.groovy.InitScriptsModule;
+import io.deephaven.util.thread.ThreadInitializationFactory;

 import javax.inject.Named;
@@ -26,7 +27,8 @@ ScriptSession bindScriptSession(NoLanguageDeephavenSession noLanguageSession) {

     @Provides
     NoLanguageDeephavenSession bindNoLanguageSession(
-            @Named(PeriodicUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME) final UpdateGraph updateGraph) {
-        return new NoLanguageDeephavenSession(updateGraph);
+            @Named(PeriodicUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME) final UpdateGraph updateGraph,
+            ThreadInitializationFactory threadInitializationFactory) {
+        return new NoLanguageDeephavenSession(updateGraph, threadInitializationFactory);
     }
 }
diff --git a/server/src/main/java/io/deephaven/server/console/SessionToExecutionStateModule.java b/server/src/main/java/io/deephaven/server/console/SessionToExecutionStateModule.java
deleted file mode 100644
index 633febd1a1e..00000000000
--- a/server/src/main/java/io/deephaven/server/console/SessionToExecutionStateModule.java
+++ /dev/null
@@ -1,19 +0,0 @@
-package io.deephaven.server.console;
-
-import dagger.Module;
-import dagger.Provides;
-import io.deephaven.engine.context.ExecutionContext;
-import io.deephaven.engine.util.ScriptSession;
-import io.deephaven.server.auth.AuthorizationProvider;
-
-/**
- * Deprecated: use {@link ExecutionContextModule} instead.
- */
-@Deprecated(since = "0.26.0", forRemoval = true)
-@Module
-public interface SessionToExecutionStateModule {
-    @Provides
-    static ExecutionContext bindExecutionContext(ScriptSession session, AuthorizationProvider authProvider) {
-        return ExecutionContextModule.bindExecutionContext(session, authProvider);
-    }
-}
diff --git a/server/src/main/java/io/deephaven/server/console/groovy/GroovyConsoleSessionModule.java b/server/src/main/java/io/deephaven/server/console/groovy/GroovyConsoleSessionModule.java
index eeeead6567f..c5e8158568d 100644
--- a/server/src/main/java/io/deephaven/server/console/groovy/GroovyConsoleSessionModule.java
+++ b/server/src/main/java/io/deephaven/server/console/groovy/GroovyConsoleSessionModule.java
@@ -13,6 +13,7 @@
 import io.deephaven.engine.util.GroovyDeephavenSession.RunScripts;
 import io.deephaven.engine.util.ScriptSession;
 import io.deephaven.plugin.type.ObjectTypeLookup;
+import io.deephaven.util.thread.ThreadInitializationFactory;

 import javax.inject.Named;
 import java.io.IOException;
@@ -30,11 +31,12 @@ ScriptSession bindScriptSession(final GroovyDeephavenSession groovySession) {
     @Provides
     GroovyDeephavenSession bindGroovySession(
             @Named(PeriodicUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME) final UpdateGraph updateGraph,
+            ThreadInitializationFactory threadInitializationFactory,
             final ObjectTypeLookup lookup,
             final ScriptSession.Listener listener,
             final RunScripts runScripts) {
         try {
-            return new GroovyDeephavenSession(updateGraph, lookup, listener, runScripts);
+            return new GroovyDeephavenSession(updateGraph, threadInitializationFactory, lookup, listener, runScripts);
         } catch (final IOException e) {
             throw new UncheckedIOException(e);
         }
diff --git a/server/src/main/java/io/deephaven/server/console/python/PythonConsoleSessionModule.java b/server/src/main/java/io/deephaven/server/console/python/PythonConsoleSessionModule.java
index 78999fb4c0c..d5f54618462 100644
--- a/server/src/main/java/io/deephaven/server/console/python/PythonConsoleSessionModule.java
+++ b/server/src/main/java/io/deephaven/server/console/python/PythonConsoleSessionModule.java
@@ -13,6 +13,7 @@
 import io.deephaven.engine.util.ScriptSession;
 import io.deephaven.integrations.python.PythonDeephavenSession;
 import io.deephaven.plugin.type.ObjectTypeLookup;
+import io.deephaven.util.thread.ThreadInitializationFactory;

 import javax.inject.Named;
 import java.io.IOException;
@@ -30,11 +31,13 @@ ScriptSession bindScriptSession(PythonDeephavenSession pythonSession) {
     @Provides
     PythonDeephavenSession bindPythonSession(
             @Named(PeriodicUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME) final UpdateGraph updateGraph,
+            final ThreadInitializationFactory threadInitializationFactory,
             final ObjectTypeLookup lookup,
             final ScriptSession.Listener listener,
             final PythonEvaluatorJpy pythonEvaluator) {
         try {
-            return new PythonDeephavenSession(updateGraph, lookup, listener, true, pythonEvaluator);
+            return new PythonDeephavenSession(updateGraph, threadInitializationFactory, lookup, listener, true,
+                    pythonEvaluator);
         } catch (IOException e) {
             throw new UncheckedIOException("Unable to run python startup scripts", e);
         }
diff --git a/server/src/main/java/io/deephaven/server/console/python/PythonDebuggingModule.java b/server/src/main/java/io/deephaven/server/console/python/PythonDebuggingModule.java
new file mode 100644
index 00000000000..7f4d70ab530
--- /dev/null
+++ b/server/src/main/java/io/deephaven/server/console/python/PythonDebuggingModule.java
@@ -0,0 +1,16 @@
+//
+// Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending
+//
+package io.deephaven.server.console.python;
+
+import dagger.Binds;
+import dagger.Module;
+import dagger.multibindings.IntoSet;
+import io.deephaven.util.thread.ThreadInitializationFactory;
+
+@Module
+public interface PythonDebuggingModule {
+    @Binds
+    @IntoSet
+    ThreadInitializationFactory bindDebuggingInitializer(DebuggingInitializer debuggingInitializer);
+}
diff --git a/server/src/main/java/io/deephaven/server/plugin/PluginRegistrationVisitor.java b/server/src/main/java/io/deephaven/server/plugin/PluginRegistrationVisitor.java
index 79d766f66f8..97ec345a555 100644
--- a/server/src/main/java/io/deephaven/server/plugin/PluginRegistrationVisitor.java
+++ b/server/src/main/java/io/deephaven/server/plugin/PluginRegistrationVisitor.java
@@ -11,7 +11,6 @@
 import javax.inject.Inject;
 import java.util.Objects;
-import java.util.function.Consumer;

 /**
  * Plugin {@link io.deephaven.plugin.Registration.Callback} implementation that forwards registered plugins to a
diff --git a/server/src/main/java/io/deephaven/server/plugin/js/Jackson.java b/server/src/main/java/io/deephaven/server/plugin/js/Jackson.java
new file mode 100644
index 00000000000..1454b3e5735
--- /dev/null
+++ b/server/src/main/java/io/deephaven/server/plugin/js/Jackson.java
@@ -0,0 +1,12 @@
+/**
+ * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending
+ */
+package io.deephaven.server.plugin.js;
+
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+class Jackson {
+    static final ObjectMapper OBJECT_MAPPER =
+            new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+}
diff --git a/server/src/main/java/io/deephaven/server/plugin/js/JsPluginConfigDirRegistration.java b/server/src/main/java/io/deephaven/server/plugin/js/JsPluginConfigDirRegistration.java
new file mode 100644
index 00000000000..96b36d7d157
--- /dev/null
+++ b/server/src/main/java/io/deephaven/server/plugin/js/JsPluginConfigDirRegistration.java
@@ -0,0 +1,65 @@
+/**
+ * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending
+ */
+package io.deephaven.server.plugin.js;
+
+import dagger.Binds;
+import dagger.multibindings.IntoSet;
+import io.deephaven.configuration.ConfigDir;
+import io.deephaven.plugin.Registration;
+import io.deephaven.plugin.js.JsPlugin;
+
+import javax.inject.Inject;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.List;
+
+import static io.deephaven.server.plugin.js.JsPluginManifest.MANIFEST_JSON;
+
+
+/**
+ * Registers the {@link JsPlugin JS plugins} sourced from the {@link JsPluginManifest manifest} root located at
+ * {@link ConfigDir} / {@value JS_PLUGINS} (if {@value io.deephaven.server.plugin.js.JsPluginManifest#MANIFEST_JSON}
+ * exists).
+ */
+public final class JsPluginConfigDirRegistration implements Registration {
+
+    public static final String JS_PLUGINS = "js-plugins";
+
+    /**
+     * Binds {@link JsPluginConfigDirRegistration} into the set of {@link Registration}.
+     */
+    @dagger.Module
+    public interface Module {
+        @Binds
+        @IntoSet
+        Registration bindsRegistration(JsPluginConfigDirRegistration registration);
+    }
+
+    @Inject
+    JsPluginConfigDirRegistration() {}
+
+    @Override
+    public void registerInto(Callback callback) {
+        // <configDir>/js-plugins/ (manifest root)
+        final Path manifestRoot = ConfigDir.get()
+                .map(p -> p.resolve(JS_PLUGINS).resolve(MANIFEST_JSON))
+                .filter(Files::exists)
+                .map(Path::getParent)
+                .orElse(null);
+        if (manifestRoot == null) {
+            return;
+        }
+        final List<JsPlugin> plugins;
+        try {
+            plugins = JsPluginsFromManifest.of(manifestRoot);
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
+        for (JsPlugin plugin : plugins) {
+            callback.register(plugin);
+        }
+    }
+}
diff --git a/server/src/main/java/io/deephaven/server/plugin/js/JsPluginFromNpmPackage.java b/server/src/main/java/io/deephaven/server/plugin/js/JsPluginFromNpmPackage.java
new file mode 100644
index 00000000000..a966904ba62
--- /dev/null
+++ b/server/src/main/java/io/deephaven/server/plugin/js/JsPluginFromNpmPackage.java
@@ -0,0 +1,40 @@
+/**
+ * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending
+ */
+package io.deephaven.server.plugin.js;
+
+import io.deephaven.plugin.js.JsPlugin;
+import io.deephaven.plugin.js.JsPlugin.Builder;
+import io.deephaven.plugin.js.Paths;
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+class JsPluginFromNpmPackage {
+
+    static JsPlugin of(Path packageRoot) throws IOException {
+        final Path packageJsonPath = packageRoot.resolve(JsPluginNpmPackageRegistration.PACKAGE_JSON);
+        final NpmPackage packageJson = NpmPackage.read(packageJsonPath);
+        final Path main = packageRoot.relativize(packageRoot.resolve(packageJson.main()));
+        final Paths paths;
+        if (main.getNameCount() > 1) {
+            // We're requiring that all of the necessary files to serve be under the top-level directory sourced
+            // from package.json/main. For example, "build/index.js" -> "build", "dist/bundle/index.js" -> "dist".
+            // This supports development use cases where the top-level directory may be interspersed with unrelated
+            // development files (node_modules, .git, etc).
+            //
+            // Note: this logic only comes into play for development use cases where plugins are configured via
+            // deephaven.jsPlugins.myPlugin=/path/to/my/js
+            paths = Paths.ofPrefixes(main.subpath(0, 1));
+        } else {
+            paths = Paths.all();
+        }
+        final Builder builder = JsPlugin.builder()
+                .name(packageJson.name())
+                .version(packageJson.version())
+                .main(main)
+                .path(packageRoot)
+                .paths(paths);
+        return builder.build();
+    }
+}
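The served-prefix computation above leans entirely on `java.nio.file.Path.subpath` semantics. A quick standalone check of the two branches (the paths are invented for illustration):

```java
import java.nio.file.Path;

// Quick check of the prefix logic: the served prefix is the first name
// element of package.json's "main" entry, unless "main" is flat.
public class MainPrefixDemo {
    public static void main(String[] args) {
        Path main = Path.of("dist/bundle/index.js");
        System.out.println(main.getNameCount()); // 3
        System.out.println(main.subpath(0, 1));  // dist -> Paths.ofPrefixes("dist")

        Path flat = Path.of("index.js");
        System.out.println(flat.getNameCount()); // 1 -> Paths.all() is used instead
    }
}
```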
JsPluginManifestEntry of( return ImmutableJsPluginManifestEntry.of(name, version, main); } + public static JsPluginManifestEntry from(JsPlugin plugin) { + return of(plugin.name(), plugin.version(), plugin.main().toString()); + } + /** * The name of the plugin. */ diff --git a/server/src/main/java/io/deephaven/server/plugin/js/JsPluginManifestRegistration.java b/server/src/main/java/io/deephaven/server/plugin/js/JsPluginManifestRegistration.java new file mode 100644 index 00000000000..93f1aa66f39 --- /dev/null +++ b/server/src/main/java/io/deephaven/server/plugin/js/JsPluginManifestRegistration.java @@ -0,0 +1,56 @@ +/** + * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending + */ +package io.deephaven.server.plugin.js; + +import dagger.Binds; +import dagger.multibindings.IntoSet; +import io.deephaven.configuration.Configuration; +import io.deephaven.plugin.Registration; +import io.deephaven.plugin.js.JsPlugin; + +import javax.inject.Inject; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Path; +import java.util.List; + +/** + * Registers the {@link JsPlugin JS plugins} sourced from the {@link JsPluginManifest manifest} root configuration + * property {@value JsPluginManifestRegistration#JS_PLUGIN_RESOURCE_BASE}. + */ +public final class JsPluginManifestRegistration implements Registration { + + public static final String JS_PLUGIN_RESOURCE_BASE = JsPluginModule.DEEPHAVEN_JS_PLUGINS_PREFIX + "resourceBase"; + + /** + * Binds {@link JsPluginManifestRegistration} into the set of {@link Registration}. + */ + @dagger.Module + public interface Module { + @Binds + @IntoSet + Registration bindsRegistration(JsPluginManifestRegistration registration); + } + + @Inject + JsPluginManifestRegistration() {} + + @Override + public void registerInto(Callback callback) { + // deephaven.jsPlugins.resourceBase (manifest root) + final String resourceBase = Configuration.getInstance().getStringWithDefault(JS_PLUGIN_RESOURCE_BASE, null); + if (resourceBase == null) { + return; + } + final List<JsPlugin> plugins; + try { + plugins = JsPluginsFromManifest.of(Path.of(resourceBase)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + for (JsPlugin plugin : plugins) { + callback.register(plugin); + } + } +} diff --git a/server/src/main/java/io/deephaven/server/plugin/js/JsPluginModule.java b/server/src/main/java/io/deephaven/server/plugin/js/JsPluginModule.java index 06f0dcf4ea9..1b966150836 100644 --- a/server/src/main/java/io/deephaven/server/plugin/js/JsPluginModule.java +++ b/server/src/main/java/io/deephaven/server/plugin/js/JsPluginModule.java @@ -4,116 +4,19 @@ package io.deephaven.server.plugin.js; import dagger.Module; -import dagger.Provides; -import dagger.multibindings.ElementsIntoSet; import io.deephaven.configuration.ConfigDir; -import io.deephaven.configuration.Configuration; -import io.deephaven.plugin.Registration; -import io.deephaven.plugin.js.JsPluginManifestPath; -import io.deephaven.plugin.js.JsPluginPackagePath; - -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.HashSet; -import java.util.Iterator; -import java.util.Optional; -import java.util.Set; /** - * Provides the {@link JsPluginManifestPath manifest path} of {@value JS_PLUGIN_RESOURCE_BASE} if the configuration - * property is set. Provides the {@link JsPluginManifestPath manifest path} of {@link ConfigDir} / {@value JS_PLUGINS} - * if {@value JsPluginManifestPath#MANIFEST_JSON} exists.
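(Editor's aside, not part of the diff: the three new registration classes all use the same Dagger multibinding shape. A minimal, self-contained sketch of that pattern, with Registration and MyRegistration standing in for the real types:)

    import dagger.Binds;
    import dagger.Module;
    import dagger.multibindings.IntoSet;

    // Stand-ins for io.deephaven.plugin.Registration and a concrete registration;
    // the real classes carry @Inject constructors so Dagger can instantiate them.
    interface Registration {}
    final class MyRegistration implements Registration {}

    // Each registration contributes itself into the multibound Set<Registration>
    // that the server walks at startup; a parent module only lists the nested
    // modules in its 'includes' clause.
    @Module
    interface MyRegistrationModule {
        @Binds
        @IntoSet
        Registration bindsRegistration(MyRegistration registration);
    }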
Provides the {@link JsPluginPackagePath package path} for all - * configuration properties that start with {@value DEEPHAVEN_JS_PLUGINS_PREFIX} and have a single part after. + * Includes the modules {@link JsPluginManifestRegistration.Module}, {@link JsPluginConfigDirRegistration.Module}, and + * {@link JsPluginNpmPackageRegistration.Module}; these modules add various means of configuration support for producing + * and registering {@link io.deephaven.plugin.js.JsPlugin}. */ -@Module +@Module(includes = { + JsPluginManifestRegistration.Module.class, + JsPluginConfigDirRegistration.Module.class, + JsPluginNpmPackageRegistration.Module.class, +}) public interface JsPluginModule { String DEEPHAVEN_JS_PLUGINS_PREFIX = "deephaven.jsPlugins."; - String JS_PLUGIN_RESOURCE_BASE = DEEPHAVEN_JS_PLUGINS_PREFIX + "resourceBase"; - String JS_PLUGINS = "js-plugins"; - - @Provides - @ElementsIntoSet - static Set<Registration> providesResourceBaseRegistration() { - return jsPluginsResourceBase() - .map(Registration.class::cast) - .map(Set::of) - .orElseGet(Set::of); - } - - @Provides - @ElementsIntoSet - static Set<Registration> providesConfigDirRegistration() { - return jsPluginsConfigDir() - .map(Registration.class::cast) - .map(Set::of) - .orElseGet(Set::of); - } - - @Provides - @ElementsIntoSet - static Set<Registration> providesPackageRoots() { - return Set.copyOf(jsPluginsPackageRoots()); - } - - // deephaven.jsPlugins.resourceBase (manifest root) - private static Optional<JsPluginManifestPath> jsPluginsResourceBase() { - final String resourceBase = Configuration.getInstance().getStringWithDefault(JS_PLUGIN_RESOURCE_BASE, null); - return Optional.ofNullable(resourceBase) - .map(Path::of) - .map(JsPluginManifestPath::of); - } - - // <configDir>/js-plugins/ (manifest root) - private static Optional<JsPluginManifestPath> jsPluginsConfigDir() { - return ConfigDir.get() - .map(JsPluginModule::resolveJsPlugins) - .map(JsPluginManifestPath::of) - .filter(JsPluginModule::manifestJsonExists); - } - - private static Path resolveJsPlugins(Path p) { - return p.resolve(JS_PLUGINS); - } - - private static boolean manifestJsonExists(JsPluginManifestPath path) { - return Files.exists(path.manifestJson()); - } - - // deephaven.jsPlugins.<part> (package root) - private static Set<JsPluginPackagePath> jsPluginsPackageRoots() { - final Configuration config = Configuration.getInstance(); - final Set<String> parts = partsThatStartWith(DEEPHAVEN_JS_PLUGINS_PREFIX, config); - final Set<JsPluginPackagePath> packageRoots = new HashSet<>(parts.size()); - for (String part : parts) { - final String propertyName = DEEPHAVEN_JS_PLUGINS_PREFIX + part; - if (JS_PLUGIN_RESOURCE_BASE.equals(propertyName)) { - // handled by jsPluginsResourceBase - continue; - } - final String packageRoot = config.getStringWithDefault(propertyName, null); - if (packageRoot == null) { - continue; - } - packageRoots.add(JsPluginPackagePath.of(Path.of(packageRoot))); - } - return packageRoots; - } - - private static Set<String> partsThatStartWith(String prefix, Configuration configuration) { - final Set<String> parts = new HashSet<>(); - final Iterator<Object> it = configuration.getProperties(prefix).keys().asIterator(); - while (it.hasNext()) { - final Object next = it.next(); - if (next instanceof String) { - parts.add(firstPart((String) next)); - } - } - return parts; - } - - private static String firstPart(String x) { - final int index = x.indexOf('.'); - return index == -1 ? 
x : x.substring(0, index); - } } diff --git a/server/src/main/java/io/deephaven/server/plugin/js/JsPluginNpmPackageRegistration.java b/server/src/main/java/io/deephaven/server/plugin/js/JsPluginNpmPackageRegistration.java new file mode 100644 index 00000000000..0acf62337ce --- /dev/null +++ b/server/src/main/java/io/deephaven/server/plugin/js/JsPluginNpmPackageRegistration.java @@ -0,0 +1,130 @@ +/** + * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending + */ +package io.deephaven.server.plugin.js; + +import dagger.Binds; +import dagger.multibindings.IntoSet; +import io.deephaven.configuration.Configuration; +import io.deephaven.plugin.Registration; +import io.deephaven.plugin.js.JsPlugin; + +import javax.inject.Inject; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.URI; +import java.nio.file.FileSystem; +import java.nio.file.FileSystemNotFoundException; +import java.nio.file.FileSystems; +import java.nio.file.Path; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; + +import static io.deephaven.server.plugin.js.JsPluginManifestRegistration.JS_PLUGIN_RESOURCE_BASE; +import static io.deephaven.server.plugin.js.JsPluginModule.DEEPHAVEN_JS_PLUGINS_PREFIX; + +/** + * Registers the {@link JsPlugin JS plugins} sourced from the NPM package roots as specified via the configuration + * properties that start with {@value io.deephaven.server.plugin.js.JsPluginModule#DEEPHAVEN_JS_PLUGINS_PREFIX}. This + * configuration is meant for development-oriented use-cases and is done on a "best-effort" basis. + * + *
<p>
+ * The configuration value of the above property corresponds to the {@link JsPlugin#path()} directory. A + * {@value PACKAGE_JSON} must exist in this directory (as specified via + * package-json). The {@value NAME} json value + * corresponds to {@link JsPlugin#name()}, the {@value VERSION} json value corresponds to {@link JsPlugin#version()}, + * and the {@value MAIN} json value corresponds to {@link JsPlugin#main()}. Furthermore, the top-level directory of the + * {@value MAIN} json value will be used to set {@link JsPlugin#paths()} using + * {@link io.deephaven.plugin.js.Paths#ofPrefixes(Path)}; for example, a {@value MAIN} of "build/index.js" will limit + * the resources to the "build/" directory; a {@value MAIN} of "dist/bundle/index.js" will limit the resources to the + * "dist/" directory. + */ +public final class JsPluginNpmPackageRegistration implements Registration { + public static final String PACKAGE_JSON = "package.json"; + public static final String NAME = "name"; + public static final String VERSION = "version"; + public static final String MAIN = "main"; + + // TODO(deephaven-core#4817): JS Plugins development is slow + // We may wish to make parsing NPM package.json easier with a bespoke "deephaven" field, or try to exactly match + // the semantics of the existing "files" field. This may not be necessary if the top-level directory of "main" works + // well enough, or if we have a non-copying jetty route-based impl. + + /** + * Binds {@link JsPluginNpmPackageRegistration} into the set of {@link Registration}. + */ + @dagger.Module + public interface Module { + @Binds + @IntoSet + Registration bindsRegistration(JsPluginNpmPackageRegistration registration); + } + + @Inject + JsPluginNpmPackageRegistration() {} + + @Override + public void registerInto(Callback callback) { + // deephaven.jsPlugins. 
<part> (package root) + final Configuration config = Configuration.getInstance(); + final Set<String> parts = partsThatStartWith(DEEPHAVEN_JS_PLUGINS_PREFIX, config); + for (String part : parts) { + final String propertyName = DEEPHAVEN_JS_PLUGINS_PREFIX + part; + if (JS_PLUGIN_RESOURCE_BASE.equals(propertyName)) { + // handled by jsPluginsResourceBase + continue; + } + final String packageRoot = config.getStringWithDefault(propertyName, null); + if (packageRoot == null) { + continue; + } + URI uri = URI.create(packageRoot); + if (uri.getScheme() == null) { + uri = URI.create("file:" + packageRoot); + } + final FileSystem fileSystem = getOrCreateFileSystem(uri); + final JsPlugin plugin; + try { + plugin = JsPluginFromNpmPackage.of(fileSystem.provider().getPath(uri)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + callback.register(plugin); + } + } + + private static FileSystem getOrCreateFileSystem(URI uri) { + if ("file".equalsIgnoreCase(uri.getScheme())) { + return FileSystems.getDefault(); + } + try { + return FileSystems.getFileSystem(uri); + } catch (FileSystemNotFoundException e) { + // ignore + } + try { + return FileSystems.newFileSystem(uri, Map.of()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private static Set<String> partsThatStartWith(String prefix, Configuration configuration) { + final Set<String> parts = new HashSet<>(); + final Iterator<Object> it = configuration.getProperties(prefix).keys().asIterator(); + while (it.hasNext()) { + final Object next = it.next(); + if (next instanceof String) { + parts.add(firstPart((String) next)); + } + } + return parts; + } + + private static String firstPart(String x) { + final int index = x.indexOf('.'); + return index == -1 ? x : x.substring(0, index); + } +} diff --git a/server/src/main/java/io/deephaven/server/plugin/js/JsPluginsFromManifest.java b/server/src/main/java/io/deephaven/server/plugin/js/JsPluginsFromManifest.java new file mode 100644 index 00000000000..c07540d27db --- /dev/null +++ b/server/src/main/java/io/deephaven/server/plugin/js/JsPluginsFromManifest.java @@ -0,0 +1,33 @@ +/** + * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending + */ +package io.deephaven.server.plugin.js; + +import io.deephaven.plugin.js.JsPlugin; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + +class JsPluginsFromManifest { + + static List<JsPlugin> of(Path manifestRoot) throws IOException { + final JsPluginManifest manifest = JsPluginManifest.read(manifestRoot.resolve(JsPluginManifest.MANIFEST_JSON)); + final List<JsPlugin> plugins = new ArrayList<>(manifest.plugins().size()); + for (JsPluginManifestEntry entry : manifest.plugins()) { + final Path pluginPath = manifestRoot.resolve(entry.name()); + final Path pluginMain = pluginPath.relativize(pluginPath.resolve(entry.main())); + final JsPlugin plugin = JsPlugin.builder() + .name(entry.name()) + .version(entry.version()) + .main(pluginMain) + .path(pluginPath) + .build(); + // We expect manifests to be "production" use cases - they should already be packed as appropriate. + // Additionally, there is no strict requirement that they have package.json anyways.
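(Editor's illustration, not part of the diff: how a manifest root maps onto these entries. The paths and names below are hypothetical.)

    // Given deephaven.jsPlugins.resourceBase=/opt/js-plugins and this layout:
    //   /opt/js-plugins/manifest.json            -> { "plugins": [ { "name": "my-plugin", "version": "1.0.0", "main": "dist/index.js" } ] }
    //   /opt/js-plugins/my-plugin/dist/index.js
    // the loop above resolves pluginPath=/opt/js-plugins/my-plugin and pluginMain=dist/index.js,
    // which is equivalent to building the entry directly via the API shown in this diff:
    JsPluginManifestEntry entry = JsPluginManifestEntry.of("my-plugin", "1.0.0", "dist/index.js");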
+ plugins.add(plugin); + } + return plugins; + } +} diff --git a/server/src/main/java/io/deephaven/server/plugin/js/NpmPackage.java b/server/src/main/java/io/deephaven/server/plugin/js/NpmPackage.java new file mode 100644 index 00000000000..711dfa34831 --- /dev/null +++ b/server/src/main/java/io/deephaven/server/plugin/js/NpmPackage.java @@ -0,0 +1,49 @@ +/** + * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending + */ +package io.deephaven.server.plugin.js; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import io.deephaven.annotations.SimpleStyle; +import org.immutables.value.Value.Immutable; +import org.immutables.value.Value.Parameter; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; + +import static io.deephaven.server.plugin.js.Jackson.OBJECT_MAPPER; + +@Immutable +@SimpleStyle +abstract class NpmPackage { + + @JsonCreator + public static NpmPackage of( + @JsonProperty(value = JsPluginNpmPackageRegistration.NAME, required = true) String name, + @JsonProperty(value = JsPluginNpmPackageRegistration.VERSION, required = true) String version, + @JsonProperty(value = JsPluginNpmPackageRegistration.MAIN, required = true) String main) { + return ImmutableNpmPackage.of(name, version, main); + } + + public static NpmPackage read(Path packageJson) throws IOException { + // jackson impl does buffering internally + try (final InputStream in = Files.newInputStream(packageJson)) { + return OBJECT_MAPPER.readValue(in, NpmPackage.class); + } + } + + @Parameter + @JsonProperty(JsPluginNpmPackageRegistration.NAME) + public abstract String name(); + + @Parameter + @JsonProperty(JsPluginNpmPackageRegistration.VERSION) + public abstract String version(); + + @Parameter + @JsonProperty(JsPluginNpmPackageRegistration.MAIN) + public abstract String main(); +} diff --git a/server/src/main/java/io/deephaven/server/plugin/python/CallbackAdapter.java b/server/src/main/java/io/deephaven/server/plugin/python/CallbackAdapter.java index 27e2fcc09bd..7270a895b61 100644 --- a/server/src/main/java/io/deephaven/server/plugin/python/CallbackAdapter.java +++ b/server/src/main/java/io/deephaven/server/plugin/python/CallbackAdapter.java @@ -4,6 +4,7 @@ package io.deephaven.server.plugin.python; import io.deephaven.plugin.Registration.Callback; +import io.deephaven.plugin.js.JsPlugin; import org.jpy.PyObject; class CallbackAdapter { @@ -18,4 +19,9 @@ public CallbackAdapter(Callback callback) { public void registerObjectType(String name, PyObject objectTypeAdapter) { callback.register(new ObjectTypeAdapter(name, objectTypeAdapter)); } + + @SuppressWarnings("unused") + public void registerJsPlugin(JsPlugin jsPlugin) { + callback.register(jsPlugin); + } } diff --git a/server/src/main/java/io/deephaven/server/runner/DeephavenApiServer.java b/server/src/main/java/io/deephaven/server/runner/DeephavenApiServer.java index a13a29da41e..8ccc367a6b7 100644 --- a/server/src/main/java/io/deephaven/server/runner/DeephavenApiServer.java +++ b/server/src/main/java/io/deephaven/server/runner/DeephavenApiServer.java @@ -144,9 +144,6 @@ public DeephavenApiServer run() throws IOException, ClassNotFoundException, Time // noinspection resource executionContextProvider.get().open(); - log.info().append("Starting Operation Initialization Thread Pool...").endl(); - OperationInitializationThreadPool.start(); - log.info().append("Starting Update Graph...").endl(); getUpdateGraph().cast().start(); diff --git 
a/server/src/main/java/io/deephaven/server/runner/scheduler/SchedulerModule.java b/server/src/main/java/io/deephaven/server/runner/scheduler/SchedulerModule.java index c6710570b38..26f4b0b32c5 100644 --- a/server/src/main/java/io/deephaven/server/runner/scheduler/SchedulerModule.java +++ b/server/src/main/java/io/deephaven/server/runner/scheduler/SchedulerModule.java @@ -2,6 +2,7 @@ import dagger.Module; import dagger.Provides; +import dagger.multibindings.ElementsIntoSet; import io.deephaven.base.clock.Clock; import io.deephaven.chunk.util.pools.MultiChunkPool; import io.deephaven.engine.context.ExecutionContext; @@ -16,6 +17,8 @@ import javax.inject.Named; import javax.inject.Singleton; +import java.util.Collections; +import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -31,13 +34,25 @@ */ @Module public class SchedulerModule { + @Provides + @ElementsIntoSet + static Set<ThreadInitializationFactory> primeThreadInitializers() { + return Collections.emptySet(); + } + + @Provides + static ThreadInitializationFactory provideThreadInitializationFactory(Set<ThreadInitializationFactory> factories) { + return ThreadInitializationFactory.of(factories); + } @Provides @Singleton public static Scheduler provideScheduler( final @Named(PeriodicUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME) UpdateGraph updateGraph, - final @Named("scheduler.poolSize") int poolSize) { - final ThreadFactory concurrentThreadFactory = new ThreadFactory("Scheduler-Concurrent", updateGraph); + final @Named("scheduler.poolSize") int poolSize, + final ThreadInitializationFactory initializationFactory) { + final ThreadFactory concurrentThreadFactory = + new ThreadFactory("Scheduler-Concurrent", updateGraph, initializationFactory); + final ScheduledExecutorService concurrentExecutor = new ScheduledThreadPoolExecutor(poolSize, concurrentThreadFactory) { @Override @@ -47,7 +62,8 @@ protected void afterExecute(final Runnable task, final Throwable error) { } }; - final ThreadFactory serialThreadFactory = new ThreadFactory("Scheduler-Serial", updateGraph); + final ThreadFactory serialThreadFactory = + new ThreadFactory("Scheduler-Serial", updateGraph, initializationFactory); final ExecutorService serialExecutor = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(), serialThreadFactory) { @@ -63,15 +79,18 @@ protected void afterExecute(final Runnable task, final Throwable error) { private static class ThreadFactory extends NamingThreadFactory { private final UpdateGraph updateGraph; + private final ThreadInitializationFactory initializationFactory; - public ThreadFactory(final String name, final UpdateGraph updateGraph) { + public ThreadFactory(final String name, final UpdateGraph updateGraph, + ThreadInitializationFactory initializationFactory) { super(DeephavenApiServer.class, name); this.updateGraph = updateGraph; + this.initializationFactory = initializationFactory; } @Override public Thread newThread(@NotNull final Runnable r) { - return super.newThread(ThreadInitializationFactory.wrapRunnable(() -> { + return super.newThread(initializationFactory.createInitializer(() -> { MultiChunkPool.enableDedicatedPoolForThisThread(); // noinspection resource ExecutionContext.getContext().withUpdateGraph(updateGraph).open(); diff --git a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java index 673e39e35b1..456afdfdac4 100644 --- 
a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java @@ -10,7 +10,8 @@ import io.deephaven.engine.table.TableDefinition; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; -import io.deephaven.engine.util.config.MutableInputTable; +import io.deephaven.engine.util.input.InputTableStatusListener; +import io.deephaven.engine.util.input.InputTableUpdater; import io.deephaven.extensions.barrage.util.GrpcUtil; import io.deephaven.internal.log.LoggerFactory; import io.deephaven.io.logger.Logger; @@ -74,13 +75,13 @@ public void addTableToInputTable( .onError(responseObserver) .require(targetTable, tableToAddExport) .submit(() -> { - Object inputTable = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - if (!(inputTable instanceof MutableInputTable)) { + Object inputTableAsObject = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE); + if (!(inputTableAsObject instanceof InputTableUpdater)) { throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "Table can't be used as an input table"); } - MutableInputTable mutableInputTable = (MutableInputTable) inputTable; + final InputTableUpdater inputTableUpdater = (InputTableUpdater) inputTableAsObject; Table tableToAdd = tableToAddExport.get(); authWiring.checkPermissionAddTableToInputTable( @@ -89,20 +90,25 @@ public void addTableToInputTable( // validate that the columns are compatible try { - mutableInputTable.validateAddOrModify(tableToAdd); + inputTableUpdater.validateAddOrModify(tableToAdd); } catch (TableDefinition.IncompatibleTableDefinitionException exception) { throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "Provided tables's columns are not compatible: " + exception.getMessage()); } // actually add the tables contents - try { - mutableInputTable.add(tableToAdd); - GrpcUtil.safelyComplete(responseObserver, AddTableResponse.getDefaultInstance()); - } catch (IOException ioException) { - throw Exceptions.statusRuntimeException(Code.DATA_LOSS, - "Error adding table to input table"); - } + inputTableUpdater.addAsync(tableToAdd, new InputTableStatusListener() { + @Override + public void onSuccess() { + GrpcUtil.safelyComplete(responseObserver, AddTableResponse.getDefaultInstance()); + } + + @Override + public void onError(Throwable t) { + GrpcUtil.safelyError(responseObserver, Exceptions.statusRuntimeException(Code.DATA_LOSS, + "Error adding table to input table")); + } + }); }); } } @@ -132,13 +138,13 @@ public void deleteTableFromInputTable( .onError(responseObserver) .require(targetTable, tableToRemoveExport) .submit(() -> { - Object inputTable = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - if (!(inputTable instanceof MutableInputTable)) { + Object inputTableAsObject = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE); + if (!(inputTableAsObject instanceof InputTableUpdater)) { throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "Table can't be used as an input table"); } - MutableInputTable mutableInputTable = (MutableInputTable) inputTable; + final InputTableUpdater inputTableUpdater = (InputTableUpdater) inputTableAsObject; Table tableToRemove = tableToRemoveExport.get(); authWiring.checkPermissionDeleteTableFromInputTable( @@ -147,7 +153,7 @@ public void deleteTableFromInputTable( // validate that the columns are compatible try { - 
mutableInputTable.validateDelete(tableToRemove); + inputTableUpdater.validateDelete(tableToRemove); } catch (TableDefinition.IncompatibleTableDefinitionException exception) { throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "Provided tables's columns are not compatible: " + exception.getMessage()); @@ -157,13 +163,18 @@ public void deleteTableFromInputTable( } // actually delete the table's contents - try { - mutableInputTable.delete(tableToRemove); - GrpcUtil.safelyComplete(responseObserver, DeleteTableResponse.getDefaultInstance()); - } catch (IOException ioException) { - throw Exceptions.statusRuntimeException(Code.DATA_LOSS, - "Error deleting table from inputtable"); - } + inputTableUpdater.deleteAsync(tableToRemove, new InputTableStatusListener() { + @Override + public void onSuccess() { + GrpcUtil.safelyComplete(responseObserver, DeleteTableResponse.getDefaultInstance()); + } + + @Override + public void onError(Throwable t) { + GrpcUtil.safelyError(responseObserver, Exceptions.statusRuntimeException(Code.DATA_LOSS, + "Error deleting table from inputtable")); + } + }); }); } } diff --git a/server/src/main/java/io/deephaven/server/table/ops/CreateInputTableGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/ops/CreateInputTableGrpcImpl.java index 717542465a8..2889e9a0afa 100644 --- a/server/src/main/java/io/deephaven/server/table/ops/CreateInputTableGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/ops/CreateInputTableGrpcImpl.java @@ -8,14 +8,16 @@ import io.deephaven.datastructures.util.CollectionUtil; import io.deephaven.engine.table.Table; import io.deephaven.engine.table.TableDefinition; -import io.deephaven.engine.table.impl.util.AppendOnlyArrayBackedMutableTable; -import io.deephaven.engine.table.impl.util.KeyedArrayBackedMutableTable; +import io.deephaven.engine.table.impl.util.AppendOnlyArrayBackedInputTable; +import io.deephaven.engine.table.impl.util.KeyedArrayBackedInputTable; import io.deephaven.extensions.barrage.util.BarrageUtil; import io.deephaven.proto.backplane.grpc.BatchTableRequest; import io.deephaven.proto.backplane.grpc.CreateInputTableRequest; +import io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.KindCase; import io.deephaven.proto.flight.util.SchemaHelper; import io.deephaven.proto.util.Exceptions; import io.deephaven.server.session.SessionState; +import io.deephaven.stream.TablePublisher; import io.grpc.StatusRuntimeException; import org.apache.arrow.flatbuf.Schema; @@ -23,6 +25,7 @@ import javax.inject.Singleton; import java.util.Collections; import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; import static io.deephaven.proto.backplane.grpc.CreateInputTableRequest.InputTableKind.KindCase.KIND_NOT_SET; @@ -34,6 +37,8 @@ public class CreateInputTableGrpcImpl extends GrpcTableOperation provideAbstractScriptSession(final UpdateGraph updateGraph) { - return new NoLanguageDeephavenSession(updateGraph, "non-script-session"); + return new NoLanguageDeephavenSession(updateGraph, ThreadInitializationFactory.NO_OP, "non-script-session"); } @Provides @@ -186,11 +204,13 @@ public interface TestComponent { ExecutionContext executionContext(); TestAuthorizationProvider authorizationProvider(); + + Registration.Callback registration(); } private LogBuffer logBuffer; private GrpcServer server; - + protected int localPort; private FlightClient flightClient; protected SessionService sessionService; @@ -199,7 +219,7 @@ public interface TestComponent { private AbstractScriptSession 
scriptSession; private SafeCloseable executionContext; private Location serverLocation; - private TestComponent component; + protected TestComponent component; private ManagedChannel clientChannel; private ScheduledExecutorService clientScheduler; @@ -221,12 +241,12 @@ public void setup() throws IOException { server = component.server(); server.start(); - int actualPort = server.getPort(); + localPort = server.getPort(); scriptSession = component.scriptSession(); sessionService = component.sessionService(); - serverLocation = Location.forGrpcInsecure("localhost", actualPort); + serverLocation = Location.forGrpcInsecure("localhost", localPort); currentSession = sessionService.newSession(new AuthContext.SuperUser()); flightClient = FlightClient.builder().location(serverLocation) .allocator(new RootAllocator()).intercept(info -> new FlightClientMiddleware() { @@ -243,7 +263,7 @@ public void onHeadersReceived(CallHeaders incomingHeaders) {} public void onCallCompleted(CallStatus status) {} }).build(); - clientChannel = ManagedChannelBuilder.forTarget("localhost:" + actualPort) + clientChannel = ManagedChannelBuilder.forTarget("localhost:" + localPort) .usePlaintext() .intercept(new TestAuthClientInterceptor(currentSession.getExpiration().token.toString())) .build(); diff --git a/settings.gradle b/settings.gradle index f1474fcba81..587ce55a9d7 100644 --- a/settings.gradle +++ b/settings.gradle @@ -137,10 +137,6 @@ include(':DataStructures') include(':Configuration') -include(':FishUtil') - -include(':Net') - include(':Stats') include(':Container') diff --git a/sphinx/source/conf.py b/sphinx/source/conf.py index 61ae8ca5b95..27accbdb119 100644 --- a/sphinx/source/conf.py +++ b/sphinx/source/conf.py @@ -108,7 +108,8 @@ _JUpdateGraph = jpy.get_type("io.deephaven.engine.updategraph.impl.PeriodicUpdateGraph") docs_update_graph = _JUpdateGraph.newBuilder("PYTHON_DOCS").build() _JPythonScriptSession = jpy.get_type("io.deephaven.integrations.python.PythonDeephavenSession") -py_dh_session = _JPythonScriptSession(docs_update_graph, py_scope_jpy) +no_op_operation_initializer = jpy.get_type("io.deephaven.util.thread.ThreadInitializationFactory").NO_OP +py_dh_session = _JPythonScriptSession(docs_update_graph, no_op_operation_initializer, py_scope_jpy) py_dh_session.getExecutionContext().open() pygments_style = 'sphinx' diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/JsPartitionedTable.java b/web/client-api/src/main/java/io/deephaven/web/client/api/JsPartitionedTable.java index a00f093942b..628ee3d5180 100644 --- a/web/client-api/src/main/java/io/deephaven/web/client/api/JsPartitionedTable.java +++ b/web/client-api/src/main/java/io/deephaven/web/client/api/JsPartitionedTable.java @@ -1,6 +1,7 @@ package io.deephaven.web.client.api; import elemental2.core.JsArray; +import elemental2.core.JsObject; import elemental2.core.JsSet; import elemental2.dom.CustomEvent; import elemental2.dom.CustomEventInit; @@ -9,9 +10,11 @@ import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.partitionedtable_pb.GetTableRequest; import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.partitionedtable_pb.MergeRequest; import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.partitionedtable_pb.PartitionedTableDescriptor; +import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.table_pb.DropColumnsRequest; import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.ticket_pb.TypedTicket; import io.deephaven.web.client.api.barrage.WebBarrageUtils; import 
io.deephaven.web.client.api.barrage.def.ColumnDefinition; +import io.deephaven.web.client.api.barrage.def.InitialTableDefinition; import io.deephaven.web.client.api.lifecycle.HasLifecycle; import io.deephaven.web.client.api.subscription.SubscriptionTableData; import io.deephaven.web.client.api.subscription.TableSubscription; @@ -21,6 +24,7 @@ import io.deephaven.web.shared.data.RangeSet; import io.deephaven.web.shared.fu.JsConsumer; import jsinterop.annotations.JsIgnore; +import jsinterop.annotations.JsMethod; import jsinterop.annotations.JsProperty; import jsinterop.annotations.JsType; import jsinterop.base.Js; @@ -64,6 +68,10 @@ public class JsPartitionedTable extends HasLifecycle implements ServerObject { */ private final Map, JsLazy>> tables = new HashMap<>(); + private Column[] keyColumns; + + private Column[] columns; + @JsIgnore public JsPartitionedTable(WorkerConnection connection, JsWidget widget) { @@ -83,14 +91,22 @@ public Promise refetch() { descriptor = PartitionedTableDescriptor.deserializeBinary(w.getDataAsU8()); keyColumnTypes = new ArrayList<>(); - ColumnDefinition[] columnDefinitions = WebBarrageUtils.readColumnDefinitions( + InitialTableDefinition tableDefinition = WebBarrageUtils.readTableDefinition( WebBarrageUtils.readSchemaMessage(descriptor.getConstituentDefinitionSchema_asU8())); + ColumnDefinition[] columnDefinitions = tableDefinition.getColumns(); + Column[] columns = new Column[0]; + Column[] keyColumns = new Column[0]; for (int i = 0; i < columnDefinitions.length; i++) { ColumnDefinition columnDefinition = columnDefinitions[i]; + Column column = columnDefinition.makeJsColumn(columns.length, tableDefinition.getColumnsByName()); + columns[columns.length] = column; if (descriptor.getKeyColumnNamesList().indexOf(columnDefinition.getName()) != -1) { keyColumnTypes.add(columnDefinition.getType()); + keyColumns[keyColumns.length] = column; } } + this.columns = JsObject.freeze(columns); + this.keyColumns = JsObject.freeze(keyColumns); return w.getExportedObjects()[0].fetch(); }).then(result -> { @@ -188,7 +204,7 @@ private void populateLazyTable(List key) { /** * Fetch the table with the given key. - * + * * @param key The key to fetch. An array of values for each key column, in the same order as the key columns are. * @return Promise of dh.Table */ @@ -211,7 +227,7 @@ public Promise getTable(Object key) { /** * Open a new table that is the result of merging all constituent tables. See * {@link io.deephaven.engine.table.PartitionedTable#merge()} for details. - * + * * @return A merged representation of the constituent tables. */ public Promise getMergedTable() { @@ -228,7 +244,7 @@ public Promise getMergedTable() { /** * The set of all currently known keys. This is kept up to date, so getting the list after adding an event listener * for keyadded will ensure no keys are missed. - * + * * @return Set of Object */ public JsSet getKeys() { @@ -240,7 +256,7 @@ public JsSet getKeys() { /** * The count of known keys. - * + * * @return int */ @JsProperty(name = "size") @@ -248,6 +264,45 @@ public int size() { return tables.size(); } + /** + * An array of all the key columns that the tables are partitioned by. + * + * @return Array of Column + */ + @JsProperty + public Column[] getKeyColumns() { + return keyColumns; + } + + /** + * An array of the columns in the tables that can be retrieved from this partitioned table, including both key and + * non-key columns. 
+ * + * @return Array of Column + */ + @JsProperty + public Column[] getColumns() { + return columns; + } + + /** + * Fetch a table containing all the valid keys of the partitioned table. + * + * @return Promise of a Table + */ + @JsMethod + public Promise getKeyTable() { + return connection.newState((c, state, metadata) -> { + DropColumnsRequest drop = new DropColumnsRequest(); + drop.setColumnNamesList(new String[] {descriptor.getConstituentColumnName()}); + drop.setSourceId(keys.state().getHandle().makeTableReference()); + drop.setResultId(state.getHandle().makeTicket()); + connection.tableServiceClient().dropColumns(drop, metadata, c::apply); + }, "drop constituent column") + .refetch(this, connection.metadata()) + .then(state -> Promise.resolve(new JsTable(connection, state))); + } + /** * Indicates that this PartitionedTable will no longer be used, removing subcriptions to updated keys, etc. This * will not affect tables in use. @@ -259,6 +314,8 @@ public void close() { if (subscription != null) { subscription.close(); } + + widget.close(); } } diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/LongWrapper.java b/web/client-api/src/main/java/io/deephaven/web/client/api/LongWrapper.java index ebea84e6d5d..7f2c9be99b6 100644 --- a/web/client-api/src/main/java/io/deephaven/web/client/api/LongWrapper.java +++ b/web/client-api/src/main/java/io/deephaven/web/client/api/LongWrapper.java @@ -41,4 +41,16 @@ public String valueOf() { public String toString() { return String.valueOf(value); } + + @JsIgnore + @Override + public boolean equals(Object obj) { + return obj instanceof LongWrapper && ((LongWrapper) obj).value == value; + } + + @JsIgnore + @Override + public int hashCode() { + return Long.hashCode(value); + } } diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/WorkerConnection.java b/web/client-api/src/main/java/io/deephaven/web/client/api/WorkerConnection.java index f9611a1bc57..7d4ea477962 100644 --- a/web/client-api/src/main/java/io/deephaven/web/client/api/WorkerConnection.java +++ b/web/client-api/src/main/java/io/deephaven/web/client/api/WorkerConnection.java @@ -766,21 +766,18 @@ public Promise getObject(JsVariableDefinition definition) { return getFigure(definition); } else if (JsVariableType.PANDAS.equalsIgnoreCase(definition.getType())) { return getWidget(definition) - .then(widget -> widget.getExportedObjects()[0].fetch()); + .then(JsWidget::refetch) + .then(widget -> { + widget.close(); + return widget.getExportedObjects()[0].fetch(); + }); } else if (JsVariableType.PARTITIONEDTABLE.equalsIgnoreCase(definition.getType())) { return getPartitionedTable(definition); } else if (JsVariableType.HIERARCHICALTABLE.equalsIgnoreCase(definition.getType())) { return getHierarchicalTable(definition); } else { - if (JsVariableType.TABLEMAP.equalsIgnoreCase(definition.getType())) { - JsLog.warn( - "TableMap is now known as PartitionedTable, fetching as a plain widget. To fetch as a PartitionedTable use that as the type."); - } - if (JsVariableType.TREETABLE.equalsIgnoreCase(definition.getType())) { - JsLog.warn( - "TreeTable is now HierarchicalTable, fetching as a plain widget. 
To fetch as a HierarchicalTable use that as this type."); - } - return getWidget(definition); + warnLegacyTicketTypes(definition.getType()); + return getWidget(definition).then(JsWidget::refetch); } } @@ -810,6 +807,45 @@ public Promise getJsObject(JsPropertyMap definitionObject) { } } + public Promise getObject(TypedTicket typedTicket) { + if (JsVariableType.TABLE.equalsIgnoreCase(typedTicket.getType())) { + throw new IllegalArgumentException("wrong way to get a table from a ticket"); + } else if (JsVariableType.FIGURE.equalsIgnoreCase(typedTicket.getType())) { + return new JsFigure(this, c -> { + JsWidget widget = new JsWidget(this, typedTicket); + widget.refetch().then(ignore -> { + c.apply(null, makeFigureFetchResponse(widget)); + return null; + }); + }).refetch(); + } else if (JsVariableType.PANDAS.equalsIgnoreCase(typedTicket.getType())) { + return getWidget(typedTicket) + .then(JsWidget::refetch) + .then(widget -> { + widget.close(); + return widget.getExportedObjects()[0].fetch(); + }); + } else if (JsVariableType.PARTITIONEDTABLE.equalsIgnoreCase(typedTicket.getType())) { + return new JsPartitionedTable(this, new JsWidget(this, typedTicket)).refetch(); + } else if (JsVariableType.HIERARCHICALTABLE.equalsIgnoreCase(typedTicket.getType())) { + return new JsWidget(this, typedTicket).refetch().then(w -> Promise.resolve(new JsTreeTable(this, w))); + } else { + warnLegacyTicketTypes(typedTicket.getType()); + return getWidget(typedTicket).then(JsWidget::refetch); + } + } + + private static void warnLegacyTicketTypes(String ticketType) { + if (JsVariableType.TABLEMAP.equalsIgnoreCase(ticketType)) { + JsLog.warn( + "TableMap is now known as PartitionedTable, fetching as a plain widget. To fetch as a PartitionedTable use that as the type."); + } + if (JsVariableType.TREETABLE.equalsIgnoreCase(ticketType)) { + JsLog.warn( + "TreeTable is now HierarchicalTable, fetching as a plain widget. 
To fetch as a HierarchicalTable use that as this type."); + } + } + @JsMethod @SuppressWarnings("ConstantConditions") public JsRunnable subscribeToFieldUpdates(JsConsumer callback) { @@ -911,12 +947,8 @@ public Promise getPartitionedTable(JsVariableDefinition varD .then(widget -> new JsPartitionedTable(this, widget).refetch()); } - public Promise getTreeTable(JsVariableDefinition varDef) { - return getWidget(varDef).then(w -> Promise.resolve(new JsTreeTable(this, w))); - } - public Promise getHierarchicalTable(JsVariableDefinition varDef) { - return getWidget(varDef).then(w -> Promise.resolve(new JsTreeTable(this, w))); + return getWidget(varDef).then(JsWidget::refetch).then(w -> Promise.resolve(new JsTreeTable(this, w))); } public Promise getFigure(JsVariableDefinition varDef) { @@ -926,13 +958,9 @@ public Promise getFigure(JsVariableDefinition varDef) { return whenServerReady("get a figure") .then(server -> new JsFigure(this, c -> { - getWidget(varDef).then(widget -> { - FetchObjectResponse legacyResponse = new FetchObjectResponse(); - legacyResponse.setData(widget.getDataAsU8()); - legacyResponse.setType(widget.getType()); - legacyResponse.setTypedExportIdsList(Arrays.stream(widget.getExportedObjects()) - .map(JsWidgetExportedObject::typedTicket).toArray(TypedTicket[]::new)); - c.apply(null, legacyResponse); + getWidget(varDef).then(JsWidget::refetch).then(widget -> { + c.apply(null, makeFigureFetchResponse(widget)); + widget.close(); return null; }, error -> { c.apply(error, null); @@ -941,6 +969,15 @@ public Promise getFigure(JsVariableDefinition varDef) { }).refetch()); } + private static FetchObjectResponse makeFigureFetchResponse(JsWidget widget) { + FetchObjectResponse legacyResponse = new FetchObjectResponse(); + legacyResponse.setData(widget.getDataAsU8()); + legacyResponse.setType(widget.getType()); + legacyResponse.setTypedExportIdsList(Arrays.stream(widget.getExportedObjects()) + .map(JsWidgetExportedObject::typedTicket).toArray(TypedTicket[]::new)); + return legacyResponse; + } + private TypedTicket createTypedTicket(JsVariableDefinition varDef) { TypedTicket typedTicket = new TypedTicket(); typedTicket.setTicket(TableTicket.createTicket(varDef)); @@ -960,7 +997,7 @@ public Promise getWidget(JsVariableDefinition varDef) { public Promise getWidget(TypedTicket typedTicket) { return whenServerReady("get a widget") - .then(response -> new JsWidget(this, typedTicket).refetch()); + .then(response -> Promise.resolve(new JsWidget(this, typedTicket))); } public void registerSimpleReconnectable(HasLifecycle figure) { diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/console/JsVariableDefinition.java b/web/client-api/src/main/java/io/deephaven/web/client/api/console/JsVariableDefinition.java index 11473ea63be..50dda49996d 100644 --- a/web/client-api/src/main/java/io/deephaven/web/client/api/console/JsVariableDefinition.java +++ b/web/client-api/src/main/java/io/deephaven/web/client/api/console/JsVariableDefinition.java @@ -12,8 +12,8 @@ /** * A format to describe a variable available to be read from the server. Application fields are optional, and only * populated when a variable is provided by application mode. - * - * APIs which take a VariableDefinition` must at least be provided an object with a type and id field. + *
<p>
+ * APIs which take a VariableDefinition must at least be provided an object with a type and id field. */ @TsInterface @TsName(namespace = "dh.ide", name = "VariableDefinition") @@ -28,6 +28,10 @@ public class JsVariableDefinition { private final String applicationName; public JsVariableDefinition(String type, String title, String id, String description) { + // base64('s/') ==> 'cy8' + if (!id.startsWith("cy8")) { + throw new IllegalArgumentException("Cannot create a VariableDefinition from a non-scope ticket"); + } this.type = type; this.title = title == null ? JS_UNAVAILABLE : title; this.id = id; diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/tree/JsTreeTable.java b/web/client-api/src/main/java/io/deephaven/web/client/api/tree/JsTreeTable.java index a1f0569955c..c6ffa93cbd3 100644 --- a/web/client-api/src/main/java/io/deephaven/web/client/api/tree/JsTreeTable.java +++ b/web/client-api/src/main/java/io/deephaven/web/client/api/tree/JsTreeTable.java @@ -979,8 +979,7 @@ public void close() { connection.unregisterSimpleReconnectable(this); - // Presently it is never necessary to release widget tickets, since they can't be export tickets. - // connection.releaseTicket(widget.getTicket()); + connection.releaseTicket(widget.getTicket()); if (filteredTable != null) { filteredTable.release(); diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/tree/enums/JsAggregationOperation.java b/web/client-api/src/main/java/io/deephaven/web/client/api/tree/enums/JsAggregationOperation.java index acd460e0859..79fa9a9ec5f 100644 --- a/web/client-api/src/main/java/io/deephaven/web/client/api/tree/enums/JsAggregationOperation.java +++ b/web/client-api/src/main/java/io/deephaven/web/client/api/tree/enums/JsAggregationOperation.java @@ -128,7 +128,9 @@ private static boolean isNumeric(String columnType) { case "long": case "short": case "char": - case "byte": { + case "byte": + case "java.math.BigDecimal": + case "java.math.BigInteger": { return true; } } diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/widget/JsWidget.java b/web/client-api/src/main/java/io/deephaven/web/client/api/widget/JsWidget.java index e5449ec42a9..792aaedea3b 100644 --- a/web/client-api/src/main/java/io/deephaven/web/client/api/widget/JsWidget.java +++ b/web/client-api/src/main/java/io/deephaven/web/client/api/widget/JsWidget.java @@ -4,7 +4,6 @@ package io.deephaven.web.client.api.widget; import com.vertispan.tsdefs.annotations.TsName; -import com.vertispan.tsdefs.annotations.TsTypeRef; import com.vertispan.tsdefs.annotations.TsUnion; import com.vertispan.tsdefs.annotations.TsUnionMember; import elemental2.core.ArrayBuffer; @@ -34,15 +33,57 @@ /** * A Widget represents a server side object that sends one or more responses to the client. The client can then * interpret these responses to see what to render, or how to respond. - * + *
<p>
* Most custom object types result in a single response being sent to the client, often with other exported objects, but * some will have streamed responses, and allow the client to send follow-up requests of its own. This class's API is * backwards compatible, but as such does not offer a way to tell the difference between a streaming or non-streaming * object type, the client code that handles the payloads is expected to know what to expect. See - * dh.WidgetMessageDetails for more information. - * + * {@link WidgetMessageDetails} for more information. + *
<p>
* When the promise that returns this object resolves, it will have the first response assigned to its fields. Later - * responses from the server will be emitted as "message" events. When the connection with the server ends + * responses from the server will be emitted as "message" events. When the connection with the server ends, the "close" + * event will be emitted. In this way, the connection will behave roughly in the same way as a WebSocket - either side + * can close, and after close no more messages will be processed. There can be some latency in closing locally while + * remote messages are still pending - it is up to implementations of plugins to handle this case. + *
<p>
+ * Also like WebSockets, the plugin API doesn't define how to serialize messages, and just handles any binary payloads. + * What it does handle however, is allowing those messages to include references to server-side objects with those + * payloads. Those server side objects might be tables or other built-in types in the Deephaven JS API, or could be + * objects usable through their own plugins. They also might have no plugin at all, allowing the client to hold a + * reference to them and pass them back to the server, either to the current plugin instance, or through another API. + * The {@code Widget} type does not specify how those objects should be used or their lifecycle, but leaves that + * entirely to the plugin. Messages will arrive in the order they were sent. + *
<p>
+ * This can suggest several patterns for how plugins operate: + *
<ul>
<li>The plugin merely exists to transport some other object to the client. This can be useful for objects which can + * easily be translated to some other type (like a Table) when the user clicks on it. An example of this is + * {@code pandas.DataFrame} will result in a widget that only contains a static + * {@link io.deephaven.web.client.api.JsTable}. Presently, the widget is immediately closed, and only the Table is + * provided to the JS API consumer.</li>
<li>The plugin provides references to Tables and other objects, and those objects can live longer than the object + * which provided them. One concrete example of this could have been + * {@link io.deephaven.web.client.api.JsPartitionedTable} when fetching constituent tables, but it was implemented + * before bidirectional plugins were implemented. Another example of this is plugins that serve as a "factory", giving + * the user access to table manipulation/creation methods not supported by gRPC or the JS API.</li>
<li>The plugin provides references to Tables and other objects that only make sense within the context of the widget + * instance, so when the widget goes away, those objects should be released as well. This is also an example of + * {@link io.deephaven.web.client.api.JsPartitionedTable}, as the partitioned table tracks creation of new keys through + * an internal table instance.</li>
</ul> + * + * Handling server objects in messages also has more than one potential pattern that can be used: + * <ul>
<li>One object per message - the message clearly is about that object, no other details required.</li>
<li>Objects indexed within their message - as each message comes with a list of objects, those objects can be + * referenced within the payload by index. This is roughly how {@link io.deephaven.web.client.api.widget.plot.JsFigure} + * behaves, where the figure descriptor schema includes an index for each created series, describing which table should + * be used, which columns should be mapped to each axis.</li>
<li>Objects indexed since widget creation - each message would append its objects to a list created when the widget + * was first made, and any new exports that arrive in a new message would be appended to that list. Then, subsequent + * messages can reference objects already sent. This imposes a limitation where the client cannot release any exports + * without the server somehow signaling that it will never reference that export again.</li> + * </ul>
*/ // TODO consider reconnect support? This is somewhat tricky without understanding the semantics of the widget @TsName(namespace = "dh", name = "Widget") @@ -120,9 +161,9 @@ public Promise refetch() { messageStream.onStatus(status -> { if (!status.isOk()) { reject.onInvoke(status.getDetails()); - fireEvent(EVENT_CLOSE); - closeStream(); } + fireEvent(EVENT_CLOSE); + closeStream(); }); messageStream.onEnd(status -> { closeStream(); @@ -175,6 +216,10 @@ public String getDataAsString() { return new String(Js.uncheckedCast(response.getData().getPayload_asU8()), StandardCharsets.UTF_8); } + /** + * @return the exported objects sent in the initial message from the server. The client is responsible for closing + * them when finished using them. + */ @Override @JsProperty public JsWidgetExportedObject[] getExportedObjects() { @@ -226,8 +271,7 @@ default ArrayBufferView asView() { * @param references an array of objects that can be safely sent to the server */ @JsMethod - public void sendMessage(MessageUnion msg, - @JsOptional JsArray<@TsTypeRef(ServerObject.Union.class) ServerObject> references) { + public void sendMessage(MessageUnion msg, @JsOptional JsArray references) { if (messageStream == null) { return; } @@ -249,7 +293,7 @@ public void sendMessage(MessageUnion msg, } for (int i = 0; references != null && i < references.length; i++) { - ServerObject reference = references.getAt(i); + ServerObject reference = references.getAt(i).asServerObject(); data.addReferences(reference.typedTicket()); } diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/widget/JsWidgetExportedObject.java b/web/client-api/src/main/java/io/deephaven/web/client/api/widget/JsWidgetExportedObject.java index a8948d990fa..d6df425e1e9 100644 --- a/web/client-api/src/main/java/io/deephaven/web/client/api/widget/JsWidgetExportedObject.java +++ b/web/client-api/src/main/java/io/deephaven/web/client/api/widget/JsWidgetExportedObject.java @@ -5,22 +5,28 @@ import com.vertispan.tsdefs.annotations.TsInterface; import com.vertispan.tsdefs.annotations.TsName; +import elemental2.core.JsArray; import elemental2.promise.Promise; +import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.session_pb.ExportRequest; import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.table_pb.ExportedTableCreationResponse; +import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.ticket_pb.Ticket; import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.ticket_pb.TypedTicket; import io.deephaven.web.client.api.Callbacks; +import io.deephaven.web.client.api.JsLazy; import io.deephaven.web.client.api.JsTable; import io.deephaven.web.client.api.ServerObject; import io.deephaven.web.client.api.WorkerConnection; -import io.deephaven.web.client.api.console.JsVariableDefinition; import io.deephaven.web.client.api.console.JsVariableType; +import io.deephaven.web.client.fu.JsLog; import io.deephaven.web.client.state.ClientTableState; import jsinterop.annotations.JsMethod; +import jsinterop.annotations.JsNullable; import jsinterop.annotations.JsProperty; /** - * Represents a server-side object that may not yet have been fetched by the client. Does not memoize its result, so - * fetch() should only be called once, and calling close() on this object will also close the result of the fetch. + * Represents a server-side object that may not yet have been fetched by the client. 
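(Editor's sketch, not part of the diff: the consumer-side lifecycle these docs describe, using only APIs visible in this change; 'connection' and 'typedTicket' are assumed to be in scope.)

    JsWidget widget = new JsWidget(connection, typedTicket);
    widget.refetch().then(w -> {
        // Exported objects ride along with the first response; each must eventually be
        // fetched (and the fetched object later closed) or closed directly to free the export.
        for (JsWidgetExportedObject exported : w.getExportedObjects()) {
            if (exported.getType() == null) {
                exported.close(); // no plugin type: can only be passed back to the server
            } else {
                exported.fetch(); // memoized; close the resolved object when done instead
            }
        }
        return Promise.resolve(w);
    });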
When this object will no longer be + used, if {@link #fetch()} is not called on this object, then {@link #close()} must be called to ensure server-side resources + are correctly freed. */ @TsInterface @TsName(namespace = "dh", name = "WidgetExportedObject") public class JsWidgetExportedObject implements ServerObject { private final TypedTicket ticket; + private final JsLazy<Promise<?>> fetched; + public JsWidgetExportedObject(WorkerConnection connection, TypedTicket ticket) { this.connection = connection; this.ticket = ticket; + this.fetched = JsLazy.of(() -> { + if (getType() == null) { + return Promise.reject("Exported object has no type, can't be fetched"); + } + if (getType().equals(JsVariableType.TABLE)) { + return Callbacks.grpcUnaryPromise(c -> { + connection.tableServiceClient().getExportedTableCreationResponse(ticket.getTicket(), + connection.metadata(), + c::apply); + }).then(etcr -> { + ClientTableState cts = connection.newStateFromUnsolicitedTable(etcr, "table for widget"); + JsTable table = new JsTable(connection, cts); + // never attempt a reconnect, since we might have a different widget schema entirely + table.addEventListener(JsTable.EVENT_DISCONNECT, ignore -> table.close()); + return Promise.resolve(table); + }); + } else { + return this.connection.getObject(ticket); + } + }); } + /** + * Returns the type of this export, typically one of {@link JsVariableType}, but may also include plugin types. If + * null, this object cannot be fetched, but can be passed to the server, such as via + * {@link JsWidget#sendMessage(JsWidget.MessageUnion, JsArray)}. + * + * @return the string type of this server-side object, or null. + */ + @JsNullable @JsProperty public String getType() { + if (ticket.getType().isEmpty()) { + return null; + } return ticket.getType(); } @@ -47,32 +86,55 @@ public TypedTicket typedTicket() { return typedTicket; } + /** + * Exports another copy of this reference, allowing it to be fetched separately. Results in rejection if the ticket + * was already closed (either by calling {@link #close()} or closing the object returned from {@link #fetch()}). + * + * @return a promise returning a reexported copy of this object, still referencing the same server-side object. + */ + @JsMethod + public Promise<JsWidgetExportedObject> reexport() { + Ticket reexportedTicket = connection.getConfig().newTicket(); + + // Future optimization - we could "race" these by running the export in the background, to avoid + // an extra round trip. + return Callbacks.grpcUnaryPromise(c -> { + ExportRequest req = new ExportRequest(); + req.setSourceId(ticket.getTicket()); + req.setResultId(reexportedTicket); + connection.sessionServiceClient().exportFromTicket(req, connection.metadata(), c::apply); + }).then(success -> { + TypedTicket typedTicket = new TypedTicket(); + typedTicket.setTicket(reexportedTicket); + typedTicket.setType(ticket.getType()); + return Promise.resolve(new JsWidgetExportedObject(connection, typedTicket)); + }); + } + + /** + * Returns a promise that will fetch the object represented by this reference. Multiple calls to this will return + * the same instance. + * + * @return a promise that will resolve to a client side object that represents the reference on the server. 
diff --git a/web/client-api/src/main/java/io/deephaven/web/client/ide/IdeSession.java b/web/client-api/src/main/java/io/deephaven/web/client/ide/IdeSession.java
index 6d8e1b69cb0..75bedece3c8 100644
--- a/web/client-api/src/main/java/io/deephaven/web/client/ide/IdeSession.java
+++ b/web/client-api/src/main/java/io/deephaven/web/client/ide/IdeSession.java
@@ -130,12 +130,12 @@ public Promise<JsFigure> getFigure(String name) {
      */
     public Promise<JsTreeTable> getTreeTable(String name) {
         return connection.getVariableDefinition(name, JsVariableType.HIERARCHICALTABLE)
-                .then(connection::getTreeTable);
+                .then(connection::getHierarchicalTable);
     }
 
     public Promise<JsTreeTable> getHierarchicalTable(String name) {
         return connection.getVariableDefinition(name, JsVariableType.HIERARCHICALTABLE)
-                .then(connection::getTreeTable);
+                .then(connection::getHierarchicalTable);
     }
 
     public Promise<?> getObject(@TsTypeRef(JsVariableDescriptor.class) JsPropertyMap<Object> definitionObject) {
diff --git a/web/client-ui/Dockerfile b/web/client-ui/Dockerfile
index b5952a9d065..c408f6aafe4 100644
--- a/web/client-ui/Dockerfile
+++ b/web/client-ui/Dockerfile
@@ -2,9 +2,10 @@ FROM deephaven/node:local-build
 WORKDIR /usr/src/app
 
 # Most of the time, these versions are the same, except in cases where a patch only affects one of the packages
-ARG WEB_VERSION=0.55.0
-ARG GRID_VERSION=0.55.0
-ARG CHART_VERSION=0.55.0
+ARG WEB_VERSION=0.57.1
+ARG GRID_VERSION=0.57.1
+ARG CHART_VERSION=0.57.1
+ARG WIDGET_VERSION=0.57.1
 
 # Pull in the published code-studio package from npmjs and extract it
 RUN set -eux; \
@@ -31,3 +32,12 @@ RUN set -eux; \
     mv package/build iframe/chart; \
     rm -r package; \
     rm deephaven-embed-chart-${CHART_VERSION}.tgz;
+
+# Pull in the published embed-widget package from npmjs and extract it
+RUN set -eux; \
+    npm pack @deephaven/embed-widget@${WIDGET_VERSION}; \
+    tar --touch -xf deephaven-embed-widget-${WIDGET_VERSION}.tgz; \
+    mkdir -p iframe; \
+    mv package/build iframe/widget; \
+    rm -r package; \
+    rm deephaven-embed-widget-${WIDGET_VERSION}.tgz;