From e71913691fddbf46b125e94e0224e0385554a848 Mon Sep 17 00:00:00 2001 From: Ryan Caudy Date: Thu, 30 Nov 2023 13:00:11 -0500 Subject: [PATCH 01/25] Clean up some legacy code and improve build (#4674) * Avoid extra copy of TypeScript/JavaScript files in proto/raw-js-openapi/Dockerfile * Delete FishUtil. All usages have been eliminated, trivially inlined, or the files have migrated to more appropriate modules. * Refactor TrackedFileHandleFactory to use a ScheduledExecutorService instead of Scheduler * Refactor Liveness to use a ScheduledExecutorService instead of Scheduler for (currently unused) scheduleCountReport * Replace Scheduler usage in PeriodicUpdateGraph with ScheduledExecutorService. Standardize on nanoseconds for timing. Eliminate unnecessary initializations. * Replace Scheduler usage in StatsDriver with ScheduledExecutorService * Delete :Net. Delete io.sched package. Delete gradle :Net and fishnet projects/configurations. Delete dependencies on same. * Delete legacy utility that used reflection to reach into the guts of SelectorImpl --------- Co-authored-by: Colin Alworth --- .../base/stats/ItemUpdateListener.java | 11 +- .../base/stats/HistogramPower2Test.java | 2 +- .../base/stats/HistogramStateTest.java | 2 +- FishUtil/build.gradle | 14 - FishUtil/cpp/MicroTimer.cpp | 89 -- FishUtil/cpp/MicroTimer.h | 22 - FishUtil/cpp/SignalUtils.cpp | 9 - FishUtil/cpp/SignalUtils.h | 21 - FishUtil/gradle.properties | 1 - .../main/java/io/deephaven/util/DateUtil.java | 955 ----------------- .../java/io/deephaven/util/ExceptionUtil.java | 22 - .../main/java/io/deephaven/util/Mailer.java | 19 - .../deephaven/util/ThreadSafeDateFormat.java | 155 --- .../main/java/io/deephaven/util/Validate.java | 110 -- .../io/deephaven/util/formatters/ISO8601.java | 120 --- .../deephaven/util/signals/SignalSender.java | 111 -- .../deephaven/util/signals/SignalUtils.java | 110 -- .../io/deephaven/util/threads/ThreadDump.java | 48 - IO/src/main/java/io/deephaven/io/NioUtil.java | 109 -- .../main/java/io/deephaven/io/sched/Job.java | 66 -- .../java/io/deephaven/io/sched/JobState.java | 65 -- .../io/sched/JobStateTimeoutQueue.java | 211 ---- .../java/io/deephaven/io/sched/Scheduler.java | 199 ---- .../java/io/deephaven/io/sched/TimedJob.java | 30 - .../deephaven/io/sched/YASchedulerImpl.java | 979 ------------------ .../io/sched/TestJobStateTimeoutQueue.java | 124 --- Net/build.gradle | 30 - Net/gradle.properties | 1 - .../main/java/io/deephaven/net/CommBase.java | 111 -- .../deephaven/net/impl/nio/FastNIODriver.java | 285 ----- .../io/deephaven/net/impl/nio/NIODriver.java | 295 ------ Stats/build.gradle | 2 - .../io/deephaven/stats/StatsCPUCollector.java | 2 +- .../java/io/deephaven/stats/StatsDriver.java | 60 +- .../java/io/deephaven/stats}/util/OSUtil.java | 12 +- TableLogger/TableLogger.gradle | 6 +- .../util/process/BaseProcessEnvironment.java | 0 .../process/DefaultFatalErrorReporter.java | 0 .../process/DefaultProcessEnvironment.java | 0 .../util/process/FatalErrorReporter.java | 0 .../util/process/FatalErrorReporterBase.java | 0 .../util/process/LoggerShutdownTask.java | 0 .../util/process/OnetimeShutdownTask.java | 0 .../util/process/ProcessEnvironment.java | 0 .../util/process/ShutdownManager.java | 0 .../util/process/ShutdownManagerImpl.java | 2 +- ...eephaven.java-classpath-conventions.gradle | 8 +- engine/table/build.gradle | 2 - .../updategraph/impl/PeriodicUpdateGraph.java | 104 +- .../util/file/TrackedFileHandleFactory.java | 75 +- .../file/TestTrackedFileHandleFactory.java | 16 +- 
engine/test-utils/build.gradle | 1 - engine/time/build.gradle | 1 - engine/updategraph/build.gradle | 2 - .../deephaven/engine/liveness/Liveness.java | 25 +- proto/raw-js-openapi/Dockerfile | 5 - server/build.gradle | 1 - settings.gradle | 4 - 58 files changed, 173 insertions(+), 4481 deletions(-) delete mode 100644 FishUtil/build.gradle delete mode 100644 FishUtil/cpp/MicroTimer.cpp delete mode 100644 FishUtil/cpp/MicroTimer.h delete mode 100644 FishUtil/cpp/SignalUtils.cpp delete mode 100644 FishUtil/cpp/SignalUtils.h delete mode 100644 FishUtil/gradle.properties delete mode 100644 FishUtil/src/main/java/io/deephaven/util/DateUtil.java delete mode 100644 FishUtil/src/main/java/io/deephaven/util/ExceptionUtil.java delete mode 100644 FishUtil/src/main/java/io/deephaven/util/Mailer.java delete mode 100644 FishUtil/src/main/java/io/deephaven/util/ThreadSafeDateFormat.java delete mode 100644 FishUtil/src/main/java/io/deephaven/util/Validate.java delete mode 100644 FishUtil/src/main/java/io/deephaven/util/formatters/ISO8601.java delete mode 100644 FishUtil/src/main/java/io/deephaven/util/signals/SignalSender.java delete mode 100644 FishUtil/src/main/java/io/deephaven/util/signals/SignalUtils.java delete mode 100644 FishUtil/src/main/java/io/deephaven/util/threads/ThreadDump.java delete mode 100644 IO/src/main/java/io/deephaven/io/NioUtil.java delete mode 100644 IO/src/main/java/io/deephaven/io/sched/Job.java delete mode 100644 IO/src/main/java/io/deephaven/io/sched/JobState.java delete mode 100644 IO/src/main/java/io/deephaven/io/sched/JobStateTimeoutQueue.java delete mode 100644 IO/src/main/java/io/deephaven/io/sched/Scheduler.java delete mode 100644 IO/src/main/java/io/deephaven/io/sched/TimedJob.java delete mode 100644 IO/src/main/java/io/deephaven/io/sched/YASchedulerImpl.java delete mode 100644 IO/src/test/java/io/deephaven/io/sched/TestJobStateTimeoutQueue.java delete mode 100644 Net/build.gradle delete mode 100644 Net/gradle.properties delete mode 100644 Net/src/main/java/io/deephaven/net/CommBase.java delete mode 100644 Net/src/main/java/io/deephaven/net/impl/nio/FastNIODriver.java delete mode 100644 Net/src/main/java/io/deephaven/net/impl/nio/NIODriver.java rename {FishUtil/src/main/java/io/deephaven => Stats/src/main/java/io/deephaven/stats}/util/OSUtil.java (85%) rename {FishUtil => Util}/src/main/java/io/deephaven/util/process/BaseProcessEnvironment.java (100%) rename {FishUtil => Util}/src/main/java/io/deephaven/util/process/DefaultFatalErrorReporter.java (100%) rename {FishUtil => Util}/src/main/java/io/deephaven/util/process/DefaultProcessEnvironment.java (100%) rename {FishUtil => Util}/src/main/java/io/deephaven/util/process/FatalErrorReporter.java (100%) rename {FishUtil => Util}/src/main/java/io/deephaven/util/process/FatalErrorReporterBase.java (100%) rename {FishUtil => Util}/src/main/java/io/deephaven/util/process/LoggerShutdownTask.java (100%) rename {FishUtil => Util}/src/main/java/io/deephaven/util/process/OnetimeShutdownTask.java (100%) rename {FishUtil => Util}/src/main/java/io/deephaven/util/process/ProcessEnvironment.java (100%) rename {FishUtil => Util}/src/main/java/io/deephaven/util/process/ShutdownManager.java (100%) rename {FishUtil => Util}/src/main/java/io/deephaven/util/process/ShutdownManagerImpl.java (99%) diff --git a/Base/src/main/java/io/deephaven/base/stats/ItemUpdateListener.java b/Base/src/main/java/io/deephaven/base/stats/ItemUpdateListener.java index 4d092026ba6..cc9007f5e72 100644 --- 
a/Base/src/main/java/io/deephaven/base/stats/ItemUpdateListener.java +++ b/Base/src/main/java/io/deephaven/base/stats/ItemUpdateListener.java @@ -4,13 +4,10 @@ package io.deephaven.base.stats; public interface ItemUpdateListener { - public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, - String intervalName); - public static final ItemUpdateListener NULL = new ItemUpdateListener() { - public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, - String intervalName) { - // empty - } + void handleItemUpdated( + Item item, long now, long appNow, int intervalIndex, long intervalMillis, String intervalName); + + ItemUpdateListener NULL = (item, now, appNow, intervalIndex, intervalMillis, intervalName) -> { }; } diff --git a/Base/src/test/java/io/deephaven/base/stats/HistogramPower2Test.java b/Base/src/test/java/io/deephaven/base/stats/HistogramPower2Test.java index 6958e16994b..bd24cf5c052 100644 --- a/Base/src/test/java/io/deephaven/base/stats/HistogramPower2Test.java +++ b/Base/src/test/java/io/deephaven/base/stats/HistogramPower2Test.java @@ -33,7 +33,7 @@ public void testSample() throws Exception { // should have a count of 1 in bin[1]..bin[63]; bin[0]=2 Stats.update(new ItemUpdateListener() { - public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, + public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, String intervalName) { // Value v = item.getValue(); HistogramPower2 nh; diff --git a/Base/src/test/java/io/deephaven/base/stats/HistogramStateTest.java b/Base/src/test/java/io/deephaven/base/stats/HistogramStateTest.java index ea63a9100fd..2a23844c93f 100644 --- a/Base/src/test/java/io/deephaven/base/stats/HistogramStateTest.java +++ b/Base/src/test/java/io/deephaven/base/stats/HistogramStateTest.java @@ -23,7 +23,7 @@ public void testSample() throws Exception { // This should print 10 invocations every time Stats.update(new ItemUpdateListener() { - public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, + public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, String intervalName) { Value v = item.getValue(); History history = v.getHistory(); diff --git a/FishUtil/build.gradle b/FishUtil/build.gradle deleted file mode 100644 index bcadd9989db..00000000000 --- a/FishUtil/build.gradle +++ /dev/null @@ -1,14 +0,0 @@ -plugins { - id 'io.deephaven.project.register' - id 'java-library' -} - -dependencies { - implementation project(':Base') - implementation project(':DataStructures') - implementation project(':IO') - implementation project(':Configuration') - implementation project(':log-factory') - - testImplementation project(path: ':Base', configuration: 'tests') -} \ No newline at end of file diff --git a/FishUtil/cpp/MicroTimer.cpp b/FishUtil/cpp/MicroTimer.cpp deleted file mode 100644 index 1455d872f8e..00000000000 --- a/FishUtil/cpp/MicroTimer.cpp +++ /dev/null @@ -1,89 +0,0 @@ -#include "MicroTimer.h" - -#ifdef _WIN32 -#include -#include -static jdouble scale; -LARGE_INTEGER startupTick; -LARGE_INTEGER startupTime; -const double MICROS_IN_SEC = 1000000.0; - -JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM * vm, void * reserved) { - LARGE_INTEGER freq; - QueryPerformanceFrequency (&freq); - scale = freq.QuadPart / MICROS_IN_SEC; - - QueryPerformanceCounter(&startupTick); - - struct timeb startupTimeMillis; - 
ftime(&startupTimeMillis); - startupTime.QuadPart = startupTimeMillis.time; - startupTime.QuadPart *= 1000; - startupTime.QuadPart += startupTimeMillis.millitm; - startupTime.QuadPart *= 1000; - - return JNI_VERSION_1_2; -} - -JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_currentTimeMicrosNative - (JNIEnv * env, jclass cls) { - LARGE_INTEGER now; - QueryPerformanceCounter (&now); - LARGE_INTEGER diff; - diff.QuadPart = (now.QuadPart - startupTick.QuadPart) / scale; - return startupTime.QuadPart + diff.QuadPart; -} - -extern "C" JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_clockRealtimeNative - (JNIEnv * env, jclass cls) { - jlong micros = Java_io_deephaven_util_clock_MicroTimer_currentTimeMicrosNative(env, cls); - return micros * 1000L; -} - -extern "C" JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_clockMonotonicNative - (JNIEnv * env, jclass cls) { - jlong micros = Java_io_deephaven_util_clock_MicroTimer_currentTimeMicrosNative(env, cls); - return micros * 1000L; -} - - -#else -#include -#include -#include -const uint64_t MICROS_IN_SEC = 1000000L; - -JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_currentTimeMicrosNative - (JNIEnv * env, jclass cls) { - timeval now; - gettimeofday(&now, NULL); - return ((uint64_t) now.tv_sec * 1000000L) + now.tv_usec; -} - -extern "C" JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_clockRealtimeNative - (JNIEnv * env, jclass cls) { - timespec now; - clock_gettime(CLOCK_REALTIME, &now); - return ((uint64_t) now.tv_sec * 1000000000L) + now.tv_nsec; -} - -extern "C" JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_clockMonotonicNative - (JNIEnv * env, jclass cls) { - timespec now; - clock_gettime(CLOCK_MONOTONIC, &now); - return ((uint64_t) now.tv_sec * 1000000000L) + now.tv_nsec; -} - -#endif - -static __inline__ unsigned long long rdtsc(void) { - unsigned hi, lo; - __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi)); - return ((unsigned long long)lo)|(((unsigned long long)hi)<<32); -} - -extern "C" JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_rdtscNative - (JNIEnv * env, jclass cls) { - return (jlong) rdtsc(); -} - diff --git a/FishUtil/cpp/MicroTimer.h b/FishUtil/cpp/MicroTimer.h deleted file mode 100644 index e20fbca170f..00000000000 --- a/FishUtil/cpp/MicroTimer.h +++ /dev/null @@ -1,22 +0,0 @@ -/* DO NOT EDIT THIS FILE - it is machine generated */ -#include -/* Header for class io_deephaven_util_clock_MicroTimer */ - -#ifndef _Included_io_deephaven_util_clock_MicroTimer -#define _Included_io_deephaven_util_clock_MicroTimer - -#ifdef __cplusplus -extern "C" { -#endif -/* - * Class: io_deephaven_util_clock_MicroTimer - * Method: currentTimeMicrosNative - * Signature: ()J - */ -JNIEXPORT jlong JNICALL Java_io_deephaven_util_clock_MicroTimer_currentTimeMicrosNative - (JNIEnv *, jclass); - -#ifdef __cplusplus -} -#endif -#endif diff --git a/FishUtil/cpp/SignalUtils.cpp b/FishUtil/cpp/SignalUtils.cpp deleted file mode 100644 index 5d4f2a75ca4..00000000000 --- a/FishUtil/cpp/SignalUtils.cpp +++ /dev/null @@ -1,9 +0,0 @@ -#include "SignalUtils.h" - -#include -#include - -extern "C" JNIEXPORT jint JNICALL Java_io_deephaven_util_signals_SignalUtils_sendSignalNative - (JNIEnv * env, jclass cls, jint pid, jint sig) { - return kill(pid, sig); -} diff --git a/FishUtil/cpp/SignalUtils.h b/FishUtil/cpp/SignalUtils.h deleted file mode 100644 index 281c9a8a34f..00000000000 --- a/FishUtil/cpp/SignalUtils.h +++ /dev/null @@ -1,21 +0,0 @@ -/* DO NOT EDIT 
THIS FILE - it is machine generated */ -#include -/* Header for class io_deephaven_util_signals_SignalUtils */ - -#ifndef _Included_io_deephaven_util_signals_SignalUtils -#define _Included_io_deephaven_util_signals_SignalUtils -#ifdef __cplusplus -extern "C" { -#endif -/* - * Class: io_deephaven_util_signals_SignalUtils - * Method: sendSignalNative - * Signature: (II)I - */ -JNIEXPORT jint JNICALL Java_io_deephaven_util_signals_SignalUtils_sendSignalNative - (JNIEnv *, jclass, jint, jint); - -#ifdef __cplusplus -} -#endif -#endif diff --git a/FishUtil/gradle.properties b/FishUtil/gradle.properties deleted file mode 100644 index c186bbfdde1..00000000000 --- a/FishUtil/gradle.properties +++ /dev/null @@ -1 +0,0 @@ -io.deephaven.project.ProjectType=JAVA_PUBLIC diff --git a/FishUtil/src/main/java/io/deephaven/util/DateUtil.java b/FishUtil/src/main/java/io/deephaven/util/DateUtil.java deleted file mode 100644 index c5c5b66ee90..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/DateUtil.java +++ /dev/null @@ -1,955 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util; - -import io.deephaven.base.verify.Require; -import io.deephaven.base.verify.RequirementFailure; -import io.deephaven.configuration.PropertyFile; - -import java.io.File; -import java.text.DateFormat; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.*; - -// -------------------------------------------------------------------- -/** - * Useful methods for working with dates. Not for use in the critical path. - */ -public class DateUtil { - - public static final boolean DAYMASK_STRICT = true; - public static final boolean DAYMASK_NOT_STRICT = false; - - public static final int DAY_VALID = '1'; - public static final int DAY_INVALID = '0'; - public static final int DAY_OPTIONAL = '2'; - - public static final String DAYMASK_NORMAL_BUSINESS_WEEK = "0111110"; - - public static final long NANOS_PER_MICRO = 1000; - public static final long NANOS_PER_MILLI = NANOS_PER_MICRO * 1000; - public static final long NANOS_PER_SECOND = NANOS_PER_MILLI * 1000; - - public static final long MICROS_PER_MILLI = 1000; - public static final long MICROS_PER_SECOND = MICROS_PER_MILLI * 1000; - - public static final long MILLIS_PER_SECOND = 1000; - public static final long MILLIS_PER_MINUTE = MILLIS_PER_SECOND * 60; - public static final long MILLIS_PER_HOUR = MILLIS_PER_MINUTE * 60; - public static final long MILLIS_PER_DAY = MILLIS_PER_HOUR * 24; - - public static final int SECONDS_PER_MINUTE = 60; - public static final int SECONDS_PER_HOUR = SECONDS_PER_MINUTE * 60; - public static final int SECONDS_PER_DAY = SECONDS_PER_HOUR * 24; - - public static final int DAYS_PER_WEEK = 7; - - public static final long[] THOUSANDS = {1, 1000, 1000000, 1000000000}; - - /** Number of days in each month. (Jan==1, Feb is non-leap-year) */ - public static final int[] DAYS_PER_MONTH = {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; - - /** Three letter abbreviations of month names. (Jan==1, title case) */ - public static final String[] MONTH_ABBREVIATIONS_3T = - {"Xxx", "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}; - - /** Three letter abbreviations of month names. (Jan==1, upper case) */ - public static final String[] MONTH_ABBREVIATIONS_3U = - {"XXX", "JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC"}; - - /** Three letter abbreviations of month names. 
(Jan==1, lower case) */ - public static final String[] MONTH_ABBREVIATIONS_3L = - {"xxx", "jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec"}; - - // some useful formatting objects - /** Formats a year in YYYY format. */ - private static final DateFormat ms_dateFormatYear = new ThreadSafeDateFormat(new SimpleDateFormat("yyyy")); - /** Formats a month in MM format. */ - private static final DateFormat ms_dateFormatMonth = new ThreadSafeDateFormat(new SimpleDateFormat("MM")); - /** Formats a day in DD format. */ - private static final DateFormat ms_dateFormatDay = new ThreadSafeDateFormat(new SimpleDateFormat("dd")); - - private static final DateFormat ms_dateFormatHour = new ThreadSafeDateFormat(new SimpleDateFormat("HH")); - private static final DateFormat ms_dateFormatMinute = new ThreadSafeDateFormat(new SimpleDateFormat("mm")); - private static final DateFormat ms_dateFormatSecond = new ThreadSafeDateFormat(new SimpleDateFormat("ss")); - - - /** - * An easy way to get the OS-specific directory name component separator. - */ - private static final String DIR_SEP = File.separator; - - /** - * The "local" time zone. We make it explicit for testing purposes. - */ - private static TimeZone ms_localTimeZone = TimeZone.getDefault(); - - // ---------------------------------------------------------------- - /** Gets the "local" time zone. */ - public static TimeZone getLocalTimeZone() { - return ms_localTimeZone; - } - - // ---------------------------------------------------------------- - /** Gets the "local" time zone. */ - public static void setLocalTimeZone(TimeZone localTimeZone) { - Require.neqNull(localTimeZone, "localTimeZone"); - ms_localTimeZone = localTimeZone; - ms_dateFormatDay.setTimeZone(localTimeZone); - ms_dateFormatMonth.setTimeZone(localTimeZone); - ms_dateFormatYear.setTimeZone(localTimeZone); - } - - // ---------------------------------------------------------------- - public static boolean isLeapYear(int nYear) { - return 0 == nYear % 4 && (0 != nYear % 100 || 0 == nYear % 400) && 0 != nYear; - } - - // ---------------------------------------------------------------- - public static int getDaysInMonth(int nMonth, int nYear) { - Require.geq(nMonth, "nMonth", 1); - Require.leq(nMonth, "nMonth", 12); - return DAYS_PER_MONTH[nMonth] + (2 == nMonth && isLeapYear(nYear) ? 1 : 0); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in MMDD format. - */ - public static String getDateAsMMDD(Date date) { - Require.neqNull(date, "date"); - return ms_dateFormatMonth.format(date) + ms_dateFormatDay.format(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in MM format. - */ - public static String getDateAsMM(Date date) { - Require.neqNull(date, "date"); - return ms_dateFormatMonth.format(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in DD format. - */ - public static String getDateAsDD(Date date) { - Require.neqNull(date, "date"); - return ms_dateFormatDay.format(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in YYYYMM format. 
- */ - public static String getDateAsYYYYMM(Date date) { - Require.neqNull(date, "date"); - return ms_dateFormatYear.format(date) + ms_dateFormatMonth.format(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in YYYYMMDD format. - */ - public static String getDateAsYYYYMMDD(Date date) { - Require.neqNull(date, "date"); - return ms_dateFormatYear.format(date) + ms_dateFormatMonth.format(date) + ms_dateFormatDay.format(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in YYYYMMDD format. - */ - public static String getDateAsYYYYMMDD(long timeInMillis) { - Date date = new Date(timeInMillis); - return getDateAsYYYYMMDD(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in YYYYMMDDTHH:MM:SS format. - */ - public static String getDateAsYYYYdMMdDDTHHcMMcSS(Date date) { - return ms_dateFormatYear.format(date) + "-" + ms_dateFormatMonth.format(date) + "-" - + ms_dateFormatDay.format(date) + "T" + ms_dateFormatHour.format(date) + ":" - + ms_dateFormatMinute.format(date) + ":" + ms_dateFormatSecond.format(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in YYYYMMDDTHH:MM:SS format. - */ - public static String getDateAsYYYYdMMdDDTHHcMMcSS(long timeInMillis) { - Date date = new Date(timeInMillis); - return getDateAsYYYYdMMdDDTHHcMMcSS(date); - } - - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in MMDDYYYY format. - */ - public static String getDateAsMMDDYYYY(Date date) { - Require.neqNull(date, "date"); - return ms_dateFormatMonth.format(date) + ms_dateFormatDay.format(date) + ms_dateFormatYear.format(date); - } - - // ---------------------------------------------------------------- - /** - * Converts the given date (local timezone) to a string in YYYY/YYYYMM/YYYYMMDD format. - */ - public static String getDateAsPath(Date date) { - Require.neqNull(date, "date"); - String sYear = ms_dateFormatYear.format(date); - String sMonth = ms_dateFormatMonth.format(date); - String sDay = ms_dateFormatDay.format(date); - return sYear + DIR_SEP + sYear + sMonth + DIR_SEP + sYear + sMonth + sDay; - } - - // ---------------------------------------------------------------- - /** - * Converts the given integer in YYYYMMDD format to a string in YYYY/YYYYMM/YYYYMMDD format. - */ - public static String getYyyymmddIntAsPath(int nDateYyyymmdd) { - String sYyyymmdd = "00000000" + Integer.toString(nDateYyyymmdd); - sYyyymmdd = sYyyymmdd.substring(sYyyymmdd.length() - 8); - String sYear = sYyyymmdd.substring(0, 4); - String sMonth = sYyyymmdd.substring(4, 6); - String sDay = sYyyymmdd.substring(6, 8); - return sYear + DIR_SEP + sYear + sMonth + DIR_SEP + sYear + sMonth + sDay; - } - - // ---------------------------------------------------------------- - /** - * Gets the download path, in [DownloadBaseDir]/sDataSubdir/YYYY/YYYYMM/YYYYMMDD format given a date (local - * timezone). 
- */ - public static String getDateDownloadPath(PropertyFile configuration, String sDataSubdir, Date date) { - Require.nonempty(sDataSubdir, "sDataSubdir"); - Require.neqNull(date, "date"); - return configuration.getProperty("DownloadBaseDir") + DIR_SEP + sDataSubdir + DIR_SEP + getDateAsPath(date); - } - - // ---------------------------------------------------------------- - /** - * Gets the download path, in [DownloadBaseDir]/sDataSubdir/YYYY/YYYYMM/YYYYMMDD format given an integer in YYYYMMDD - * format. - */ - public static String getYyyymmddIntDownloadPath(PropertyFile configuration, String sDataSubdir, int nDateYyyymmdd) { - Require.nonempty(sDataSubdir, "sDataSubdir"); - return configuration.getProperty("DownloadBaseDir") + DIR_SEP + sDataSubdir + DIR_SEP - + getYyyymmddIntAsPath(nDateYyyymmdd); - } - - // ---------------------------------------------------------------- - /** Gets a date object representing the time 24 hours ago. */ - public static Date getDateYesterday() { - long timeNow = System.currentTimeMillis(); - long timeYesterday = timeNow - MILLIS_PER_DAY; - return new Date(timeYesterday); - } - - // ---------------------------------------------------------------- - /** - * Gets a date object representing the next day at the same hour (which may not be exactly 24 hours in the future). - */ - public static Date getNextDaySameTime(Date baseline, TimeZone zone) { - Require.neqNull(baseline, "baseline"); - Require.neqNull(zone, "zone"); - Calendar calendar = Calendar.getInstance(zone); - calendar.setTime(baseline); - calendar.add(Calendar.DATE, 1); - return calendar.getTime(); - } - - // ---------------------------------------------------------------- - /** - * Subtracts zero or more 24hr periods from the given date until the day of week for the resulting date (local - * timezone) is a valid day according to the mask. If the strict flag is true, optional days are not considered - * valid. - *
<p>
- * See {@link #validateDayOfWeekMask}. - */ - public static Date getMostRecentValidDate(Date date, String sValidDaysMask, boolean bStrict) { - Require.neqNull(date, "date"); - validateDayOfWeekMask(sValidDaysMask, bStrict); - - Calendar calendar = Calendar.getInstance(ms_localTimeZone); - while (true) { - calendar.setTime(date); - char chDayType = sValidDaysMask.charAt(calendar.get(Calendar.DAY_OF_WEEK) - Calendar.SUNDAY); - if (DAY_VALID == chDayType || (!bStrict && DAY_OPTIONAL == chDayType)) { - break; - } - date = new Date(date.getTime() - MILLIS_PER_DAY); - } - return date; - } - - // ---------------------------------------------------------------- - /** - * Adds one or more 24hr periods from the given date until the day of week for the resulting date (local timezone) - * is a valid day according to the mask. If the strict flag is true, optional days are not considered valid. - *
<p>
- * See {@link #validateDayOfWeekMask}. - */ - public static Date getNextValidDate(Date date, String sValidDaysMask, boolean bStrict) { - Require.neqNull(date, "date"); - validateDayOfWeekMask(sValidDaysMask, bStrict); - - Calendar calendar = Calendar.getInstance(ms_localTimeZone); - while (true) { - date = new Date(date.getTime() + MILLIS_PER_DAY); - calendar.setTime(date); - char chDayType = sValidDaysMask.charAt(calendar.get(Calendar.DAY_OF_WEEK) - Calendar.SUNDAY); - if (DAY_VALID == chDayType || (!bStrict && DAY_OPTIONAL == chDayType)) { - break; - } - } - return date; - } - - // ---------------------------------------------------------------- - /** - * Returns the validity flag from the mask for the given date (local timezone). - *
<p>
- * See {@link #validateDayOfWeekMask}. - */ - public static int getDayValidity(Date date, String sValidDaysMask) { - Require.neqNull(date, "date"); - validateDayOfWeekMask(sValidDaysMask, DAYMASK_NOT_STRICT); - return sValidDaysMask.charAt(getDayOfWeek(date)); - } - - // ---------------------------------------------------------------- - /** - * Throws a requirement exception if the given day of week mask is not valid. There must be at least one valid day - * in the mask. If the strict flag is set, optional days are not considered valid. - *
<p>
- * See {@link #DAY_VALID}, {@link #DAY_INVALID}, {@link #DAY_OPTIONAL}, {@link #DAYMASK_STRICT}, - * {@link #DAYMASK_NOT_STRICT} - */ - public static void validateDayOfWeekMask(String sValidDaysMask, boolean bStrict) { - Require.neqNull(sValidDaysMask, "sValidDaysMask", 1); - Require.eq(sValidDaysMask.length(), "sValidDaysMask.length()", DAYS_PER_WEEK, 1); - int nValidDaysFound = 0; - for (int nIndex = 0; nIndex < DAYS_PER_WEEK; nIndex++) { - char chDayType = sValidDaysMask.charAt(nIndex); - Require.requirement(DAY_INVALID == chDayType || DAY_VALID == chDayType || DAY_OPTIONAL == chDayType, - "DAY_INVALID==chDayType || DAY_VALID==chDayType || DAY_OPTIONAL==chDayType", 1); - if (DAY_VALID == chDayType || (!bStrict && DAY_OPTIONAL == chDayType)) { - nValidDaysFound++; - } - } - Require.gtZero(nValidDaysFound, "nValidDaysFound", 1); - } - - // ---------------------------------------------------------------- - /** - * Gets the day of the week (Su == 0) for the given date (local timezone). - */ - public static int getDayOfWeek(Date date) { - Require.neqNull(date, "date"); - Calendar calendar = Calendar.getInstance(ms_localTimeZone); - calendar.setTime(date); - return calendar.get(Calendar.DAY_OF_WEEK) - Calendar.SUNDAY; - } - - // ---------------------------------------------------------------- - /** - * Gets the current date (local timezone) as an integer, in YYYYMMDD format. - */ - public static int getDateTodayAsYyyymmddInt() { - return getDateAsYyyymmddInt(new Date()); - } - - // ---------------------------------------------------------------- - /** - * Gets the given date (local timezone) as an integer, in YYYYMMDD format. - */ - public static int getDateAsYyyymmddInt(Date date) { - Require.neqNull(date, "date"); - Calendar calendar = Calendar.getInstance(ms_localTimeZone); - calendar.setTime(date); - return calendar.get(Calendar.YEAR) * 10000 + (calendar.get(Calendar.MONTH) + 1) * 100 - + calendar.get(Calendar.DAY_OF_MONTH); - } - - // ---------------------------------------------------------------- - /** Converts an integer in YYYYMMDD format into "YYYY-MM-DD". */ - public static String formatYyyymmddIntAsIso(int nDateYyyymmdd) { - return formatYyyymmddStringAsIso(Integer.toString(nDateYyyymmdd)); - } - - // ---------------------------------------------------------------- - /** Converts an integer in YYYYMMDD format into "MM/DD/YYYY". */ - public static String formatYyyymmddIntAsUs(int nDateYyyymmdd) { - return formatYyyymmddStringAsUs(Integer.toString(nDateYyyymmdd)); - } - - // ---------------------------------------------------------------- - /** Converts a String in YYYYMMDD format into "YYYY-MM-DD". */ - public static String formatYyyymmddStringAsIso(String sDateYyyymmdd) { - Require.neqNull(sDateYyyymmdd, "sDateYyyymmdd"); - Require.eq(sDateYyyymmdd.length(), "sDateYyyymmdd.length()", 8); - return sDateYyyymmdd.substring(0, 4) + "-" + sDateYyyymmdd.substring(4, 6) + "-" - + sDateYyyymmdd.substring(6, 8); - } - - // ---------------------------------------------------------------- - /** Converts a String in YYYYMMDD format into "MM/DD/YYYY". 
*/ - public static String formatYyyymmddStringAsUs(String sDateYyyymmdd) { - Require.neqNull(sDateYyyymmdd, "sDateYyyymmdd"); - Require.eq(sDateYyyymmdd.length(), "sDateYyyymmdd.length()", 8); - return sDateYyyymmdd.substring(4, 6) + "/" + sDateYyyymmdd.substring(6, 8) + "/" - + sDateYyyymmdd.substring(0, 4); - } - - // ---------------------------------------------------------------- - /** Converts a String in (M|MM)/(D|DD)/(YY|YYYY) format into "YYYY-MM-DD". */ - public static String formatMmddyyyyStringAsIso(String sDateMmddyyyy) { - Require.neqNull(sDateMmddyyyy, "sDateMmddyyyy"); - String[] date = sDateMmddyyyy.split("/"); - String res; - - res = ((date[2].length() == 2) ? "20" + date[2] : date[2]); - res += "-" + ((date[0].length() == 1) ? "0" + date[0] : date[0]); - res += "-" + ((date[1].length() == 1) ? "0" + date[1] : date[1]); - - Require.eq(res.length(), "sDateMmddyyyy.length()", 10); - return res; - } - - // ---------------------------------------------------------------- - /** Converts a String in (M|MM)/(D|DD)/YYYY format into "YYYY-MM-DD". */ - public static String formatMmddyyyyStringAsIsoAllowNull(String sDateMmddyyyy) { - if (null == sDateMmddyyyy || sDateMmddyyyy.length() == 0) { - return ""; - } - Require.neqNull(sDateMmddyyyy, "sDateMmddyyyy"); - String[] date = sDateMmddyyyy.split("/"); - String res; - - res = date[2]; - res += "-" + ((date[0].length() == 1) ? "0" + date[0] : date[0]); - res += "-" + ((date[1].length() == 1) ? "0" + date[1] : date[1]); - - Require.eq(res.length(), "sDateMmddyyyy.length()", 10); - return res; - } - - - // ---------------------------------------------------------------- - /** Converts a String in DDM3UYYYY format into "YYYY-MM-DD". */ - public static String formatddM3UyyyyStringAsIso(String sDateddM3Uyyyy) { - Require.neqNull(sDateddM3Uyyyy, "sDateddM3Uyyyy"); - String res; - - res = sDateddM3Uyyyy.substring(5); - int monthValue = Arrays.asList(MONTH_ABBREVIATIONS_3U).indexOf(sDateddM3Uyyyy.substring(2, 5)); - res += "-" + ((monthValue < 10) ? "0" + monthValue : monthValue); - res += "-" + (sDateddM3Uyyyy.substring(0, 2)); - - Require.eq(res.length(), "sDateddM3Uyyyy.length()", 10); - return res; - } - - // ---------------------------------------------------------------- - /** Converts a String in DD-MMM-YYYY format into "YYYY-MM-DD". */ - public static String formatddMMMyyyyStringAsIso(String sDateddMMMyyyy) { - Require.neqNull(sDateddMMMyyyy, "sDateddMMMyyyy"); - String[] date = sDateddMMMyyyy.split("-"); - String res; - - res = date[2]; - int monthValue = Arrays.asList(MONTH_ABBREVIATIONS_3U).indexOf(date[1]); - res += "-" + ((monthValue < 10) ? "0" + monthValue : monthValue); - res += "-" + ((date[0].length() == 1) ? "0" + date[0] : date[0]); - - Require.eq(res.length(), "sDateddmmmyyyy.length()", 10); - return res; - } - - // ---------------------------------------------------------------- - /** Converts a String in DD-MMM-YY format into "YYYY-MM-DD". */ - public static String formatddMMMyyStringAsIso(String sDateddMMMyy) { - Require.neqNull(sDateddMMMyy, "sDateddMMMyy"); - String[] date = sDateddMMMyy.split("-"); - String res; - - res = date[2]; - int monthValue = Arrays.asList(MONTH_ABBREVIATIONS_3U).indexOf(date[1].toUpperCase()); - res += "-" + ((monthValue < 10) ? "0" + monthValue : monthValue); - res += "-" + ((date[0].length() == 1) ? 
"0" + date[0] : date[0]); - - Require.eq(res.length(), "sDateddmmmyyyy.length()", 10); - return res; - } - - - // ------------------------------------------------------------------ - /** Converts a String in "Mmm dd, YYYY" format int "YYYY-MM-DD". */ - public static String formatMmmddcYYYYStringAsIso(String sDateMmmddcYYYY) { - Require.neqNull(sDateMmmddcYYYY, "sDateMmmddcYYYY"); - String[] date = sDateMmmddcYYYY.split("[ ,]"); - String res; - - res = date[3]; - int monthValue = Arrays.asList(MONTH_ABBREVIATIONS_3T).indexOf(date[0]); - res += "-" + ((monthValue < 10) ? "0" + monthValue : monthValue); - res += "-" + date[1]; - - Require.eq(res.length(), "sDateMmmddcYYYY.length()", 10); - return res; - } - - // ------------------------------------------------------------------ - /** Converts a String in "YYYY-MM-DD" format into "MM/DD/YYYY" format. */ - public static String formatIsoAsMMsDDsYYYYString(String sDateYYYYdMMdDD) { - Require.neqNull(sDateYYYYdMMdDD, "sDateYYYYdMMdDD"); - String[] date = sDateYYYYdMMdDD.split("-"); - String res = date[1] + "/" + date[2] + "/" + date[0]; - Require.eq(res.length(), "sDateYYYYdMMdDD.length()", 10); - return res; - } - - /** - * Converts a date string into a date. - * - * @param date - * @param sourceFormat - * @param resultFormat - * @return date - * @throws ParseException - */ - public static String formatDateFromStringToString(String date, String sourceFormat, String resultFormat) { - final DateFormat sourceDateFormat = new SimpleDateFormat(sourceFormat); - final DateFormat resultDateFormat = new SimpleDateFormat(resultFormat); - return formatDateFromFormatToFormat(date, sourceDateFormat, resultDateFormat); - } - - /** - * Converts a date string into a date. - * - * @param date - * @param sourceDateFormat - * @param resultDateFormat - * @return date - * @throws ParseException - */ - public static String formatDateFromFormatToFormat(String date, DateFormat sourceDateFormat, - DateFormat resultDateFormat) { - try { - return resultDateFormat.format(sourceDateFormat.parse(date)); - } catch (ParseException e) { - throw new RuntimeException(e); - } - } - - // ################################################################ - - // ---------------------------------------------------------------- - /** - * Returns the absolute timestamp of the most recent occurrence (before or exactly on the - * referenceTimestamp) of a daily event. The time of day is taken from - * sPropertyNameRoot.time in "h:mm a" format. The time zone for calculations is taken from - * sPropertyNameRoot.timeZone. 
- */ - public static Date getTimestampOfMostRecentDailyEvent(PropertyFile configuration, String sPropertyNameRoot, - Date referenceTimestamp) { - Require.nonempty(sPropertyNameRoot, "sPropertyNameRoot"); - Require.neqNull(referenceTimestamp, "referenceTimestamp"); - - // get the time zone of the event from the system properties - TimeZone timeZone = getTimeZoneOfEvent(configuration, sPropertyNameRoot); - - // get the time of day of the event from the system properties - Calendar eventTimestampCalendar = buildEventTimestampCalendar(timeZone, sPropertyNameRoot, configuration); - - // determine the exact timestamp of when the event happens today - Calendar referenceTimestampCalendar = Calendar.getInstance(timeZone); - referenceTimestampCalendar.setTime(referenceTimestamp); - eventTimestampCalendar.set( - referenceTimestampCalendar.get(Calendar.YEAR), - referenceTimestampCalendar.get(Calendar.MONTH), - referenceTimestampCalendar.get(Calendar.DAY_OF_MONTH)); - - // if the event happens in the future, then the most recent occurrence was the one that happened one day ago - if (eventTimestampCalendar.getTimeInMillis() > referenceTimestampCalendar.getTimeInMillis()) { - eventTimestampCalendar.add(Calendar.DAY_OF_MONTH, -1); - } - - return eventTimestampCalendar.getTime(); - } - - // ---------------------------------------------------------------- - /** - * Returns the absolute timestamp of the occurrence of a daily event that happens in the same "day" as right now. - * The time of day of the event is taken from sPropertyNameRoot.time in "h:mm a" format. The - * time zone for calculations (and for determining the boundaries of "today") is taken from - * sPropertyNameRoot.timeZone. - */ - public static Date getTimestampOfEventToday(PropertyFile configuration, String sPropertyNameRoot) { - Require.nonempty(sPropertyNameRoot, "sPropertyNameRoot"); - - // get the time zone of the event from the system properties - TimeZone timeZone = getTimeZoneOfEvent(configuration, sPropertyNameRoot); - - // get the time of day of the event from the system properties - Calendar eventTimestampCalendar = buildEventTimestampCalendar(timeZone, sPropertyNameRoot, configuration); - - // determine the exact timestamp of when the event happens today - Calendar referenceTimestampCalendar = Calendar.getInstance(timeZone); - eventTimestampCalendar.set( - referenceTimestampCalendar.get(Calendar.YEAR), - referenceTimestampCalendar.get(Calendar.MONTH), - referenceTimestampCalendar.get(Calendar.DAY_OF_MONTH)); - - return eventTimestampCalendar.getTime(); - } - - // ---------------------------------------------------------------- - /** - * Returns the absolute timestamp of the occurrence of a daily event that happens in the same "day" as right now. - * The time of day of the event is taken from sPropertyNameRoot.time in "h:mm a" format. The - * time zone for calculations (and for determining the boundaries of "today") is taken from - * sPropertyNameRoot.timeZone. 
- */ - public static Date getTimestampOfEventToday(PropertyFile configuration, String sPropertyNameRoot, long nNowMillis) { - Require.nonempty(sPropertyNameRoot, "sPropertyNameRoot"); - - // get the time zone of the event from the system properties - TimeZone timeZone = getTimeZoneOfEvent(configuration, sPropertyNameRoot); - - // get the time of day of the event from the system properties - Calendar eventTimestampCalendar = buildEventTimestampCalendar(timeZone, sPropertyNameRoot, configuration); - - // determine the exact timestamp of when the event happens today - Calendar referenceTimestampCalendar = Calendar.getInstance(timeZone); - referenceTimestampCalendar.setTimeInMillis(nNowMillis); - eventTimestampCalendar.set( - referenceTimestampCalendar.get(Calendar.YEAR), - referenceTimestampCalendar.get(Calendar.MONTH), - referenceTimestampCalendar.get(Calendar.DAY_OF_MONTH)); - - return eventTimestampCalendar.getTime(); - } - - // ---------------------------------------------------------------- - private static Calendar buildEventTimestampCalendar(TimeZone timeZone, String sPropertyNameRoot, - PropertyFile configuration) { - String sTimeProperty = sPropertyNameRoot + ".time"; - String sTime = configuration.getProperty(sTimeProperty); - Calendar eventTimestampCalendar = Calendar.getInstance(timeZone); - SimpleDateFormat timeFormat = new SimpleDateFormat("h:mm:ss a"); - timeFormat.setCalendar(eventTimestampCalendar); - try { - timeFormat.parse(sTime); - } catch (ParseException e) { - timeFormat = new SimpleDateFormat("h:mm a"); - timeFormat.setCalendar(eventTimestampCalendar); - try { - timeFormat.parse(sTime); - } catch (ParseException e2) { - throw Require.exceptionNeverCaught("Value of property " + sTimeProperty + " (\"" + sTime - + "\") not in proper format (\"" + timeFormat.toPattern() + "\").", e2); - } - } - return eventTimestampCalendar; - } - - // ---------------------------------------------------------------- - /** - * Gets the timestamp of an event based upon a daily event and a date (retrieved from properties) - */ - public static Date getTimestampOfEvent(PropertyFile configuration, String sEventPropertyRoot, - String sDateProperty) { - Require.nonempty(sEventPropertyRoot, "sEventPropertyRoot"); - Require.nonempty(sDateProperty, "sDateProperty"); - - // get the time zone of the event from the system properties - TimeZone timeZone = getTimeZoneOfEvent(configuration, sEventPropertyRoot); - - // get the time of day of the event from the system properties - Calendar eventTimestampCalendar = buildEventTimestampCalendar(timeZone, sEventPropertyRoot, configuration); - - // parse the date string and set the year, month, and day of the timestamp we are building - // note: time zone is irrelevant for the next step because we just want the numbers - we could use a regexp. 
- SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd"); - String sDate = configuration.getProperty(sDateProperty); - try { - dateFormat.parse(sDate); - } catch (ParseException e) { - throw Require.exceptionNeverCaught( - sDateProperty + " (\"" + sDate + "\") not in \"" + dateFormat.toPattern() + "\" format.", e); - } - Calendar dateCalendar = dateFormat.getCalendar(); - - // set the year, month, and day - eventTimestampCalendar.set(dateCalendar.get(Calendar.YEAR), dateCalendar.get(Calendar.MONTH), - dateCalendar.get(Calendar.DAY_OF_MONTH)); - - return eventTimestampCalendar.getTime(); - } - - // ---------------------------------------------------------------- - /** - * Gets the timestamp of an event based upon a daily event and a date specified by year, month (jan=1), day - */ - public static Date getTimestampOfEvent(PropertyFile configuration, String sEventPropertyRoot, int nYear, int nMonth, - int nDay) { - Require.nonempty(sEventPropertyRoot, "sEventPropertyRoot"); - - // get the time zone of the event from the system properties - TimeZone timeZone = getTimeZoneOfEvent(configuration, sEventPropertyRoot); - - // get the time of day of the event from the system properties - Calendar eventTimestampCalendar = buildEventTimestampCalendar(timeZone, sEventPropertyRoot, configuration); - - // set the year, month, and day - eventTimestampCalendar.set(nYear, nMonth - 1, nDay); - - return eventTimestampCalendar.getTime(); - } - - // ---------------------------------------------------------------- - /** - * Gets the timestamp of an event based upon a daily event and a date in YYYYMMDD format - */ - public static Date getTimestampOfEvent(PropertyFile configuration, String sEventPropertyRoot, int nYYYYMMDD) { - Require.nonempty(sEventPropertyRoot, "sEventPropertyRoot"); - return getTimestampOfEvent(configuration, sEventPropertyRoot, nYYYYMMDD / 10000, (nYYYYMMDD / 100) % 100, - nYYYYMMDD % 100); - } - - // ---------------------------------------------------------------- - /** Gets the time zone associated with a particular daily event. */ - public static TimeZone getTimeZoneOfEvent(PropertyFile configuration, String sPropertyNameRoot) { - Require.nonempty(sPropertyNameRoot, "sPropertyNameRoot"); - return TimeZone.getTimeZone(configuration.getProperty(sPropertyNameRoot + ".timeZone")); - } - - // ---------------------------------------------------------------- - /** - * Returns a date (noon in the local time zone) which is the date of the most recent occurrence (before or exactly - * on the referenceTimestamp) of the specified event, in the event's timezone. 
- */ - public static Date getDateOfMostRecentDailyEvent(PropertyFile configuration, String sPropertyNameRoot, - Date referenceTimestamp) { - Require.nonempty(sPropertyNameRoot, "sPropertyNameRoot"); - Require.neqNull(referenceTimestamp, "referenceTimestamp"); - Date eventTimestamp = getTimestampOfMostRecentDailyEvent(configuration, sPropertyNameRoot, referenceTimestamp); - Calendar sourceCalendar = Calendar.getInstance(getTimeZoneOfEvent(configuration, sPropertyNameRoot)); - sourceCalendar.setTime(eventTimestamp); - Calendar targetCalendar = Calendar.getInstance(ms_localTimeZone); - targetCalendar.clear(); - targetCalendar.set(sourceCalendar.get(Calendar.YEAR), sourceCalendar.get(Calendar.MONTH), - sourceCalendar.get(Calendar.DAY_OF_MONTH), 12, 0, 0); - return targetCalendar.getTime(); - } - - // ---------------------------------------------------------------- - /** - * Returns a date (noon in the local time zone) which is the date of the most recent occurrence (before or exactly - * on the referenceTimestamp) of the specified event, in the event's timezone. If the (strict) valid - * days mask indicates that the date is not valid, days will be subtracted until the date is valid. - *
<p>
- * See {@link #validateDayOfWeekMask}. - */ - public static Date getDateOfMostRecentDailyEvent(PropertyFile configuration, String sPropertyNameRoot, - Date referenceTimestamp, String sValidDaysMask) { - Require.nonempty(sPropertyNameRoot, "sPropertyNameRoot"); - Require.neqNull(referenceTimestamp, "referenceTimestamp"); - validateDayOfWeekMask(sValidDaysMask, DAYMASK_STRICT); - Calendar calendar = Calendar.getInstance(ms_localTimeZone); - calendar.setTime(getDateOfMostRecentDailyEvent(configuration, sPropertyNameRoot, referenceTimestamp)); - while (true) { - char chDayType = sValidDaysMask.charAt(calendar.get(Calendar.DAY_OF_WEEK) - Calendar.SUNDAY); - if (DAY_VALID == chDayType) { - break; - } - calendar.add(Calendar.DATE, -1); - } - return calendar.getTime(); - } - - // ---------------------------------------------------------------- - /** - * Wraps a "daily event" as an object. The time of day of the event is taken from - * sPropertyNameRoot.time in "h:mm a" format. The time zone for calculations (and for - * determining the boundaries of "today") is taken from sPropertyNameRoot.timeZone. - */ - public static class DailyEvent { - - private final PropertyFile m_configuration; - private final String m_sPropertyNameRoot; - - // ------------------------------------------------------------ - public DailyEvent(PropertyFile configuration, String sPropertyNameRoot) { - Require.neqNull(configuration, "configuration"); - Require.nonempty(sPropertyNameRoot, "sPropertyNameRoot"); - try { - buildEventTimestampCalendar(getTimeZoneOfEvent(configuration, sPropertyNameRoot), sPropertyNameRoot, - configuration); - } catch (RequirementFailure e) { - throw e.adjustForDelegatingMethod(); - } - m_configuration = configuration; - m_sPropertyNameRoot = sPropertyNameRoot; - } - - // ------------------------------------------------------------ - public long getTimestampOfEventToday(long nNow) { - return DateUtil.getTimestampOfEventToday(m_configuration, m_sPropertyNameRoot, nNow).getTime(); - } - - // ------------------------------------------------------------ - @Override - public String toString() { - return m_configuration.getProperty(m_sPropertyNameRoot + ".time") + ", " - + m_configuration.getProperty(m_sPropertyNameRoot + ".timeZone"); - } - } - - // ################################################################ - - // ---------------------------------------------------------------- - /** Parse the given string into a date with the given format. */ - public static long parse(String sTime, String sFormat) throws ParseException { - SimpleDateFormat simpleDateFormat = new SimpleDateFormat(sFormat); - simpleDateFormat.setTimeZone(ms_localTimeZone); - return simpleDateFormat.parse(sTime).getTime(); - } - - // ---------------------------------------------------------------- - /** - * Determines if two dates are on the same calendar day. - * - * @param d1 first date. - * @param d2 second date. - * @param tz timezone for the calendar. - * @return true if the dates are on the same calendar day, and false otherwise. 
- */ - public static boolean isSameDay(Date d1, Date d2, TimeZone tz) { - Calendar calendar1 = new GregorianCalendar(tz); - calendar1.setTime(d1); - Calendar calendar2 = new GregorianCalendar(tz); - calendar2.setTime(d2); - - if (calendar1.get(Calendar.YEAR) != calendar2.get(Calendar.YEAR)) { - return false; - } else if (calendar1.get(Calendar.DAY_OF_YEAR) != calendar2.get(Calendar.DAY_OF_YEAR)) { - return false; - } else { - return true; - } - } - - // ################################################################ - - // ---------------------------------------------------------------- - /** - * Returns a string in "0d 0h 0m 0.000'000'000s" format from a time interval in nanoseconds. - */ - public static String formatIntervalNanos(long tsInterval) { - return internalFormatInterval(tsInterval, 3); - } - - // ---------------------------------------------------------------- - /** - * Returns a string in "0d 0h 0m 0.000'000s" format from a time interval in microseconds. - */ - public static String formatIntervalMicros(long tsInterval) { - return internalFormatInterval(tsInterval, 2); - } - - // ---------------------------------------------------------------- - /** - * Returns a string in "0d 0h 0m 0.000s" format from a time interval in milliseconds. - */ - public static String formatIntervalMillis(long tsInterval) { - return internalFormatInterval(tsInterval, 1); - } - - // ---------------------------------------------------------------- - private static String internalFormatInterval(long tsInterval, int nThousands) { - - StringBuilder stringBuilder = new StringBuilder(); - if (tsInterval < 0) { - stringBuilder.append("-"); - tsInterval = -tsInterval; - } - - long tsSeconds = tsInterval / THOUSANDS[nThousands]; - - boolean bNeedUnit = false; - if (tsSeconds > SECONDS_PER_DAY) { - long nDays = tsSeconds / SECONDS_PER_DAY; - tsSeconds %= SECONDS_PER_DAY; - stringBuilder.append(nDays).append("d "); - bNeedUnit = true; - } - if (tsSeconds > SECONDS_PER_HOUR || bNeedUnit) { - long nHours = tsSeconds / SECONDS_PER_HOUR; - tsSeconds %= SECONDS_PER_HOUR; - stringBuilder.append(nHours).append("h "); - bNeedUnit = true; - } - if (tsSeconds > SECONDS_PER_MINUTE || bNeedUnit) { - long nMinutes = tsSeconds / SECONDS_PER_MINUTE; - tsSeconds %= SECONDS_PER_MINUTE; - stringBuilder.append(nMinutes).append("m "); - } - stringBuilder.append(tsSeconds).append('.'); - - long tsFractions = tsInterval % THOUSANDS[nThousands]; - - for (int nIndex = nThousands; nIndex > 0; nIndex--) { - // if (nIndex!=nThousands) { stringBuilder.append('\''); } - long tsThousand = tsFractions / THOUSANDS[nIndex - 1]; - tsFractions %= THOUSANDS[nIndex - 1]; - - String sLeadingZeros; - if (tsThousand >= 100) { - sLeadingZeros = ""; - } else if (tsThousand >= 10) { - sLeadingZeros = "0"; - } else { - sLeadingZeros = "00"; - } - stringBuilder.append(sLeadingZeros).append(tsThousand); - } - return stringBuilder.append("s").toString(); - } - - // ---------------------------------------------------------------- - /** - * Formats the given microsecond timestamp with the given date formatter and then appends the last three microsend - * digits. 
- */ - public static String formatWithTrailingMicros(DateFormat dateFormat, long nTimestampMicros) { - return dateFormat.format(nTimestampMicros / DateUtil.MICROS_PER_MILLI) - + DateUtil.formatTrailingMicros(nTimestampMicros); - } - - // ---------------------------------------------------------------- - /** - * Returns the last three digits of the given microsecond timestamp as a string, suitable for appending to a - * timestamp formatted to millisecond precision. - */ - public static String formatTrailingMicros(long nTimestampMicros) { - nTimestampMicros = nTimestampMicros % 1000; - String sLeadingZeros; - if (nTimestampMicros >= 100) { - sLeadingZeros = ""; - } else if (nTimestampMicros >= 10) { - sLeadingZeros = "0"; - } else { - sLeadingZeros = "00"; - } - return sLeadingZeros + nTimestampMicros; - } -} diff --git a/FishUtil/src/main/java/io/deephaven/util/ExceptionUtil.java b/FishUtil/src/main/java/io/deephaven/util/ExceptionUtil.java deleted file mode 100644 index 759729e5f32..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/ExceptionUtil.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util; - -import org.jetbrains.annotations.NotNull; - -/** - * Some utilities for inspecting exceptions - */ -public class ExceptionUtil { - public static boolean causedBy(@NotNull Throwable t, Class cause) { - Throwable curr = t; - while (curr != null) { - if (cause.isAssignableFrom(curr.getClass())) { - return true; - } - curr = curr.getCause(); - } - return false; - } -} diff --git a/FishUtil/src/main/java/io/deephaven/util/Mailer.java b/FishUtil/src/main/java/io/deephaven/util/Mailer.java deleted file mode 100644 index c520f647054..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/Mailer.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -public interface Mailer { - void sendEmail(String sender, String[] recipients, String subject, String msg) throws IOException; - - void sendEmail(String sender, String recipient, String subject, String msg) throws IOException; - - void sendHTMLEmail(String sender, String recipient, String subject, String msg) throws IOException; - - void sendEmail(String sender, String recipient, String subject, String msg, - List> extraHeaderEntries) throws IOException; -} diff --git a/FishUtil/src/main/java/io/deephaven/util/ThreadSafeDateFormat.java b/FishUtil/src/main/java/io/deephaven/util/ThreadSafeDateFormat.java deleted file mode 100644 index 18cf9f4a550..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/ThreadSafeDateFormat.java +++ /dev/null @@ -1,155 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util; - -import io.deephaven.base.verify.Require; - -import java.text.AttributedCharacterIterator; -import java.text.DateFormat; -import java.text.FieldPosition; -import java.text.NumberFormat; -import java.text.ParseException; -import java.text.ParsePosition; -import java.util.Calendar; -import java.util.Date; -import java.util.TimeZone; - -// -------------------------------------------------------------------- -/** - * Wraps a {@link DateFormat} to provide a minimal level of thread safety that DateFormat is lacking (namely, preventing - * simultaneous calls to {@link #format} from separate threads from interfering with each other). 
- */ -public class ThreadSafeDateFormat extends DateFormat { - private final DateFormat m_dateFormat; - - public ThreadSafeDateFormat(DateFormat dateFormat) { - m_dateFormat = dateFormat; - } - - @Override - public Date parse(String source, ParsePosition pos) { - synchronized (m_dateFormat) { - return m_dateFormat.parse(source, pos); - } - } - - @Override - public StringBuffer format(Date date, StringBuffer toAppendTo, FieldPosition fieldPosition) { - synchronized (m_dateFormat) { - return m_dateFormat.format(date, toAppendTo, fieldPosition); - } - } - - @Override - public boolean isLenient() { - synchronized (m_dateFormat) { - return m_dateFormat.isLenient(); - } - } - - @Override - public void setLenient(boolean lenient) { - synchronized (m_dateFormat) { - m_dateFormat.setLenient(lenient); - } - } - - @Override - public NumberFormat getNumberFormat() { - synchronized (m_dateFormat) { - return m_dateFormat.getNumberFormat(); - } - } - - @Override - public void setNumberFormat(NumberFormat newNumberFormat) { - synchronized (m_dateFormat) { - m_dateFormat.setNumberFormat(newNumberFormat); - } - } - - @Override - public Calendar getCalendar() { - synchronized (m_dateFormat) { - return m_dateFormat.getCalendar(); - } - } - - @Override - public void setCalendar(Calendar newCalendar) { - synchronized (m_dateFormat) { - m_dateFormat.setCalendar(newCalendar); - } - } - - @Override - public TimeZone getTimeZone() { - synchronized (m_dateFormat) { - return m_dateFormat.getTimeZone(); - } - } - - @Override - public void setTimeZone(TimeZone zone) { - synchronized (m_dateFormat) { - m_dateFormat.setTimeZone(zone); - } - } - - @Override - public Date parse(String source) throws ParseException { - synchronized (m_dateFormat) { - return m_dateFormat.parse(source); - } - } - - @Override - public Object parseObject(String source, ParsePosition pos) { - synchronized (m_dateFormat) { - return m_dateFormat.parseObject(source, pos); - } - } - - @Override - public Object parseObject(String source) throws ParseException { - synchronized (m_dateFormat) { - return m_dateFormat.parseObject(source); - } - } - - @Override - public AttributedCharacterIterator formatToCharacterIterator(Object obj) { - synchronized (m_dateFormat) { - return m_dateFormat.formatToCharacterIterator(obj); - } - } - - @Override - public String toString() { - synchronized (m_dateFormat) { - return m_dateFormat.toString(); - } - } - - // ################################################################ - - @Override - public boolean equals(Object obj) { - Require.statementNeverExecuted(); - return super.equals(obj); - } - - @Override - public int hashCode() { - Require.statementNeverExecuted(); - return super.hashCode(); - } - - @Override - public Object clone() { - Require.statementNeverExecuted(); - return super.clone(); - } - -} diff --git a/FishUtil/src/main/java/io/deephaven/util/Validate.java b/FishUtil/src/main/java/io/deephaven/util/Validate.java deleted file mode 100644 index bb03259c765..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/Validate.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util; - -public class Validate { - public static int validatePositiveInteger(String name, String s) throws NumberFormatException { - int i = 0; - - try { - i = Integer.parseInt(s); - } catch (NumberFormatException e) { - throw new NumberFormatException(name + " is not a valid number"); - } - - if (i <= 0) { - throw new NumberFormatException(name + " 
must be greater than zero"); - } - - return i; - } - - public static int validateInteger(String name, String s) throws NumberFormatException { - int i = 0; - - try { - i = Integer.parseInt(s); - } catch (NumberFormatException e) { - throw new NumberFormatException(name + " is not a valid number"); - } - - return i; - } - - public static double validatePositiveDouble(String name, String s) throws NumberFormatException { - double d = 0; - - try { - d = Double.parseDouble(s); - } catch (NumberFormatException e) { - throw new NumberFormatException(name + " is not a valid number"); - } - - if (d <= 0) { - throw new NumberFormatException(name + " must be greater than zero"); - } - - return d; - } - - public static double validateDouble(String name, String s) throws NumberFormatException { - double d = 0; - - try { - d = Double.parseDouble(s); - } catch (NumberFormatException e) { - throw new NumberFormatException(name + " is not a valid number"); - } - - return d; - } - - public static void validate(boolean b, String errorMsg) throws Exception { - if (!b) { - throw new Exception(errorMsg); - } - } - - public static void validateDouble(String name, double value, double min, double max, boolean inclusiveMin, - boolean inclusiveMax) throws Exception { - if (Double.isNaN(value)) { - throw new Exception(name + " may not be NaN"); - } - - if (inclusiveMin && value < min) { - throw new Exception(name + " must be greater than or equal to " + min); - } - - if (!inclusiveMin && value <= min) { - throw new Exception(name + " must be greater than " + min); - } - - if (inclusiveMax && value > max) { - throw new Exception(name + " must be less than or equal to " + max); - } - - if (!inclusiveMax && value >= max) { - throw new Exception(name + " must be less than " + max); - } - } - - public static void validateInteger(String name, int value, int min, int max, boolean inclusiveMin, - boolean inclusiveMax) throws Exception { - if (inclusiveMin && value < min) { - throw new Exception(name + " must be greater than or equal to " + min); - } - - if (!inclusiveMin && value <= min) { - throw new Exception(name + " must be greater than " + min); - } - - if (inclusiveMax && value > max) { - throw new Exception(name + " must be less than or equal to " + max); - } - - if (!inclusiveMax && value >= max) { - throw new Exception(name + " must be less than " + max); - } - } -} diff --git a/FishUtil/src/main/java/io/deephaven/util/formatters/ISO8601.java b/FishUtil/src/main/java/io/deephaven/util/formatters/ISO8601.java deleted file mode 100644 index b5f6a2da220..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/formatters/ISO8601.java +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util.formatters; - -import io.deephaven.configuration.Configuration; - -import java.text.DateFormat; -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.TimeZone; - -public class ISO8601 { - private static final ThreadLocal toISO8601Cache = new ThreadLocal(); - private static final ThreadLocal timeISO8601Cache = new ThreadLocal(); - private static final ThreadLocal dateISO8601Cache = new ThreadLocal(); - private static TimeZone TZ_SERVER = null; - - public static synchronized TimeZone serverTimeZone() { - if (TZ_SERVER == null) { - TZ_SERVER = Configuration.getInstance().getServerTimezone(); - } - return TZ_SERVER; - } - - public static DateFormat ISO8601DateFormat(TimeZone tz) { - SimpleDateFormat df = new 
SimpleDateFormat("yyyy-MM-dd"); - df.setTimeZone(tz); - return df; - } - - public static DateFormat ISE8601TimeFormat() { - return ISO8601TimeFormat(serverTimeZone()); - } - - public static DateFormat ISO8601TimeFormat(TimeZone tz) { - SimpleDateFormat df = new SimpleDateFormat("HH:mm:ss.SSSZ"); - df.setTimeZone(tz); - return df; - } - - public static DateFormat ISE8601DateTimeFormat() { - return ISO8601DateTimeFormat(serverTimeZone()); - } - - public static DateFormat ISO8601DateTimeFormat(TimeZone tz) { - SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); - df.setTimeZone(tz); - return df; - } - - public static String toISO8601(long millis) { - return toISO8601(new Date(millis), serverTimeZone()); - } - - public static String toISO8601(Date d) { - return toISO8601(d, serverTimeZone()); - } - - public static String toISO8601(long millis, TimeZone tz) { - return toISO8601(new Date(millis), tz); - } - - public static String toISO8601(Date d, TimeZone tz) { - DateFormat df = toISO8601Cache.get(); - if (df == null) { - df = ISO8601DateTimeFormat(tz); - toISO8601Cache.set(df); - } else { - df.setTimeZone(tz); - } - return df.format(d); - } - - public static String timeISO8601(long millis) { - return timeISO8601(new Date(millis), serverTimeZone()); - } - - public static String timeISO8601(Date d) { - return timeISO8601(d, serverTimeZone()); - } - - public static String timeISO8601(long millis, TimeZone tz) { - return timeISO8601(new Date(millis), tz); - } - - public static String timeISO8601(Date d, TimeZone tz) { - DateFormat df = timeISO8601Cache.get(); - if (df == null) { - df = ISO8601TimeFormat(tz); - timeISO8601Cache.set(df); - } else { - df.setTimeZone(tz); - } - return df.format(d); - } - - public static String dateISO8601(long millis) { - return dateISO8601(new Date(millis), serverTimeZone()); - } - - public static String dateISO8601(Date d) { - return dateISO8601(d, serverTimeZone()); - } - - public static String dateISO8601(long millis, TimeZone tz) { - return dateISO8601(new Date(millis), tz); - } - - public static String dateISO8601(Date d, TimeZone tz) { - DateFormat df = dateISO8601Cache.get(); - if (df == null) { - df = ISO8601DateFormat(tz); - dateISO8601Cache.set(df); - } else { - df.setTimeZone(tz); - } - return df.format(d); - } -} diff --git a/FishUtil/src/main/java/io/deephaven/util/signals/SignalSender.java b/FishUtil/src/main/java/io/deephaven/util/signals/SignalSender.java deleted file mode 100644 index c039e5a79ee..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/signals/SignalSender.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util.signals; - -import io.deephaven.base.verify.Require; -import io.deephaven.io.logger.Logger; -import io.deephaven.io.logger.StreamLoggerImpl; -import org.jetbrains.annotations.NotNull; - -import java.io.IOException; - -public class SignalSender { - - private final Logger log; - private final boolean useNative; - - public SignalSender(@NotNull final Logger log, final boolean useNative) { - this.log = log; - this.useNative = useNative; - if (useNative) { - SignalUtils.loadNative(); - } - } - - /** - * Helper method - sends SIQQUIT to a process. If this process is a JVM, it will send a stack dump to stdout. 
- * - * @param processId The process ID to send the signal to - * @return true on success, false on error - */ - public boolean sendQuit(final int processId) { - return sendSignal(processId, SignalUtils.Signal.SIGQUIT); - } - - /** - * Helper method - sends SIGKILL to a process. - * - * @param processId The process ID to send the signal to - * @return true on success, false on error - */ - public boolean kill(final int processId) { - return sendSignal(processId, SignalUtils.Signal.SIGKILL); - } - - /** - * Helper method - sends SIGCONT to a process. - * - * @param processId The process ID to send the signal to - * @return true on success, false on error - */ - public boolean resume(final int processId) { - return sendSignal(processId, SignalUtils.Signal.SIGCONT); - } - - /** - * Helper method - sends SIGSTOP to a process. - * - * @param processId The process ID to send the signal to - * @return true on success, false on error - */ - public boolean suspend(final int processId) { - return sendSignal(processId, SignalUtils.Signal.SIGSTOP); - } - - /** - * Send the specified signal to the target process. - * - * @param processId The process ID to send the signal to - * @param signal The signal to send - * @return true on success, false on error - */ - private boolean sendSignal(final int processId, final SignalUtils.Signal signal) { - Require.gtZero(processId, "processId"); // Don't want to allow fancier usages for now. See 'man -s 2 kill'. - Require.neqNull(signal, "signal"); - - final int rc; - if (useNative) { - rc = SignalUtils.sendSignalNative(processId, signal.getSignalNumber()); - } else { - try { - rc = SignalUtils.sendSignalWithBinKill(processId, signal.getSignalName()); - } catch (IOException e) { - log.error().append("sendSignal: Exception while using /bin/kill to send ").append(signal.toString()) - .append(" to processId ").append(processId).append(": ").append(e).endl(); - return false; - } - } - - if (rc == 0) { - return true; - } - log.error().append("sendSignal: Error while using ").append(useNative ? "native code" : "/bin/kill") - .append(" to send ").append(signal.toString()) - .append(" to processId ").append(processId) - .append(": kill returned ").append(rc).endl(); - return false; - } - - /** - * Simple program for functionality testing. - * - * @param args [ <pid> <signal> <use native?> ] - */ - public static void main(final String... args) { - final int pid = Integer.parseInt(args[0]); - final SignalUtils.Signal signal = SignalUtils.Signal.valueOf(args[1]); - final boolean useNative = Boolean.valueOf(args[2]); - new SignalSender(new StreamLoggerImpl(), useNative).sendSignal(pid, signal); - } -} diff --git a/FishUtil/src/main/java/io/deephaven/util/signals/SignalUtils.java b/FishUtil/src/main/java/io/deephaven/util/signals/SignalUtils.java deleted file mode 100644 index 10aaf8bcd06..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/signals/SignalUtils.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util.signals; - -import io.deephaven.util.OSUtil; - -import java.io.IOException; - -public class SignalUtils { - - /** - * What operating system does the JVM think we're on? - */ - private static final OSUtil.OSFamily OPERATING_SYSTEM = OSUtil.getOSFamily(); - - /** - * Placeholder value when we don't know a signal's number on the current OS. - */ - private static final int UNDEFINED_SIGNAL_NUMBER = Integer.MIN_VALUE; - - /** - * Supported signals.
Be careful when adding new entries - as you can see, signal numbers don't always line up - * across operating systems. - */ - public enum Signal { - SIGINT("int", 2, 2, 2), SIGTERM("term", 15, 15, 15), SIGQUIT("quit", 3, 3, 3), SIGKILL("kill", 9, 9, - 9), SIGSTOP("stop", 19, 23, 17), SIGCONT("cont", 18, 25, 19); - - private final String signalName; - private final int signalNumber; - - Signal(final String signalName, final int linuxSignalNumber, final int solarisSignalNumber, - final int macOsSignalNumber) { - this.signalName = signalName; - switch (OPERATING_SYSTEM) { - case LINUX: - signalNumber = linuxSignalNumber; - break; - case MAC_OS: - signalNumber = macOsSignalNumber; - break; - case SOLARIS: - signalNumber = solarisSignalNumber; - break; - case WINDOWS: - default: - signalNumber = UNDEFINED_SIGNAL_NUMBER; - break; - } - } - - public String getSignalName() { - return signalName; - } - - public int getSignalNumber() { - if (signalNumber == UNDEFINED_SIGNAL_NUMBER) { - throw new UnsupportedOperationException(this + " is undefined on " + OPERATING_SYSTEM); - } - return signalNumber; - } - } - - /** - * Use /bin/kill to send a signal by name. - * - * @param processId The process ID to send the signal to - * @param signalName The name of the signal to send - * @return The exit value of the child process. - */ - @SuppressWarnings("WeakerAccess") - public static int sendSignalWithBinKill(final int processId, final String signalName) throws IOException { - final ProcessBuilder pb = new ProcessBuilder("/bin/kill", "-s", signalName, Integer.toString(processId)); - final Process p = pb.start(); - - try { - p.getErrorStream().close(); - p.getInputStream().close(); - p.getOutputStream().close(); - } catch (IOException e) { - throw new AssertionError("sendSignalWithBinKill: unexpected exception while closing child process streams: " - + e.getMessage(), e); - } - - while (true) { - try { - return p.waitFor(); - } catch (InterruptedException ignored) { - } - } - } - - /** - * Ensure that libraries have been loaded, before using sendSignalNative(...). - */ - @SuppressWarnings("WeakerAccess") - public static void loadNative() { - System.loadLibrary("FishCommon"); - } - - /** - * Use native code to send a signal by number. - * - * @param processId The process ID to send the signal to - * @param signalNumber The signal number to send - * @return The return value of kill(2). - */ - public static native int sendSignalNative(final int processId, final int signalNumber); -} diff --git a/FishUtil/src/main/java/io/deephaven/util/threads/ThreadDump.java b/FishUtil/src/main/java/io/deephaven/util/threads/ThreadDump.java deleted file mode 100644 index 344cc6e517f..00000000000 --- a/FishUtil/src/main/java/io/deephaven/util/threads/ThreadDump.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.util.threads; - -import io.deephaven.io.logger.Logger; - -import java.io.PrintStream; -import java.lang.management.ManagementFactory; -import java.lang.management.ThreadInfo; -import java.lang.management.ThreadMXBean; -import java.util.function.Consumer; - -/** - * A simple method for generating a Thread dump for this JVM; it doesn't do all the stuff that the kill -3 does; but you - * can easily run it from inside the JVM without having to send yourself a signal. 
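Usage is a one-liner; an illustrative sketch (assumes the ThreadDump class defined just below):

    class ThreadDumpSketch {
        public static void main(String[] args) {
            ThreadDump.threadDump(System.out);     // stream a dump of all live threads to stdout
            String dump = ThreadDump.threadDump(); // or capture the same dump as a String
            System.out.println(dump.length() + " chars captured");
        }
    }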
- */ -public class ThreadDump { - @SuppressWarnings("WeakerAccess") - public static void threadDump(final PrintStream out) { - doDump(out::print); - } - - public static void threadDump(final Logger logger) { - doDump(arg -> logger.info().append(arg).endl()); - } - - @SuppressWarnings("WeakerAccess") - public static String threadDump() { - final StringBuilder builder = new StringBuilder(); - doDump(builder::append); - return builder.toString(); - } - - private static void doDump(Consumer output) { - ThreadMXBean threadMXBean = ManagementFactory.getPlatformMXBean(ThreadMXBean.class); - - ThreadInfo[] threadInfos = threadMXBean.dumpAllThreads(true, true); - - for (ThreadInfo threadInfo : threadInfos) { - output.accept(threadInfo.toString()); - } - } - - public static void main(String[] args) { - threadDump(System.out); - } -} diff --git a/IO/src/main/java/io/deephaven/io/NioUtil.java b/IO/src/main/java/io/deephaven/io/NioUtil.java deleted file mode 100644 index 4e24b19ac38..00000000000 --- a/IO/src/main/java/io/deephaven/io/NioUtil.java +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io; - -import io.deephaven.base.LowGarbageArrayIntegerMap; -import io.deephaven.base.LowGarbageArrayList; -import io.deephaven.base.LowGarbageArraySet; -import io.deephaven.base.verify.Assert; -import io.deephaven.base.verify.Require; - -import java.lang.reflect.Field; -import java.nio.channels.Selector; -import java.nio.channels.spi.AbstractSelector; -import java.util.List; -import java.util.Set; - -// -------------------------------------------------------------------- -/** - * General utilities for NIO - */ -public class NioUtil { - - private static final String JAVA_8_SPEC_VERSION = "1.8"; - - // ---------------------------------------------------------------- - /** - * Use reflection to change the collection implementations so iteration operations used in the selector - * implementation will not produce garbage. - * - *

- * This is only applied when the system property {@code java.specification.version} is equal to "1.8". - * - *

- * We can do this because, by looking at the source code, we can tell that there are no simultaneous iterations so - * reusing one iterator is OK. Because of concurrent modification issues and thread safety issues, this is generally - * likely to be the case anyway. The implementation of selector is not likely to change between minor JDK revisions. - * A major JDK release might produce a rewrite, but in that case we can check the JDK version and apply the - * appropriate set of patches. - */ - public static Selector reduceSelectorGarbage(Selector selector) { - final String javaSpecificationVersion = System.getProperty("java.specification.version"); - if (JAVA_8_SPEC_VERSION.equals(javaSpecificationVersion)) { - return reduceSelectorGarbageImpl(selector); - } - return selector; - } - - private static Selector reduceSelectorGarbageImpl(Selector selector) { - try { - Class selectorImplClass = Class.forName("sun.nio.ch.SelectorImpl"); - Require.instanceOf(selector, "selector", selectorImplClass); - - Field cancelledKeysField = AbstractSelector.class.getDeclaredField("cancelledKeys"); - cancelledKeysField.setAccessible(true); - Set newCancelledKeys = new LowGarbageArraySet(); - cancelledKeysField.set(selector, newCancelledKeys); - - Field keysField = selectorImplClass.getDeclaredField("keys"); - keysField.setAccessible(true); - Field publicKeysField = selectorImplClass.getDeclaredField("publicKeys"); - publicKeysField.setAccessible(true); - Set newKeys = new LowGarbageArraySet(); - keysField.set(selector, newKeys); - publicKeysField.set(selector, newKeys); - - Field selectedKeysField = selectorImplClass.getDeclaredField("selectedKeys"); - selectedKeysField.setAccessible(true); - Field publicSelectedKeysField = selectorImplClass.getDeclaredField("publicSelectedKeys"); - publicSelectedKeysField.setAccessible(true); - Set newSelectedKeys = new LowGarbageArraySet(); - selectedKeysField.set(selector, newSelectedKeys); - publicSelectedKeysField.set(selector, newSelectedKeys); - - if (System.getProperty("os.name").startsWith("Windows") - && System.getProperty("java.vendor").startsWith("Oracle")) { - Class windowsSelectorImplClass = Class.forName("sun.nio.ch.WindowsSelectorImpl"); - Require.instanceOf(selector, "selector", windowsSelectorImplClass); - - Field threadsField = windowsSelectorImplClass.getDeclaredField("threads"); - threadsField.setAccessible(true); - List newThreads = new LowGarbageArrayList(); - threadsField.set(selector, newThreads); - - } else if (System.getProperty("os.name").startsWith("Linux")) { - Class ePollSelectorImplClass = Class.forName("sun.nio.ch.EPollSelectorImpl"); - Require.instanceOf(selector, "selector", ePollSelectorImplClass); - - Field fdToKeyField = ePollSelectorImplClass.getDeclaredField("fdToKey"); - fdToKeyField.setAccessible(true); - LowGarbageArrayIntegerMap newFdToKey = new LowGarbageArrayIntegerMap(); - fdToKeyField.set(selector, newFdToKey); - - } else if (System.getProperty("os.name").startsWith("SunOS")) { - Class devPollSelectorImplClass = Class.forName("sun.nio.ch.DevPollSelectorImpl"); - Require.instanceOf(selector, "selector", devPollSelectorImplClass); - - Field fdToKeyField = devPollSelectorImplClass.getDeclaredField("fdToKey"); - fdToKeyField.setAccessible(true); - LowGarbageArrayIntegerMap newFdToKey = new LowGarbageArrayIntegerMap(); - fdToKeyField.set(selector, newFdToKey); - } - - return selector; - } catch (final NoSuchFieldException | IllegalAccessException | ClassNotFoundException e) { - throw Assert.exceptionNeverCaught(e); - } - } -} diff 
--git a/IO/src/main/java/io/deephaven/io/sched/Job.java b/IO/src/main/java/io/deephaven/io/sched/Job.java deleted file mode 100644 index ead141f4164..00000000000 --- a/IO/src/main/java/io/deephaven/io/sched/Job.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io.sched; - -import io.deephaven.base.log.LogOutput; -import io.deephaven.base.log.LogOutputAppendable; - -import java.nio.channels.SelectableChannel; -import java.io.IOException; - -/** - * This is the base class for jobs that can be invoked by the scheduler. - */ -public abstract class Job implements LogOutputAppendable { - - // -------------------------------------------------------------------------- - // public interface - // -------------------------------------------------------------------------- - - /** - * This method is invoked by the scheduler when the job's channel becomes ready. - * - * @param channel the channel which has become ready - * @param readyOps the operations which can be performed on this channel without blocking - * @return the modified readyOps after the invocation; if non-zero, the job will be invoked again with these - * @throws IOException if an I/O error occurs - */ - public abstract int invoke(SelectableChannel channel, int readyOps, Runnable handoff) throws IOException; - - /** - * This method is invoked if the job times out. - */ - public abstract void timedOut(); - - /** - * This method is called if the job is explicitly cancelled before it becomes ready or times out. - */ - public abstract void cancelled(); - - // -------------------------------------------------------------------------- - // scheduler state management - // -------------------------------------------------------------------------- - - // TODO: currently, we assume that the scheduler is a singleton, or at the least - // TODO: that no job will be used with more than one scheduler throughout its lifetime. - // TODO: If this changes, we will have to change the state pointer to a set. - - /** the link to the scheduler's state for this job */ - JobState state; - - /** return the state for the given scheduler, or null */ - final JobState getStateFor(Scheduler sched) { - return state; - } - - /** return or create the state for the given scheduler */ - final JobState makeStateFor(Scheduler sched) { - return state == null ? (state = new JobState(this)) : state; - } - - @Override - public LogOutput append(LogOutput logOutput) { - return logOutput.append(LogOutput.BASIC_FORMATTER, this); - } -} diff --git a/IO/src/main/java/io/deephaven/io/sched/JobState.java b/IO/src/main/java/io/deephaven/io/sched/JobState.java deleted file mode 100644 index 00be8941aee..00000000000 --- a/IO/src/main/java/io/deephaven/io/sched/JobState.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io.sched; - -import java.nio.channels.SelectableChannel; - -/** - * The per-scheduler state for a job. Note that this class is package-private.
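An illustrative implementation against the Job contract above (a sketch with invented names, not code from this patch):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.ReadableByteChannel;
    import java.nio.channels.SelectableChannel;
    import java.nio.channels.SelectionKey;
    import io.deephaven.io.sched.Job;

    // The scheduler calls exactly one of invoke(), timedOut(), or cancelled() per install.
    class ReadOnceJob extends Job {
        private final ByteBuffer buf = ByteBuffer.allocate(4096);

        @Override
        public int invoke(SelectableChannel channel, int readyOps, Runnable handoff) throws IOException {
            if ((readyOps & SelectionKey.OP_READ) != 0) {
                ((ReadableByteChannel) channel).read(buf); // consume what is available now
            }
            return 0; // nothing still ready; re-install with the scheduler to read again
        }

        @Override
        public void timedOut() { /* deadline expired before the channel became ready */ }

        @Override
        public void cancelled() { /* explicitly removed via Scheduler.cancelJob() */ }
    }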
- */ -class JobState implements Cloneable { - /** the job */ - final Job job; - - /** the update count for this job state */ - long updateClock = 0; - - /** the current deadline for this job */ - long deadline = Long.MAX_VALUE; - - /** the job's current position in the scheduler's timeout queue */ - int tqPos = 0; - - /** true, if this job has been invoked or has timed out */ - boolean gathered = false; - - /** true if the job has been forgotten after being dispatched and not reinstalled */ - boolean forgotten = false; - - /** true if this job has been explicitly cancelled */ - boolean cancelled = false; - - /** this is the channel we are waiting on in the selector */ - SelectableChannel waitChannel = null; - - /** the channel on which the job is ready to be dispatched, or null */ - SelectableChannel readyChannel = null; - - /** the operation set on which the job is ready to be dispatched, or zero */ - int readyOps = 0; - - /** the channel on which this job will select in the next scheduler loop */ - SelectableChannel nextChannel = null; - - /** the interest set on which this job will select in the next scheduler loop */ - int nextOps = 0; - - /** the timeout deadline of this job in the next scheduler loop */ - long nextDeadline = Long.MAX_VALUE; - - /** the nano-time when this job was last enqueued */ - long gatheredNanos = 0; - - /** constructor stores the back-link to the job */ - JobState(Job job) { - this.job = job; - } - - /** - * Clone this object - */ - public JobState clone() throws CloneNotSupportedException { - return (JobState) super.clone(); - } -} diff --git a/IO/src/main/java/io/deephaven/io/sched/JobStateTimeoutQueue.java b/IO/src/main/java/io/deephaven/io/sched/JobStateTimeoutQueue.java deleted file mode 100644 index 9c57851c20e..00000000000 --- a/IO/src/main/java/io/deephaven/io/sched/JobStateTimeoutQueue.java +++ /dev/null @@ -1,211 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io.sched; - -import io.deephaven.io.logger.Logger; - -import java.util.Set; - -/** - * A priority queue (heap) for JobState instances, ordered by their deadlines. Note that this class is package-private. - */ -class JobStateTimeoutQueue implements Cloneable { - private final Logger log; - - /** the queue storage */ - private JobState[] queue; - - /** the size of the queue (invariant: size < queue.length - 1) */ - private int size = 0; - - public JobStateTimeoutQueue(Logger log, int initialSize) { - this.log = log; - this.queue = new JobState[initialSize]; - } - - /** clone the queue (for testing) */ - public Object clone() throws CloneNotSupportedException { - JobStateTimeoutQueue q = (JobStateTimeoutQueue) super.clone(); - q.queue = new JobState[queue.length]; - for (int i = 1; i <= size; ++i) { - q.queue[i] = queue[i].clone(); - } - q.size = size; - return q; - } - - /** return the priority queue's size */ - int size() { - return size; - } - - /** Returns true if the priority queue contains no elements. 
*/ - boolean isEmpty() { - return size == 0; - } - - /** Adds a job to the timeout queue */ - void enter(JobState state, long deadline) { - state.deadline = deadline; - if (state.tqPos == 0) { - if (++size == queue.length) { - JobState[] newQueue = new JobState[2 * queue.length]; - System.arraycopy(queue, 0, newQueue, 0, size); - queue = newQueue; - } - queue[size] = state; - state.tqPos = size; - fixUp(size); - assert testInvariant("after fixUp in enter-add"); - } else { - assert queue[state.tqPos] == state; - int k = state.tqPos; - fixDown(k); - fixUp(k); - assert testInvariant("after fixDown/fixUp in enter-change"); - } - } - - /** Return the top of the timeout queue - the next timeout */ - JobState top() { - return queue[1]; - } - - /** Remove the top element from the timeout queue. */ - void removeTop() { - queue[1].tqPos = 0; - if (--size == 0) { - queue[1] = null; - } else { - queue[1] = queue[size + 1]; - queue[size + 1] = null; // Drop extra reference to prevent memory leak - queue[1].tqPos = 1; - fixDown(1); - } - assert testInvariant("after removeTop()"); - } - - /** remove an arbitrary element from the timeout queue */ - void remove(JobState state) { - int k = state.tqPos; - if (k != 0) { - assert queue[k] == state; - state.tqPos = 0; - if (k == size) { - queue[size--] = null; - } else { - queue[k] = queue[size]; - queue[k].tqPos = k; - queue[size--] = null; - fixDown(k); - fixUp(k); - assert testInvariant("after fixDown/fixUp in remove()"); - } - } - assert testInvariant("at end of remove()"); - } - - /** move queue[k] up the heap until its deadline is >= that of its parent. */ - private void fixUp(int k) { - if (k > 1) { - JobState state = queue[k]; - int j = k >> 1; - JobState parent = queue[j]; - if (parent.deadline > state.deadline) { - queue[k] = parent; - parent.tqPos = k; - k = j; - j = k >> 1; - while (k > 1 && (parent = queue[j]).deadline > state.deadline) { - queue[k] = parent; - parent.tqPos = k; - k = j; - j = k >> 1; - } - queue[k] = state; - state.tqPos = k; - } - } - } - - /** move queue[k] down the heap until its deadline is <= those of its children.
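For reference, the same sift-down shape in generic, self-contained form (an illustrative sketch, not code from this patch); in this 1-based layout the parent of k is k >> 1 and the children are k << 1 and (k << 1) + 1:

    class HeapSketch {
        // Move heap[k] down a 1-based min-heap until it is <= both of its children.
        static void siftDown(long[] heap, int size, int k) {
            long v = heap[k];
            while ((k << 1) <= size) {
                int j = k << 1;
                if (j < size && heap[j + 1] < heap[j]) {
                    j++; // take the smaller of the two children
                }
                if (heap[j] >= v) {
                    break; // heap order restored
                }
                heap[k] = heap[j]; // pull the child up and keep descending
                k = j;
            }
            heap[k] = v;
        }
    }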
*/ - private void fixDown(int k) { - int j = k << 1; - if (j <= size) { - JobState state = queue[k], child = queue[j], child2; - if (j < size && (child2 = queue[j + 1]).deadline < child.deadline) { - child = child2; - j++; - } - if (child.deadline < state.deadline) { - queue[k] = child; - child.tqPos = k; - k = j; - j = k << 1; - while (j <= size) { - child = queue[j]; - if (j < size && (child2 = queue[j + 1]).deadline < child.deadline) { - child = child2; - j++; - } - if (child.deadline >= state.deadline) { - break; - } - queue[k] = child; - child.tqPos = k; - k = j; - j = k << 1; - } - queue[k] = state; - state.tqPos = k; - } - } - } - - boolean testInvariantAux(int i, String what) { - if (i <= size) { - if (queue[i].tqPos != i) { - log.error().append(what).append(": queue[").append(i).append("].tqPos=").append(queue[i].tqPos) - .append(" != ").append(i).endl(); - } - if (!testInvariantAux(i * 2, what)) { - return false; - } - if (!testInvariantAux(i * 2 + 1, what)) { - return false; - } - if (i > 1) { - if (queue[i].deadline < queue[i / 2].deadline) { - log.error().append(what).append(": child[").append(i).append("]=").append(queue[i].deadline) - .append(" < parent[").append((i / 2)).append("]=").append(queue[i / 2].deadline).endl(); - return false; - } - } - } - return true; - } - - boolean testInvariant(String what) { - boolean result = testInvariantAux(1, what); - if (result) { - for (int i = size + 1; i < queue.length; ++i) { - if (queue[i] != null) { - log.error().append(what).append(": size = ").append(size).append(", child[").append(i).append("]=") - .append(queue[i].deadline).append(" != null").endl(); - result = false; - } - } - } - if (result) { - // log.info("timeoutQueue.testInvariant: OK "+what); - } - return result; - } - - void junitGetAllJobs(Set jobs) { - for (int i = 1; i <= size; ++i) { - jobs.add(queue[i].job); - } - } -} diff --git a/IO/src/main/java/io/deephaven/io/sched/Scheduler.java b/IO/src/main/java/io/deephaven/io/sched/Scheduler.java deleted file mode 100644 index f38cde3c881..00000000000 --- a/IO/src/main/java/io/deephaven/io/sched/Scheduler.java +++ /dev/null @@ -1,199 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io.sched; - -import java.nio.channels.*; -import java.util.*; -import java.util.concurrent.Executor; - -/** - * This class provides a singleton wrapper for scheduling invocations of multiple Job instances from a single thread. - * Job are scheduled in accordance with an interest set on a java.nio.Channel, deadline based time scheduling, and/or - * custom criteria defined by the Jobs' implementation of the ready() method. - * - * Jobs are instantiated by the application and made known to the scheduler by one of the install() methods. Once the - * job is installed, the scheduler will call exactly one of its invoke(), timedOut() or cancelled() methods exactly - * once. After this, the scheduler forgets about the job completely, unless the application installs it again. - */ -public interface Scheduler { - - // -------------------------------------------------------------------------- - // public interface - // -------------------------------------------------------------------------- - - /** - * Return the scheduler's idea of the current time. - */ - public long currentTimeMillis(); - - /** - * Install a job in association with a channel and an interest set. 
- */ - public void installJob(Job job, long deadline, SelectableChannel channel, int interest); - - /** - * Install a job with only an associated deadline (removing any channel association) - */ - public void installJob(Job job, long deadline); - - /** - * Cancel a job, making the scheduler forget it completely.. - */ - public void cancelJob(Job job); - - /** - * Wait for jobs to become ready, then invoke() them all. This method will form the core of the main loop of a - * scheduler-driven application. The method first waits until: - * - * -- the given timeout expires, -- the earliest job-specific timeout expires, or -- one or more jobs becomes ready - * - * If jobs have become ready, then the entire ready set will be invoked. If any job throws an uncaught exception, - * the job's terminated() method will be called and the job deregistered. This does not abort the invocation of the - * remaining jobs. The return value is then the number of jobs that were invoked. - * - * If no jobs are ready and any job-specific timeouts expire, the associated jobs' timedOut() methods are called. - * The return value is the negative of the number of expired timeouts. - * - * If the time given by the timeout argument expires, then zero is returned. - * - * Note that this method is not synchronized. The application must ensure that it is never called concurrently by - * more than one thread. - * - * @return true, if some job was dispatched - */ - public boolean work(long timeout, Runnable handoff); - - /** - * Shut down the scheduler, calling close() on the underlying Selector. - */ - public void close(); - - /** - * Return true if the scheduler is closing or closed. - */ - public boolean isClosed(); - - // -------------------------------------------------------------------------- - // test support methods - // -------------------------------------------------------------------------- - - /** - * Return a reference to the selector - */ - public Selector junitGetSelector(); - - /** - * Return all jobs known to the scheduler, in whatever state. - */ - public Set junitGetAllJobs(); - - /** - * Return the contents of the timeout queue, in deadline order - * - * @return the jobs in the timeout queue - */ - public ArrayList junitGetTimeoutQueue(); - - /** - * Return the selection keys currently known to the scheduler. - */ - public ArrayList junitGetAllKeys(); - - /** - * Return the selection keys currently known to the scheduler. - */ - public ArrayList junitGetReadyKeys(); - - /** - * Return a map containing all channels and the jobs to which they are associated. - */ - public Map junitGetChannelsAndJobs(); - - /** - * Return true if the timeout queue invariant holds. 
- */ - public boolean junitTestTimeoutQueueInvariant(); - - public class Null implements Scheduler { - @Override - public long currentTimeMillis() { - return 0; - } - - @Override - public void installJob(Job job, long deadline, SelectableChannel channel, int interest) {} - - @Override - public void installJob(Job job, long deadline) {} - - @Override - public void cancelJob(Job job) {} - - @Override - public boolean work(long timeout, Runnable handoff) { - return false; - } - - @Override - public void close() {} - - @Override - public boolean isClosed() { - return false; - } - - @Override - public Selector junitGetSelector() { - return null; - } - - @Override - public Set junitGetAllJobs() { - return null; - } - - @Override - public ArrayList junitGetTimeoutQueue() { - return null; - } - - @Override - public ArrayList junitGetAllKeys() { - return null; - } - - @Override - public ArrayList junitGetReadyKeys() { - return null; - } - - @Override - public Map junitGetChannelsAndJobs() { - return null; - } - - @Override - public boolean junitTestTimeoutQueueInvariant() { - return false; - } - } - - public final class ExecutorAdaptor implements Executor { - final Scheduler scheduler; - - public ExecutorAdaptor(final Scheduler scheduler) { - this.scheduler = scheduler; - } - - @Override - public void execute(final Runnable runnable) { - scheduler.installJob(new TimedJob() { - @Override - public final void timedOut() { - runnable.run(); - } - }, 0); - } - } -} diff --git a/IO/src/main/java/io/deephaven/io/sched/TimedJob.java b/IO/src/main/java/io/deephaven/io/sched/TimedJob.java deleted file mode 100644 index 9fb8c2899fb..00000000000 --- a/IO/src/main/java/io/deephaven/io/sched/TimedJob.java +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io.sched; - -import io.deephaven.base.log.LogOutput; - -import java.nio.channels.SelectableChannel; - -/** - * This is the base class for jobs which are only interested in timing events. It provides default invoke() and - * cancelled() method which do nothing. - */ -public abstract class TimedJob extends Job { - public int invoke(SelectableChannel channel, int readyOps, Runnable handoff) { - if (handoff != null) { - handoff.run(); - } - return 0; - } - - public void cancelled() { - // do nothing - } - - @Override - public LogOutput append(LogOutput logOutput) { - return logOutput.append(LogOutput.BASIC_FORMATTER, this); - } -} diff --git a/IO/src/main/java/io/deephaven/io/sched/YASchedulerImpl.java b/IO/src/main/java/io/deephaven/io/sched/YASchedulerImpl.java deleted file mode 100644 index f10c7fc70b8..00000000000 --- a/IO/src/main/java/io/deephaven/io/sched/YASchedulerImpl.java +++ /dev/null @@ -1,979 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io.sched; - -import io.deephaven.base.RingBuffer; -import io.deephaven.base.stats.*; -import io.deephaven.io.logger.Logger; - -import java.io.IOException; -import java.io.UncheckedIOException; -import java.nio.channels.*; -import java.util.*; - -/** - * Yet Another implementation of the Scheduler interface -- the best one yet. - * - * This class provides a singleton wrapper for scheduling invocations of multiple Job instances from a single thread. - * Job are scheduled in accordance with an interest set on a java.nio.Channel, deadline based time scheduling, and/or - * custom criteria defined by the Jobs' implementation of the ready() method. 
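A usage sketch for the ExecutorAdaptor above (illustrative; assumes an existing Scheduler instance):

    import java.util.concurrent.Executor;
    import io.deephaven.io.sched.Scheduler;

    class ExecutorSketch {
        // Runs a task on the scheduler thread as soon as possible: the adaptor installs
        // it as a TimedJob whose deadline (0) is already due.
        static void runSoon(Scheduler sched, Runnable task) {
            Executor executor = new Scheduler.ExecutorAdaptor(sched);
            executor.execute(task);
        }
    }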
- * - * Jobs are instantiated by the application and made known to the scheduler by one of the installJob() methods. A - * previously installed job can be removed from the scheduler with the cancelJob() method. The installJob() and - * cancelJob() methods are thread-safe. It is allowed to call installJob() on a job that is already installed, or - * cancelJob() on a job that is not currently in the scheduler. In the former case, the channel and/or deadline will be - * updated accordingly; in the latter, the call will be ignored. - * - * Once the job is installed, the scheduler promises to call exactly one of its invoke(), timedOut() or cancelled() - * methods exactly once. The invoke() method will be called only if the job was (last) installed with a channel and - * non-zero interest set. The timedOut() method can be called for any job, since all jobs have an associated deadline - * (although the timeout value can be set to Integer.MAX_VALUE to make it effectively infinite). The cancelled() method - * is called only if the job is removed by a cancelJob() call before either the channel is ready or the deadline expires. - * - * After the job is called back, the scheduler forgets about the job completely, unless the application installs it - * again. That is, from the scheduler's point of view *all* jobs are one-shots. This design is based on the observation - * that it is easier to reschedule jobs on every invocation in the style of a tail-recursive loop, as opposed to - * maintaining persistent state in the scheduler. - * - * The application must drive the scheduler by calling the work() method in a loop. The work() method is *not* - * thread-safe; the application must either call it from a single thread or synchronize calls accordingly. - */ -public class YASchedulerImpl implements Scheduler { - - /** the scheduler name, for debug and stats output */ - protected final String name; - - /** the java.nio.Selector instance */ - private final Selector selector; - - /** the logger */ - protected final Logger log; - - /** lock for internal state */ - private final Object stateLock = new Object(); - - /** if non-zero, there is a select() in progress that will terminate at the specified deadline */ - private long selectingTill = 0; - - private volatile boolean spinWakeSelector = false; - - /** the update clock for this scheduler */ - private long updateClock = 1; - - /** the waiting jobs, ordered by deadline */ - private final JobStateTimeoutQueue timeoutQueue; - - /** invokable/timed-out jobs are stored here */ - private RingBuffer dispatchQueue = new RingBuffer(128); - - /** the list of jobs which might have changed since the last update() call */ - private ArrayList changedStates = new ArrayList(128); - - /** add a state to the changedStates list */ - private boolean changedState(JobState state) { - if (state.updateClock < updateClock) { - state.updateClock = updateClock; - changedStates.add(state); - return true; - } - - // Assert.eqTrue(isInChangedStates(state), "isInChangedStates(state)"); // temporary - - return false; - } - - private boolean isInChangedStates(JobState state) { - final int L = changedStates.size(); - for (int i = 0; i < L; ++i) { - if (state == changedStates.get(i)) { - return true; - } - } - return false; - } - - /** if there are lots of tiny jobs, taking timing measurements may be time consuming.
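Since all jobs are one-shots, a periodic task re-arms itself on each invocation, in the tail-recursive style described above; an illustrative sketch (invented names, not code from this patch):

    import io.deephaven.io.sched.Scheduler;
    import io.deephaven.io.sched.TimedJob;

    class PeriodicSketch extends TimedJob {
        private final Scheduler sched;
        private final long periodMillis;

        PeriodicSketch(Scheduler sched, long periodMillis) {
            this.sched = sched;
            this.periodMillis = periodMillis;
        }

        @Override
        public void timedOut() {
            // ... do the periodic work here ...
            sched.installJob(this, sched.currentTimeMillis() + periodMillis); // re-arm
        }
    }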
*/ - private final boolean doTimingStats; - - private final boolean doSpinSelect; - - /** time base for loop duration measurements */ - private long lastNanos = 0; - - private void mark(Value v) { - if (doTimingStats) { - long t = System.nanoTime(); - if (lastNanos != 0) { - v.sample((t - lastNanos + 500) / 1000); - } - lastNanos = t; - } - } - - /** have we been closed? */ - private volatile boolean isClosed = false; - - // statistics - private Value invokeCount; - private Value timeoutCount; - private Value selectDuration; - private Value workDuration; - private Value gatheredDuration; - private Value channelInstalls; - private Value timedInstalls; - private Value jobCancels; - private Value jobUpdates; - private Value keyUpdates; - private Value keyOrphans; - private Value selectorWakeups; - private Value channelInterestWakeups; - private Value channelTimeoutWakeups; - private Value plainTimeoutWakeups; - private Value cancelWakeups; - - /** - * The constructor. - */ - public YASchedulerImpl(Selector selector, Logger log) throws IOException { - this("Scheduler", selector, log); - } - - public YASchedulerImpl(String name, Selector selector, Logger log) throws IOException { - this(name, selector, log, true, false); - } - - public YASchedulerImpl(String name, Selector selector, Logger log, boolean doTimingStats, boolean doSpinSelect) { - this.name = name; - this.selector = selector; - this.log = log; - this.doTimingStats = doTimingStats; - this.doSpinSelect = doSpinSelect; - - this.timeoutQueue = new JobStateTimeoutQueue(log, 1024); - - this.invokeCount = Stats.makeItem(name, "invokeCount", Counter.FACTORY, - "The number of jobs invoked for I/O").getValue(); - this.timeoutCount = Stats.makeItem(name, "timeoutCount", Counter.FACTORY, - "The number of jobs that have timed out").getValue(); - this.selectDuration = Stats.makeItem(name, "SelectDuration", State.FACTORY, - "The number of microseconds spent in select()").getValue(); - this.workDuration = Stats.makeItem(name, "WorkDuration", State.FACTORY, - "The number of microseconds between successive select() calls").getValue(); - this.gatheredDuration = Stats.makeItem(name, "GatheredDuration", State.FACTORY, - "The number of microseconds jobs spend waiting after being gathered").getValue(); - this.channelInstalls = Stats.makeItem(name, "channelInstalls", Counter.FACTORY, - "The number of installJob() calls with a channel").getValue(); - this.timedInstalls = Stats.makeItem(name, "timedInstalls", Counter.FACTORY, - "The number of installJob() calls with just a timeout").getValue(); - this.jobCancels = Stats.makeItem(name, "jobCancels", Counter.FACTORY, - "The number of cancelJob() calls").getValue(); - this.jobUpdates = Stats.makeItem(name, "jobUpdates", Counter.FACTORY, - "The number of updates applied to the job state pre- and post-select").getValue(); - this.keyUpdates = Stats.makeItem(name, "keyUpdates", Counter.FACTORY, - "The number of times an NIO SelectionKey was updated with non-zero interest").getValue(); - this.keyOrphans = Stats.makeItem(name, "keyOrphans", Counter.FACTORY, - "The number of times an NIO SelectionKey's interest was cleared").getValue(); - this.selectorWakeups = Stats.makeItem(name, "selectorWakeups", Counter.FACTORY, - "The number of times the selector had to be woken up").getValue(); - - this.channelInterestWakeups = Stats.makeItem(name, "channelInterestWakeups", Counter.FACTORY, - "The number of selector wakeups due to a change in a channel's interest set").getValue(); - this.channelTimeoutWakeups = 
Stats.makeItem(name, "channelTimeoutWakeups", Counter.FACTORY, - "The number of selector wakeups due to a channel's timeout becoming the earliest").getValue(); - this.plainTimeoutWakeups = Stats.makeItem(name, "plainTimeoutWakeups", Counter.FACTORY, - "The number of selector wakeups due to a plain timeout becoming the earliest").getValue(); - this.cancelWakeups = Stats.makeItem(name, "cancelWakeups", Counter.FACTORY, - "The number of selector wakeups due to a job cancellation").getValue(); - } - - /** - * Return the scheduler's idea of the current time. - */ - public long currentTimeMillis() { - return System.currentTimeMillis(); - } - - /** - * Install a job in association with a channel and an interest set. - */ - public void installJob(Job job, long deadline, SelectableChannel channel, int interest) { - synchronized (stateLock) { - JobState state = job.makeStateFor(this); - SelectionKey key = channel.keyFor(selector); - - // see if we will need to wake up the selector - boolean wakeup = false; - if (key == null || !key.isValid()) { - wakeup = true; - } else if (deadline < selectingTill) { - wakeup = true; - channelTimeoutWakeups.sample(1); - } else if (key.interestOps() != interest && (channel != state.nextChannel || interest != state.nextOps)) { - wakeup = true; - channelInterestWakeups.sample(1); - } - - state.nextChannel = channel; - state.nextOps = interest; - state.nextDeadline = deadline; - state.cancelled = false; - state.forgotten = false; - changedState(state); - - if (log.isDebugEnabled()) { - log.debug().append(name).append(" installed job ").append(job) - .append(", d=").append(deadline) - .append(", ni=").append(state.nextOps) - // .append(", k=").append(key) - .append(", ki=").append((key == null || !key.isValid() ? 0 : key.interestOps())) - .append(", w=").append(wakeup) - .endl(); - } - - if (wakeup) { - maybeWakeSelector(); - } - - // must always wake if doing spin select since we aren't setting selectingTill - else if (doSpinSelect) { - spinWakeSelector = true; - } - - channelInstalls.sample(1); - } - } - - /** - * Install a job with only an associated deadline (removing any channel association) - */ - public void installJob(Job job, long deadline) { - synchronized (stateLock) { - JobState state = job.makeStateFor(this); - state.nextChannel = null; - state.nextOps = 0; - state.nextDeadline = deadline; - state.cancelled = false; - state.forgotten = false; - final boolean changed = changedState(state); - - // Note: We don't need to be concerned with waking up due to channelInterest changes, since - // we would have to be reducing the interest set which can only lead to a later wakeup time. - - // if the new deadline is earlier than the current top, wake up the selector - boolean wakeup = false; - if (deadline < selectingTill) { - plainTimeoutWakeups.sample(1); - maybeWakeSelector(); - } - - // must always wake if doing spin select since we aren't setting selectingTill - else if (doSpinSelect) { - spinWakeSelector = true; - } - - if (log.isDebugEnabled()) { - log.debug().append(name).append(" installed job ").append(job) - .append(", d=").append(deadline) - .append(", w=").append(wakeup) - .append(", c=").append(changed) - .endl(); - } - - timedInstalls.sample(1); - } - } - - /** - * Cancel a job's selection key with the scheduler. - * - * @param job the job to be cancelled. 
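Taken together, the two install flavors and cancelJob() are used like this (an illustrative sketch; the channel must be in non-blocking mode before it can be registered):

    import java.nio.channels.SelectionKey;
    import java.nio.channels.SocketChannel;
    import io.deephaven.io.sched.Job;
    import io.deephaven.io.sched.Scheduler;

    class InstallSketch {
        // Wait for OP_READ with a 5s deadline: exactly one of job.invoke(),
        // job.timedOut(), or job.cancelled() will be called afterwards.
        static void arm(Scheduler sched, Job job, SocketChannel channel) {
            sched.installJob(job, sched.currentTimeMillis() + 5_000, channel, SelectionKey.OP_READ);
        }

        // Cancelling before the channel is ready or the deadline expires ends in job.cancelled().
        static void disarm(Scheduler sched, Job job) {
            sched.cancelJob(job);
        }
    }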
- */ - public void cancelJob(Job job) { - synchronized (stateLock) { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" explicitly cancelling ").append(job) - .append(" in YAScheduler.cancelJob").endl(); - } - JobState state = job.getStateFor(this); - if (state != null) { - state.nextChannel = null; - state.nextOps = 0; - state.nextDeadline = 0; - state.cancelled = true; - state.forgotten = false; - changedState(state); - - if (state.waitChannel != null) { - cancelWakeups.sample(1); - maybeWakeSelector(); - } - jobCancels.sample(1); - } - } - } - - /** - * drop the association of a state with a channel - */ - private void dropChannel(JobState state) { - if (state.waitChannel != null) { - SelectionKey key = state.waitChannel.keyFor(selector); - try { - if (key != null && key.isValid() && key.attachment() == state) { - key.attach(null); - if (key.interestOps() != 0) { - key.interestOps(0); - if (log.isDebugEnabled()) { - log.debug().append(name).append(" setting interest on orphaned key ").append(key.toString()) - .append(" to 0").endl(); - } - keyUpdates.sample(1); - } - } - } catch (CancelledKeyException x) { - // ignore it - if (log.isDebugEnabled()) { - log.info().append(name).append(" got CancelledKeyException while dropping channel ") - .append(state.waitChannel.toString()).endl(); - } - } - state.waitChannel = null; - } - } - - /** - * associate a channel with a state - */ - private boolean grabChannel(JobState state) { - try { - SelectionKey key = state.nextChannel.keyFor(selector); - if (key == null) { - key = state.nextChannel.register(selector, state.nextOps, state); - log.debug().append(name).append(" update ").append(state.job) - .append(": registered channel ").append(state.nextChannel.toString()) - .append(", ni=").append(state.nextOps) - .append(", k=").append(key.toString()) - .endl(); - } else { - key.attach(state); - if (key.interestOps() != state.nextOps) { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" update ").append(state.job) - .append(": setting interest on key ").append(key.toString()).append(" to ") - .append(state.nextOps) - .endl(); - } - key.interestOps(state.nextOps); - keyUpdates.sample(1); - } else { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" update ").append(state.job) - .append(": interest on key ").append(key.toString()).append(" already at ") - .append(state.nextOps) - .endl(); - } - } - } - if (state.waitChannel != null && state.waitChannel != state.nextChannel) { - SelectionKey waitKey = state.waitChannel.keyFor(selector); - if (waitKey != null && waitKey.attachment() == state) { - try { - waitKey.interestOps(0); - } catch (CancelledKeyException x) { - // ignore this - } - } - } - state.waitChannel = state.nextChannel; - return true; - } catch (ClosedChannelException x) { - // fall through - } catch (CancelledKeyException x) { - // fall through - } - state.waitChannel = null; - log.error().append(name).append(" tried to register ").append(state.job).append(" on closed channel ") - .append(state.nextChannel.toString()).endl(); - return false; - } - - /** - * Apply changes to the job states. 
- * - * NOTE: assumes that stateLock is held - */ - private void update() { - // DO NOT USE FOREACH HERE AS IT CREATES AN INTERATOR -> No Allocation changes - int size = changedStates.size(); - for (int i = 0; i < size; i++) { - JobState state = changedStates.get(i); - jobUpdates.sample(1); - - if (log.isDebugEnabled()) { - SelectionKey key = null; - if (state.nextChannel != null) { - key = state.nextChannel.keyFor(selector); - } - log.debug().append(name).append(" updating job ").append(state.job) - .append(", d=").append(state.nextDeadline) - .append(", ni=").append(state.nextOps) - .append(", k=").append(key == null ? "null" : key.toString()) - .append(", ki=").append(key == null || !key.isValid() ? 0 : key.interestOps()) - .endl(); - } - - if (state.gathered) { - // job is waiting to be invoked; leave it alone - } else if (state.nextChannel != null && state.nextOps != 0) { - if (!grabChannel(state)) { - log.error().append(name).append(" cancelling ").append(state.job) - .append(" after failed I/O registration").endl(); - timeoutQueue.remove(state); - state.cancelled = true; - dispatchQueue.add(state); - } else { - timeoutQueue.enter(state, state.nextDeadline); - } - } else if (state.forgotten) { - dropChannel(state); - timeoutQueue.remove(state); - } else if (state.cancelled) { - dropChannel(state); - timeoutQueue.remove(state); - if (log.isDebugEnabled()) { - log.debug().append(name).append(" cancelling ").append(state.job).append(" from update()").endl(); - } - state.cancelled = true; - dispatchQueue.add(state); - } else { - dropChannel(state); - timeoutQueue.enter(state, state.nextDeadline); - } - - state.forgotten = true; - state.nextChannel = null; - state.nextOps = 0; - state.nextDeadline = 0; - - assert state.waitChannel == null || state.waitChannel.keyFor(selector).attachment() == state; - } - if (log.isDebugEnabled()) { - log.debug().append(name).append(" updated ").append(changedStates.size()).append(" jobs").endl(); - } - changedStates.clear(); - updateClock++; - } - - /** - * compute the timeout value for the next select() call - * - * NOTE: assumes that stateLock is held - */ - private long computeTimeout(long now, long timeout) { - if (!dispatchQueue.isEmpty()) { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" update: dispatch queue is not empty, setting timeout to zero").endl(); - } - timeout = 0; - } else if (!timeoutQueue.isEmpty()) { - JobState next = timeoutQueue.top(); - long remain = next.deadline - now; - if (log.isDebugEnabled()) { - log.debug().append(name).append(" update: next timeout due in ").append(remain).append(" millis: ") - .append(next.job).endl(); - } - timeout = Math.max(0, Math.min(timeout, remain)); - } - return timeout; - } - - /** - * Wait for something to happen - */ - private void select(long timeout) { - try { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" calling select(").append(timeout).append(")").endl(); - } - - mark(workDuration); - - if (timeout > 0) { - selector.select(timeout); - } else { - selector.selectNow(); - } - - mark(selectDuration); - } catch (IOException x) { - if (java.util.regex.Pattern.matches(".*Operation not permitted.*", x.toString())) { - // There is a documented bug (http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6481709) in some - // versions of the epoll selector which causes occasional "Operation not permitted" errors to be - // thrown. 
- log.warn().append(name).append( - " Ignoring 'Operation not permitted' exception, see http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6481709") - .endl(); - } else { - if (!isClosed()) { - log.fatal(x).append(name).append(" Unexpected IOException in select(): ").append(x.getMessage()) - .endl(); - System.exit(1); - } - } - } catch (ClosedSelectorException x) { - if (!isClosed()) { - log.fatal(x).append(name).append(" ClosedSelectorException in select(): ").append(x.getMessage()) - .endl(); - System.exit(1); - } - } - } - - private void spinSelect(long times) { - try { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" calling spinSelect(").append(times).append(")").endl(); - } - - mark(workDuration); - - while (selector.selectNow() == 0 && !spinWakeSelector && (times-- > 0)) { - } - - mark(selectDuration); - } catch (IOException x) { - if (java.util.regex.Pattern.matches(".*Operation not permitted.*", x.toString())) { - // There is a documented bug (http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6481709) in some - // versions of the epoll selector which causes occasional "Operation not permitted" errors to be - // thrown. - log.warn().append(name).append( - " Ignoring 'Operation not permitted' exception, see http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6481709") - .endl(); - } else { - if (!isClosed()) { - log.fatal(x).append(name).append(" Unexpected IOException in spinSelect(): ").append(x.getMessage()) - .endl(); - System.exit(1); - } - } - } catch (ClosedSelectorException x) { - if (!isClosed()) { - log.fatal(x).append(name).append(" ClosedSelectorException in spinSelect(): ").append(x.getMessage()) - .endl(); - System.exit(1); - } - } - } - - /** - * Gather up selected and timed-out jobs - * - * NOTE: assumes that stateLock is held - */ - private void gather(long now) { - JobState state; - int numInvokes = 0; - // first gather all of the invokable jobs - for (SelectionKey key : selector.selectedKeys()) { - ++numInvokes; - try { - if ((state = (JobState) key.attachment()) == null) { - // clear interest ops, so we don't select in a tight loop - if (key.isValid() && key.interestOps() != 0) { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" clearing interest in orphaned key ") - .append(key.toString()).append(" in YASchedulerImpl.gather").endl(); - } - if (key.isValid()) { - key.interestOps(0); - } - keyOrphans.sample(1); - } - } else { - key.attach(null); - state.readyChannel = key.channel(); - state.readyOps = key.readyOps(); - state.gathered = true; - state.gatheredNanos = lastNanos; - dispatchQueue.add(state); - timeoutQueue.remove(state); - if (log.isDebugEnabled()) { - log.debug().append(name).append(" gather ").append(key.toString()).append(" -> ") - .append(state.job) - .append(", ops=").append(key.readyOps()) - .append(", ki=").append(key.interestOps()) - .append(", dq=").append(dispatchQueue.size()) - .endl(); - } - } - } catch (CancelledKeyException x) { - // We can't guarantee that some thread won't try to write to the channel and - // cause it to cancel the key -- if that happens, then we'll get the exception - // here. But that's okay, because it's either an orphan channel which we just - // want to get rid of, or the IOJob will get the exception later and handle it. 
- } - } - selector.selectedKeys().clear(); - invokeCount.sample(numInvokes); - - // now get all of the expired timeouts - int numTimeouts = 0; - while (!timeoutQueue.isEmpty() && now >= (state = timeoutQueue.top()).deadline) { - ++numTimeouts; - timeoutQueue.removeTop(); - state.gathered = true; - state.gatheredNanos = lastNanos; - dispatchQueue.add(state); - } - timeoutCount.sample(numTimeouts); - - if (log.isDebugEnabled()) { - log.debug().append(name).append(" gathered ").append(numInvokes).append(" for I/O and ").append(numTimeouts) - .append(" timeouts").endl(); - } - } - - /** - * dispatch a gathered job, if there are any - */ - private boolean dispatch(Runnable handoff) { - JobState state; - SelectableChannel readyChannel; - int readyOps; - boolean cancelled; - synchronized (stateLock) { - if ((state = dispatchQueue.poll()) == null) { - return false; - } - - readyChannel = state.readyChannel; - readyOps = state.readyOps; - cancelled = state.cancelled; - state.readyChannel = null; - state.readyOps = 0; - state.gathered = false; - // NOTE: we only need to record the state as changed if it has a channel; - // cancelled and timed-out states will just be forgotten. - if (!cancelled && readyChannel != null) { - changedState(state); - } - if (log.isDebugEnabled()) { - log.debug().append(name).append(" dispatch ").append(state.job) - .append(", ops=").append(readyOps) - .append(", dq=").append(dispatchQueue.size()) - .endl(); - } - assert readyChannel == null || readyOps != 0; - } - - // dispatch the job outside of the state lock - try { - if (cancelled) { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" cancelled ").append(state.job).endl(); - } - state.job.cancelled(); - } else { - if (doTimingStats) - gatheredDuration.sample((System.nanoTime() - state.gatheredNanos + 500) / 1000); - if (readyOps != 0) { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" invoke ").append(state.job).endl(); - } - state.job.invoke(readyChannel, readyOps, handoff); - } else { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" timedOut ").append(state.job).endl(); - } - if (handoff != null) { - handoff.run(); - } - state.job.timedOut(); - } - } - } catch (Throwable x) { - log.fatal(x).append(": unhandled Throwable in dispatch on job [").append(state.job).append("]: ") - .append(x.getMessage()).endl(); - throw new RuntimeException(x); - } - - return true; - } - - /** - * Wake up the selector, if necessary. - * - * NOTE: assumes that stateLock is held! - */ - private void maybeWakeSelector() { - if (selectingTill > 0) { - if (log.isDebugEnabled()) { - log.debug().append(name).append(" waking up the scheduler").endl(); - } - selector.wakeup(); - selectorWakeups.sample(1); - } - - if (doSpinSelect) { - spinWakeSelector = true; - } - } - - /** - * Wait for jobs to become ready, then invoke() them all. This method will form the core of the main loop of a - * scheduler-driven application. The method first waits until: - * - * -- the given timeout expires, -- the earliest job-specific timeout expires, or -- one or more jobs becomes ready - * - * Note that this method is not synchronized. The application must ensure that it is never called concurrently by - * more than one thread. - * - * @return true, if some work was done. 
- */ - public boolean work(long timeout, Runnable handoff) { - if (doSpinSelect) { - // just use the millis timeout as the number of times to spin - long times = timeout; - return spinWork(times, handoff); - } - - boolean didOne = dispatch(handoff); - if (!didOne) { - // apply any changes to the states - synchronized (stateLock) { - update(); - long now = currentTimeMillis(); - timeout = computeTimeout(now, timeout); - assert selectingTill == 0 : "no more than one thread should ever call work!"; - if (timeout > 0) { - selectingTill = now + timeout; - } - } - - // wait for something to happen - select(timeout); - - // apply changes while we were waiting, then gather up all of the jobs that can be dispatched - synchronized (stateLock) { - selectingTill = 0; - update(); - long now = currentTimeMillis(); - gather(now); - } - - // and try again - didOne = dispatch(handoff); - } - return didOne; - } - - private boolean spinWork(long times, Runnable handoff) { - boolean didOne = dispatch(handoff); - if (!didOne) { - // apply any changes to the states - synchronized (stateLock) { - update(); - if (!dispatchQueue.isEmpty() || spinWakeSelector) { - times = 1; // only want to spin on select once since we have stuff to dispatch - spinWakeSelector = false; - } - assert selectingTill == 0 : "no more than one thread should ever call work!"; - } - - // spin for something to happen - spinSelect(times); - - // apply changes while we were waiting, then gather up all of the jobs that can be dispatched - synchronized (stateLock) { - selectingTill = 0; - update(); - long now = currentTimeMillis(); - gather(now); - } - - // and try again - didOne = dispatch(handoff); - } - return didOne; - } - - /** - * Shuts down the scheduler, calling close() on the underlying Selector instance. - */ - public void close() { - isClosed = true; - clear(); - try { - selector.close(); - } catch (IOException x) { - log.warn(x).append(name).append(" Scheduler.close: ignoring exception from selector.close(): ") - .append(x.getMessage()).endl(); - } - } - - /** - * Return true if the scheduler is closed, or in the process of closing. 
- */ - public boolean isClosed() { - return isClosed; - } - - /** - * Clear out the scheduler state - */ - private void clear() { - Set allJobs = getAllJobs(); - for (Job j : allJobs) { - cancelJob(j); - } - log.info().append(name).append(" Scheduler.clear: starting with ").append(allJobs.size()).append(" jobs") - .endl(); - synchronized (stateLock) { - update(); - } - ArrayList allKeys = getAllKeys(); - for (SelectionKey k : allKeys) { - k.cancel(); - } - synchronized (stateLock) { - update(); - } - try { - selector.selectNow(); - } catch (IOException x) { - throw new UncheckedIOException(x); - } - while (true) { - try { - if (!dispatch(null)) { - break; - } - } catch (Exception x) { - log.warn().append(name).append(" Scheduler.clear: ignoring shutdown exception: ").append(x).endl(); - } - } - log.info().append(name).append(" Scheduler.clear: finished").endl(); - } - - /** - * return the set of all jobs known to the scheduler, in whatever state - */ - private Set getAllJobs() { - synchronized (stateLock) { - update(); - Set result = new HashSet(); - timeoutQueue.junitGetAllJobs(result); - for (JobState state : changedStates) { - assert state != null; - if (state.job != null) { - result.add(state.job); - } - } - for (SelectionKey key : junitGetAllKeys()) { - Object attachment; - if (key != null && (attachment = key.attachment()) != null && attachment instanceof JobState) { - JobState state = (JobState) attachment; - if (state.job != null) { - result.add(state.job); - } - } - } - return result; - } - } - - /** - * Return the selection keys currently known to the scheduler. - */ - private ArrayList getAllKeys() { - synchronized (stateLock) { - update(); - Set keys = selector.keys(); - selector.wakeup(); - synchronized (keys) { - return new ArrayList(keys); - } - } - } - - // -------------------------------------------------------------------------- - // test support methods (white-box) - // -------------------------------------------------------------------------- - - public Selector junitGetSelector() { - return selector; - } - - /** - * return the set of all jobs known to the scheduler, in whatever state - */ - public Set junitGetAllJobs() { - return getAllJobs(); - } - - /** - * Return the contents of the timeout queue, in deadline order - * - * @return the jobs in the timeout queue - */ - public ArrayList junitGetTimeoutQueue() { - synchronized (stateLock) { - update(); - ArrayList result = new ArrayList(timeoutQueue.size()); - try { - JobStateTimeoutQueue q = (JobStateTimeoutQueue) timeoutQueue.clone(); - while (!q.isEmpty()) { - result.add(q.top().job); - q.removeTop(); - } - } catch (CloneNotSupportedException x) { - // ignore - } - return result; - } - } - - /** - * Return the selection keys currently known to the scheduler. - */ - public ArrayList junitGetAllKeys() { - return getAllKeys(); - } - - /** - * Return the selection keys currently known to the scheduler. - */ - public ArrayList junitGetReadyKeys() { - return new ArrayList(selector.selectedKeys()); - } - - /** - * Return a map containing all channels and the jobs to which they are associated. 
- */ - public Map junitGetChannelsAndJobs() { - synchronized (stateLock) { - update(); - Map result = new HashMap(); - for (SelectionKey key : junitGetAllKeys()) { - Object attachment; - if (key != null && (attachment = key.attachment()) != null && attachment instanceof JobState) { - JobState state = (JobState) attachment; - if (state.job != null) { - result.put(key.channel(), ((JobState) attachment).job); - } - } - } - return result; - } - } - - /** - * Return true if the timeout queue invariant holds. - */ - public boolean junitTestTimeoutQueueInvariant() { - synchronized (stateLock) { - return timeoutQueue.testInvariant("in call from junit"); - } - } -} diff --git a/IO/src/test/java/io/deephaven/io/sched/TestJobStateTimeoutQueue.java b/IO/src/test/java/io/deephaven/io/sched/TestJobStateTimeoutQueue.java deleted file mode 100644 index a572d425d89..00000000000 --- a/IO/src/test/java/io/deephaven/io/sched/TestJobStateTimeoutQueue.java +++ /dev/null @@ -1,124 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.io.sched; - -import io.deephaven.io.logger.Logger; -import junit.framework.TestCase; - -import java.nio.channels.SelectableChannel; -import java.io.IOException; - -public class TestJobStateTimeoutQueue extends TestCase { - - public void setUp() throws Exception { - super.setUp(); - } - - public void tearDown() throws Exception { - super.tearDown(); - } - - /** - * A null Job implementation - */ - private static class NullJob extends Job { - public int invoke(SelectableChannel channel, int readyOps, Runnable handoff) throws IOException { - return 0; - } - - public void timedOut() {} - - public void cancelled() {} - } - - /** - * Macro test - */ - public void testTimeoutQueue() { - JobState[] ja = new JobState[10]; - for (int i = 0; i < ja.length; ++i) { - ja[i] = new JobState(new NullJob()); - } - JobStateTimeoutQueue q = new JobStateTimeoutQueue(Logger.NULL, 10); - - q.enter(ja[0], 1); - assertTrue(q.testInvariant("insert 1")); - q.enter(ja[1], 9); - assertTrue(q.testInvariant("insert 9")); - q.enter(ja[2], 8); - assertTrue(q.testInvariant("insert 8")); - q.enter(ja[3], 5); - assertTrue(q.testInvariant("insert 5")); - q.enter(ja[4], 2); - assertTrue(q.testInvariant("insert 2")); - q.enter(ja[5], 3); - assertTrue(q.testInvariant("insert 3")); - q.enter(ja[6], 6); - assertTrue(q.testInvariant("insert 6")); - q.enter(ja[7], 4); - assertTrue(q.testInvariant("insert 4")); - q.enter(ja[8], 7); - assertTrue(q.testInvariant("insert 7")); - q.enter(ja[9], 10); - assertTrue(q.testInvariant("insert 10")); - - assertEquals(ja[0], q.top()); - q.removeTop(); - q.testInvariant("remove 1"); - assertEquals(ja[4], q.top()); - q.removeTop(); - q.testInvariant("remove 2"); - assertEquals(ja[5], q.top()); - q.removeTop(); - q.testInvariant("remove 3"); - assertEquals(ja[7], q.top()); - q.removeTop(); - q.testInvariant("remove 4"); - assertEquals(ja[3], q.top()); - q.removeTop(); - q.testInvariant("remove 5"); - assertEquals(ja[6], q.top()); - q.removeTop(); - q.testInvariant("remove 6"); - assertEquals(ja[8], q.top()); - q.removeTop(); - q.testInvariant("remove 7"); - assertEquals(ja[2], q.top()); - q.removeTop(); - q.testInvariant("remove 8"); - assertEquals(ja[1], q.top()); - q.removeTop(); - q.testInvariant("remove 9"); - assertEquals(ja[9], q.top()); - q.removeTop(); - q.testInvariant("remove 10"); - - assertTrue(q.testInvariant("after clone")); - } - - /** - * Test change of deadline within queue - */ - public void testDeadlineChange() { - 
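    // The assertions below exercise enter() on jobs that are already queued: re-entering a job
    // replaces its deadline and re-sifts the heap, so top() must always surface the earliest
    // remaining deadline afterward.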
JobState j1 = new JobState(new NullJob()); - JobState j2 = new JobState(new NullJob()); - JobState j3 = new JobState(new NullJob()); - JobStateTimeoutQueue q = new JobStateTimeoutQueue(Logger.NULL, 10); - - q.enter(j1, 1000); - q.enter(j2, 2000); - q.enter(j3, 3000); - - assertEquals(j1, q.top()); - - q.enter(j2, 200); - assertEquals(j2, q.top()); - - q.enter(j2, 20000); - assertEquals(j1, q.top()); - - q.enter(j1, 100000); - assertEquals(j3, q.top()); - } -} diff --git a/Net/build.gradle b/Net/build.gradle deleted file mode 100644 index 31bf972685b..00000000000 --- a/Net/build.gradle +++ /dev/null @@ -1,30 +0,0 @@ -plugins { - id 'io.deephaven.project.register' -} - -dependencies { - implementation project(':Base') - implementation project(':DataStructures') - implementation project(':IO') - implementation project(':Configuration') - implementation project(':FishUtil') - implementation project(':log-factory') - - testImplementation project(path: ':Base', configuration: 'tests') - - testRuntimeOnly project(':log-to-slf4j') - Classpaths.inheritSlf4j(project, 'slf4j-simple', 'testRuntimeOnly') -} - -test { - useJUnit() - - enableAssertions = true - maxHeapSize = '3g' - - systemProperty 'Configuration.rootFile', 'lib-tests.prop' - systemProperty 'deephaven.dataDir', "$rootDir/tmp/workspace" - systemProperty 'configuration.quiet', 'true' - - exclude '**/NoTest*' -} \ No newline at end of file diff --git a/Net/gradle.properties b/Net/gradle.properties deleted file mode 100644 index c186bbfdde1..00000000000 --- a/Net/gradle.properties +++ /dev/null @@ -1 +0,0 @@ -io.deephaven.project.ProjectType=JAVA_PUBLIC diff --git a/Net/src/main/java/io/deephaven/net/CommBase.java b/Net/src/main/java/io/deephaven/net/CommBase.java deleted file mode 100644 index cdbdb9b42f3..00000000000 --- a/Net/src/main/java/io/deephaven/net/CommBase.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.net; - -import io.deephaven.base.FatalErrorHandler; -import io.deephaven.base.FatalErrorHandlerFactory; -import io.deephaven.configuration.Configuration; -import io.deephaven.net.impl.nio.NIODriver; -import io.deephaven.io.NioUtil; -import io.deephaven.io.logger.Logger; -import io.deephaven.io.sched.*; - -import java.io.IOException; -import java.nio.channels.Selector; - -public class CommBase { - - private static volatile FatalErrorHandler defaultFatalErrorHandler; - - public static FatalErrorHandler getDefaultFatalHandler() { - if (defaultFatalErrorHandler == null) { - synchronized (CommBase.class) { - if (defaultFatalErrorHandler == null) { - final String defaultFatalErrorHandlerClassName = - Configuration.getInstance().getProperty("Comm.fatalErrorHandlerFactoryClass"); - final Class defaultFatalErrorHandlerClass; - try { - defaultFatalErrorHandlerClass = Class.forName(defaultFatalErrorHandlerClassName); - } catch (ClassNotFoundException e) { - throw new IllegalArgumentException( - "Could not find envelopeHandlerFactoryClass " + defaultFatalErrorHandlerClassName, e); - } - final FatalErrorHandlerFactory defaultFatalErrorHandlerFactory; - try { - defaultFatalErrorHandlerFactory = - (FatalErrorHandlerFactory) defaultFatalErrorHandlerClass.newInstance(); - } catch (InstantiationException | IllegalAccessException | ClassCastException e) { - throw new IllegalArgumentException( - "Could not instantiate envelopeHandlerFactoryClass " + defaultFatalErrorHandlerClass, - e); - } - defaultFatalErrorHandler = defaultFatalErrorHandlerFactory.get(); - } - } 
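This lazy initialization is the classic double-checked locking shape: one volatile read on the fast path, then a re-check under the class lock so the reflective factory lookup runs at most once. A compact restatement of the pattern, with the configuration and reflection reduced to a hypothetical createFromConfiguredFactory() helper:

    private static volatile FatalErrorHandler defaultHandler;

    static FatalErrorHandler defaultHandler() {
        FatalErrorHandler local = defaultHandler;        // single volatile read once initialized
        if (local == null) {
            synchronized (CommBase.class) {
                if ((local = defaultHandler) == null) {  // re-check under the lock
                    local = defaultHandler = createFromConfiguredFactory();
                }
            }
        }
        return local;
    }

The local-variable copy is a small refinement over the field-read-twice form used here; both are safe because the field is volatile.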
- } - return defaultFatalErrorHandler; - } - - public static void signalFatalError(final String message, Throwable x) { - try { - FatalErrorHandler feh = getDefaultFatalHandler(); - feh.signalFatalError(message, x); - } catch (Throwable fehx) { - // dump this to stderr, it's not great, but we had an error raising an error and really do want both of - // these in the log - fehx.printStackTrace(System.err); - x.printStackTrace(System.err); - throw new RuntimeException("Could not raise fatal error: " + message, x); - } - } - - /** - * Return the scheduler used by the NIO implementation - */ - public static Scheduler getScheduler() { - NIODriver.init(); - return NIODriver.getScheduler(); - } - - /** - * Create a private, single-threaded scheduler and driver thread - */ - public static class SingleThreadedScheduler extends YASchedulerImpl { - private final Thread driver; - private volatile boolean done = false; - - public SingleThreadedScheduler(final String name, Logger log) throws IOException { - super(name, NioUtil.reduceSelectorGarbage(Selector.open()), log); - this.driver = new Thread(() -> { - try { - while (!SingleThreadedScheduler.this.done) { - work(10, null); - } - } catch (Throwable x) { - signalFatalError(name + " exception", x); - } - }); - driver.setName(name + "-Driver"); - driver.setDaemon(true); - } - - public SingleThreadedScheduler start() { - driver.start(); - return this; - } - - public void stop() { - done = true; - } - } - - public static SingleThreadedScheduler singleThreadedScheduler(final String name, Logger log) { - try { - return new SingleThreadedScheduler(name, log); - } catch (IOException x) { - signalFatalError(name + " exception", x); - return null; - } - } -} diff --git a/Net/src/main/java/io/deephaven/net/impl/nio/FastNIODriver.java b/Net/src/main/java/io/deephaven/net/impl/nio/FastNIODriver.java deleted file mode 100644 index 492d5718f0b..00000000000 --- a/Net/src/main/java/io/deephaven/net/impl/nio/FastNIODriver.java +++ /dev/null @@ -1,285 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.net.impl.nio; - -import io.deephaven.base.UnfairMutex; -import io.deephaven.configuration.Configuration; -import io.deephaven.net.CommBase; -import io.deephaven.io.NioUtil; -import io.deephaven.io.logger.LogCrashDump; -import io.deephaven.io.logger.Logger; -import io.deephaven.io.sched.Scheduler; -import io.deephaven.io.sched.TimedJob; -import io.deephaven.io.sched.YASchedulerImpl; - -import java.io.IOException; -import java.nio.channels.Selector; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -public final class FastNIODriver implements Runnable { - private static Logger log; - - public static int numTotalThreads(String property) { - final String[] values = Configuration.getInstance().getProperty(property).split(","); - return Integer.parseInt(values[0]) * Integer.parseInt(values[1]); - } - - public static int threadsPerScheduler(String property) { - final String[] values = Configuration.getInstance().getProperty(property).split(","); - if (values.length != 6) - return 0; - return Integer.parseInt(values[1]); - } - - public static Scheduler[] createSchedulers(String name, String property, Logger log) { - return createSchedulers(name, property, log, Configuration.getInstance()); - } - - public static Scheduler[] createSchedulers(String name, String property, Logger log, Configuration config) { - final String[] values = config.getProperty(property).split(","); - 
if (values.length != 6) - return null; - - final int numSchedulers = Integer.parseInt(values[0]); - final int threadsPerScheduler = Integer.parseInt(values[1]); - final long timeoutsOrSpins = Long.parseLong(values[2]); - final int spinsUntilPark = Integer.parseInt(values[3]); - final boolean doTimingStats = Boolean.parseBoolean(values[4]); - final boolean doSpinSelect = Boolean.parseBoolean(values[5]); - final Scheduler[] schedulers = new Scheduler[numSchedulers]; - for (int i = 0; i < numSchedulers; ++i) { - schedulers[i] = createDrivers(name + "-" + i, log, threadsPerScheduler, threadsPerScheduler, - timeoutsOrSpins, spinsUntilPark, false, doTimingStats, doSpinSelect).getScheduler(); - } - return schedulers; - } - - public static FastNIODriver createDrivers(String name, Logger log, int initialThreads, int maxThreads, - long workTimeout, int spinsUntilPark, boolean crashOnMax) { - return createDrivers(name, log, initialThreads, maxThreads, workTimeout, spinsUntilPark, crashOnMax, true, - false); - } - - public static FastNIODriver createDrivers(String name, Logger log, int initialThreads, int maxThreads, - long workTimeout, int spinsUntilPark, boolean crashOnMax, boolean doTimingStats, boolean doSpinSelect) { - FastNIODriver.log = log; - log.info().append(name).append(": Starting FastNIODriver Scheduler: threads: ").append(initialThreads) - .append(", maxThreads: ").append(maxThreads) - .append(", workTimeout/spinsOnSelect: ").append(workTimeout) - .append(", spinsUntilPark: ").append(spinsUntilPark) - .append(", doSpinSelect: ").append(doSpinSelect) - .endl(); - try { - final Scheduler scheduler = new YASchedulerImpl(name, NioUtil.reduceSelectorGarbage(Selector.open()), log, - doTimingStats, doSpinSelect); - - final UnfairMutex mutex = new UnfairMutex(spinsUntilPark, maxThreads); - final AtomicBoolean shutdown = new AtomicBoolean(false); - final AtomicInteger created = new AtomicInteger(0); - final AtomicInteger destroyed = new AtomicInteger(0); - final AtomicInteger available = new AtomicInteger(0); - final InternalThread[] threads = new InternalThread[initialThreads]; - // separate the creation and start so the created / available values are setup - for (int i = 0; i < initialThreads; ++i) { - threads[i] = createNewThread(name, scheduler, mutex, shutdown, workTimeout, created, destroyed, - available, maxThreads, crashOnMax); - } - for (int i = 0; i < initialThreads; ++i) { - threads[i].start(); - } - - return threads[0].driver; - } catch (IOException x) { - CommBase.signalFatalError(name + ": FastNIODriver can't create scheduler", x); - return null; - } - } - - private static class InternalThread extends Thread { - private final FastNIODriver driver; - - private InternalThread(final FastNIODriver driver) { - super(driver); - this.driver = driver; - } - } - - private static InternalThread createNewThread(final String name, final Scheduler scheduler, final UnfairMutex mutex, - final AtomicBoolean shutdown, final long workTimeout, final AtomicInteger created, - final AtomicInteger destroyed, final AtomicInteger available, final int maxThreads, - final boolean crashOnMax) { - InternalThread t = new InternalThread(new FastNIODriver(name, scheduler, mutex, shutdown, workTimeout, created, - destroyed, available, maxThreads, crashOnMax)); - t.setDaemon(true); - t.setName(name + "-FastNIODriver-" + created.getAndIncrement()); - int a = available.incrementAndGet(); - log.info().append("Creating thread ").append(t.getName()).append(". 
available: ").append(a).endl(); - return t; - } - - private final Scheduler scheduler; - private final UnfairMutex mutex; - private final AtomicBoolean shutdown; - private final long workTimeout; - private final Runnable mutexUnlockHandoff; - private boolean alreadyHandedOff; - - private final AtomicInteger created; - private final AtomicInteger destroyed; - private final AtomicInteger available; - private final int maxThreads; - private final boolean crashOnMax; - - private FastNIODriver(final String name, final Scheduler scheduler, final UnfairMutex mutex, - final AtomicBoolean shutdown, final long workTimeout, final AtomicInteger created, - final AtomicInteger destroyed, final AtomicInteger available, final int maxThreads, - final boolean crashOnMax) { - this.scheduler = scheduler; - this.mutex = mutex; - this.shutdown = shutdown; - this.workTimeout = workTimeout; - this.created = created; - this.destroyed = destroyed; - this.available = available; - this.maxThreads = maxThreads; - this.crashOnMax = crashOnMax; - alreadyHandedOff = false; - mutexUnlockHandoff = () -> { - if (!alreadyHandedOff) { - if (shouldCreate()) { - // nobody to handoff to! let's create a new driver - createNewThread(name, scheduler, mutex, shutdown, workTimeout, created, destroyed, available, - maxThreads, crashOnMax).start(); - } - mutex.unlock(); - alreadyHandedOff = true; - } - }; - } - - // only called when we have the mutex... - private boolean shouldCreate() { - if (available.get() == 0) { - // don't need to worry about races w/ index b/c we have lock - if (created.get() == maxThreads) { - if (crashOnMax) { - log.fatal().append("FastNIODriver: exceeded maximum thread pool limit: ").append(summary()).endl(); - LogCrashDump.logCrashDump(log); - CommBase.signalFatalError("FastNIODriver: exceeded maximum thread pool limit: " + summary(), - new Throwable()); - } - return false; - } - return true; - } - return false; - } - - public String summary() { - return "(available: " + available.get() + ", created: " + created.get() + ", destroyed: " + destroyed.get() - + ")"; - } - - @Override - public void run() { - final Thread me = Thread.currentThread(); - Throwable throwable = null; - while (true) { - if (shutdown.get()) { - break; - } - mutex.lock(); - alreadyHandedOff = false; - if (shutdown.get()) { - mutexUnlockHandoff.run(); - break; - } - - try { - - available.getAndDecrement(); - do { - scheduler.work(workTimeout, mutexUnlockHandoff); - } while (mutex.getOwner() == me); - available.getAndIncrement(); - - } catch (Throwable x) { - throwable = x; - shutdown.set(true); - scheduler.installJob(new TimedJob() { - public void timedOut() {} - }, 0); // wake us up yo - mutexUnlockHandoff.run(); // we aren't sure whether the scheduler.work has already called the handoff - // or not yet, so go ahead and call it (it won't double release it) - long deadline = System.currentTimeMillis() + 5000; - // b/c we haven't destroyed ourself yet... - // meh spinning :/ - while (created.get() != destroyed.get() + 1) { - if (deadline - System.currentTimeMillis() < 0) { - break; - } - Thread.yield(); // better than spinning? 
- } - - break; - } - } - - if (destroyed.incrementAndGet() == created.get()) { - scheduler.close(); - } - - if (throwable == null) { - log.error().append("Thread ").append(me.getName()).append(" is terminating: ").append(summary()).endl(); - } else { - log.fatal(throwable).append("Thread ").append(me.getName()).append(" is terminating on a fatal exception: ") - .append(summary()).endl(); - } - - if (throwable != null) - CommBase.signalFatalError("Unhandled throwable from FastNIODriver scheduler", throwable); - } - - public boolean isShutdown() { - return shutdown.get(); - } - - public boolean shutdown(long maxWait) { - shutdown.set(true); - scheduler.installJob(new TimedJob() { - public void timedOut() {} - }, 0); - long deadline = System.currentTimeMillis() + maxWait; - while (created.get() != destroyed.get()) { - if (deadline - System.currentTimeMillis() < 0) { - break; - } - try { - Thread.sleep(1); // better than spinning? - } catch (InterruptedException e) { - // ignore - } - } - - return created.get() == destroyed.get(); - } - - public Scheduler getScheduler() { - return scheduler; - } - - // whitebox test support methods - public int junit_getWaiting() { - return available.get(); - } - - public int junit_getCreated() { - return created.get(); - } - - public int junit_getDestroyed() { - return destroyed.get(); - } -} diff --git a/Net/src/main/java/io/deephaven/net/impl/nio/NIODriver.java b/Net/src/main/java/io/deephaven/net/impl/nio/NIODriver.java deleted file mode 100644 index f1c57602a45..00000000000 --- a/Net/src/main/java/io/deephaven/net/impl/nio/NIODriver.java +++ /dev/null @@ -1,295 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.net.impl.nio; - -import io.deephaven.internal.log.LoggerFactory; -import java.io.IOException; -import java.nio.channels.Selector; -import java.util.concurrent.atomic.AtomicInteger; - -import io.deephaven.net.CommBase; -import io.deephaven.configuration.Configuration; -import io.deephaven.io.NioUtil; -import io.deephaven.io.logger.LogCrashDump; -import io.deephaven.io.logger.Logger; -import io.deephaven.io.sched.Scheduler; -import io.deephaven.io.sched.TimedJob; -import io.deephaven.io.sched.YASchedulerImpl; - -public class NIODriver implements Runnable { - private static Logger log; - - private static boolean initialized = false; - private static volatile boolean stopped = false; - - private static Scheduler sched = null; - private static FastNIODriver driver = null; - - private static final Object lock = new Object(); - private static Thread leader = null; - private static AtomicInteger available = new AtomicInteger(0); - private static int created = 0; - private static int destroyed = 0; - - public static int WORK_TIMEOUT; - public static int NUM_INITIAL_THREADS; - public static int HARD_MAX_THREADS; - - private static final boolean useFastNIODriver = Configuration.getInstance().getBoolean("NIO.driver.useFast"); - - /** - * Let another thread take over the leadership. - */ - private static void handoff() { - Thread me = Thread.currentThread(); - synchronized (lock) { - if (leader != me) { - LogCrashDump.logCrashDump(log); - CommBase.signalFatalError("NIODriver: WTF? 
in handoff(), but not the leader?", new Throwable()); - } - - if (log.isDebugEnabled()) { - log.debug().append("Thread ").append(me.getName()).append(" is giving up leadership").endl(); - } - - leader = null; - - if (stopped || available.get() != 0) { - lock.notify(); - } else { - // no joy, have to add another thread - log.warn().append("Thread ").append(me.getName()).append(" is handing off with no threads available: ") - .append(summary()).endl(); - addThread(); - } - } - } - - /** - * A procedure which calls handoff(), to give the scheduler when we are running full-bore - */ - private static final Runnable handoffProc = NIODriver::handoff; - - /** - * return a string telling how many threads are doing what - */ - public static String summary() { - if (useFastNIODriver) { - return driver.summary(); - } else { - return "(available: " + available + ", created: " + created + ", destroyed: " + destroyed + ")"; - } - } - - /** - * one-time initialization - */ - public static void init() { - if (!initialized) { - init(LoggerFactory.getLogger(NIODriver.class)); - } - } - - public static void init(Logger log) { - synchronized (lock) { - if (!initialized) { - NIODriver.log = log; - WORK_TIMEOUT = Configuration.getInstance().getInteger("NIO.driver.workTimeout"); - NUM_INITIAL_THREADS = Configuration.getInstance().getInteger("NIO.driver.initialThreadCount"); - HARD_MAX_THREADS = Configuration.getInstance().getInteger("NIO.driver.maxThreadCount"); - if (useFastNIODriver) { - driver = FastNIODriver.createDrivers("Static", log, NUM_INITIAL_THREADS, HARD_MAX_THREADS, - WORK_TIMEOUT, 1000, true); - sched = driver.getScheduler(); - } else { - try { - sched = new YASchedulerImpl(NioUtil.reduceSelectorGarbage(Selector.open()), log); - } catch (IOException x) { - sched = null; - CommBase.signalFatalError("NIODriver.init: can't create scheduler", x); - } - for (int i = 0; i < NUM_INITIAL_THREADS; ++i) { - addThread(); - } - } - initialized = true; - } - } - - } - - /** - * Shut down, and wait for all threads to terminate. This method is really just for testing; it's a bad idea to do - * this in production because waiting for threads to terminate is prone to deadlocks. If desired, though, it can be - * called from an AbstractService shutdown hook installed in init(). 
- */ - public static boolean shutdown(long maxWait) { - synchronized (lock) { - if (!initialized) - return true; - - if (useFastNIODriver) { - if (driver.shutdown(maxWait)) { - initialized = false; - log.info().append("NIODriver.shutdown: finished").endl(); - return true; - } else { - return false; - } - } else { - long deadline = System.currentTimeMillis() + maxWait, remain = maxWait; - stopped = true; - lock.notifyAll(); - // force the scheduler to wake up - sched.installJob(new TimedJob() { - public void timedOut() {} - }, 0); - while (created != destroyed) { - try { - log.info().append("NIODriver.shutdown: waiting for threads to terminate: ").append(summary()) - .endl(); - lock.wait(Math.max(remain, 0)); - } catch (InterruptedException x) { - // ignore - } - if ((remain = deadline - System.currentTimeMillis()) < 0) { - return false; - } - } - sched.close(); - log.info().append("NIODriver.shutdown: finished").endl(); - leader = null; - sched = null; - initialized = stopped = false; - created = destroyed = 0; - available.set(0); - return true; - } - } - } - - /** - * Return the scheduler used by the NIO driver - */ - public static Scheduler getScheduler() { - return sched; - } - - /** - * Return the scheduler used by the NIO driver - */ - public static Logger getLogger() { - return log; - } - - /** - * add a thread to the pool - * - * NOTE: caller must hold the lock! - * - * NOTE: We increment the "waiting" variable *before* we start the new thread, and then make sure to correct it in - * the first iteration of the thread loop. This prevents a race in which we handoff() method creates too many - * threads, because it keeps getting called before the first thread it creates can get started. - */ - private static void addThread() { - if (created == HARD_MAX_THREADS) { - log.fatal().append("NIODriver: exceeded maximum thread pool limit: ").append(summary()).endl(); - LogCrashDump.logCrashDump(log); - CommBase.signalFatalError("NIODriver: exceeded maximum thread pool limit: " + summary(), new Throwable()); - } - Thread thread = new Thread(new NIODriver()); - thread.setDaemon(true); - thread.setName("NIODriver-" + created); - created++; - available.incrementAndGet(); - log.info().append("Thread ").append(thread.getName()).append(" is starting: ").append(summary()).endl(); - thread.start(); - } - - /** - * the threads' run method just does an endless loop, trying to become the leader whenever it can - */ - public void run() { - Thread me = Thread.currentThread(); - STOP: { - while (true) { - synchronized (lock) { - while (leader != me) { - if (stopped) { - destroyed++; - log.info().append("Thread ").append(me.getName()).append(" is terminating: ") - .append(summary()).endl(); - lock.notifyAll(); - break STOP; - } else if (leader == null) { - if (log.isDebugEnabled()) { - log.debug().append("Thread ").append(me.getName()).append(" is assuming leadership") - .endl(); - } - leader = me; - } else { - try { - if (log.isDebugEnabled()) { - log.debug().append("Thread ").append(me.getName()).append(" is waiting ") - .append(summary()).endl(); - } - lock.wait(); - if (log.isDebugEnabled()) { - log.debug().append("Thread ").append(me.getName()).append(" has awoken ") - .append(summary()).endl(); - } - } catch (InterruptedException x) { - // ignore - } - } - } - } - try { - available.decrementAndGet(); - sched.work(WORK_TIMEOUT, handoffProc); - available.incrementAndGet(); - } catch (Throwable x) { - synchronized (lock) { - destroyed++; - log.fatal(x).append("Thread ").append(me.getName()) - .append(" is 
terminating on a fatal exception: ").append(summary()).endl(); - lock.notifyAll(); - } - - NIODriver.shutdown(5000); - CommBase.signalFatalError("Unhandled throwable from NIO scheduler", x); - break STOP; - } - } - } - } - - // whitebox test support methods - public static int junit_getWaiting() { - if (useFastNIODriver) { - return driver.junit_getWaiting(); - } else { - return available.get(); - } - } - - public static int junit_getCreated() { - if (useFastNIODriver) { - return driver.junit_getCreated(); - } else { - return created; - } - } - - public static int junit_getDestroyed() { - if (useFastNIODriver) { - return driver.junit_getDestroyed(); - } else { - return destroyed; - } - } - - // ################################################################ - -} diff --git a/Stats/build.gradle b/Stats/build.gradle index e9d46238886..bac1cdce147 100644 --- a/Stats/build.gradle +++ b/Stats/build.gradle @@ -7,8 +7,6 @@ dependencies { implementation project(':DataStructures') implementation project(':IO') implementation project(':Configuration') - implementation project(':FishUtil') - implementation project(':Net') implementation project(':log-factory') implementation project(':engine-context') compileOnly 'com.google.code.java-allocation-instrumenter:java-allocation-instrumenter:3.3.0' diff --git a/Stats/src/main/java/io/deephaven/stats/StatsCPUCollector.java b/Stats/src/main/java/io/deephaven/stats/StatsCPUCollector.java index 1f003b5fb1e..7b1a3e8c8f8 100644 --- a/Stats/src/main/java/io/deephaven/stats/StatsCPUCollector.java +++ b/Stats/src/main/java/io/deephaven/stats/StatsCPUCollector.java @@ -7,7 +7,7 @@ import io.deephaven.configuration.Configuration; import io.deephaven.internal.log.LoggerFactory; import io.deephaven.io.logger.Logger; -import io.deephaven.util.OSUtil; +import io.deephaven.stats.util.OSUtil; import io.deephaven.base.stats.*; import io.deephaven.hash.KeyedLongObjectHash; import io.deephaven.hash.KeyedLongObjectHashMap; diff --git a/Stats/src/main/java/io/deephaven/stats/StatsDriver.java b/Stats/src/main/java/io/deephaven/stats/StatsDriver.java index 29c10096d13..beeda59894b 100644 --- a/Stats/src/main/java/io/deephaven/stats/StatsDriver.java +++ b/Stats/src/main/java/io/deephaven/stats/StatsDriver.java @@ -5,23 +5,27 @@ import io.deephaven.base.clock.Clock; import io.deephaven.engine.context.ExecutionContext; -import io.deephaven.net.CommBase; import io.deephaven.util.SafeCloseable; -import io.deephaven.util.formatters.ISO8601; import io.deephaven.base.stats.*; import io.deephaven.base.text.TimestampBuffer; import io.deephaven.configuration.Configuration; import io.deephaven.io.log.*; -import io.deephaven.io.sched.TimedJob; import io.deephaven.io.log.impl.LogEntryPoolImpl; import io.deephaven.io.log.impl.LogSinkImpl; +import io.deephaven.util.annotations.ReferentialIntegrity; +import io.deephaven.util.thread.NamingThreadFactory; import java.util.Properties; +import java.util.TimeZone; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; /** * Drives the collection of statistics on a 1-second timer task. 
*/ -public class StatsDriver extends TimedJob { +public class StatsDriver { public interface StatusAdapter { void sendAlert(String alertText); @@ -39,11 +43,11 @@ public boolean cmsAlertEnabled() { } private final LogEntryPool entryPool; - private final LogSink sink; + private final LogSink sink; private final LogEntry[] entries; private final LogEntryPool entryPoolHisto; - private final LogSink sinkHisto; + private final LogSink sinkHisto; private final LogEntry[] entriesHisto; private final TimestampBuffer systemTimestamp; @@ -52,9 +56,8 @@ public boolean cmsAlertEnabled() { public final static String header = "Stat,IntervalName,NowSec,NowString,AppNowSec,AppNowString,TypeTag,Name,N,Sum,Last,Min,Max,Avg,Sum2,Stdev"; - private long nextInvocation = System.currentTimeMillis(); - private long nextCpuUpdate = nextInvocation + CPU_INTERVAL; - private long nextMemUpdate = nextInvocation + MEM_INTERVAL; + private long nextCpuUpdate; + private long nextMemUpdate; private static final long STEP = 1000; private static final long MEM_INTERVAL = 1000; @@ -71,9 +74,14 @@ public boolean cmsAlertEnabled() { private final StatsIntradayLogger intraday; private final Value clockValue; private final ExecutionContext executionContext; + @ReferentialIntegrity + private final ScheduledExecutorService scheduler; + @ReferentialIntegrity + private final ScheduledFuture updateJobFuture; private final StatsMemoryCollector memStats; private final StatsCPUCollector cpuStats; + @ReferentialIntegrity private ObjectAllocationCollector objectAllocation; public StatsDriver(Clock clock) { @@ -116,8 +124,9 @@ public StatsDriver(Clock clock, StatsIntradayLogger intraday, boolean getFdStats } } - this.systemTimestamp = new TimestampBuffer(ISO8601.serverTimeZone()); - this.appTimestamp = new TimestampBuffer(ISO8601.serverTimeZone()); + final TimeZone serverTimeZone = Configuration.getInstance().getServerTimezone(); + this.systemTimestamp = new TimestampBuffer(serverTimeZone); + this.appTimestamp = new TimestampBuffer(serverTimeZone); if (path == null) { this.entryPool = null; @@ -150,9 +159,11 @@ public StatsDriver(Clock clock, StatsIntradayLogger intraday, boolean getFdStats clockValue = null; } - long now = System.currentTimeMillis(); - long delay = STEP - (now % STEP); - nextInvocation = now + delay; + final long now = System.currentTimeMillis(); + final long delay = STEP - (now % STEP); + nextCpuUpdate = now + delay + CPU_INTERVAL; + nextMemUpdate = now + delay + MEM_INTERVAL; + cpuStats = new StatsCPUCollector(CPU_INTERVAL, getFdStats); memStats = new StatsMemoryCollector(MEM_INTERVAL, statusAdapter::sendAlert, statusAdapter::cmsAlertEnabled); if (Configuration.getInstance().getBoolean("allocation.stats.enabled")) { @@ -160,13 +171,18 @@ public StatsDriver(Clock clock, StatsIntradayLogger intraday, boolean getFdStats } executionContext = ExecutionContext.getContext(); - // now that the StatsDriver is completely constructed, we can schedule the first iteration + // now that the StatsDriver is completely constructed, we can schedule the update job if (Configuration.getInstance().getBoolean("statsdriver.enabled")) { - schedule(); + scheduler = Executors.newSingleThreadScheduledExecutor( + new NamingThreadFactory(StatsDriver.class, "updateScheduler", true)); + updateJobFuture = scheduler.scheduleAtFixedRate(this::update, delay, STEP, TimeUnit.MILLISECONDS); + } else { + scheduler = null; + updateJobFuture = null; } } - public void timedOut() { + private void update() { long t0 = System.nanoTime(); long now = 
System.currentTimeMillis(); long appNow = clock == null ? now : clock.currentTimeMillis(); @@ -207,20 +223,12 @@ public void timedOut() { } } - schedule(); - statsTiming.sample((System.nanoTime() - t0 + 500) / 1000); } - private void schedule() { - CommBase.getScheduler().installJob(this, nextInvocation); - long steps = Math.max(1L, (((System.currentTimeMillis() - nextInvocation) / STEP) + 1)); - nextInvocation += steps * STEP; - } - private final ItemUpdateListener LISTENER = new ItemUpdateListener() { @Override - public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, + public void handleItemUpdated(Item item, long now, long appNow, int intervalIndex, long intervalMillis, String intervalName) { final Value v = item.getValue(); final History history = v.getHistory(); diff --git a/FishUtil/src/main/java/io/deephaven/util/OSUtil.java b/Stats/src/main/java/io/deephaven/stats/util/OSUtil.java similarity index 85% rename from FishUtil/src/main/java/io/deephaven/util/OSUtil.java rename to Stats/src/main/java/io/deephaven/stats/util/OSUtil.java index 4e136975d2c..ef3566f4696 100644 --- a/FishUtil/src/main/java/io/deephaven/util/OSUtil.java +++ b/Stats/src/main/java/io/deephaven/stats/util/OSUtil.java @@ -1,20 +1,22 @@ /** * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending */ -package io.deephaven.util; +package io.deephaven.stats.util; import org.jetbrains.annotations.NotNull; import java.util.Arrays; import java.util.function.Predicate; -@SuppressWarnings("WeakerAccess") public class OSUtil { public enum OSFamily { - - LINUX(name -> name.startsWith("Linux")), WINDOWS(name -> name.contains("Windows")), MAC_OS( - name -> name.startsWith("Mac OS")), SOLARIS(name -> name.startsWith("SunOs")); + // @formatter:off + LINUX(name -> name.startsWith("Linux")), + WINDOWS(name -> name.contains("Windows")), + MAC_OS(name -> name.startsWith("Mac OS")), + SOLARIS(name -> name.startsWith("SunOs")); + // @formatter:on private final Predicate nameMatcher; diff --git a/TableLogger/TableLogger.gradle b/TableLogger/TableLogger.gradle index 411b1b752d0..a4173ac368d 100644 --- a/TableLogger/TableLogger.gradle +++ b/TableLogger/TableLogger.gradle @@ -2,12 +2,8 @@ plugins { id 'io.deephaven.project.register' } -configurations { - implementation.extendsFrom fishUtil, fishData - testImplementation.extendsFrom fishDataTest -} - dependencies { + implementation project(':Base') implementation project(':Util') testRuntimeOnly project(path: ':configs') testRuntimeOnly project(path: ':test-configs') diff --git a/FishUtil/src/main/java/io/deephaven/util/process/BaseProcessEnvironment.java b/Util/src/main/java/io/deephaven/util/process/BaseProcessEnvironment.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/BaseProcessEnvironment.java rename to Util/src/main/java/io/deephaven/util/process/BaseProcessEnvironment.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/DefaultFatalErrorReporter.java b/Util/src/main/java/io/deephaven/util/process/DefaultFatalErrorReporter.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/DefaultFatalErrorReporter.java rename to Util/src/main/java/io/deephaven/util/process/DefaultFatalErrorReporter.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/DefaultProcessEnvironment.java b/Util/src/main/java/io/deephaven/util/process/DefaultProcessEnvironment.java similarity index 100% rename from 
FishUtil/src/main/java/io/deephaven/util/process/DefaultProcessEnvironment.java rename to Util/src/main/java/io/deephaven/util/process/DefaultProcessEnvironment.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/FatalErrorReporter.java b/Util/src/main/java/io/deephaven/util/process/FatalErrorReporter.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/FatalErrorReporter.java rename to Util/src/main/java/io/deephaven/util/process/FatalErrorReporter.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/FatalErrorReporterBase.java b/Util/src/main/java/io/deephaven/util/process/FatalErrorReporterBase.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/FatalErrorReporterBase.java rename to Util/src/main/java/io/deephaven/util/process/FatalErrorReporterBase.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/LoggerShutdownTask.java b/Util/src/main/java/io/deephaven/util/process/LoggerShutdownTask.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/LoggerShutdownTask.java rename to Util/src/main/java/io/deephaven/util/process/LoggerShutdownTask.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/OnetimeShutdownTask.java b/Util/src/main/java/io/deephaven/util/process/OnetimeShutdownTask.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/OnetimeShutdownTask.java rename to Util/src/main/java/io/deephaven/util/process/OnetimeShutdownTask.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/ProcessEnvironment.java b/Util/src/main/java/io/deephaven/util/process/ProcessEnvironment.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/ProcessEnvironment.java rename to Util/src/main/java/io/deephaven/util/process/ProcessEnvironment.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/ShutdownManager.java b/Util/src/main/java/io/deephaven/util/process/ShutdownManager.java similarity index 100% rename from FishUtil/src/main/java/io/deephaven/util/process/ShutdownManager.java rename to Util/src/main/java/io/deephaven/util/process/ShutdownManager.java diff --git a/FishUtil/src/main/java/io/deephaven/util/process/ShutdownManagerImpl.java b/Util/src/main/java/io/deephaven/util/process/ShutdownManagerImpl.java similarity index 99% rename from FishUtil/src/main/java/io/deephaven/util/process/ShutdownManagerImpl.java rename to Util/src/main/java/io/deephaven/util/process/ShutdownManagerImpl.java index 520b86a96bf..5b3e4dc68ae 100644 --- a/FishUtil/src/main/java/io/deephaven/util/process/ShutdownManagerImpl.java +++ b/Util/src/main/java/io/deephaven/util/process/ShutdownManagerImpl.java @@ -9,8 +9,8 @@ import io.deephaven.io.log.LogEntry; import io.deephaven.io.log.LogLevel; import io.deephaven.io.logger.Logger; -import io.deephaven.util.threads.ThreadDump; import io.deephaven.internal.log.LoggerFactory; +import io.deephaven.util.thread.ThreadDump; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; diff --git a/buildSrc/src/main/groovy/io.deephaven.java-classpath-conventions.gradle b/buildSrc/src/main/groovy/io.deephaven.java-classpath-conventions.gradle index baa0413b77f..d008aa19202 100644 --- a/buildSrc/src/main/groovy/io.deephaven.java-classpath-conventions.gradle +++ b/buildSrc/src/main/groovy/io.deephaven.java-classpath-conventions.gradle @@ -29,14 +29,12 @@ configurations { fishDataStructure.extendsFrom fishIo 
fishConfig.extendsFrom fishDataStructure fishDataGenerator.extendsFrom jdom - fishNet.extendsFrom fishIo fishNumerics.extendsFrom fishBase - fishUtil.extendsFrom fishConfig fishBaseTest.extendsFrom junit fishIoTest.extendsFrom fishBaseTest dhNumerics.extendsFrom fishNumerics, jama - dhUtil.extendsFrom commonsIo, commonsLang3, commonsText, fishUtil, fishNet, fishIo, jdom + dhUtil.extendsFrom commonsIo, commonsLang3, commonsText, fishConfig, fishIo, jdom dhPlot.extendsFrom dhUtil dhBenchmarkSupport.extendsFrom fishData dhIntegrations.extendsFrom math3 @@ -70,10 +68,6 @@ dependencies { fishConfig project(':Configuration') - fishUtil project(':FishUtil') - - fishNet project(':Net') - fishBaseTest project(path: ':Base', configuration: 'tests') fishIoTest project(path: ':IO', configuration: 'tests') diff --git a/engine/table/build.gradle b/engine/table/build.gradle index 4456f02c262..261678a3c06 100644 --- a/engine/table/build.gradle +++ b/engine/table/build.gradle @@ -28,8 +28,6 @@ dependencies { implementation project(':Configuration') implementation project(':log-factory') implementation project(':Stats') - implementation project(':Net') - implementation project(':FishUtil') implementation 'com.github.f4b6a3:uuid-creator:5.2.0' // TODO(deephaven-core#3204): t-digest 3.3 appears to have higher errors than 3.2 diff --git a/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java index a2fbaee6581..34e0462dde7 100644 --- a/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java +++ b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java @@ -27,9 +27,6 @@ import io.deephaven.io.log.LogEntry; import io.deephaven.io.log.impl.LogOutputStringImpl; import io.deephaven.io.logger.Logger; -import io.deephaven.io.sched.Scheduler; -import io.deephaven.io.sched.TimedJob; -import io.deephaven.net.CommBase; import io.deephaven.util.SafeCloseable; import io.deephaven.util.annotations.TestUseOnly; import io.deephaven.util.datastructures.SimpleReferenceManager; @@ -51,6 +48,8 @@ import java.util.function.BooleanSupplier; import java.util.function.LongConsumer; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + /** *

* This class uses a thread (or pool of threads) to periodically update a set of monitored update sources at a specified @@ -132,25 +131,30 @@ public static PerformanceEntry createUpdatePerformanceEntry( private final Thread refreshThread; private volatile boolean running = true; + /** + * {@link ScheduledExecutorService} used for scheduling the {@link #watchDogTimeoutProcedure}. + */ + private final ScheduledExecutorService watchdogScheduler; + /** * If this is set to a positive value, then we will call the {@link #watchDogTimeoutProcedure} if any single run * loop takes longer than this value. The intention is to use this for strategies, or other queries, where a * PeriodicUpdateGraph loop that is "stuck" is the equivalent of an error. Set the value with * {@link #setWatchDogMillis(int)}. */ - private int watchDogMillis = 0; + private volatile int watchDogMillis = 0; /** * If a timeout time has been {@link #setWatchDogMillis(int) set}, this procedure will be called if any single run * loop takes longer than the value specified. Set the value with * {@link #setWatchDogTimeoutProcedure(LongConsumer)}. */ - private LongConsumer watchDogTimeoutProcedure = null; + private volatile LongConsumer watchDogTimeoutProcedure; public static final String ALLOW_UNIT_TEST_MODE_PROP = "PeriodicUpdateGraph.allowUnitTestMode"; private final boolean allowUnitTestMode; - private int notificationAdditionDelay = 0; + private int notificationAdditionDelay; private Random notificationRandomizer = new Random(0); - private boolean unitTestMode = false; + private boolean unitTestMode; private ExecutorService unitTestRefreshThreadPool; public static final String DEFAULT_TARGET_CYCLE_DURATION_MILLIS_PROP = @@ -162,27 +166,27 @@ public static PerformanceEntry createUpdatePerformanceEntry( private final long minimumCycleDurationToLogNanos; /** when to next flush the performance tracker; initializes to zero to force a flush on start */ - private long nextUpdatePerformanceTrackerFlushTime = 0; + private long nextUpdatePerformanceTrackerFlushTimeNanos; /** * How many cycles we have not logged, but were non-zero. */ - private long suppressedCycles = 0; - private long suppressedCyclesTotalNanos = 0; - private long suppressedCyclesTotalSafePointTimeMillis = 0; + private long suppressedCycles; + private long suppressedCyclesTotalNanos; + private long suppressedCyclesTotalSafePointTimeMillis; /** * Accumulated UpdateGraph exclusive lock waits for the current cycle (or previous, if idle). */ - private long currentCycleLockWaitTotalNanos = 0; + private long currentCycleLockWaitTotalNanos; /** * Accumulated delays due to intracycle yields for the current cycle (or previous, if idle). */ - private long currentCycleYieldTotalNanos = 0L; + private long currentCycleYieldTotalNanos; /** * Accumulated delays due to intracycle sleeps for the current cycle (or previous, if idle). */ - private long currentCycleSleepTotalNanos = 0L; + private long currentCycleSleepTotalNanos; public static class AccumulatedCycleStats { /** @@ -331,6 +335,14 @@ public PeriodicUpdateGraph( } }), "PeriodicUpdateGraph." + name + ".refreshThread"); refreshThread.setDaemon(true); + watchdogScheduler = Executors.newSingleThreadScheduledExecutor( + new NamingThreadFactory(PeriodicUpdateGraph.class, "watchdogScheduler", true) { + @Override + public Thread newThread(@NotNull final Runnable r) { + // Not a refresh thread, but should still be instrumented for debugging purposes. 
+ return super.newThread(ThreadInitializationFactory.wrapRunnable(r)); + } + }); updatePerformanceTracker = new UpdatePerformanceTracker(this); } @@ -593,15 +605,6 @@ public void setWatchDogTimeoutProcedure(LongConsumer procedure) { this.watchDogTimeoutProcedure = procedure; } - private class WatchdogJob extends TimedJob { - @Override - public void timedOut() { - if (watchDogTimeoutProcedure != null) { - watchDogTimeoutProcedure.accept(watchDogMillis); - } - } - } - /** * Install a real NotificationProcessor and start the primary refresh thread. * @@ -1129,7 +1132,7 @@ public Runnable flushAllNormalNotificationsForUnitTests(@NotNull final BooleanSu final ControlledNotificationProcessor controlledNotificationProcessor = new ControlledNotificationProcessor(); notificationProcessor = controlledNotificationProcessor; final Future flushJobFuture = unitTestRefreshThreadPool.submit(() -> { - final long deadlineNanoTime = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis); + final long deadlineNanoTime = System.nanoTime() + MILLISECONDS.toNanos(timeoutMillis); boolean flushed; while ((flushed = flushOneNotificationForUnitTestsInternal(false)) || !done.getAsBoolean()) { if (!flushed) { @@ -1676,8 +1679,6 @@ private static LogEntry appendAsMillisFromNanos(final LogEntry entry, final long * {@link #getTargetCycleDurationMillis() minimum cycle time}. */ private void refreshTablesAndFlushNotifications() { - final Scheduler sched = CommBase.getScheduler(); - final long startTime = sched.currentTimeMillis(); final long startTimeNanos = System.nanoTime(); jvmIntrospectionContext.startSample(); @@ -1686,17 +1687,20 @@ private void refreshTablesAndFlushNotifications() { } else { currentCycleLockWaitTotalNanos = currentCycleYieldTotalNanos = currentCycleSleepTotalNanos = 0L; - WatchdogJob watchdogJob = null; + ScheduledFuture watchdogFuture = null; - if ((watchDogMillis > 0) && (watchDogTimeoutProcedure != null)) { - watchdogJob = new WatchdogJob(); - sched.installJob(watchdogJob, startTime + watchDogMillis); + final long localWatchdogMillis = watchDogMillis; + final LongConsumer localWatchdogTimeoutProcedure = watchDogTimeoutProcedure; + if ((localWatchdogMillis > 0) && (localWatchdogTimeoutProcedure != null)) { + watchdogFuture = watchdogScheduler.schedule( + () -> localWatchdogTimeoutProcedure.accept(localWatchdogMillis), + localWatchdogMillis, MILLISECONDS); } refreshAllTables(); - if (watchdogJob != null) { - sched.cancelJob(watchdogJob); + if (watchdogFuture != null) { + watchdogFuture.cancel(true); } jvmIntrospectionContext.endSample(); final long cycleTimeNanos = System.nanoTime() - startTimeNanos; @@ -1707,7 +1711,7 @@ private void refreshTablesAndFlushNotifications() { Thread.yield(); } - waitForNextCycle(startTime, sched); + waitForNextCycle(startTimeNanos); } private void computeStatsAndLogCycle(final long cycleTimeNanos) { @@ -1791,24 +1795,25 @@ private void logSuppressedCycles() { * wait the remaining period. *

* - * @param startTime The start time of the last run cycle - * @param timeSource The source of time that startTime was based on + * @param startTimeNanos The start time of the last run cycle as reported by {@link System#nanoTime()} */ - private void waitForNextCycle(final long startTime, final Scheduler timeSource) { - final long now = timeSource.currentTimeMillis(); - long expectedEndTime = startTime + targetCycleDurationMillis; + private void waitForNextCycle(final long startTimeNanos) { + final long nowNanos = System.nanoTime(); + long expectedEndTimeNanos = startTimeNanos + MILLISECONDS.toNanos(targetCycleDurationMillis); if (minimumInterCycleSleep > 0) { - expectedEndTime = Math.max(expectedEndTime, now + minimumInterCycleSleep); + expectedEndTimeNanos = + Math.max(expectedEndTimeNanos, nowNanos + MILLISECONDS.toNanos(minimumInterCycleSleep)); } - if (expectedEndTime >= nextUpdatePerformanceTrackerFlushTime) { - nextUpdatePerformanceTrackerFlushTime = now + UpdatePerformanceTracker.REPORT_INTERVAL_MILLIS; + if (expectedEndTimeNanos >= nextUpdatePerformanceTrackerFlushTimeNanos) { + nextUpdatePerformanceTrackerFlushTimeNanos = + nowNanos + MILLISECONDS.toNanos(UpdatePerformanceTracker.REPORT_INTERVAL_MILLIS); try { updatePerformanceTracker.flush(); } catch (Exception err) { log.error().append("Error flushing UpdatePerformanceTracker: ").append(err).endl(); } } - waitForEndTime(expectedEndTime, timeSource); + waitForEndTime(expectedEndTimeNanos); } /** @@ -1819,12 +1824,11 @@ private void waitForNextCycle(final long startTime, final Scheduler timeSource) * If the delay is interrupted for any other {@link InterruptedException reason}, it will be logged and continue to * wait the remaining period. * - * @param expectedEndTime The time which we should sleep until - * @param timeSource The source of time that startTime was based on + * @param expectedEndTimeNanos The time (as reported by {@link System#nanoTime()}) which we should sleep until */ - private void waitForEndTime(final long expectedEndTime, final Scheduler timeSource) { - long remainingMillis; - while ((remainingMillis = expectedEndTime - timeSource.currentTimeMillis()) > 0) { + private void waitForEndTime(final long expectedEndTimeNanos) { + long remainingNanos; + while ((remainingNanos = expectedEndTimeNanos - System.nanoTime()) > 0) { if (refreshRequested.get()) { return; } @@ -1832,8 +1836,10 @@ private void waitForEndTime(final long expectedEndTime, final Scheduler timeSour if (refreshRequested.get()) { return; } + final long millisToWait = remainingNanos / 1_000_000; + final int extraNanosToWait = (int) (remainingNanos - (millisToWait * 1_000_000)); try { - refreshRequested.wait(remainingMillis); + refreshRequested.wait(millisToWait, extraNanosToWait); } catch (final InterruptedException logAndIgnore) { log.warn().append("Interrupted while waiting on refreshRequested. 
Ignoring: ").append(logAndIgnore) .endl(); @@ -2031,7 +2037,7 @@ public static final class Builder { Configuration.getInstance().getBooleanWithDefault(ALLOW_UNIT_TEST_MODE_PROP, false); private long targetCycleDurationMillis = Configuration.getInstance().getIntegerWithDefault(DEFAULT_TARGET_CYCLE_DURATION_MILLIS_PROP, 1000); - private long minimumCycleDurationToLogNanos = TimeUnit.MILLISECONDS.toNanos( + private long minimumCycleDurationToLogNanos = MILLISECONDS.toNanos( Configuration.getInstance().getIntegerWithDefault(MINIMUM_CYCLE_DURATION_TO_LOG_MILLIS_PROP, 25)); private String name; diff --git a/engine/table/src/main/java/io/deephaven/engine/util/file/TrackedFileHandleFactory.java b/engine/table/src/main/java/io/deephaven/engine/util/file/TrackedFileHandleFactory.java index 7d89b355700..18f23de97fd 100644 --- a/engine/table/src/main/java/io/deephaven/engine/util/file/TrackedFileHandleFactory.java +++ b/engine/table/src/main/java/io/deephaven/engine/util/file/TrackedFileHandleFactory.java @@ -3,13 +3,12 @@ */ package io.deephaven.engine.util.file; -import io.deephaven.net.CommBase; +import io.deephaven.UncheckedDeephavenException; import io.deephaven.base.verify.Require; import io.deephaven.configuration.Configuration; -import io.deephaven.io.logger.Logger; -import io.deephaven.io.sched.Scheduler; -import io.deephaven.io.sched.TimedJob; +import io.deephaven.util.thread.NamingThreadFactory; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.VisibleForTesting; import java.io.File; import java.io.IOException; @@ -17,8 +16,13 @@ import java.nio.channels.FileChannel; import java.nio.file.OpenOption; -import java.util.*; +import java.util.Iterator; +import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -26,7 +30,7 @@ * Simple least-recently-opened "cache" for FileHandles, to avoid running up against ulimits. Will probably not achieve * satisfactory results if the number of file handles concurrently in active use exceeds capacity. Note that returned * FileHandles may be closed asynchronously by the factory. - * + *

* TODO: Consider adding a lookup to enable handle sharing. Not necessary for current usage. */ public class TrackedFileHandleFactory implements FileHandleFactory { @@ -38,9 +42,16 @@ public static TrackedFileHandleFactory getInstance() { synchronized (TrackedFileHandleFactory.class) { if (instance == null) { instance = new TrackedFileHandleFactory( - CommBase.singleThreadedScheduler("TrackedFileHandleFactory.CleanupScheduler", Logger.NULL) - .start(), - Configuration.getInstance().getInteger("TrackedFileHandleFactory.maxOpenFiles")); + Executors.newSingleThreadScheduledExecutor( + new NamingThreadFactory(TrackedFileHandleFactory.class, "cleanupScheduler", true)), + Configuration.getInstance().getInteger("TrackedFileHandleFactory.maxOpenFiles")) { + + @Override + public void shutdown() { + super.shutdown(); + getScheduler().shutdown(); + } + }; } } } @@ -50,11 +61,12 @@ public static TrackedFileHandleFactory getInstance() { private final static double DEFAULT_TARGET_USAGE_RATIO = 0.9; private final static long DEFAULT_CLEANUP_INTERVAL_MILLIS = 60_000; - private final Scheduler scheduler; + private final ScheduledExecutorService scheduler; private final int capacity; private final double targetUsageRatio; private final int targetUsageThreshold; + private final ScheduledFuture cleanupJobFuture; private final AtomicInteger size = new AtomicInteger(0); private final Queue handleReferences = new ConcurrentLinkedQueue<>(); @@ -70,32 +82,39 @@ public static TrackedFileHandleFactory getInstance() { /** * Full constructor. * - * @param scheduler The scheduler to use for cleanup + * @param scheduler The {@link ScheduledExecutorService} to use for cleanup * @param capacity The total number of file handles to allow outstanding * @param targetUsageRatio The target usage threshold as a ratio of capacity, in [0.1, 0.9] * @param cleanupIntervalMillis The interval for asynchronous cleanup attempts */ - public TrackedFileHandleFactory(@NotNull final Scheduler scheduler, final int capacity, - final double targetUsageRatio, final long cleanupIntervalMillis) { + @VisibleForTesting + TrackedFileHandleFactory( + @NotNull final ScheduledExecutorService scheduler, + final int capacity, + final double targetUsageRatio, + final long cleanupIntervalMillis) { this.scheduler = scheduler; this.capacity = Require.gtZero(capacity, "capacity"); this.targetUsageRatio = Require.inRange(targetUsageRatio, 0.1, 0.9, "targetUsageRatio"); targetUsageThreshold = Require.gtZero((int) (capacity * targetUsageRatio), "targetUsageThreshold"); - new CleanupJob(cleanupIntervalMillis).schedule(); + cleanupJobFuture = scheduler.scheduleAtFixedRate( + new CleanupJob(), cleanupIntervalMillis, cleanupIntervalMillis, TimeUnit.MILLISECONDS); } /** * Constructor with default target usage ratio of 0.9 (90%) and cleanup attempts every 60 seconds. 
- * - * @param scheduler The scheduler to use for cleanup + * + * @param scheduler The {@link ScheduledExecutorService} to use for cleanup * @param capacity The total number of file handles to allow outstanding */ - public TrackedFileHandleFactory(@NotNull final Scheduler scheduler, final int capacity) { + @VisibleForTesting + TrackedFileHandleFactory(@NotNull final ScheduledExecutorService scheduler, final int capacity) { this(scheduler, capacity, DEFAULT_TARGET_USAGE_RATIO, DEFAULT_CLEANUP_INTERVAL_MILLIS); } - public Scheduler getScheduler() { + @VisibleForTesting + ScheduledExecutorService getScheduler() { return scheduler; } @@ -160,26 +179,18 @@ public void closeAll() { } } - private class CleanupJob extends TimedJob { - - private final long intervalMills; - - private CleanupJob(final long intervalMills) { - this.intervalMills = intervalMills; - } + public void shutdown() { + cleanupJobFuture.cancel(true); + } - private void schedule() { - scheduler.installJob(this, scheduler.currentTimeMillis() + intervalMills); - } + private class CleanupJob implements Runnable { - @Override - public void timedOut() { + public void run() { try { cleanup(); } catch (Exception e) { - throw new RuntimeException("TrackedFileHandleFactory.CleanupJob: Unexpected exception", e); + throw new UncheckedDeephavenException("TrackedFileHandleFactory.CleanupJob: Unexpected exception", e); } - schedule(); } } diff --git a/engine/table/src/test/java/io/deephaven/engine/util/file/TestTrackedFileHandleFactory.java b/engine/table/src/test/java/io/deephaven/engine/util/file/TestTrackedFileHandleFactory.java index 8d28daf9d7c..d1f832086c5 100644 --- a/engine/table/src/test/java/io/deephaven/engine/util/file/TestTrackedFileHandleFactory.java +++ b/engine/table/src/test/java/io/deephaven/engine/util/file/TestTrackedFileHandleFactory.java @@ -5,8 +5,6 @@ import io.deephaven.base.testing.BaseCachedJMockTestCase; import io.deephaven.base.verify.RequirementFailure; -import io.deephaven.io.sched.Scheduler; -import io.deephaven.io.sched.TimedJob; import junit.framework.TestCase; import org.junit.After; import org.junit.Before; @@ -15,6 +13,8 @@ import java.io.File; import java.io.IOException; import java.nio.file.Files; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; public class TestTrackedFileHandleFactory extends BaseCachedJMockTestCase { @@ -23,7 +23,7 @@ public class TestTrackedFileHandleFactory extends BaseCachedJMockTestCase { private static final double TARGET_USAGE_RATIO = 0.9; private static final int TARGET_USAGE_THRESHOLD = 90; - private Scheduler scheduler; + private ScheduledExecutorService scheduler; private TrackedFileHandleFactory FHCUT; @@ -33,13 +33,15 @@ public void setUp() throws Exception { FILE = Files.createTempFile(TestTrackedFileHandleFactory.class.getName(), ".dat").toFile(); - scheduler = mock(Scheduler.class); + scheduler = mock(ScheduledExecutorService.class); checking(new Expectations() { { - one(scheduler).currentTimeMillis(); - will(returnValue(0L)); - one(scheduler).installJob(with(any(TimedJob.class)), with(equal(60000L))); + one(scheduler).scheduleAtFixedRate( + with(any(Runnable.class)), + with(equal(60000L)), + with(equal(60000L)), + with(equal(TimeUnit.MILLISECONDS))); } }); diff --git a/engine/test-utils/build.gradle b/engine/test-utils/build.gradle index b11d14731e2..743e05a1dc5 100644 --- a/engine/test-utils/build.gradle +++ b/engine/test-utils/build.gradle @@ -13,7 +13,6 @@ dependencies { implementation project(':engine-tuple') 
implementation project(':base-test-utils') implementation project(':engine-rowset-test-utils') - implementation project(':FishUtil') implementation project(':extensions-source-support') implementation depCommonsLang3 diff --git a/engine/time/build.gradle b/engine/time/build.gradle index 697af28e26f..3dac58f277c 100644 --- a/engine/time/build.gradle +++ b/engine/time/build.gradle @@ -16,7 +16,6 @@ dependencies { implementation project(':engine-function') implementation project(':Configuration') implementation project(':log-factory') - implementation project(':FishUtil') implementation depJdom2 testImplementation TestTools.projectDependency(project, 'Base') diff --git a/engine/updategraph/build.gradle b/engine/updategraph/build.gradle index 1194f5ed0a9..69063b30bd0 100644 --- a/engine/updategraph/build.gradle +++ b/engine/updategraph/build.gradle @@ -12,8 +12,6 @@ dependencies { implementation project(':hotspot') implementation project(':log-factory') implementation project(':Configuration') - implementation project(':Net') - implementation project(':FishUtil') implementation depCommonsLang3 compileOnly 'com.google.code.findbugs:jsr305:3.0.2' diff --git a/engine/updategraph/src/main/java/io/deephaven/engine/liveness/Liveness.java b/engine/updategraph/src/main/java/io/deephaven/engine/liveness/Liveness.java index 9b6476ff4e8..f00655d5e14 100644 --- a/engine/updategraph/src/main/java/io/deephaven/engine/liveness/Liveness.java +++ b/engine/updategraph/src/main/java/io/deephaven/engine/liveness/Liveness.java @@ -5,14 +5,15 @@ import io.deephaven.configuration.Configuration; import io.deephaven.io.logger.Logger; -import io.deephaven.io.sched.Scheduler; -import io.deephaven.io.sched.TimedJob; import io.deephaven.engine.updategraph.DynamicNode; import io.deephaven.util.HeapDump; import io.deephaven.internal.log.LoggerFactory; import org.jetbrains.annotations.NotNull; import java.io.IOException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; /** * Utility class for liveness-related instrumentation. @@ -77,14 +78,18 @@ private static void maybeLogOutstandingCount() { intervalLastOutstandingCount = intervalMinOutstandingCount = intervalMaxOutstandingCount = outstandingCount; } - public static void scheduleCountReport(@NotNull final Scheduler scheduler) { - scheduler.installJob(new TimedJob() { - @Override - public final void timedOut() { - maybeLogOutstandingCount(); - scheduler.installJob(this, scheduler.currentTimeMillis() + OUTSTANDING_COUNT_LOG_INTERVAL_MILLIS); - } - }, 0L); + /** + * Schedule a job to log the count of known outstanding {@link LivenessReferent LivenessReferents}. 
+ * + * @param scheduler The {@link ScheduledExecutorService} to use + * @return The {@link ScheduledFuture} for the scheduled job + */ + public static ScheduledFuture scheduleCountReport(@NotNull final ScheduledExecutorService scheduler) { + return scheduler.scheduleAtFixedRate( + Liveness::maybeLogOutstandingCount, + 0L, + OUTSTANDING_COUNT_LOG_INTERVAL_MILLIS, + TimeUnit.MILLISECONDS); } private Liveness() {} diff --git a/proto/raw-js-openapi/Dockerfile b/proto/raw-js-openapi/Dockerfile index 55cd2d950fb..9f39309d334 100644 --- a/proto/raw-js-openapi/Dockerfile +++ b/proto/raw-js-openapi/Dockerfile @@ -1,5 +1,3 @@ -FROM deephaven/proto-backplane-grpc:local-build AS proto-backplane-grpc - FROM deephaven/node:local-build WORKDIR /usr/src/app # Note: we are setting CI=true, even for local development, otherwise commands may run in dev-mode (ie, @@ -12,9 +10,6 @@ COPY . . RUN set -eux; \ npm ci --unsafe-perm -# TODO: this gets TS files which we don't need -COPY --from=proto-backplane-grpc generated/js raw-js-openapi/build/js-src - WORKDIR /usr/src/app/raw-js-openapi RUN set -eux; \ ../node_modules/.bin/webpack diff --git a/server/build.gradle b/server/build.gradle index 2996be8a521..2ab80c615d6 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -14,7 +14,6 @@ dependencies { implementation project(':extensions-jdbc') implementation project(':Util'); implementation project(':Integrations') - implementation project(':FishUtil') implementation depCommonsLang3 Classpaths.inheritCommonsText(project, 'implementation') diff --git a/settings.gradle b/settings.gradle index f1474fcba81..587ce55a9d7 100644 --- a/settings.gradle +++ b/settings.gradle @@ -137,10 +137,6 @@ include(':DataStructures') include(':Configuration') -include(':FishUtil') - -include(':Net') - include(':Stats') include(':Container') From 6d1d1c96c79d7121763365ceff59f1004b3539fd Mon Sep 17 00:00:00 2001 From: George Wan Date: Thu, 30 Nov 2023 11:51:59 -0800 Subject: [PATCH 02/25] Add key column and column properties to JsPartitionedTable (#4789) * Add key column and column properties to JsPartitionedTable * Revise documentation * Add getKeyTable property to JsPartitionedTable * Drop columns from key table * Fix formatting --- .../web/client/api/JsPartitionedTable.java | 65 +++++++++++++++++-- 1 file changed, 60 insertions(+), 5 deletions(-) diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/JsPartitionedTable.java b/web/client-api/src/main/java/io/deephaven/web/client/api/JsPartitionedTable.java index a00f093942b..897e616ebc5 100644 --- a/web/client-api/src/main/java/io/deephaven/web/client/api/JsPartitionedTable.java +++ b/web/client-api/src/main/java/io/deephaven/web/client/api/JsPartitionedTable.java @@ -1,6 +1,7 @@ package io.deephaven.web.client.api; import elemental2.core.JsArray; +import elemental2.core.JsObject; import elemental2.core.JsSet; import elemental2.dom.CustomEvent; import elemental2.dom.CustomEventInit; @@ -9,9 +10,11 @@ import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.partitionedtable_pb.GetTableRequest; import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.partitionedtable_pb.MergeRequest; import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.partitionedtable_pb.PartitionedTableDescriptor; +import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.table_pb.DropColumnsRequest; import io.deephaven.javascript.proto.dhinternal.io.deephaven.proto.ticket_pb.TypedTicket; import io.deephaven.web.client.api.barrage.WebBarrageUtils; import 
io.deephaven.web.client.api.barrage.def.ColumnDefinition; +import io.deephaven.web.client.api.barrage.def.InitialTableDefinition; import io.deephaven.web.client.api.lifecycle.HasLifecycle; import io.deephaven.web.client.api.subscription.SubscriptionTableData; import io.deephaven.web.client.api.subscription.TableSubscription; @@ -21,6 +24,7 @@ import io.deephaven.web.shared.data.RangeSet; import io.deephaven.web.shared.fu.JsConsumer; import jsinterop.annotations.JsIgnore; +import jsinterop.annotations.JsMethod; import jsinterop.annotations.JsProperty; import jsinterop.annotations.JsType; import jsinterop.base.Js; @@ -64,6 +68,10 @@ public class JsPartitionedTable extends HasLifecycle implements ServerObject { */ private final Map, JsLazy>> tables = new HashMap<>(); + private Column[] keyColumns; + + private Column[] columns; + @JsIgnore public JsPartitionedTable(WorkerConnection connection, JsWidget widget) { @@ -83,14 +91,22 @@ public Promise refetch() { descriptor = PartitionedTableDescriptor.deserializeBinary(w.getDataAsU8()); keyColumnTypes = new ArrayList<>(); - ColumnDefinition[] columnDefinitions = WebBarrageUtils.readColumnDefinitions( + InitialTableDefinition tableDefinition = WebBarrageUtils.readTableDefinition( WebBarrageUtils.readSchemaMessage(descriptor.getConstituentDefinitionSchema_asU8())); + ColumnDefinition[] columnDefinitions = tableDefinition.getColumns(); + Column[] columns = new Column[0]; + Column[] keyColumns = new Column[0]; for (int i = 0; i < columnDefinitions.length; i++) { ColumnDefinition columnDefinition = columnDefinitions[i]; + Column column = columnDefinition.makeJsColumn(columns.length, tableDefinition.getColumnsByName()); + columns[columns.length] = column; if (descriptor.getKeyColumnNamesList().indexOf(columnDefinition.getName()) != -1) { keyColumnTypes.add(columnDefinition.getType()); + keyColumns[keyColumns.length] = column; } } + this.columns = JsObject.freeze(columns); + this.keyColumns = JsObject.freeze(keyColumns); return w.getExportedObjects()[0].fetch(); }).then(result -> { @@ -188,7 +204,7 @@ private void populateLazyTable(List key) { /** * Fetch the table with the given key. - * + * * @param key The key to fetch. An array of values for each key column, in the same order as the key columns are. * @return Promise of dh.Table */ @@ -211,7 +227,7 @@ public Promise getTable(Object key) { /** * Open a new table that is the result of merging all constituent tables. See * {@link io.deephaven.engine.table.PartitionedTable#merge()} for details. - * + * * @return A merged representation of the constituent tables. */ public Promise getMergedTable() { @@ -228,7 +244,7 @@ public Promise getMergedTable() { /** * The set of all currently known keys. This is kept up to date, so getting the list after adding an event listener * for keyadded will ensure no keys are missed. - * + * * @return Set of Object */ public JsSet getKeys() { @@ -240,7 +256,7 @@ public JsSet getKeys() { /** * The count of known keys. - * + * * @return int */ @JsProperty(name = "size") @@ -248,6 +264,45 @@ public int size() { return tables.size(); } + /** + * An array of all the key columns that the tables are partitioned by. + * + * @return Array of Column + */ + @JsProperty + public Column[] getKeyColumns() { + return keyColumns; + } + + /** + * An array of the columns in the tables that can be retrieved from this partitioned table, including both key and + * non-key columns. 
+ * + * @return Array of Column + */ + @JsProperty + public Column[] getColumns() { + return columns; + } + + /** + * Fetch a table containing all the valid keys of the partitioned table. + * + * @return Promise of a Table + */ + @JsMethod + public Promise getKeyTable() { + return connection.newState((c, state, metadata) -> { + DropColumnsRequest drop = new DropColumnsRequest(); + drop.setColumnNamesList(new String[] {descriptor.getConstituentColumnName()}); + drop.setSourceId(keys.state().getHandle().makeTableReference()); + drop.setResultId(state.getHandle().makeTicket()); + connection.tableServiceClient().dropColumns(drop, metadata, c::apply); + }, "drop constituent column") + .refetch(this, connection.metadata()) + .then(state -> Promise.resolve(new JsTable(connection, state))); + } + /** * Indicates that this PartitionedTable will no longer be used, removing subcriptions to updated keys, etc. This * will not affect tables in use.

From 19b1d7e553a99d57dee5a992ecfe9f17b7b20f59 Mon Sep 17 00:00:00 2001
From: Jianfeng Mao <4297243+jmao-denver@users.noreply.github.com>
Date: Fri, 1 Dec 2023 13:25:21 -0700
Subject: [PATCH 03/25] auto convert Java values (arrays/scalar) to Numpy ones
 and convert DH nulls based on the annotations of the params of a Py UDF
 (#4502)

* A bit of a milestone

* made test suite pass

* Refactor the new code

* Add more tests

* More refactoring and code cleanup

* Fix a bug that fails vectorization

* More code cleanup and clarification

* More pathological test cases

* Fix String/Instant array conversion issue

* Fix test failures and refactor code

* Trivial renaming

* Respond to review comments

* Apply suggestions from code review

Co-authored-by: Chip Kent <5250374+chipkent@users.noreply.github.com>

* Refactor the code and minor fixes

* Improve the test cases

* Clearly distinguish between params and return

* Clarify some code with comments

* More clarifying comments

---------

Co-authored-by: Chip Kent <5250374+chipkent@users.noreply.github.com>
---
 .../engine/util/PyCallableWrapperJpyImpl.java | 15 +-
 py/server/deephaven/_udf.py | 420 ++++++++++++++++++
 py/server/deephaven/dtypes.py | 63 ++-
 py/server/deephaven/jcompat.py | 123 ++++-
 py/server/deephaven/numpy.py | 29 +-
 py/server/deephaven/pandas.py | 49 +-
 py/server/deephaven/table.py | 193 +-------
 py/server/tests/test_numba_guvectorize.py | 12 +-
 py/server/tests/test_udf_numpy_args.py | 397 +++++++++++++++++
 ...lues.py => test_udf_return_java_values.py} | 19 +-
 py/server/tests/test_vectorization.py | 56 +--
 11 files changed, 1061 insertions(+), 315 deletions(-)
 create mode 100644 py/server/deephaven/_udf.py
 create mode 100644 py/server/tests/test_udf_numpy_args.py
 rename py/server/tests/{test_pyfunc_return_java_values.py => test_udf_return_java_values.py} (96%)

diff --git a/engine/table/src/main/java/io/deephaven/engine/util/PyCallableWrapperJpyImpl.java b/engine/table/src/main/java/io/deephaven/engine/util/PyCallableWrapperJpyImpl.java
index 18262f8e7f0..006bae5be5c 100644
--- a/engine/table/src/main/java/io/deephaven/engine/util/PyCallableWrapperJpyImpl.java
+++ b/engine/table/src/main/java/io/deephaven/engine/util/PyCallableWrapperJpyImpl.java
@@ -24,17 +24,18 @@ public class PyCallableWrapperJpyImpl implements PyCallableWrapper {
 private static final PyObject NUMBA_VECTORIZED_FUNC_TYPE = getNumbaVectorizedFuncType();
 private static final PyObject NUMBA_GUVECTORIZED_FUNC_TYPE = getNumbaGUVectorizedFuncType();
- private static final PyModule dh_table_module = PyModule.importModule("deephaven.table");
+ private
static final PyModule dh_udf_module = PyModule.importModule("deephaven._udf"); private static final Map> numpyType2JavaClass = new HashMap<>(); static { + numpyType2JavaClass.put('b', byte.class); + numpyType2JavaClass.put('h', short.class); + numpyType2JavaClass.put('H', char.class); numpyType2JavaClass.put('i', int.class); numpyType2JavaClass.put('l', long.class); - numpyType2JavaClass.put('h', short.class); numpyType2JavaClass.put('f', float.class); numpyType2JavaClass.put('d', double.class); - numpyType2JavaClass.put('b', byte.class); numpyType2JavaClass.put('?', boolean.class); numpyType2JavaClass.put('U', String.class); numpyType2JavaClass.put('M', Instant.class); @@ -133,23 +134,21 @@ private void prepareSignature() { pyCallable + " has multiple signatures; this is not currently supported for numba vectorized/guvectorized functions"); } - signature = params.get(0).getStringValue(); unwrapped = pyCallable; // since vectorization doesn't support array type parameters, don't flag numba guvectorized as vectorized numbaVectorized = isNumbaVectorized; vectorized = isNumbaVectorized; } else if (pyCallable.hasAttribute("dh_vectorized")) { - signature = pyCallable.getAttribute("signature").toString(); unwrapped = pyCallable.getAttribute("callable"); numbaVectorized = false; vectorized = true; } else { - signature = dh_table_module.call("_encode_signature", pyCallable).toString(); unwrapped = pyCallable; numbaVectorized = false; vectorized = false; } - pyUdfDecoratedCallable = dh_table_module.call("_py_udf", unwrapped); + pyUdfDecoratedCallable = dh_udf_module.call("_py_udf", unwrapped); + signature = pyUdfDecoratedCallable.getAttribute("signature").toString(); } @Override @@ -199,7 +198,7 @@ public PyObject vectorizedCallable() { if (numbaVectorized || vectorized) { return pyCallable; } else { - return dh_table_module.call("dh_vectorize", unwrapped); + return dh_udf_module.call("_dh_vectorize", unwrapped); } } diff --git a/py/server/deephaven/_udf.py b/py/server/deephaven/_udf.py new file mode 100644 index 00000000000..fba76b3472a --- /dev/null +++ b/py/server/deephaven/_udf.py @@ -0,0 +1,420 @@ +# +# Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending +# + +from __future__ import annotations + +import inspect +import re +from dataclasses import dataclass, field +from functools import wraps +from typing import Callable, List, Any, Union, Tuple, _GenericAlias + +import numba +import numpy +import numpy as np + +from deephaven import DHError, dtypes +from deephaven.dtypes import _np_ndarray_component_type, _np_dtype_char, _NUMPY_INT_TYPE_CODES, \ + _NUMPY_FLOATING_TYPE_CODES, _component_np_dtype_char, _J_ARRAY_NP_TYPE_MAP, _PRIMITIVE_DTYPE_NULL_MAP, _scalar, \ + _BUILDABLE_ARRAY_DTYPE_MAP +from deephaven.jcompat import _j_array_to_numpy_array +from deephaven.time import to_np_datetime64 + +# For unittest vectorization +test_vectorization = False +vectorized_count = 0 + + +_SUPPORTED_NP_TYPE_CODES = {"b", "h", "H", "i", "l", "f", "d", "?", "U", "M", "O"} + + +@dataclass +class _ParsedParamAnnotation: + orig_types: set[type] = field(default_factory=set) + encoded_types: set[str] = field(default_factory=set) + none_allowed: bool = False + has_array: bool = False + int_char: str = None + floating_char: str = None + + +@dataclass +class _ParsedReturnAnnotation: + orig_type: type = None + encoded_type: str = None + none_allowed: bool = False + has_array: bool = False + + +@dataclass +class _ParsedSignature: + fn: Callable = None + params: List[_ParsedParamAnnotation] = 
field(default_factory=list) + ret_annotation: _ParsedReturnAnnotation = None + + @property + def encoded(self) -> str: + """Encode the signature of a Python function by mapping the annotations of the parameter types and the return + type to numpy dtype chars (i,l,h,f,d,b,?,U,M,O) and '[' for array, 'N' for NoneType. and pack them into a + string with parameter type chars first, in their original order, followed by the delimiter string '->', + then the return type char. If a parameter or the return of the function is not annotated, + the default 'O' - object type, will be used. + """ + param_str = ",".join(["".join(p.encoded_types) for p in self.params]) + # ret_annotation has only one parsed annotation, and it might be Optional which means it contains 'N' in the + # encoded type. We need to remove it. + return_type_code = re.sub(r"[N]", "", self.ret_annotation.encoded_type) + return param_str + "->" + return_type_code + + +def _encode_param_type(t: type) -> str: + """Returns the numpy based char codes for the given type. + If the type is a numpy ndarray, prefix the numpy dtype char with '[' using Java convention + If the type is a NoneType (as in Optional or as None in Union), return 'N' + """ + if t is type(None): + return "N" + + # find the component type if it is numpy ndarray + component_type = _np_ndarray_component_type(t) + if component_type: + t = component_type + + tc = _np_dtype_char(t) + tc = tc if tc in _SUPPORTED_NP_TYPE_CODES else "O" + + if component_type: + tc = "[" + tc + return tc + + +def _parse_param_annotation(annotation: Any) -> _ParsedParamAnnotation: + """ Parse a parameter annotation in a function's signature """ + p_annotation = _ParsedParamAnnotation() + + if annotation is inspect._empty: + p_annotation.encoded_types.add("O") + p_annotation.none_allowed = True + elif isinstance(annotation, _GenericAlias) and annotation.__origin__ == Union: + for t in annotation.__args__: + _parse_type_no_nested(annotation, p_annotation, t) + else: + _parse_type_no_nested(annotation, p_annotation, annotation) + return p_annotation + + +def _parse_type_no_nested(annotation: Any, p_annotation: _ParsedParamAnnotation, t: type) -> None: + """ Parse a specific type (top level or nested in a top-level Union annotation) without handling nested types + (e.g. a nested Union). The result is stored in the given _ParsedAnnotation object. + """ + p_annotation.orig_types.add(t) + tc = _encode_param_type(t) + if "[" in tc: + p_annotation.has_array = True + if tc in {"N", "O"}: + p_annotation.none_allowed = True + if tc in _NUMPY_INT_TYPE_CODES: + if p_annotation.int_char and p_annotation.int_char != tc: + raise DHError(message=f"multiple integer types in annotation: {annotation}, " + f"types: {p_annotation.int_char}, {tc}. this is not supported because it is not " + f"clear which Deephaven null value to use when checking for nulls in the argument") + p_annotation.int_char = tc + if tc in _NUMPY_FLOATING_TYPE_CODES: + if p_annotation.floating_char and p_annotation.floating_char != tc: + raise DHError(message=f"multiple floating types in annotation: {annotation}, " + f"types: {p_annotation.floating_char}, {tc}. 
this is not supported because it is not " + f"clear which Deephaven null value to use when checking for nulls in the argument") + p_annotation.floating_char = tc + p_annotation.encoded_types.add(tc) + + +def _parse_return_annotation(annotation: Any) -> _ParsedReturnAnnotation: + """ Parse a function's return annotation + + The return annotation is treated differently from the parameter annotations. We don't apply the same check and are + only interested in getting the array-like type right. Any nonsensical annotation will be treated as object type. + This definitely can be improved in the future. + """ + + pra = _ParsedReturnAnnotation() + + t = annotation + pra.orig_type = t + if isinstance(annotation, _GenericAlias) and annotation.__origin__ == Union and len(annotation.__args__) == 2: + # if the annotation is a Union of two types, we'll use the non-None type + if annotation.__args__[1] == type(None): # noqa: E721 + t = annotation.__args__[0] + elif annotation.__args__[0] == type(None): # noqa: E721 + t = annotation.__args__[1] + component_char = _component_np_dtype_char(t) + if component_char: + pra.encoded_type = "[" + component_char + pra.has_array = True + else: + pra.encoded_type = _np_dtype_char(t) + return pra + + +def _parse_numba_signature(fn: Union[numba.np.ufunc.gufunc.GUFunc, numba.np.ufunc.dufunc.DUFunc]) -> _ParsedSignature: + """ Parse a numba function's signature""" + sigs = fn.types # in the format of ll->l, ff->f,dd->d,OO->O, etc. + if sigs: + p_sig = _ParsedSignature(fn) + + # for now, we only support one signature for a numba function because the query engine is not ready to handle + # multiple signatures for vectorization https://github.com/deephaven/deephaven-core/issues/4762 + sig = sigs[0] + params, rt_char = sig.split("->") + + p_sig.params = [] + p_sig.ret_annotation = _ParsedReturnAnnotation() + p_sig.ret_annotation.encoded_type = rt_char + + if isinstance(fn, numba.np.ufunc.dufunc.DUFunc): + for p in params: + pa = _ParsedParamAnnotation() + pa.encoded_types.add(p) + if p in _NUMPY_INT_TYPE_CODES: + pa.int_char = p + if p in _NUMPY_FLOATING_TYPE_CODES: + pa.floating_char = p + p_sig.params.append(pa) + else: # GUFunc + # An example: @guvectorize([(int64[:], int64[:], int64[:])], "(m),(n)->(n)" + input_output_decl = fn.signature # "(m),(n)->(n)" in the above example + input_decl, output_decl = input_output_decl.split("->") + # remove the parentheses so that empty string indicates no array, non-empty string indicates array + input_decl = re.sub("[()]", "", input_decl).split(",") + output_decl = re.sub("[()]", "", output_decl) + + for p, d in zip(params, input_decl): + pa = _ParsedParamAnnotation() + if d: + pa.encoded_types.add("[" + p) + pa.has_array = True + else: + pa.encoded_types.add(p) + if p in _NUMPY_INT_TYPE_CODES: + pa.int_char = p + if p in _NUMPY_FLOATING_TYPE_CODES: + pa.floating_char = p + p_sig.params.append(pa) + + if output_decl: + p_sig.ret_annotation.has_array = True + return p_sig + else: + raise DHError(message=f"numba decorated functions must have an explicitly defined signature: {fn}") + + +def _parse_np_ufunc_signature(fn: numpy.ufunc) -> _ParsedSignature: + """ Parse the signature of a numpy ufunc """ + + # numpy ufuncs actually have signature encoded in their 'types' attribute, we want to better support + # them in the future (https://github.com/deephaven/deephaven-core/issues/4762) + p_sig = _ParsedSignature(fn) + if fn.nin > 0: + pa = _ParsedParamAnnotation() + pa.encoded_types.add("O") + p_sig.params = [pa] * fn.nin + 
p_sig.ret_annotation = _ParsedReturnAnnotation() + p_sig.ret_annotation.encoded_type = "O" + return p_sig + + +def _parse_signature(fn: Callable) -> _ParsedSignature: + """ Parse the signature of a function """ + + if isinstance(fn, (numba.np.ufunc.gufunc.GUFunc, numba.np.ufunc.dufunc.DUFunc)): + return _parse_numba_signature(fn) + elif isinstance(fn, numpy.ufunc): + return _parse_np_ufunc_signature(fn) + else: + p_sig = _ParsedSignature(fn=fn) + sig = inspect.signature(fn) + for n, p in sig.parameters.items(): + p_sig.params.append(_parse_param_annotation(p.annotation)) + + p_sig.ret_annotation = _parse_return_annotation(sig.return_annotation) + return p_sig + + +def _convert_arg(param: _ParsedParamAnnotation, arg: Any) -> Any: + """ Convert a single argument to the type specified by the annotation """ + if arg is None: + if not param.none_allowed: + raise TypeError(f"Argument {arg} is not compatible with annotation {param.orig_types}") + else: + return None + + # if the arg is a Java array + if np_dtype := _J_ARRAY_NP_TYPE_MAP.get(type(arg)): + encoded_type = "[" + np_dtype.char + # if it matches one of the encoded types, convert it + if encoded_type in param.encoded_types: + dtype = dtypes.from_np_dtype(np_dtype) + return _j_array_to_numpy_array(dtype, arg, conv_null=True, type_promotion=False) + # if the annotation is missing, or it is a generic object type, return the arg + elif "O" in param.encoded_types: + return arg + else: + raise TypeError(f"Argument {arg} is not compatible with annotation {param.encoded_types}") + else: # if the arg is not a Java array + specific_types = param.encoded_types - {"N", "O"} # remove NoneType and object type + if specific_types: + for t in specific_types: + if t.startswith("["): + if isinstance(arg, np.ndarray) and arg.dtype.char == t[1]: + return arg + continue + + dtype = dtypes.from_np_dtype(np.dtype(t)) + dh_null = _PRIMITIVE_DTYPE_NULL_MAP.get(dtype) + + if param.int_char and isinstance(arg, int): + if arg == dh_null: + if param.none_allowed: + return None + else: + raise DHError(f"Argument {arg} is not compatible with annotation {param.orig_types}") + else: + return np.dtype(param.int_char).type(arg) + elif param.floating_char and isinstance(arg, float): + if isinstance(arg, float): + if arg == dh_null: + return np.nan if "N" not in param.encoded_types else None + else: + return np.dtype(param.floating_char).type(arg) + elif t == "?" and isinstance(arg, bool): + return arg + elif t == "M": + try: + return to_np_datetime64(arg) + except Exception as e: + # don't raise an error, if this is the only annotation, the else block of the for loop will + # catch it and raise a TypeError + pass + elif t == "U" and isinstance(arg, str): + return arg + else: # didn't return from inside the for loop + if "O" in param.encoded_types: + return arg + else: + raise TypeError(f"Argument {arg} is not compatible with annotation {param.orig_types}") + else: # if no annotation or generic object, return arg + return arg + + +def _convert_args(p_sig: _ParsedSignature, args: Tuple[Any, ...]) -> List[Any]: + """ Convert all arguments to the types specified by the annotations. + Given that the number of arguments and the number of parameters may not match (in the presence of keyword, + var-positional, or var-keyword parameters), we have the following rules: + If the number of arguments is less than the number of parameters, the remaining parameters are left as is. + If the number of arguments is greater than the number of parameters, the extra arguments are left as is. 
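
A minimal standalone sketch of the pairing rule spelled out above; the converter callables here are hypothetical stand-ins for the per-parameter conversions performed by _convert_arg:

from typing import Any, Callable, List, Tuple

def convert_args_sketch(converters: List[Callable[[Any], Any]], args: Tuple[Any, ...]) -> List[Any]:
    # Converters pair with the leading arguments; surplus arguments pass through
    # unchanged so Python's own call resolution can accept or reject them.
    converted = [conv(arg) for conv, arg in zip(converters, args)]
    converted.extend(args[len(converted):])
    return converted

# Two converters, three arguments: the third argument is left as-is.
assert convert_args_sketch([int, float], ("1", "2.5", "x")) == [1, 2.5, "x"]
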
+ + Python's function call mechanism will raise an exception if it can't resolve the parameters with the arguments. + """ + converted_args = [_convert_arg(param, arg) for param, arg in zip(p_sig.params, args)] + converted_args.extend(args[len(converted_args):]) + return converted_args + + +def _py_udf(fn: Callable): + """A decorator that acts as a transparent translator for Python UDFs used in Deephaven query formulas between + Python and Java. This decorator is intended for use by the Deephaven query engine and should not be used by + users. + + It carries out two conversions: + 1. convert Python function return values to Java values. + For properly annotated functions, including numba vectorized and guvectorized ones, this decorator inspects the + signature of the function and determines its return type, including supported primitive types and arrays of + the supported primitive types. It then converts the return value of the function to the corresponding Java value + of the same type. For unsupported types, the decorator returns the original Python value which appears as + org.jpy.PyObject in Java. + 2. convert Java function arguments to Python values based on the signature of the function. + """ + if hasattr(fn, "return_type"): + return fn + p_sig = _parse_signature(fn) + # build a signature string for vectorization by removing NoneType, array char '[', and comma from the encoded types + # since vectorization only supports UDFs with a single signature and enforces an exact match, any non-compliant + # signature (e.g. Union with more than 1 non-NoneType) will be rejected by the vectorizer. + sig_str_vectorization = re.sub(r"[\[N,]", "", p_sig.encoded) + return_array = p_sig.ret_annotation.has_array + ret_dtype = dtypes.from_np_dtype(np.dtype(p_sig.ret_annotation.encoded_type[-1])) + + @wraps(fn) + def wrapper(*args, **kwargs): + converted_args = _convert_args(p_sig, args) + # kwargs are not converted because they are not used in the UDFs + ret = fn(*converted_args, **kwargs) + if return_array: + return dtypes.array(ret_dtype, ret) + elif ret_dtype == dtypes.PyObject: + return ret + else: + return _scalar(ret, ret_dtype) + + wrapper.j_name = ret_dtype.j_name + real_ret_dtype = _BUILDABLE_ARRAY_DTYPE_MAP.get(ret_dtype, dtypes.PyObject) if return_array else ret_dtype + + if hasattr(ret_dtype.j_type, 'jclass'): + j_class = real_ret_dtype.j_type.jclass + else: + j_class = real_ret_dtype.qst_type.clazz() + + wrapper.return_type = j_class + wrapper.signature = sig_str_vectorization + + return wrapper + + +def _dh_vectorize(fn): + """A decorator to vectorize a Python function used in Deephaven query formulas and invoked on a row basis. + + If this annotation is not used on a query function, the Deephaven query engine will make an effort to vectorize + the function. If vectorization is not possible, the query engine will use the original, non-vectorized function. + If this annotation is used on a function, the Deephaven query engine will use the vectorized function in a query, + or an error will result if the function can not be vectorized. + + When this decorator is used on a function, the number and type of input and output arguments are changed. + These changes are only intended for use by the Deephaven query engine. Users are discouraged from using + vectorized functions in non-query code, since the function signature may change in future versions. + + The current vectorized function signature includes (1) the size of the input arrays, (2) the output array, + and (3) the input arrays. 
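
A standalone sketch of that calling convention, with a hypothetical scalar body: argument 0 is the chunk size, argument 1 is the caller-supplied output array, and the remaining arguments are parallel input arrays.

def add_vectorized_sketch(chunk_size, chunk_result, xs, ys):
    # Apply the scalar body row by row, writing into the caller-supplied buffer.
    for i in range(chunk_size):
        chunk_result[i] = xs[i] + ys[i]
    return chunk_result

out = [None] * 3
assert add_vectorized_sketch(3, out, [1, 2, 3], [10, 20, 30]) == [11, 22, 33]
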
+ """ + p_sig = _parse_signature(fn) + ret_dtype = dtypes.from_np_dtype(np.dtype(p_sig.ret_annotation.encoded_type[-1])) + + @wraps(fn) + def wrapper(*args): + if len(args) != len(p_sig.params) + 2: + raise ValueError( + f"The number of arguments doesn't match the function signature. {len(args) - 2}, {p_sig.encoded}") + if args[0] <= 0: + raise ValueError(f"The chunk size argument must be a positive integer. {args[0]}") + + chunk_size = args[0] + chunk_result = args[1] + if args[2:]: + vectorized_args = zip(*args[2:]) + for i in range(chunk_size): + scalar_args = next(vectorized_args) + converted_args = _convert_args(p_sig, scalar_args) + chunk_result[i] = _scalar(fn(*converted_args), ret_dtype) + else: + for i in range(chunk_size): + chunk_result[i] = _scalar(fn(), ret_dtype) + + return chunk_result + + wrapper.callable = fn + wrapper.dh_vectorized = True + + if test_vectorization: + global vectorized_count + vectorized_count += 1 + + return wrapper \ No newline at end of file diff --git a/py/server/deephaven/dtypes.py b/py/server/deephaven/dtypes.py index 5f5857ffdbe..56d2f25ca0d 100644 --- a/py/server/deephaven/dtypes.py +++ b/py/server/deephaven/dtypes.py @@ -102,6 +102,8 @@ def __call__(self, *args, **kwargs): """Double-precision floating-point number type""" string = DType(j_name="java.lang.String", qst_type=_JQstType.stringType(), np_type=np.str_) """String type""" +Character = DType(j_name="java.lang.Character") +"""Character type""" BigDecimal = DType(j_name="java.math.BigDecimal") """Java BigDecimal type""" StringSet = DType(j_name="io.deephaven.stringset.StringSet") @@ -188,6 +190,20 @@ def __call__(self, *args, **kwargs): } +_J_ARRAY_NP_TYPE_MAP = { + boolean_array.j_type: np.dtype("?"), + byte_array.j_type: np.dtype("b"), + char_array.j_type: np.dtype("uint16"), + short_array.j_type: np.dtype("h"), + int32_array.j_type: np.dtype("i"), + long_array.j_type: np.dtype("l"), + float32_array.j_type: np.dtype("f"), + double_array.j_type: np.dtype("d"), + string_array.j_type: np.dtype("U"), + instant_array.j_type: np.dtype("datetime64[ns]"), +} + + def null_remap(dtype: DType) -> Callable[[Any], Any]: """ Creates a null value remap function for the provided DType. @@ -325,8 +341,19 @@ def from_np_dtype(np_dtype: Union[np.dtype, pd.api.extensions.ExtensionDtype]) - return PyObject -_NUMPY_INT_TYPE_CODES = ["i", "l", "h", "b"] -_NUMPY_FLOATING_TYPE_CODES = ["f", "d"] +_NUMPY_INT_TYPE_CODES = {"b", "h", "H", "i", "l"} +_NUMPY_FLOATING_TYPE_CODES = {"f", "d"} + + +def _is_py_null(x: Any) -> bool: + """Checks if the value is a Python null value, i.e. None or NaN, or Pandas.NA.""" + if x is None: + return True + + try: + return bool(pd.isna(x)) + except (TypeError, ValueError): + return False def _scalar(x: Any, dtype: DType) -> Any: @@ -336,12 +363,14 @@ def _scalar(x: Any, dtype: DType) -> Any: # NULL_BOOL will appear in Java as a byte value which causes a cast error. We just let JPY converts it to Java null # and the engine has casting logic to handle it. 
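
The sentinel check can be sketched standalone; the constant below is an assumed stand-in for Deephaven's NULL_INT, and the real code looks up the sentinel per target dtype in _PRIMITIVE_DTYPE_NULL_MAP:

import math

NULL_INT = -2147483648  # assumed: Deephaven's null sentinel for 32-bit ints

def scalar_sketch(x):
    # None and NaN both box to the Deephaven null sentinel for the target type;
    # the real implementation also recognizes pd.NA and skips bool/char dtypes.
    if x is None or (isinstance(x, float) and math.isnan(x)):
        return NULL_INT
    return int(x)

assert scalar_sketch(None) == NULL_INT
assert scalar_sketch(7) == 7
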
- if x is None and dtype != bool_ and _PRIMITIVE_DTYPE_NULL_MAP.get(dtype): - return _PRIMITIVE_DTYPE_NULL_MAP[dtype] + if (dt := _PRIMITIVE_DTYPE_NULL_MAP.get(dtype)) and _is_py_null(x) and dtype not in (bool_, char): + return dt try: if hasattr(x, "dtype"): - if x.dtype.char in _NUMPY_INT_TYPE_CODES: + if x.dtype.char == 'H': # np.uint16 maps to Java char + return Character(int(x)) + elif x.dtype.char in _NUMPY_INT_TYPE_CODES: return int(x) elif x.dtype.char in _NUMPY_FLOATING_TYPE_CODES: return float(x) @@ -382,20 +411,32 @@ def _component_np_dtype_char(t: type) -> Optional[str]: if isinstance(t, _GenericAlias) and issubclass(t.__origin__, Sequence): component_type = t.__args__[0] + if not component_type: + component_type = _np_ndarray_component_type(t) + + if component_type: + return _np_dtype_char(component_type) + else: + return None + + +def _np_ndarray_component_type(t: type) -> Optional[type]: + """Returns the numpy ndarray component type if the type is a numpy ndarray, otherwise return None.""" + # Py3.8: npt.NDArray can be used in Py 3.8 as a generic alias, but a specific alias (e.g. npt.NDArray[np.int64]) # is an instance of a private class of np, yet we don't have a choice but to use it. And when npt.NDArray is used, # the 1st argument is typing.Any, the 2nd argument is another generic alias of which the 1st argument is the # component type - if not component_type and sys.version_info.minor == 8: + component_type = None + if sys.version_info.major == 3 and sys.version_info.minor == 8: if isinstance(t, np._typing._generic_alias._GenericAlias) and t.__origin__ == np.ndarray: component_type = t.__args__[1].__args__[0] - # Py3.9+, np.ndarray as a generic alias is only supported in Python 3.9+, also npt.NDArray is still available but a # specific alias (e.g. npt.NDArray[np.int64]) now is an instance of typing.GenericAlias. # when npt.NDArray is used, the 1st argument is typing.Any, the 2nd argument is another generic alias of which # the 1st argument is the component type # when np.ndarray is used, the 1st argument is the component type - if not component_type and sys.version_info.minor > 8: + if not component_type and sys.version_info.major == 3 and sys.version_info.minor > 8: import types if isinstance(t, types.GenericAlias) and (issubclass(t.__origin__, Sequence) or t.__origin__ == np.ndarray): nargs = len(t.__args__) @@ -406,8 +447,4 @@ def _component_np_dtype_char(t: type) -> Optional[str]: a1 = t.__args__[1] if a0 == typing.Any and isinstance(a1, types.GenericAlias): component_type = a1.__args__[0] - - if component_type: - return _np_dtype_char(component_type) - else: - return None + return component_type diff --git a/py/server/deephaven/jcompat.py b/py/server/deephaven/jcompat.py index d12f0d01f64..c1d54a2f443 100644 --- a/py/server/deephaven/jcompat.py +++ b/py/server/deephaven/jcompat.py @@ -5,12 +5,29 @@ """ This module provides Java compatibility support including convenience functions to create some widely used Java data structures from corresponding Python ones in order to be able to call Java methods. 
""" -from typing import Any, Callable, Dict, Iterable, List, Sequence, Set, TypeVar, Union +from typing import Any, Callable, Dict, Iterable, List, Sequence, Set, TypeVar, Union, Tuple, Literal import jpy +import numpy as np +import pandas as pd +from deephaven import dtypes, DHError from deephaven._wrapper import unwrap, wrap_j_object -from deephaven.dtypes import DType +from deephaven.dtypes import DType, _PRIMITIVE_DTYPE_NULL_MAP, _J_ARRAY_NP_TYPE_MAP + +_NULL_BOOLEAN_AS_BYTE = jpy.get_type("io.deephaven.util.BooleanUtils").NULL_BOOLEAN_AS_BYTE +_JPrimitiveArrayConversionUtility = jpy.get_type("io.deephaven.integrations.common.PrimitiveArrayConversionUtility") + +_DH_PANDAS_NULLABLE_TYPE_MAP: Dict[DType, pd.api.extensions.ExtensionDtype] = { + dtypes.bool_: pd.BooleanDtype, + dtypes.byte: pd.Int8Dtype, + dtypes.short: pd.Int16Dtype, + dtypes.char: pd.UInt16Dtype, + dtypes.int32: pd.Int32Dtype, + dtypes.int64: pd.Int64Dtype, + dtypes.float32: pd.Float32Dtype, + dtypes.float64: pd.Float64Dtype, +} def is_java_type(obj: Any) -> bool: @@ -181,11 +198,109 @@ def to_sequence(v: Union[T, Sequence[T]] = None, wrapped: bool = False) -> Seque return () if wrapped: if not isinstance(v, Sequence) or isinstance(v, str): - return (v, ) + return (v,) else: return tuple(v) if not isinstance(v, Sequence) or isinstance(v, str): - return (unwrap(v), ) + return (unwrap(v),) else: return tuple((unwrap(o) for o in v)) + + +def _j_array_to_numpy_array(dtype: DType, j_array: jpy.JType, conv_null: bool, type_promotion: bool = False) -> \ + np.ndarray: + """ Produces a numpy array from the DType and given Java array. + + Args: + dtype (DType): The dtype of the Java array + j_array (jpy.JType): The Java array to convert + conv_null (bool): If True, convert nulls to the null value for the dtype + type_promotion (bool): Ignored when conv_null is False. When type_promotion is False, (1) input Java integer, + boolean, or character arrays containing Deephaven nulls yield an exception, (2) input Java float or double + arrays containing Deephaven nulls have null values converted to np.nan, and (3) input Java arrays without + Deephaven nulls are converted to the target type. When type_promotion is True, (1) input Java integer, + boolean, or character arrays containing Deephaven nulls are converted to np.float64 arrays and Deephaven + null values are converted to np.nan, (2) input Java float or double arrays containing Deephaven nulls have + null values converted to np.nan, and (3) input Java arrays without Deephaven nulls are converted to the + target type. Defaults to False. 
+ + Returns: + np.ndarray: The numpy array + + Raises: + DHError + """ + if dtype.is_primitive: + np_array = np.frombuffer(j_array, dtype.np_type) + elif dtype == dtypes.Instant: + longs = _JPrimitiveArrayConversionUtility.translateArrayInstantToLong(j_array) + np_long_array = np.frombuffer(longs, np.int64) + np_array = np_long_array.view(dtype.np_type) + elif dtype == dtypes.bool_: + # dh nulls will be preserved and show up as True b/c the underlying byte array isn't modified + bytes_ = _JPrimitiveArrayConversionUtility.translateArrayBooleanToByte(j_array) + np_array = np.frombuffer(bytes_, dtype.np_type) + elif dtype == dtypes.string: + np_array = np.array(j_array, dtypes.string.np_type) + elif dtype.np_type is not np.object_: + try: + np_array = np.frombuffer(j_array, dtype.np_type) + except: + np_array = np.array(j_array, np.object_) + else: + np_array = np.array(j_array, np.object_) + + if conv_null: + if dh_null := _PRIMITIVE_DTYPE_NULL_MAP.get(dtype): + if dtype in (dtypes.float32, dtypes.float64): + np_array = np.copy(np_array) + np_array[np_array == dh_null] = np.nan + else: + if dtype is dtypes.bool_: # needs to change its type to byte for dh null detection + np_array = np.frombuffer(np_array, np.byte) + + if any(np_array[np_array == dh_null]): + if not type_promotion: + raise DHError(f"Problem creating numpy array. Java {dtype} array contains Deephaven null values, but numpy {np_array.dtype} array does not support null values") + np_array = np_array.astype(np.float64) + np_array[np_array == dh_null] = np.nan + else: + if dtype is dtypes.bool_: # needs to change its type back to bool + np_array = np.frombuffer(np_array, np.bool_) + return np_array + + return np_array + + +def _j_array_to_series(dtype: DType, j_array: jpy.JType, conv_null: bool) -> pd.Series: + """Produce a copy of the specified Java array as a pandas.Series object. + + Args: + dtype (DType): the dtype of the Java array + j_array (jpy.JType): the Java array + conv_null (bool): whether to check for Deephaven nulls in the data and automatically replace them with + pd.NA. 
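
A standalone sketch of the mask-to-nullable-dtype pattern this function applies, again with an assumed stand-in for Deephaven's NULL_INT sentinel:

import numpy as np
import pandas as pd

NULL_INT = -2147483648  # assumed: Deephaven's null sentinel for 32-bit ints

raw = np.array([1, NULL_INT, 3], dtype=np.int32)
s = pd.Series(raw, dtype=pd.Int32Dtype())  # nullable extension dtype
s = s.mask(s == NULL_INT)                  # sentinel values become pd.NA
print(s.tolist())  # [1, <NA>, 3]
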
+ + Returns: + a pandas Series + + Raises: + DHError + """ + if conv_null and dtype == dtypes.bool_: + j_array = _JPrimitiveArrayConversionUtility.translateArrayBooleanToByte(j_array) + np_array = np.frombuffer(j_array, dtype=np.byte) + s = pd.Series(data=np_array, dtype=pd.Int8Dtype(), copy=False) + s.mask(s == _NULL_BOOLEAN_AS_BYTE, inplace=True) + return s.astype(pd.BooleanDtype(), copy=False) + + np_array = _j_array_to_numpy_array(dtype, j_array, conv_null=False) + if conv_null and (nv := _PRIMITIVE_DTYPE_NULL_MAP.get(dtype)) is not None: + pd_ex_dtype = _DH_PANDAS_NULLABLE_TYPE_MAP.get(dtype) + s = pd.Series(data=np_array, dtype=pd_ex_dtype(), copy=False) + s.mask(s == nv, inplace=True) + else: + s = pd.Series(data=np_array, copy=False) + + return s diff --git a/py/server/deephaven/numpy.py b/py/server/deephaven/numpy.py index 412b6e8b5ac..3cc898271b3 100644 --- a/py/server/deephaven/numpy.py +++ b/py/server/deephaven/numpy.py @@ -8,13 +8,13 @@ import jpy import numpy as np -from deephaven.dtypes import DType -from deephaven import DHError, dtypes, empty_table, new_table +from deephaven import DHError, dtypes, new_table from deephaven.column import Column, InputColumn +from deephaven.dtypes import DType +from deephaven.jcompat import _j_array_to_numpy_array from deephaven.table import Table -_JPrimitiveArrayConversionUtility = jpy.get_type("io.deephaven.integrations.common.PrimitiveArrayConversionUtility") _JDataAccessHelpers = jpy.get_type("io.deephaven.engine.table.impl.DataAccessHelpers") @@ -25,28 +25,9 @@ def _to_column_name(name: str) -> str: def column_to_numpy_array(col_def: Column, j_array: jpy.JType) -> np.ndarray: - """ Produces a numpy array from the given Java array and the Table column definition. """ + """ Produces a numpy array from the given Java array and the Table column definition.""" try: - if col_def.data_type.is_primitive: - np_array = np.frombuffer(j_array, col_def.data_type.np_type) - elif col_def.data_type == dtypes.Instant: - longs = _JPrimitiveArrayConversionUtility.translateArrayInstantToLong(j_array) - np_long_array = np.frombuffer(longs, np.int64) - np_array = np_long_array.view(col_def.data_type.np_type) - elif col_def.data_type == dtypes.bool_: - bytes_ = _JPrimitiveArrayConversionUtility.translateArrayBooleanToByte(j_array) - np_array = np.frombuffer(bytes_, col_def.data_type.np_type) - elif col_def.data_type == dtypes.string: - np_array = np.array([s for s in j_array], dtypes.string.np_type) - elif col_def.data_type.np_type is not np.object_: - try: - np_array = np.frombuffer(j_array, col_def.data_type.np_type) - except: - np_array = np.array(j_array, np.object_) - else: - np_array = np.array(j_array, np.object_) - - return np_array + return _j_array_to_numpy_array(col_def.data_type, j_array, conv_null=False, type_promotion=False) except DHError: raise except Exception as e: diff --git a/py/server/deephaven/pandas.py b/py/server/deephaven/pandas.py index 883622ce27b..8626b999e11 100644 --- a/py/server/deephaven/pandas.py +++ b/py/server/deephaven/pandas.py @@ -3,7 +3,7 @@ # """ This module supports the conversion between Deephaven tables and pandas DataFrames. 
""" -from typing import List, Dict, Tuple, Literal +from typing import List, Literal import jpy import numpy as np @@ -13,26 +13,14 @@ from deephaven import DHError, new_table, dtypes, arrow from deephaven.column import Column from deephaven.constants import NULL_BYTE, NULL_SHORT, NULL_INT, NULL_LONG, NULL_FLOAT, NULL_DOUBLE, NULL_CHAR -from deephaven.dtypes import DType -from deephaven.numpy import column_to_numpy_array, _make_input_column +from deephaven.jcompat import _j_array_to_series +from deephaven.numpy import _make_input_column from deephaven.table import Table _NULL_BOOLEAN_AS_BYTE = jpy.get_type("io.deephaven.util.BooleanUtils").NULL_BOOLEAN_AS_BYTE -_JPrimitiveArrayConversionUtility = jpy.get_type("io.deephaven.integrations.common.PrimitiveArrayConversionUtility") _JDataAccessHelpers = jpy.get_type("io.deephaven.engine.table.impl.DataAccessHelpers") _is_dtype_backend_supported = pd.__version__ >= "2.0.0" -_DTYPE_NULL_MAPPING: Dict[DType, Tuple] = { - dtypes.bool_: (_NULL_BOOLEAN_AS_BYTE, pd.BooleanDtype), - dtypes.byte: (NULL_BYTE, pd.Int8Dtype), - dtypes.short: (NULL_SHORT, pd.Int16Dtype), - dtypes.char: (NULL_CHAR, pd.UInt16Dtype), - dtypes.int32: (NULL_INT, pd.Int32Dtype), - dtypes.int64: (NULL_LONG, pd.Int64Dtype), - dtypes.float32: (NULL_FLOAT, pd.Float32Dtype), - dtypes.float64: (NULL_DOUBLE, pd.Float64Dtype), -} - def _column_to_series(table: Table, col_def: Column, conv_null: bool) -> pd.Series: """Produce a copy of the specified column as a pandas.Series object. @@ -51,29 +39,15 @@ def _column_to_series(table: Table, col_def: Column, conv_null: bool) -> pd.Seri """ try: data_col = _JDataAccessHelpers.getColumn(table.j_table, col_def.name) - if conv_null and col_def.data_type == dtypes.bool_: - j_array = _JPrimitiveArrayConversionUtility.translateArrayBooleanToByte(data_col.getDirect()) - np_array = np.frombuffer(j_array, dtype=np.byte) - s = pd.Series(data=np_array, dtype=pd.Int8Dtype(), copy=False) - s.mask(s == _NULL_BOOLEAN_AS_BYTE, inplace=True) - return s.astype(pd.BooleanDtype(), copy=False) - - np_array = column_to_numpy_array(col_def, data_col.getDirect()) - if conv_null and (null_pair := _DTYPE_NULL_MAPPING.get(col_def.data_type)) is not None: - nv = null_pair[0] - pd_ex_dtype = null_pair[1] - s = pd.Series(data=np_array, dtype=pd_ex_dtype(), copy=False) - s.mask(s == nv, inplace=True) - else: - s = pd.Series(data=np_array, copy=False) - return s + j_array = data_col.getDirect() + return _j_array_to_series(col_def.data_type, j_array, conv_null) except DHError: raise except Exception as e: raise DHError(e, message="failed to create a pandas Series for {col}") from e -_DTYPE_MAPPING_PYARROW = { +_PANDAS_ARROW_TYPE_MAP = { pa.int8(): pd.ArrowDtype(pa.int8()), pa.int16(): pd.ArrowDtype(pa.int16()), pa.int32(): pd.ArrowDtype(pa.int32()), @@ -90,7 +64,7 @@ def _column_to_series(table: Table, col_def: Column, conv_null: bool) -> pd.Seri pa.timestamp('ns', tz='UTC'): pd.ArrowDtype(pa.timestamp('ns', tz='UTC')), } -_DTYPE_MAPPING_NUMPY_NULLABLE = { +_PANDAS_NULLABLE_TYPE_MAP = { pa.int8(): pd.Int8Dtype(), pa.int16(): pd.Int16Dtype(), pa.uint16(): pd.UInt16Dtype(), @@ -107,8 +81,8 @@ def _column_to_series(table: Table, col_def: Column, conv_null: bool) -> pd.Seri } _PYARROW_TO_PANDAS_TYPE_MAPPERS = { - "pyarrow": _DTYPE_MAPPING_PYARROW.get, - "numpy_nullable": _DTYPE_MAPPING_NUMPY_NULLABLE.get, + "pyarrow": _PANDAS_ARROW_TYPE_MAP.get, + "numpy_nullable": _PANDAS_NULLABLE_TYPE_MAP.get, } @@ -180,7 +154,7 @@ def to_pandas(table: Table, cols: List[str] = None, raise 
DHError(e, "failed to create a pandas DataFrame from table.") from e -_EX_DTYPE_NULL_MAP = { +_PANDAS_EXTYPE_DH_NULL_MAP = { # This reflects the fact that in the server we use NULL_BOOLEAN_AS_BYTE - the byte encoding of null boolean to # translate boxed Boolean to/from primitive bytes pd.BooleanDtype: _NULL_BOOLEAN_AS_BYTE, @@ -209,7 +183,7 @@ def _map_na(array: [np.ndarray, pd.api.extensions.ExtensionArray]): if not isinstance(pd_dtype, pd.api.extensions.ExtensionDtype): return array - dh_null = _EX_DTYPE_NULL_MAP.get(type(pd_dtype)) or _EX_DTYPE_NULL_MAP.get(pd_dtype) + dh_null = _PANDAS_EXTYPE_DH_NULL_MAP.get(type(pd_dtype)) or _PANDAS_EXTYPE_DH_NULL_MAP.get(pd_dtype) # To preserve NaNs in floating point arrays, Pandas doesn't distinguish NaN/Null as far as NA testing is # concerned, thus its fillna() method will replace both NaN/Null in the data. if isinstance(pd_dtype, (pd.Float32Dtype, pd.Float64Dtype)) and isinstance(getattr(array, "_data"), np.ndarray): @@ -276,3 +250,4 @@ def to_table(df: pd.DataFrame, cols: List[str] = None) -> Table: raise except Exception as e: raise DHError(e, "failed to create a Deephaven Table from a pandas DataFrame.") from e + diff --git a/py/server/deephaven/table.py b/py/server/deephaven/table.py index 89fa8df9c19..922e6b3dcd1 100644 --- a/py/server/deephaven/table.py +++ b/py/server/deephaven/table.py @@ -11,13 +11,10 @@ import inspect from enum import Enum from enum import auto -from functools import wraps -from typing import Any, Optional, Callable, Dict, _GenericAlias +from typing import Any, Optional, Callable, Dict from typing import Sequence, List, Union, Protocol import jpy -import numba -import numpy as np from deephaven import DHError from deephaven import dtypes @@ -31,8 +28,6 @@ from deephaven.jcompat import to_sequence, j_array_list from deephaven.update_graph import auto_locking_ctx, UpdateGraph from deephaven.updateby import UpdateByOperation -from deephaven.dtypes import _BUILDABLE_ARRAY_DTYPE_MAP, _scalar, _np_dtype_char, \ - _component_np_dtype_char # Table _J_Table = jpy.get_type("io.deephaven.engine.table.Table") @@ -80,10 +75,6 @@ _JMultiJoinTable = jpy.get_type("io.deephaven.engine.table.MultiJoinTable") _JMultiJoinFactory = jpy.get_type("io.deephaven.engine.table.MultiJoinFactory") -# For unittest vectorization -_test_vectorization = False -_vectorized_count = 0 - class NodeType(Enum): """An enum of node types for RollupTable""" @@ -363,178 +354,6 @@ def _j_py_script_session() -> _JPythonScriptSession: return None -_SUPPORTED_NP_TYPE_CODES = ["i", "l", "h", "f", "d", "b", "?", "U", "M", "O"] - - -def _parse_annotation(annotation: Any) -> Any: - """Parse a Python annotation, for now mostly to extract the non-None type from an Optional(Union) annotation, - otherwise return the original annotation. """ - if isinstance(annotation, _GenericAlias) and annotation.__origin__ == Union and len(annotation.__args__) == 2: - if annotation.__args__[1] == type(None): # noqa: E721 - return annotation.__args__[0] - elif annotation.__args__[0] == type(None): # noqa: E721 - return annotation.__args__[1] - else: - return annotation - else: - return annotation - - -def _encode_signature(fn: Callable) -> str: - """Encode the signature of a Python function by mapping the annotations of the parameter types and the return - type to numpy dtype chars (i,l,h,f,d,b,?,U,M,O), and pack them into a string with parameter type chars first, - in their original order, followed by the delimiter string '->', then the return type_char. 
- - If a parameter or the return of the function is not annotated, the default 'O' - object type, will be used. - """ - try: - sig = inspect.signature(fn) - except: - # in case inspect.signature() fails, we'll just use the default 'O' - object type. - # numpy ufuncs actually have signature encoded in their 'types' attribute, we want to better support - # them in the future (https://github.com/deephaven/deephaven-core/issues/4762) - if type(fn) == np.ufunc: - return "O"*fn.nin + "->" + "O" - return "->O" - - np_type_codes = [] - for n, p in sig.parameters.items(): - p_annotation = _parse_annotation(p.annotation) - np_type_codes.append(_np_dtype_char(p_annotation)) - - return_annotation = _parse_annotation(sig.return_annotation) - return_type_code = _np_dtype_char(return_annotation) - np_type_codes = [c if c in _SUPPORTED_NP_TYPE_CODES else "O" for c in np_type_codes] - return_type_code = return_type_code if return_type_code in _SUPPORTED_NP_TYPE_CODES else "O" - - np_type_codes.extend(["-", ">", return_type_code]) - return "".join(np_type_codes) - - -def _udf_return_dtype(fn): - if isinstance(fn, (numba.np.ufunc.dufunc.DUFunc, numba.np.ufunc.gufunc.GUFunc)) and hasattr(fn, "types"): - return dtypes.from_np_dtype(np.dtype(fn.types[0][-1])) - else: - return dtypes.from_np_dtype(np.dtype(_encode_signature(fn)[-1])) - - -def _py_udf(fn: Callable): - """A decorator that acts as a transparent translator for Python UDFs used in Deephaven query formulas between - Python and Java. This decorator is intended for use by the Deephaven query engine and should not be used by - users. - - For now, this decorator is only capable of converting Python function return values to Java values. It - does not yet convert Java values in arguments to usable Python object (e.g. numpy arrays) or properly translate - Deephaven primitive null values. - - For properly annotated functions, including numba vectorized and guvectorized ones, this decorator inspects the - signature of the function and determines its return type, including supported primitive types and arrays of - the supported primitive types. It then converts the return value of the function to the corresponding Java value - of the same type. For unsupported types, the decorator returns the original Python value which appears as - org.jpy.PyObject in Java. - """ - - if hasattr(fn, "return_type"): - return fn - ret_dtype = _udf_return_dtype(fn) - - return_array = False - # If the function is a numba guvectorized function, examine the signature of the function to determine if it - # returns an array. - if isinstance(fn, numba.np.ufunc.gufunc.GUFunc): - sig = fn.signature - rtype = sig.split("->")[-1].strip("()") - if rtype: - return_array = True - else: - try: - return_annotation = _parse_annotation(inspect.signature(fn).return_annotation) - except ValueError: - # the function has no return annotation, and since we can't know what the exact type is, the return type - # defaults to the generic object type therefore it is not an array of a specific type, - # but see (https://github.com/deephaven/deephaven-core/issues/4762) for future imporvement to better support - # numpy ufuncs. 
- pass - else: - component_type = _component_np_dtype_char(return_annotation) - if component_type: - ret_dtype = dtypes.from_np_dtype(np.dtype(component_type)) - if ret_dtype in _BUILDABLE_ARRAY_DTYPE_MAP: - return_array = True - - @wraps(fn) - def wrapper(*args, **kwargs): - ret = fn(*args, **kwargs) - if return_array: - return dtypes.array(ret_dtype, ret) - elif ret_dtype == dtypes.PyObject: - return ret - else: - return _scalar(ret, ret_dtype) - - wrapper.j_name = ret_dtype.j_name - real_ret_dtype = _BUILDABLE_ARRAY_DTYPE_MAP.get(ret_dtype) if return_array else ret_dtype - - if hasattr(ret_dtype.j_type, 'jclass'): - j_class = real_ret_dtype.j_type.jclass - else: - j_class = real_ret_dtype.qst_type.clazz() - - wrapper.return_type = j_class - - return wrapper - - -def dh_vectorize(fn): - """A decorator to vectorize a Python function used in Deephaven query formulas and invoked on a row basis. - - If this annotation is not used on a query function, the Deephaven query engine will make an effort to vectorize - the function. If vectorization is not possible, the query engine will use the original, non-vectorized function. - If this annotation is used on a function, the Deephaven query engine will use the vectorized function in a query, - or an error will result if the function can not be vectorized. - - When this decorator is used on a function, the number and type of input and output arguments are changed. - These changes are only intended for use by the Deephaven query engine. Users are discouraged from using - vectorized functions in non-query code, since the function signature may change in future versions. - - The current vectorized function signature includes (1) the size of the input arrays, (2) the output array, - and (3) the input arrays. - """ - signature = _encode_signature(fn) - ret_dtype = _udf_return_dtype(fn) - - @wraps(fn) - def wrapper(*args): - if len(args) != len(signature) - len("->?") + 2: - raise ValueError( - f"The number of arguments doesn't match the function signature. {len(args) - 2}, {signature}") - if args[0] <= 0: - raise ValueError(f"The chunk size argument must be a positive integer. {args[0]}") - - chunk_size = args[0] - chunk_result = args[1] - if args[2:]: - vectorized_args = zip(*args[2:]) - for i in range(chunk_size): - scalar_args = next(vectorized_args) - chunk_result[i] = _scalar(fn(*scalar_args), ret_dtype) - else: - for i in range(chunk_size): - chunk_result[i] = _scalar(fn(), ret_dtype) - - return chunk_result - - wrapper.callable = fn - wrapper.signature = signature - wrapper.dh_vectorized = True - - if _test_vectorization: - global _vectorized_count - _vectorized_count += 1 - - return wrapper - - @contextlib.contextmanager def _query_scope_ctx(): """A context manager to set/unset query scope based on the scope of the most immediate caller code that invokes @@ -3712,6 +3531,7 @@ def update_by(self, ops: Union[UpdateByOperation, List[UpdateByOperation]], except Exception as e: raise DHError(e, "update-by operation on the PartitionedTableProxy failed.") from e + class MultiJoinInput(JObjectWrapper): """A MultiJoinInput represents the input tables, key columns and additional columns to be used in the multi-table natural join. 
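The vectorized calling convention described in the dh_vectorize docstring above (the decorator is being relocated to deephaven._udf later in this series) can be made concrete with a hand-written call; in practice only the query engine invokes the wrapper this way, and the names below are illustrative:

    import numpy as np

    def py_plus(a, b) -> int:
        return a + b

    vectorized = dh_vectorize(py_plus)  # assumes the decorator is in scope

    # Argument layout: (1) chunk size, (2) preallocated output array,
    # (3+) one input array per original parameter.
    chunk_size = 3
    out = np.empty(chunk_size, dtype=np.int64)
    vectorized(chunk_size, out, np.array([1, 2, 3]), np.array([10, 20, 30]))
    print(out)  # [11 22 33]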
""" @@ -3779,7 +3599,8 @@ def __init__(self, input: Union[Table, Sequence[Table], MultiJoinInput, Sequence with auto_locking_ctx(*tables): j_tables = to_sequence(input) self.j_multijointable = _JMultiJoinFactory.of(on, *j_tables) - elif isinstance(input, MultiJoinInput) or (isinstance(input, Sequence) and all(isinstance(ji, MultiJoinInput) for ji in input)): + elif isinstance(input, MultiJoinInput) or ( + isinstance(input, Sequence) and all(isinstance(ji, MultiJoinInput) for ji in input)): if on is not None: raise DHError(message="on parameter is not permitted when MultiJoinInput objects are provided.") wrapped_input = to_sequence(input, wrapped=True) @@ -3788,13 +3609,13 @@ def __init__(self, input: Union[Table, Sequence[Table], MultiJoinInput, Sequence input = to_sequence(input) self.j_multijointable = _JMultiJoinFactory.of(*input) else: - raise DHError(message="input must be a Table, a sequence of Tables, a MultiJoinInput, or a sequence of MultiJoinInputs.") + raise DHError( + message="input must be a Table, a sequence of Tables, a MultiJoinInput, or a sequence of MultiJoinInputs.") except Exception as e: raise DHError(e, "failed to build a MultiJoinTable object.") from e - def multi_join(input: Union[Table, Sequence[Table], MultiJoinInput, Sequence[MultiJoinInput]], on: Union[str, Sequence[str]] = None) -> MultiJoinTable: """ The multi_join method creates a new table by performing a multi-table natural join on the input tables. The result @@ -3812,4 +3633,4 @@ def multi_join(input: Union[Table, Sequence[Table], MultiJoinInput, Sequence[Mul MultiJoinTable: the result of the multi-table natural join operation. To access the underlying Table, use the table() method. """ - return MultiJoinTable(input, on) \ No newline at end of file + return MultiJoinTable(input, on) diff --git a/py/server/tests/test_numba_guvectorize.py b/py/server/tests/test_numba_guvectorize.py index c82b92296e3..79d9f87241f 100644 --- a/py/server/tests/test_numba_guvectorize.py +++ b/py/server/tests/test_numba_guvectorize.py @@ -5,7 +5,7 @@ import unittest import numpy as np -from numba import guvectorize, int64 +from numba import guvectorize, int64, int32 from deephaven import empty_table, dtypes from tests.testbase import BaseTestCase @@ -22,13 +22,13 @@ def g(x, res): for xi in x: res[0] += xi - t = empty_table(10).update(["X=i%3", "Y=i"]).group_by("X").update("Z=g(Y)") + t = empty_table(10).update(["X=i%3", "Y=ii"]).group_by("X").update("Z=g(Y)") m = t.meta_table self.assertEqual(t.columns[2].data_type, dtypes.int64) def test_vector_return(self): # vector and scalar input to vector ouput function - @guvectorize([(int64[:], int64, int64[:])], "(m),()->(m)", nopython=True) + @guvectorize([(int32[:], int32, int64[:])], "(m),()->(m)", nopython=True) def g(x, y, res): for i in range(len(x)): res[i] = x[i] + y @@ -61,7 +61,7 @@ def test_fixed_length_vector_return(self): dummy = np.array([0, 0], dtype=np.int64) # vector input to fixed-length vector ouput function -- second arg is a dummy just to get a fixed size output - @guvectorize([(int64[:], int64[:], int64[:])], "(m),(n)->(n)", nopython=True) + @guvectorize([(int32[:], int64[:], int64[:])], "(m),(n)->(n)", nopython=True) def g(x, dummy, res): res[0] = min(x) res[1] = max(x) @@ -78,7 +78,7 @@ def g(x, dummy, res): res[0] = np.min(x) res[1] = np.max(x) - t = empty_table(10).update(["X=i%3", "Y=i"]).group_by("X").update("Z=g(Y,dummy)") + t = empty_table(10).update(["X=i%3", "Y=ii"]).group_by("X").update("Z=g(Y,dummy)") self.assertEqual(t.columns[2].data_type, 
dtypes.long_array) def test_np_on_java_array2(self): @@ -86,7 +86,7 @@ def test_np_on_java_array2(self): def g(x, res): res[:] = x + 5 - t = empty_table(10).update(["X=i%3", "Y=i"]).group_by("X").update("Z=g(Y)") + t = empty_table(10).update(["X=i%3", "Y=ii"]).group_by("X").update("Z=g(Y)") self.assertEqual(t.columns[2].data_type, dtypes.long_array) diff --git a/py/server/tests/test_udf_numpy_args.py b/py/server/tests/test_udf_numpy_args.py new file mode 100644 index 00000000000..ba698a4b21c --- /dev/null +++ b/py/server/tests/test_udf_numpy_args.py @@ -0,0 +1,397 @@ +# +# Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending +# +import typing +from typing import Optional, Union, Any +import unittest + +import numpy as np +import numpy.typing as npt + +from deephaven import empty_table, DHError, dtypes +from deephaven.dtypes import double_array, int32_array, long_array, int16_array, char_array, int8_array, \ + float32_array +from tests.testbase import BaseTestCase + +_J_TYPE_NULL_MAP = { + "byte": "NULL_BYTE", + "short": "NULL_SHORT", + "char": "NULL_CHAR", + "int": "NULL_INT", + "long": "NULL_LONG", + "float": "NULL_FLOAT", + "double": "NULL_DOUBLE", +} + +_J_TYPE_NP_DTYPE_MAP = { + "byte": "np.int8", + "short": "np.int16", + "char": "np.uint16", + "int": "np.int32", + "long": "np.int64", + "float": "np.float32", + "double": "np.float64", +} + +_J_TYPE_J_ARRAY_TYPE_MAP = { + "byte": int8_array, + "short": int16_array, + "char": char_array, + "int": int32_array, + "long": long_array, + "float": float32_array, + "double": double_array, +} + + +class UDFNumpyTest(BaseTestCase): + def test_j_to_py_no_annotation_no_null(self): + col1_formula = "Col1 = i % 10" + for j_dtype, np_dtype in _J_TYPE_NP_DTYPE_MAP.items(): + col2_formula = f"Col2 = ({j_dtype})i" + with self.subTest(j_dtype): + tbl = empty_table(100).update([col1_formula, col2_formula]).group_by("Col1") + + func_str = f""" +def test_udf(col1, col2) -> bool: + j_array_type = _J_TYPE_J_ARRAY_TYPE_MAP[{j_dtype!r}].j_type + return isinstance(col1, int) and isinstance(col2, j_array_type) + """ + exec(func_str, globals()) + res = tbl.update("Col3 = test_udf(Col1, Col2)") + self.assertEqual(10, res.to_string().count("true")) + + def test_j_to_py_no_annotation_null(self): + col1_formula = "Col1 = i % 10" + for j_dtype, null_name in _J_TYPE_NULL_MAP.items(): + col2_formula = f"Col2 = i % 3 == 0? 
{null_name} : ({j_dtype})i" + with self.subTest(j_dtype): + tbl = empty_table(100).update([col1_formula, col2_formula]).group_by("Col1") + + func_str = f""" +def test_udf(col1, col2) -> bool: + j_array_type = _J_TYPE_J_ARRAY_TYPE_MAP[{j_dtype!r}].j_type + return (isinstance(col1, int) and isinstance(col2, j_array_type) and np.any(np.array(col2) == {null_name})) + """ + exec(f"from deephaven.constants import {null_name}", globals()) + exec(func_str, globals()) + res = tbl.update("Col3 = test_udf(Col1, Col2)") + self.assertEqual(10, res.to_string().count("true")) + exec(f"del {null_name}", globals()) + + def test_jarray_to_np_array_no_null(self): + col1_formula = "Col1 = i % 10" + for j_dtype, np_dtype in _J_TYPE_NP_DTYPE_MAP.items(): + col2_formula = f"Col2 = ({j_dtype})i" + with self.subTest(j_dtype): + tbl = empty_table(100).update([col1_formula, col2_formula]).group_by("Col1") + + func_str = f""" +def test_udf(col1, col2: np.ndarray[{np_dtype}]) -> bool: + return (isinstance(col1, int) and isinstance(col2, np.ndarray) and col2.dtype.type == {np_dtype} and np.nanmean( + col2) == np.mean( col2)) + """ + exec(func_str, globals()) + res = tbl.update("Col3 = test_udf(Col1, Col2)") + self.assertEqual(10, res.to_string().count("true")) + + def test_jarray_to_np_array_null(self): + col1_formula = "Col1 = i % 10" + for j_dtype, null_name in _J_TYPE_NULL_MAP.items(): + col2_formula = f"Col2 = i % 3 == 0? {null_name} : ({j_dtype})i" + with self.subTest(j_dtype): + tbl = empty_table(100).update([col1_formula, col2_formula]).group_by("Col1") + + func_str = f""" +def test_udf(col1, col2: np.ndarray[{_J_TYPE_NP_DTYPE_MAP[j_dtype]}]) -> bool: + return (isinstance(col1, int) and isinstance(col2, np.ndarray) and col2.dtype.type == + {_J_TYPE_NP_DTYPE_MAP[j_dtype]} and np.nanmean(col2) != np.mean( col2)) + """ + exec(func_str, globals()) + + # for floating point types, DH nulls are auto converted to np.nan + # for integer types, DH nulls in the array raise exceptions + if j_dtype in ("float", "double"): + res = tbl.update("Col3 = test_udf(Col1, Col2)") + self.assertEqual(10, res.to_string().count("true")) + else: + with self.assertRaises(DHError) as cm: + tbl.update("Col3 = test_udf(Col1, Col2)") + self.assertRegex(str(cm.exception), "Java .* array contains Deephaven null values, but numpy .* " + "array does not support ") + + def test_j_scalar_to_py_no_null(self): + col1_formula = "Col1 = i % 10" + for j_dtype, null_name in _J_TYPE_NULL_MAP.items(): + col2_formula = f"Col2 = ({j_dtype})i" + with self.subTest(j_dtype): + np_type = _J_TYPE_NP_DTYPE_MAP[j_dtype] + func = f""" +def test_udf(col: {np_type}) -> bool: + if not isinstance(col, {np_type}): + return False + if np.isnan(col): + return False + else: + return True + """ + exec(func, globals()) + with self.subTest(j_dtype): + tbl = empty_table(100).update([col1_formula, col2_formula]) + res = tbl.update("Col3 = test_udf(Col2)") + self.assertEqual(10, res.to_string().count("true")) + + func = f""" +def test_udf(col: Optional[{np_type}]) -> bool: + if not isinstance(col, {np_type}): + return False + if col is None: + return False + else: + return True + """ + exec(func, globals()) + with self.subTest(j_dtype): + tbl = empty_table(100).update([col1_formula, col2_formula]) + res = tbl.update("Col3 = test_udf(Col2)") + self.assertEqual(10, res.to_string().count("true")) + + def test_j_scalar_to_py_null(self): + col1_formula = "Col1 = i % 10" + for data_type, null_name in _J_TYPE_NULL_MAP.items(): + col2_formula = f"Col2 = i % 3 == 0? 
{null_name} : ({data_type})i" + with self.subTest(data_type): + np_type = _J_TYPE_NP_DTYPE_MAP[data_type] + func = f""" +def test_udf(col: {np_type}) -> bool: + if np.isnan(col): + return True + else: + if not isinstance(col, {np_type}): + return True + return False +""" + exec(func, globals()) + with self.subTest(data_type): + tbl = empty_table(100).update([col1_formula, col2_formula]) + # for floating point types, DH nulls are auto converted to np.nan + # for integer types, DH nulls in the array raise exceptions + if data_type in ("float", "double"): + res = tbl.update("Col3 = test_udf(Col2)") + self.assertEqual(4, res.to_string().count("true")) + else: + with self.assertRaises(DHError) as cm: + res = tbl.update("Col3 = test_udf(Col2)") + self.assertRegex(str(cm.exception), "Argument .* is not compatible with annotation*") + + func = f""" +def test_udf(col: Optional[{np_type}]) -> bool: + if col is None: + return True + else: + if not isinstance(col, {np_type}): + return True + return False +""" + exec(func, globals()) + with self.subTest(data_type): + tbl = empty_table(100).update([col1_formula, col2_formula]) + res = tbl.update("Col3 = test_udf(Col2)") + self.assertEqual(4, res.to_string().count("true")) + + def test_weird_cases(self): + def f(p1: Union[np.ndarray[typing.Any], None]) -> bool: + return bool(p1) + + with self.assertRaises(DHError) as cm: + t = empty_table(10).update(["X1 = f(i)"]) + + def f1(p1: Union[np.int16, np.int32]) -> bool: + return bool(p1) + + with self.assertRaises(DHError) as cm: + t = empty_table(10).update(["X1 = f1(i)"]) + + def f11(p1: Union[float, np.float32]) -> bool: + return bool(p1) + + with self.assertRaises(DHError) as cm: + t = empty_table(10).update(["X1 = f11(i)"]) + + def f2(p1: Union[np.int16, np.float64]) -> Union[Optional[bool]]: + return bool(p1) + + t = empty_table(10).update(["X1 = f2(i)"]) + self.assertEqual(t.columns[0].data_type, dtypes.bool_) + self.assertEqual(9, t.to_string().count("true")) + + def f21(p1: Union[np.int16, np.float64]) -> Union[Optional[bool], int]: + return bool(p1) + + with self.assertRaises(DHError) as cm: + t = empty_table(10).update(["X1 = f21(i)"]) + + def f3(p1: Union[np.int16, np.float64], p2=None) -> bool: + return bool(p1) + + t = empty_table(10).update(["X1 = f3(i)"]) + self.assertEqual(t.columns[0].data_type, dtypes.bool_) + + def f4(p1: Union[np.int16, np.float64], p2=None) -> bool: + return bool(p1) + + t = empty_table(10).update(["X1 = f4((double)i)"]) + self.assertEqual(t.columns[0].data_type, dtypes.bool_) + with self.assertRaises(DHError) as cm: + t = empty_table(10).update(["X1 = f4(now())"]) + self.assertRegex(str(cm.exception), "Argument .* is not compatible with annotation*") + + def f41(p1: Union[np.int16, np.float64, Union[Any]], p2=None) -> bool: + return bool(p1) + + t = empty_table(10).update(["X1 = f41(now())"]) + self.assertEqual(t.columns[0].data_type, dtypes.bool_) + + def f42(p1: Union[np.int16, np.float64, np.datetime64], p2=None) -> bool: + return p1.dtype.char == "M" + + t = empty_table(10).update(["X1 = f42(now())"]) + self.assertEqual(t.columns[0].data_type, dtypes.bool_) + self.assertEqual(10, t.to_string().count("true")) + + def f5(col1, col2: np.ndarray[np.int32]) -> bool: + return np.nanmean(col2) == np.mean(col2) + + t = empty_table(10).update(["X = i % 3", "Y = i"]).group_by("X") + t = t.update(["X1 = f5(X, Y)"]) + with self.assertRaises(DHError) as cm: + t = t.update(["X1 = f5(X, null)"]) + self.assertRegex(str(cm.exception), "Argument .* is not compatible with annotation*") 
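The rule these scalar-null cases pin down, in one compact sketch (column and function names are arbitrary): a bare numpy scalar annotation rejects Deephaven nulls, while wrapping it in Optional delivers them as None.

    from typing import Optional
    import numpy as np
    from deephaven import empty_table

    def strict(x: np.int32) -> bool:
        return bool(x)

    def lenient(x: Optional[np.int32]) -> bool:
        return x is None  # Deephaven nulls arrive as None under Optional

    t = empty_table(9).update("X = i % 3 == 0 ? NULL_INT : i")
    t1 = t.update("Y = lenient(X)")  # fine: true on the null rows
    # t.update("Y = strict(X)")      # raises DHError: not compatible with annotation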
+ + def f51(col1, col2: Optional[np.ndarray[np.int32]]) -> bool: + return np.nanmean(col2) == np.mean(col2) + + t = empty_table(10).update(["X = i % 3", "Y = i"]).group_by("X") + t = t.update(["X1 = f51(X, Y)"]) + with self.assertRaises(DHError) as cm: + t = t.update(["X1 = f51(X, null)"]) + self.assertRegex(str(cm.exception), "unsupported operand type.*NoneType") + + t = empty_table(10).update(["X = i % 3", "Y = i"]).group_by("X") + + def f6(*args: np.int32, col2: np.ndarray[np.int32]) -> bool: + return np.nanmean(col2) == np.mean(col2) + with self.assertRaises(DHError) as cm: + t1 = t.update(["X1 = f6(X, Y)"]) + self.assertIn("missing 1 required keyword-only argument", str(cm.exception)) + + with self.assertRaises(DHError) as cm: + t1 = t.update(["X1 = f6(X, Y=null)"]) + self.assertIn("not compatible with annotation", str(cm.exception)) + + def test_str_bool_datetime_array(self): + with self.subTest("str"): + def f1(p1: np.ndarray[str], p2=None) -> bool: + return bool(len(p1)) + + t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? `deephaven`: null"]).group_by("X") + t1 = t.update(["X1 = f1(Y)"]) + self.assertEqual(t1.columns[2].data_type, dtypes.bool_) + with self.assertRaises(DHError) as cm: + t2 = t.update(["X1 = f1(null, Y )"]) + self.assertRegex(str(cm.exception), "Argument .* is not compatible with annotation*") + + def f11(p1: Union[np.ndarray[str], None], p2=None) -> bool: + return bool(len(p1)) if p1 is not None else False + t2 = t.update(["X1 = f11(null, Y)"]) + self.assertEqual(3, t2.to_string().count("false")) + + with self.subTest("datetime"): + def f2(p1: np.ndarray[np.datetime64], p2=None) -> bool: + return bool(len(p1)) + + t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? now() : null"]).group_by("X") + t1 = t.update(["X1 = f2(Y)"]) + self.assertEqual(t1.columns[2].data_type, dtypes.bool_) + with self.assertRaises(DHError) as cm: + t2 = t.update(["X1 = f2(null, Y )"]) + self.assertRegex(str(cm.exception), "Argument .* is not compatible with annotation*") + + def f21(p1: Union[np.ndarray[np.datetime64], None], p2=None) -> bool: + return bool(len(p1)) if p1 is not None else False + t2 = t.update(["X1 = f21(null, Y)"]) + self.assertEqual(3, t2.to_string().count("false")) + + with self.subTest("boolean"): + def f3(p1: np.ndarray[np.bool_], p2=None) -> bool: + return bool(len(p1)) + + t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? true : null"]).group_by("X") + with self.assertRaises(DHError) as cm: + t1 = t.update(["X1 = f3(Y)"]) + self.assertRegex(str(cm.exception), "Java .* array contains Deephaven null values, but numpy .* " + "array does not support ") + + t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? true : false"]).group_by("X") + t1 = t.update(["X1 = f3(Y)"]) + self.assertEqual(t1.columns[2].data_type, dtypes.bool_) + with self.assertRaises(DHError) as cm: + t2 = t.update(["X1 = f3(null, Y )"]) + self.assertRegex(str(cm.exception), "Argument None is not compatible with annotation") + + def f31(p1: Optional[np.ndarray[bool]], p2=None) -> bool: + return bool(len(p1)) if p1 is not None else False + t2 = t.update(["X1 = f31(null, Y)"]) + self.assertEqual(3, t2.to_string("X1").count("false")) + + def test_str_bool_datetime_scalar(self): + with self.subTest("str"): + def f1(p1: str, p2=None) -> bool: + return p1 is None + + t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? 
`deephaven`: null"]) + with self.assertRaises(DHError) as cm: + t1 = t.update(["X1 = f1(Y)"]) + self.assertRegex(str(cm.exception), "Argument None is not compatible with annotation") + + def f11(p1: Union[str, None], p2=None) -> bool: + return p1 is None + t2 = t.update(["X1 = f11(Y)"]) + self.assertEqual(5, t2.to_string().count("false")) + + with self.subTest("datetime"): + def f2(p1: np.datetime64, p2=None) -> bool: + return p1 is None + + t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? now() : null"]) + with self.assertRaises(DHError) as cm: + t1 = t.update(["X1 = f2(Y)"]) + self.assertRegex(str(cm.exception), "Argument None is not compatible with annotation") + + def f21(p1: Union[np.datetime64, None], p2=None) -> bool: + return p1 is None + t2 = t.update(["X1 = f21(Y)"]) + self.assertEqual(5, t2.to_string().count("false")) + + with self.subTest("boolean"): + def f3(p1: np.bool_, p2=None) -> bool: + return p1 is None + + t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? true : null"]) + with self.assertRaises(DHError) as cm: + t1 = t.update(["X1 = f3(Y)"]) + self.assertRegex(str(cm.exception), "Argument None is not compatible with annotation") + + t = empty_table(10).update(["X = i % 3", "Y = i % 2 == 0? true : false"]) + t1 = t.update(["X1 = f3(Y)"]) + self.assertEqual(t1.columns[2].data_type, dtypes.bool_) + self.assertEqual(0, t1.to_string("X1").count("true")) + + def f31(p1: Optional[np.bool_], p2=None) -> bool: + return p1 is None + t2 = t.update(["X1 = f31(null, Y)"]) + self.assertEqual(10, t2.to_string("X1").count("true")) + + +if __name__ == "__main__": + unittest.main() diff --git a/py/server/tests/test_pyfunc_return_java_values.py b/py/server/tests/test_udf_return_java_values.py similarity index 96% rename from py/server/tests/test_pyfunc_return_java_values.py rename to py/server/tests/test_udf_return_java_values.py index aef0d44cb93..1a7c78e3aa9 100644 --- a/py/server/tests/test_pyfunc_return_java_values.py +++ b/py/server/tests/test_udf_return_java_values.py @@ -23,7 +23,7 @@ dtypes.byte: "np.int8", dtypes.bool_: "np.bool_", dtypes.string: "np.str_", - # dtypes.char: "np.uint16", + dtypes.char: "np.uint16", } @@ -52,7 +52,7 @@ def test_array_return(self): "np.float64": dtypes.double_array, "bool": dtypes.boolean_array, "np.str_": dtypes.string_array, - # "np.uint16": dtypes.char_array, + "np.uint16": dtypes.char_array, } container_types = ["List", "Tuple", "list", "tuple", "Sequence", "np.ndarray"] for component_type, dh_dtype in component_types.items(): @@ -189,7 +189,7 @@ def f4557_1(x, y) -> np.ndarray[np.int64]: return np.array(x) + y # Testing https://github.com/deephaven/deephaven-core/issues/4562 - @nb.guvectorize([(nb.int64[:], nb.int64, nb.int64[:])], "(m),()->(m)", nopython=True) + @nb.guvectorize([(nb.int32[:], nb.int32, nb.int32[:])], "(m),()->(m)", nopython=True) def f4562_1(x, y, res): res[:] = x + y @@ -198,11 +198,11 @@ def f4562_1(x, y, res): "Y = f4562_1(B,3)" ]) self.assertEqual(t2.columns[2].data_type, dtypes.long_array) - self.assertEqual(t2.columns[3].data_type, dtypes.long_array) + self.assertEqual(t2.columns[3].data_type, dtypes.int32_array) t3 = t2.ungroup() self.assertEqual(t3.columns[2].data_type, dtypes.int64) - self.assertEqual(t3.columns[3].data_type, dtypes.int64) + self.assertEqual(t3.columns[3].data_type, dtypes.int32) def test_ndim_nparray_return_type(self): def f() -> np.ndarray[np.int64]: @@ -222,28 +222,29 @@ def f() -> npt.NDArray[np.int64]: def test_ndarray_weird_cases(self): def f() -> np.ndarray[typing.Any]: return 
np.array([1, 2], dtype=np.int64) - t = empty_table(10).update(["X1 = f()"]) self.assertEqual(t.columns[0].data_type, dtypes.PyObject) def f1() -> npt.NDArray[typing.Any]: return np.array([1, 2], dtype=np.int64) - t = empty_table(10).update(["X1 = f1()"]) self.assertEqual(t.columns[0].data_type, dtypes.PyObject) def f2() -> np.ndarray[typing.Any, np.int64]: return np.array([1, 2], dtype=np.int64) - t = empty_table(10).update(["X1 = f2()"]) self.assertEqual(t.columns[0].data_type, dtypes.PyObject) def f3() -> Union[None, None]: return np.array([1, 2], dtype=np.int64) - t = empty_table(10).update(["X1 = f3()"]) self.assertEqual(t.columns[0].data_type, dtypes.PyObject) + def f4() -> None: + return np.array([1, 2], dtype=np.int64) + t = empty_table(10).update(["X1 = f4()"]) + self.assertEqual(t.columns[0].data_type, dtypes.PyObject) + def test_optional_scalar_return(self): for dh_dtype, np_dtype in _J_TYPE_NP_DTYPE_MAP.items(): with self.subTest(dh_dtype=dh_dtype, np_dtype=np_dtype): diff --git a/py/server/tests/test_vectorization.py b/py/server/tests/test_vectorization.py index 82b9dccbe2c..8eb28e65cda 100644 --- a/py/server/tests/test_vectorization.py +++ b/py/server/tests/test_vectorization.py @@ -7,24 +7,24 @@ from typing import Optional import numpy as np -import deephaven from deephaven import DHError, empty_table, dtypes from deephaven import new_table from deephaven.column import int_col from deephaven.filters import Filter, and_ -from deephaven.table import dh_vectorize +import deephaven._udf as _udf +from deephaven._udf import _dh_vectorize as dh_vectorize from tests.testbase import BaseTestCase class VectorizationTestCase(BaseTestCase): def setUp(self): super().setUp() - deephaven.table._test_vectorization = True - deephaven.table._vectorized_count = 0 + _udf.test_vectorization = True + _udf.vectorized_count = 0 def tearDown(self) -> None: - deephaven.table._test_vectorization = False - deephaven.table._vectorized_count = 0 + _udf.test_vectorization = False + _udf.vectorized_count = 0 super().tearDown() def test_vectorization_exceptions(self): @@ -66,7 +66,7 @@ def py_plus(p1, p2) -> int: t = empty_table(1).update("X = py_plus(ii, ii)") - self.assertEqual(deephaven.table._vectorized_count, 1) + self.assertEqual(_udf.vectorized_count, 1) def test_vectorized_no_arg(self): def py_random() -> int: @@ -74,7 +74,7 @@ def py_random() -> int: t = empty_table(1).update("X = py_random()") - self.assertEqual(deephaven.table._vectorized_count, 1) + self.assertEqual(_udf.vectorized_count, 1) def test_vectorized_const_arg(self): def py_const(seed) -> int: @@ -84,27 +84,27 @@ def py_const(seed) -> int: expected_count = 0 t = empty_table(10).update("X = py_const(3)") expected_count += 1 - self.assertEqual(deephaven.table._vectorized_count, expected_count) + self.assertEqual(_udf.vectorized_count, expected_count) seed = 10 t = empty_table(10).update("X = py_const(seed)") expected_count += 1 - self.assertEqual(deephaven.table._vectorized_count, expected_count) + self.assertEqual(_udf.vectorized_count, expected_count) t = empty_table(10).update("X = py_const(30*1024*1024*1024)") - self.assertEqual(deephaven.table._vectorized_count, expected_count) + self.assertEqual(_udf.vectorized_count, expected_count) t = empty_table(10).update("X = py_const(30000000000L)") expected_count += 1 - self.assertEqual(deephaven.table._vectorized_count, expected_count) + self.assertEqual(_udf.vectorized_count, expected_count) t = empty_table(10).update("X = py_const(100.01)") expected_count += 1 - 
self.assertEqual(deephaven.table._vectorized_count, expected_count) + self.assertEqual(_udf.vectorized_count, expected_count) t = empty_table(10).update("X = py_const(100.01f)") expected_count += 1 - self.assertEqual(deephaven.table._vectorized_count, expected_count) + self.assertEqual(_udf.vectorized_count, expected_count) with self.assertRaises(DHError) as cm: t = empty_table(1).update("X = py_const(NULL_INT)") @@ -115,26 +115,26 @@ def py_const_str(s) -> str: t = empty_table(10).update("X = py_const_str(`Deephaven`)") expected_count += 1 - self.assertEqual(deephaven.table._vectorized_count, expected_count) + self.assertEqual(_udf.vectorized_count, expected_count) t = empty_table(10).update("X = py_const_str(null)") expected_count += 1 - self.assertEqual(deephaven.table._vectorized_count, expected_count) + self.assertEqual(_udf.vectorized_count, expected_count) t = empty_table(10).update("X = py_const_str(true)") expected_count += 1 - self.assertEqual(deephaven.table._vectorized_count, expected_count) + self.assertEqual(_udf.vectorized_count, expected_count) t = t.update("Y = py_const_str(X)") expected_count += 1 - self.assertEqual(deephaven.table._vectorized_count, expected_count) + self.assertEqual(_udf.vectorized_count, expected_count) def test_multiple_formulas(self): def pyfunc(p1, p2, p3) -> int: return p1 + p2 + p3 t = empty_table(1).update("X = i").update(["Y = pyfunc(X, i, 33)", "Z = pyfunc(X, ii, 66)"]) - self.assertEqual(deephaven.table._vectorized_count, 2) + self.assertEqual(_udf.vectorized_count, 2) self.assertIn("33", t.to_string(cols=["Y"])) self.assertIn("66", t.to_string(cols=["Z"])) @@ -144,7 +144,7 @@ def pyfunc(p1, p2, p3) -> int: return p1 + p2 + p3 t = empty_table(1).update("X = i").update(["Y = pyfunc(X, i, 33)", "Z = pyfunc(X, ii, 66)"]) - self.assertEqual(deephaven.table._vectorized_count, 1) + self.assertEqual(_udf.vectorized_count, 1) self.assertIn("33", t.to_string(cols=["Y"])) self.assertIn("66", t.to_string(cols=["Z"])) @@ -157,11 +157,11 @@ def pyfunc_bool(p1, p2, p3) -> bool: with self.assertRaises(DHError) as cm: t = empty_table(10).view(formulas=["I=ii", "J=(ii * 2)"]).where("pyfunc_int(I, 3, J)") - self.assertEqual(deephaven.table._vectorized_count, 0) + self.assertEqual(_udf.vectorized_count, 0) self.assertIn("boolean required", str(cm.exception)) t = empty_table(10).view(formulas=["I=ii", "J=(ii * 2)"]).where("pyfunc_bool(I, 3, J)") - self.assertEqual(deephaven.table._vectorized_count, 1) + self.assertEqual(_udf.vectorized_count, 1) self.assertGreater(t.size, 1) def test_multiple_filters(self): @@ -171,11 +171,11 @@ def pyfunc_bool(p1, p2, p3) -> bool: conditions = ["pyfunc_bool(I, 3, J)", "pyfunc_bool(i, 10, ii)"] filters = Filter.from_(conditions) t = empty_table(10).view(formulas=["I=ii", "J=(ii * 2)"]).where(filters) - self.assertEqual(2, deephaven.table._vectorized_count) + self.assertEqual(2, _udf.vectorized_count) filter_and = and_(filters) t1 = empty_table(10).view(formulas=["I=ii", "J=(ii * 2)"]).where(filter_and) - self.assertEqual(4, deephaven.table._vectorized_count) + self.assertEqual(4, _udf.vectorized_count) self.assertEqual(t1.size, t.size) self.assertEqual(9, t.size) @@ -187,11 +187,11 @@ def pyfunc_bool(p1, p2, p3) -> bool: conditions = ["pyfunc_bool(I, 3, J)", "pyfunc_bool(i, 10, ii)"] filters = Filter.from_(conditions) t = empty_table(10).view(formulas=["I=ii", "J=(ii * 2)"]).where(filters) - self.assertEqual(1, deephaven.table._vectorized_count) + self.assertEqual(1, _udf.vectorized_count) filter_and = and_(filters) t1 = 
empty_table(10).view(formulas=["I=ii", "J=(ii * 2)"]).where(filter_and) - self.assertEqual(1, deephaven.table._vectorized_count) + self.assertEqual(1, _udf.vectorized_count) self.assertEqual(t1.size, t.size) self.assertEqual(9, t.size) @@ -258,7 +258,7 @@ def sinc(x) -> np.double: t = empty_table(100).update(["X = 0.1 * i", "SincXS=((sinc(X)))"]) self.assertEqual(t.columns[1].data_type, dtypes.double) - self.assertEqual(deephaven.table._vectorized_count, 1) + self.assertEqual(_udf.vectorized_count, 1) def sinc2(x): return np.sinc(x) @@ -272,7 +272,7 @@ def pyfunc(p1: np.int32, p2: np.int32, p3: Optional[np.int32]) -> Optional[int]: return None if total % 3 == 0 else total t = empty_table(10).update("X = i").update(["Y = pyfunc(X, i, 13)", "Z = pyfunc(X, ii, 66)"]) - self.assertEqual(deephaven.table._vectorized_count, 2) + self.assertEqual(_udf.vectorized_count, 2) self.assertIn("13", t.to_string(cols=["Y"])) self.assertIn("null", t.to_string()) self.assertEqual(t.columns[1].data_type, dtypes.long) From 414be28c741ef7a33580cc7b8baf6dc05752485b Mon Sep 17 00:00:00 2001 From: Jianfeng Mao <4297243+jmao-denver@users.noreply.github.com> Date: Fri, 1 Dec 2023 13:32:44 -0700 Subject: [PATCH 04/25] Don't run run_scrip/_refresh_token under lock (#4906) --- py/client/pydeephaven/session.py | 18 ++++++++---------- py/client/tests/test_session.py | 8 ++++++++ 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/py/client/pydeephaven/session.py b/py/client/pydeephaven/session.py index 217743ea5b7..df4d4107f79 100644 --- a/py/client/pydeephaven/session.py +++ b/py/client/pydeephaven/session.py @@ -334,12 +334,11 @@ def _keep_alive(self): self._keep_alive_timer.start() def _refresh_token(self): - with self._r_lock: - try: - self._flight_client.authenticate(self._auth_handler) - except Exception as e: - self.is_connected = False - raise DHError("failed to refresh auth token") from e + try: + self._flight_client.authenticate(self._auth_handler) + except Exception as e: + self.is_connected = False + raise DHError("failed to refresh auth token") from e @property def is_alive(self) -> bool: @@ -385,10 +384,9 @@ def run_script(self, script: str) -> None: Raises: DHError """ - with self._r_lock: - response = self.console_service.run_script(script) - if response.error_message != '': - raise DHError("could not run script: " + response.error_message) + response = self.console_service.run_script(script) + if response.error_message != '': + raise DHError("could not run script: " + response.error_message) def open_table(self, name: str) -> Table: """Opens a table in the global scope with the given name on the server. 
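The hazard this patch removes is worth seeing in isolation: holding the session-wide re-entrant lock across a blocking RPC serializes every other session call for the duration of the round trip. A minimal sketch of the anti-pattern (send_rpc and the lock name are illustrative):

    import threading

    lock = threading.RLock()

    def run_script_locked(script):
        with lock:               # lock held for the whole network round trip
            send_rpc(script)     # hypothetical blocking call to the server
        # While the RPC is outstanding, every other session method that takes
        # the same lock stalls - including the keep-alive token refresh.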
diff --git a/py/client/tests/test_session.py b/py/client/tests/test_session.py index fe75d170ff3..b5b71bc83d5 100644 --- a/py/client/tests/test_session.py +++ b/py/client/tests/test_session.py @@ -56,6 +56,10 @@ def test_time_table(self): session.bind_table("t", t) session.run_script(""" from deephaven import empty_table +try: + del t1 +except NameError: + pass t1 = empty_table(0) if t.is_blink else None """) self.assertNotIn("t1", session.tables) @@ -64,6 +68,10 @@ def test_time_table(self): session.bind_table("t", t) session.run_script(""" from deephaven import empty_table +try: + del t1 +except NameError: + pass t1 = empty_table(0) if t.is_blink else None """) self.assertIn("t1", session.tables) From fa233eefbd57593344cc35649ef0ab525205fa4b Mon Sep 17 00:00:00 2001 From: Mike Bender Date: Mon, 4 Dec 2023 16:34:17 -0500 Subject: [PATCH 05/25] fix: Add BigDecimal and BigInteger as numeric aggregation options (#4878) - BigDecimal and BigInteger weren't listed as numeric types, and were not getting aggregated properly - Fixes #4877 --- .../web/client/api/tree/enums/JsAggregationOperation.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/web/client-api/src/main/java/io/deephaven/web/client/api/tree/enums/JsAggregationOperation.java b/web/client-api/src/main/java/io/deephaven/web/client/api/tree/enums/JsAggregationOperation.java index acd460e0859..79fa9a9ec5f 100644 --- a/web/client-api/src/main/java/io/deephaven/web/client/api/tree/enums/JsAggregationOperation.java +++ b/web/client-api/src/main/java/io/deephaven/web/client/api/tree/enums/JsAggregationOperation.java @@ -128,7 +128,9 @@ private static boolean isNumeric(String columnType) { case "long": case "short": case "char": - case "byte": { + case "byte": + case "java.math.BigDecimal": + case "java.math.BigInteger": { return true; } } From 771e735b230a751ae05fce1caa989684b20ad431 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Dec 2023 11:39:56 -0800 Subject: [PATCH 06/25] Bump actions/setup-java from 3 to 4 (#4910) Bumps [actions/setup-java](https://github.com/actions/setup-java) from 3 to 4. - [Release notes](https://github.com/actions/setup-java/releases) - [Commits](https://github.com/actions/setup-java/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/setup-java dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build-ci.yml | 6 +++--- .github/workflows/check-ci.yml | 4 ++-- .github/workflows/docs-ci.yml | 16 ++++++++-------- .github/workflows/nightly-check-ci.yml | 6 +++--- .github/workflows/nightly-image-check.yml | 2 +- .github/workflows/publish-ci.yml | 4 ++-- .github/workflows/quick-ci.yml | 2 +- .github/workflows/tag-base-images.yml | 4 ++-- 8 files changed, 22 insertions(+), 22 deletions(-) diff --git a/.github/workflows/build-ci.yml b/.github/workflows/build-ci.yml index e02b88f6348..3c250fcfc54 100644 --- a/.github/workflows/build-ci.yml +++ b/.github/workflows/build-ci.yml @@ -17,14 +17,14 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' @@ -153,7 +153,7 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' diff --git a/.github/workflows/check-ci.yml b/.github/workflows/check-ci.yml index a80f9153b61..53340a370b9 100644 --- a/.github/workflows/check-ci.yml +++ b/.github/workflows/check-ci.yml @@ -21,14 +21,14 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' diff --git a/.github/workflows/docs-ci.yml b/.github/workflows/docs-ci.yml index 3b8bd964585..f69a984eabd 100644 --- a/.github/workflows/docs-ci.yml +++ b/.github/workflows/docs-ci.yml @@ -19,14 +19,14 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' @@ -81,14 +81,14 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' @@ -138,14 +138,14 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' @@ -227,7 +227,7 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' @@ -301,7 +301,7 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' diff --git a/.github/workflows/nightly-check-ci.yml b/.github/workflows/nightly-check-ci.yml index 5f501789109..81c62c1a120 100644 --- a/.github/workflows/nightly-check-ci.yml +++ b/.github/workflows/nightly-check-ci.yml @@ -29,21 +29,21 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: 
actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' - name: Setup JDK 21 id: setup-java-21 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '21' diff --git a/.github/workflows/nightly-image-check.yml b/.github/workflows/nightly-image-check.yml index 5b97bda947c..de160aa3593 100644 --- a/.github/workflows/nightly-image-check.yml +++ b/.github/workflows/nightly-image-check.yml @@ -15,7 +15,7 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' diff --git a/.github/workflows/publish-ci.yml b/.github/workflows/publish-ci.yml index debbeb3dde8..2d1d3aec235 100644 --- a/.github/workflows/publish-ci.yml +++ b/.github/workflows/publish-ci.yml @@ -19,14 +19,14 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' diff --git a/.github/workflows/quick-ci.yml b/.github/workflows/quick-ci.yml index 7bea9bbbb1c..f482528bc8e 100644 --- a/.github/workflows/quick-ci.yml +++ b/.github/workflows/quick-ci.yml @@ -22,7 +22,7 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' diff --git a/.github/workflows/tag-base-images.yml b/.github/workflows/tag-base-images.yml index 3a9dbb91b21..3ce799ed989 100644 --- a/.github/workflows/tag-base-images.yml +++ b/.github/workflows/tag-base-images.yml @@ -17,14 +17,14 @@ jobs: - name: Setup JDK 11 id: setup-java-11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - name: Setup JDK 17 id: setup-java-17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' From 374188ae166aa202da30e078a78c13334cb683f6 Mon Sep 17 00:00:00 2001 From: "Charles P. Wright" Date: Tue, 5 Dec 2023 18:00:06 -0500 Subject: [PATCH 07/25] EventDrivenUpdateGraph. (#4613) * EventDrivenUpdateGraph. * bad import * Comment on PoisonedNotificationProcessor. * running into state problems. * rename stuff * move graph name collision check into base. * test, but in an impossible place * Use EDUG for default. * flush reset * also clear on end * some comments * spotless * self-review. * rset UPT. * live code review comments to address. * cleanup * spotless * javadoc * Cleanup of unused variables, move budget calculation. * Fix existingOrBuild. * speling. 
* Apply suggestions from code review Co-authored-by: Ryan Caudy --------- Co-authored-by: Ryan Caudy --- .../engine/context/PoisonedUpdateGraph.java | 3 + .../impl/perf/UpdatePerformanceTracker.java | 16 +- .../updategraph/impl/BaseUpdateGraph.java | 1075 +++++++++++++++++ .../impl/EventDrivenUpdateGraph.java | 143 +++ .../updategraph/impl/PeriodicUpdateGraph.java | 1033 ++-------------- .../impl/PoisonedNotificationProcessor.java | 58 + .../table/impl/CapturingUpdateGraph.java | 5 + .../engine/table/impl/QueryTableTest.java | 3 + .../impl/TestEventDrivenUpdateGraph.java | 245 ++++ .../engine/updategraph/UpdateGraph.java | 8 +- 10 files changed, 1623 insertions(+), 966 deletions(-) create mode 100644 engine/table/src/main/java/io/deephaven/engine/updategraph/impl/BaseUpdateGraph.java create mode 100644 engine/table/src/main/java/io/deephaven/engine/updategraph/impl/EventDrivenUpdateGraph.java create mode 100644 engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PoisonedNotificationProcessor.java create mode 100644 engine/table/src/test/java/io/deephaven/engine/updategraph/impl/TestEventDrivenUpdateGraph.java diff --git a/engine/context/src/main/java/io/deephaven/engine/context/PoisonedUpdateGraph.java b/engine/context/src/main/java/io/deephaven/engine/context/PoisonedUpdateGraph.java index c1f28d33a77..6cfb3ad1283 100644 --- a/engine/context/src/main/java/io/deephaven/engine/context/PoisonedUpdateGraph.java +++ b/engine/context/src/main/java/io/deephaven/engine/context/PoisonedUpdateGraph.java @@ -118,4 +118,7 @@ public boolean supportsRefreshing() { public void requestRefresh() { fail(); } + + @Override + public void stop() {} } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java index ddb576b62e0..8cf902f16b3 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java @@ -17,13 +17,14 @@ import io.deephaven.engine.tablelogger.EngineTableLoggers; import io.deephaven.engine.tablelogger.UpdatePerformanceLogLogger; import io.deephaven.engine.updategraph.UpdateGraph; -import io.deephaven.engine.updategraph.impl.PeriodicUpdateGraph; +import io.deephaven.engine.updategraph.impl.BaseUpdateGraph; import io.deephaven.engine.util.string.StringUtils; import io.deephaven.internal.log.LoggerFactory; import io.deephaven.io.logger.Logger; import io.deephaven.stream.StreamToBlinkTableAdapter; import io.deephaven.util.QueryConstants; import io.deephaven.util.SafeCloseable; +import io.deephaven.util.annotations.TestUseOnly; import org.apache.commons.lang3.mutable.MutableObject; import org.jetbrains.annotations.NotNull; @@ -37,7 +38,7 @@ /** *

- * This tool is meant to track periodic update events that take place in an {@link PeriodicUpdateGraph}. This generally + * This tool is meant to track periodic update events that take place in an {@link UpdateGraph}. This generally * includes:
 * 1. Update source {@code run()} invocations
  2. @@ -88,8 +89,8 @@ private static class InternalState { private InternalState() { final UpdateGraph publishingGraph = - PeriodicUpdateGraph.getInstance(PeriodicUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME); - Assert.neqNull(publishingGraph, "The " + PeriodicUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME + " UpdateGraph " + BaseUpdateGraph.getInstance(BaseUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME); + Assert.neqNull(publishingGraph, "The " + BaseUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME + " UpdateGraph " + "must be created before UpdatePerformanceTracker can be initialized."); try (final SafeCloseable ignored = ExecutionContext.getContext().withUpdateGraph(publishingGraph).open()) { tableLogger = EngineTableLoggers.get().updatePerformanceLogLogger(); @@ -286,4 +287,11 @@ public long getIntervalEndTimeEpochNanos() { public static QueryTable getQueryTable() { return (QueryTable) BlinkTableTools.blinkToAppendOnly(getInternalState().blink); } + + @TestUseOnly + public static void resetForUnitTests() { + synchronized (UpdatePerformanceTracker.class) { + INSTANCE = null; + } + } } diff --git a/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/BaseUpdateGraph.java b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/BaseUpdateGraph.java new file mode 100644 index 00000000000..2676354eeb7 --- /dev/null +++ b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/BaseUpdateGraph.java @@ -0,0 +1,1075 @@ +/** + * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending + */ + +package io.deephaven.engine.updategraph.impl; + +import io.deephaven.base.log.LogOutput; +import io.deephaven.base.log.LogOutputAppendable; +import io.deephaven.base.reference.SimpleReference; +import io.deephaven.base.verify.Assert; +import io.deephaven.configuration.Configuration; +import io.deephaven.engine.liveness.LivenessManager; +import io.deephaven.engine.liveness.LivenessScope; +import io.deephaven.engine.liveness.LivenessScopeStack; +import io.deephaven.engine.table.impl.perf.PerformanceEntry; +import io.deephaven.engine.table.impl.perf.UpdatePerformanceTracker; +import io.deephaven.engine.table.impl.util.StepUpdater; +import io.deephaven.engine.updategraph.*; +import io.deephaven.engine.util.reference.CleanupReferenceProcessorInstance; +import io.deephaven.hash.KeyedObjectHashMap; +import io.deephaven.hash.KeyedObjectKey; +import io.deephaven.hotspot.JvmIntrospectionContext; +import io.deephaven.io.log.LogEntry; +import io.deephaven.io.log.impl.LogOutputStringImpl; +import io.deephaven.io.logger.Logger; +import io.deephaven.util.SafeCloseable; +import io.deephaven.util.annotations.TestUseOnly; +import io.deephaven.util.datastructures.SimpleReferenceManager; +import io.deephaven.util.datastructures.linked.IntrusiveDoublyLinkedNode; +import io.deephaven.util.datastructures.linked.IntrusiveDoublyLinkedQueue; +import io.deephaven.util.locks.AwareFunctionalLock; +import io.deephaven.util.process.ProcessEnvironment; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +import java.lang.ref.WeakReference; +import java.util.*; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * The BaseUpdateGraph contains common code for other UpdateGraph implementations and a map of named UpdateGraph + * instances. 
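A sketch of how a named graph is fetched from this registry, mirroring the getInstance call in the UpdatePerformanceTracker hunk above; the wrapper class and error handling are illustrative:

    import io.deephaven.engine.updategraph.UpdateGraph;
    import io.deephaven.engine.updategraph.impl.BaseUpdateGraph;

    final class GraphLookup {
        static UpdateGraph defaultGraph() {
            // getInstance returns null until some component has built and
            // registered the named graph.
            final UpdateGraph graph =
                    BaseUpdateGraph.getInstance(BaseUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME);
            if (graph == null) {
                throw new IllegalStateException("default update graph not created");
            }
            return graph;
        }
    }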
+ */ +public abstract class BaseUpdateGraph implements UpdateGraph, LogOutputAppendable { + public static final String DEFAULT_UPDATE_GRAPH_NAME = "DEFAULT"; + + /** + * If the provided update graph is a {@link BaseUpdateGraph} then create a PerformanceEntry using the given + * description. Otherwise, return null. + * + * @param updateGraph The update graph to create a performance entry for. + * @param description The description for the performance entry. + * @return The performance entry, or null if the update graph is not a {@link BaseUpdateGraph}. + */ + @Nullable + public static PerformanceEntry createUpdatePerformanceEntry( + final UpdateGraph updateGraph, + final String description) { + if (updateGraph instanceof BaseUpdateGraph) { + final BaseUpdateGraph bug = (BaseUpdateGraph) updateGraph; + if (bug.updatePerformanceTracker != null) { + return bug.updatePerformanceTracker.getEntry(description); + } + throw new IllegalStateException("Cannot create a performance entry for a BaseUpdateGraph that has " + + "not been completely constructed."); + } + return null; + } + + private static final KeyedObjectHashMap INSTANCES = new KeyedObjectHashMap<>( + new KeyedObjectKey.BasicAdapter<>(UpdateGraph::getName)); + + private final Logger log; + + /** + * Update sources that are part of this BaseUpdateGraph. + */ + private final SimpleReferenceManager sources = + new SimpleReferenceManager<>(UpdateSourceRefreshNotification::new); + + /** + * Recorder for updates source satisfaction as a phase of notification processing. + */ + private volatile long sourcesLastSatisfiedStep; + + /** + * The queue of non-terminal notifications to process. + */ + final IntrusiveDoublyLinkedQueue pendingNormalNotifications = + new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance()); + + /** + * The queue of terminal notifications to process. + */ + final IntrusiveDoublyLinkedQueue terminalNotifications = + new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance()); + + volatile boolean running = true; + + public static final String MINIMUM_CYCLE_DURATION_TO_LOG_MILLIS_PROP = + "UpdateGraph.minimumCycleDurationToLogMillis"; + public static final long DEFAULT_MINIMUM_CYCLE_DURATION_TO_LOG_NANOSECONDS = TimeUnit.MILLISECONDS.toNanos( + Configuration.getInstance().getIntegerWithDefault(MINIMUM_CYCLE_DURATION_TO_LOG_MILLIS_PROP, 25)); + private final long minimumCycleDurationToLogNanos; + + /** when to next flush the performance tracker; initializes to zero to force a flush on start */ + private long nextUpdatePerformanceTrackerFlushTimeNanos; + + /** + * How many cycles we have not logged, but were non-zero. + */ + long suppressedCycles; + long suppressedCyclesTotalNanos; + long suppressedCyclesTotalSafePointTimeMillis; + + /** + * Accumulated UpdateGraph exclusive lock waits for the current cycle (or previous, if idle). + */ + private long currentCycleLockWaitTotalNanos; + + public static class AccumulatedCycleStats { + /** + * Number of cycles run. + */ + public int cycles = 0; + /** + * Number of cycles run not exceeding their time budget. + */ + public int cyclesOnBudget = 0; + /** + * Accumulated safepoints over all cycles. + */ + public int safePoints = 0; + /** + * Accumulated safepoint time over all cycles. 
+ */ + public long safePointPauseTimeMillis = 0L; + + public int[] cycleTimesMicros = new int[32]; + public static final int MAX_DOUBLING_LEN = 1024; + + synchronized void accumulate( + final boolean onBudget, + final long cycleTimeNanos, + final long safePoints, + final long safePointPauseTimeMillis) { + if (onBudget) { + ++cyclesOnBudget; + } + this.safePoints += safePoints; + this.safePointPauseTimeMillis += safePointPauseTimeMillis; + if (cycles >= cycleTimesMicros.length) { + final int newLen; + if (cycleTimesMicros.length < MAX_DOUBLING_LEN) { + newLen = cycleTimesMicros.length * 2; + } else { + newLen = cycleTimesMicros.length + MAX_DOUBLING_LEN; + } + cycleTimesMicros = Arrays.copyOf(cycleTimesMicros, newLen); + } + cycleTimesMicros[cycles] = (int) ((cycleTimeNanos + 500) / 1_000); + ++cycles; + } + + public synchronized void take(final AccumulatedCycleStats out) { + out.cycles = cycles; + out.cyclesOnBudget = cyclesOnBudget; + out.safePoints = safePoints; + out.safePointPauseTimeMillis = safePointPauseTimeMillis; + if (out.cycleTimesMicros.length < cycleTimesMicros.length) { + out.cycleTimesMicros = new int[cycleTimesMicros.length]; + } + System.arraycopy(cycleTimesMicros, 0, out.cycleTimesMicros, 0, cycles); + cycles = 0; + cyclesOnBudget = 0; + safePoints = 0; + safePointPauseTimeMillis = 0; + } + } + + public final AccumulatedCycleStats accumulatedCycleStats = new AccumulatedCycleStats(); + + /** + * Abstracts away the processing of non-terminal notifications. + */ + NotificationProcessor notificationProcessor; + + /** + * Facilitate GC Introspection during refresh cycles. + */ + private final JvmIntrospectionContext jvmIntrospectionContext; + + /** + * The {@link LivenessScope} that should be on top of the {@link LivenessScopeStack} for all run and notification + * processing. Only non-null while some thread is in {@link #doRefresh(Runnable)}. + */ + volatile LivenessScope refreshScope; + + /** + * Is this one of the threads engaged in notification processing? (Either the solitary run thread, or one of the + * pooled threads it uses in some configurations) + */ + final ThreadLocal isUpdateThread = ThreadLocal.withInitial(() -> false); + + private final ThreadLocal serialTableOperationsSafe = ThreadLocal.withInitial(() -> false); + + final LogicalClockImpl logicalClock = new LogicalClockImpl(); + + /** + * Encapsulates locking support. + */ + private final UpdateGraphLock lock; + + /** + * When UpdateGraph.printDependencyInformation is set to true, the UpdateGraph will print debug information for each + * notification that has dependency information; as well as which notifications have been completed and are + * outstanding. + */ + private final boolean printDependencyInformation = + Configuration.getInstance().getBooleanWithDefault("UpdateGraph.printDependencyInformation", false); + + private final String name; + + final UpdatePerformanceTracker updatePerformanceTracker; + + /** + * The BaseUpdateGraph is an abstract class that is suitable for extension by UpdateGraphs that process a set of + * sources and then the resulting {@link io.deephaven.engine.updategraph.NotificationQueue.Notification + * Notifications} using a {@link NotificationProcessor}. 
+ * + * @param name the name of the update graph, which must be unique + * @param allowUnitTestMode is unit test mode allowed, used for configuring the lock + * @param log the logger for this update graph + * @param minimumCycleDurationToLogNanos the minimum cycle time, in nanoseconds, that results in cycle times being + * logged at an INFO level + */ + public BaseUpdateGraph( + final String name, + final boolean allowUnitTestMode, + final Logger log, + long minimumCycleDurationToLogNanos) { + this.name = name; + this.log = log; + this.minimumCycleDurationToLogNanos = minimumCycleDurationToLogNanos; + notificationProcessor = PoisonedNotificationProcessor.INSTANCE; + jvmIntrospectionContext = new JvmIntrospectionContext(); + lock = UpdateGraphLock.create(this, allowUnitTestMode); + updatePerformanceTracker = new UpdatePerformanceTracker(this); + } + + public String getName() { + return name; + } + + public UpdateGraph getUpdateGraph() { + return this; + } + + @Override + public String toString() { + return new LogOutputStringImpl().append(this).toString(); + } + + @Override + public LogicalClock clock() { + return logicalClock; + } + // region Accessors for the shared and exclusive locks + + /** + * <p>
+ * Get the shared lock for this {@link UpdateGraph}. + * <p>
+ * Using this lock will prevent run processing from proceeding concurrently, but will allow other read-only + * processing to proceed. + * <p>
+ * The shared lock implementation is expected to support reentrance. + * <p>
+ * This lock does not support {@link java.util.concurrent.locks.Lock#newCondition()}. Use the exclusive + * lock if you need to wait on events that are driven by run processing. + * + * @return The shared lock for this {@link UpdateGraph} + */ + public AwareFunctionalLock sharedLock() { + return lock.sharedLock(); + } + + /** + * <p>
+ * Get the exclusive lock for this {@link UpdateGraph}. + * <p>
+ * Using this lock will prevent run or read-only processing from proceeding concurrently. + * <p>
+ * The exclusive lock implementation is expected to support reentrance. + * <p>
+ * Note that using the exclusive lock while the shared lock is held by the current thread will result in exceptions, + * as lock upgrade is not supported. + * <p>
    + * This lock does support {@link java.util.concurrent.locks.Lock#newCondition()}. + * + * @return The exclusive lock for this {@link UpdateGraph} + */ + public AwareFunctionalLock exclusiveLock() { + return lock.exclusiveLock(); + } + + // endregion Accessors for the shared and exclusive locks + + /** + * Test if this thread is part of our run thread executor service. + * + * @return whether this is one of our run threads. + */ + @Override + public boolean currentThreadProcessesUpdates() { + return isUpdateThread.get(); + } + + @Override + public boolean serialTableOperationsSafe() { + return serialTableOperationsSafe.get(); + } + + @Override + public boolean setSerialTableOperationsSafe(final boolean newValue) { + final boolean old = serialTableOperationsSafe.get(); + serialTableOperationsSafe.set(newValue); + return old; + } + + + /** + * Add a table to the list of tables to run and mark it as {@link DynamicNode#setRefreshing(boolean) refreshing} if + * it was a {@link DynamicNode}. + * + * @param updateSource The table to be added to the run list + */ + @Override + public void addSource(@NotNull final Runnable updateSource) { + if (!running) { + throw new IllegalStateException("UpdateGraph is no longer running"); + } + + if (updateSource instanceof DynamicNode) { + ((DynamicNode) updateSource).setRefreshing(true); + } + + sources.add(updateSource); + } + + + @Override + public void removeSource(@NotNull final Runnable updateSource) { + sources.remove(updateSource); + } + + /** + * Remove a collection of sources from the list of refreshing sources. + * + * @implNote This will not set the sources as {@link DynamicNode#setRefreshing(boolean) non-refreshing}. + * @param sourcesToRemove The sources to remove from the list of refreshing sources + */ + public void removeSources(final Collection sourcesToRemove) { + sources.removeAll(sourcesToRemove); + } + + /** + * Return the number of valid sources. + * + * @return the number of valid sources + */ + public int sourceCount() { + return sources.size(); + } + + /** + * Enqueue a notification to be flushed according to its priority. Non-terminal notifications should only be + * enqueued during the updating phase of a cycle. That is, they should be enqueued from an update source or + * subsequent notification delivery. 
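For example, code running during the updating phase might enqueue work like this (a minimal sketch; graph, myNotification, and step are illustrative names, not part of this change):

    // Enqueue work from within an update source or a notification's delivery.
    graph.addNotification(myNotification); // terminal notifications are queued separately and run at cycle end

    // Deliver only if `step` is still the active updating cycle:
    if (!graph.maybeAddNotification(myNotification, step)) {
        // The clock has advanced past `step`; arrange redelivery on a later cycle.
    }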
+ * + * @param notification The notification to enqueue + * @see NotificationQueue.Notification#isTerminal() + * @see LogicalClock.State + */ + @Override + public void addNotification(@NotNull final Notification notification) { + if (notification.isTerminal()) { + synchronized (terminalNotifications) { + terminalNotifications.offer(notification); + } + } else { + logDependencies().append(Thread.currentThread().getName()).append(": Adding notification ") + .append(notification).endl(); + synchronized (pendingNormalNotifications) { + Assert.eq(logicalClock.currentState(), "logicalClock.currentState()", + LogicalClock.State.Updating, "LogicalClock.State.Updating"); + pendingNormalNotifications.offer(notification); + } + notificationProcessor.onNotificationAdded(); + } + } + + @Override + public boolean maybeAddNotification(@NotNull final Notification notification, final long deliveryStep) { + if (notification.isTerminal()) { + throw new IllegalArgumentException("Notification must not be terminal"); + } + logDependencies().append(Thread.currentThread().getName()).append(": Adding notification ").append(notification) + .append(" if step is ").append(deliveryStep).endl(); + final boolean added; + synchronized (pendingNormalNotifications) { + // Note that the clock is advanced to idle under the pendingNormalNotifications lock, after which point no + // further normal notifications will be processed on this cycle. + final long logicalClockValue = logicalClock.currentValue(); + if (LogicalClock.getState(logicalClockValue) == LogicalClock.State.Updating + && LogicalClock.getStep(logicalClockValue) == deliveryStep) { + pendingNormalNotifications.offer(notification); + added = true; + } else { + added = false; + } + } + if (added) { + notificationProcessor.onNotificationAdded(); + } + return added; + } + + @Override + public boolean satisfied(final long step) { + StepUpdater.checkForOlderStep(step, sourcesLastSatisfiedStep); + return sourcesLastSatisfiedStep == step; + } + + /** + * Enqueue a collection of notifications to be flushed. + * + * @param notifications The notifications to enqueue + * + * @see #addNotification(Notification) + */ + @Override + public void addNotifications(@NotNull final Collection<? extends Notification> notifications) { + synchronized (pendingNormalNotifications) { + synchronized (terminalNotifications) { + notifications.forEach(this::addNotification); + } + } + } + + /** + * @return Whether this UpdateGraph has a mechanism that supports refreshing + */ + @Override + public boolean supportsRefreshing() { + return true; + } + + /** + * Reset state at the beginning or end of a unit test.
+ * + * @param after if this is done after a test, in which case the liveness scope is popped + * @param errors the list of errors generated during reset + */ + @TestUseOnly + void resetForUnitTests(final boolean after, final List errors) { + sources.clear(); + notificationProcessor.shutdown(); + synchronized (pendingNormalNotifications) { + pendingNormalNotifications.clear(); + } + isUpdateThread.remove(); + synchronized (terminalNotifications) { + terminalNotifications.clear(); + } + logicalClock.resetForUnitTests(); + sourcesLastSatisfiedStep = logicalClock.currentStep(); + + refreshScope = null; + if (after) { + LivenessManager stackTop; + while ((stackTop = LivenessScopeStack.peek()) instanceof LivenessScope) { + LivenessScopeStack.pop((LivenessScope) stackTop); + } + CleanupReferenceProcessorInstance.resetAllForUnitTests(); + } + + ensureUnlocked("unit test reset thread", errors); + } + + @TestUseOnly + void resetLock() { + lock.reset(); + } + + /** + * Flush all non-terminal notifications, complete the logical clock update cycle, then flush all terminal + * notifications. + * + * @param check whether to check that update sources have not yet been satisfied (false in unit test mode) + */ + void flushNotificationsAndCompleteCycle(boolean check) { + // We cannot proceed with normal notifications, nor are we satisfied, until all update source refresh + // notifications have been processed. Note that non-update source notifications that require dependency + // satisfaction are delivered first to the pendingNormalNotifications queue, and hence will not be processed + // until we advance to the flush* methods. + // TODO: If and when we properly integrate update sources into the dependency tracking system, we can + // discontinue this distinct phase, along with the requirement to treat the UpdateGraph itself as a Dependency. + // Until then, we must delay the beginning of "normal" notification processing until all update sources are + // done. See IDS-8039. + notificationProcessor.doAllWork(); + + updateSourcesLastSatisfiedStep(check); + + flushNormalNotificationsAndCompleteCycle(); + flushTerminalNotifications(); + synchronized (pendingNormalNotifications) { + Assert.assertion(pendingNormalNotifications.isEmpty(), "pendingNormalNotifications.isEmpty()"); + } + } + + void updateSourcesLastSatisfiedStep(boolean check) { + if (check && sourcesLastSatisfiedStep >= logicalClock.currentStep()) { + throw new IllegalStateException("Already marked sources as satisfied!"); + } + sourcesLastSatisfiedStep = logicalClock.currentStep(); + } + + /** + * Flush all non-terminal {@link Notification notifications} from the queue. + */ + private void flushNormalNotificationsAndCompleteCycle() { + final IntrusiveDoublyLinkedQueue pendingToEvaluate = + new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance()); + while (true) { + final int outstandingCountAtStart = notificationProcessor.outstandingNotificationsCount(); + notificationProcessor.beforeNotificationsDrained(); + synchronized (pendingNormalNotifications) { + pendingToEvaluate.transferAfterTailFrom(pendingNormalNotifications); + if (outstandingCountAtStart == 0 && pendingToEvaluate.isEmpty()) { + // We complete the cycle here before releasing the lock on pendingNotifications, so that + // maybeAddNotification can detect scenarios where the notification cannot be delivered on the + // desired step. 
+ logicalClock.completeUpdateCycle(); + break; + } + } + logDependencies().append(Thread.currentThread().getName()) + .append(": Notification queue size=").append(pendingToEvaluate.size()) + .append(", outstanding=").append(outstandingCountAtStart) + .endl(); + + boolean nothingBecameSatisfied = true; + for (final Iterator it = pendingToEvaluate.iterator(); it.hasNext();) { + final Notification notification = it.next(); + + Assert.eqFalse(notification.isTerminal(), "notification.isTerminal()"); + Assert.eqFalse(notification.mustExecuteWithUpdateGraphLock(), + "notification.mustExecuteWithUpdateGraphLock()"); + + final boolean satisfied = notification.canExecute(sourcesLastSatisfiedStep); + if (satisfied) { + nothingBecameSatisfied = false; + it.remove(); + logDependencies().append(Thread.currentThread().getName()) + .append(": Submitting to notification processor ").append(notification).endl(); + notificationProcessor.submit(notification); + } else { + logDependencies().append(Thread.currentThread().getName()).append(": Unmet dependencies for ") + .append(notification).endl(); + } + } + if (outstandingCountAtStart == 0 && nothingBecameSatisfied) { + throw new IllegalStateException( + "No outstanding notifications, yet the notification queue is not empty!"); + } + if (notificationProcessor.outstandingNotificationsCount() > 0) { + notificationProcessor.doWork(); + } + } + synchronized (pendingNormalNotifications) { + Assert.eqZero(pendingNormalNotifications.size() + pendingToEvaluate.size(), + "pendingNormalNotifications.size() + pendingToEvaluate.size()"); + } + } + + /** + * Flush all {@link Notification#isTerminal() terminal} {@link Notification notifications} from the queue. + * + * @implNote Any notification that may have been queued while the clock's state is Updating must be invoked during + * this cycle's Idle phase. + */ + private void flushTerminalNotifications() { + synchronized (terminalNotifications) { + for (final Iterator it = terminalNotifications.iterator(); it.hasNext();) { + final Notification notification = it.next(); + Assert.assertion(notification.isTerminal(), "notification.isTerminal()"); + + if (!notification.mustExecuteWithUpdateGraphLock()) { + it.remove(); + // for the single threaded queue case; this enqueues the notification; + // for the executor service case, this causes the notification to be kicked off + notificationProcessor.submit(notification); + } + } + } + + // run the notifications that must be run on this thread + while (true) { + final Notification notificationForThisThread; + synchronized (terminalNotifications) { + notificationForThisThread = terminalNotifications.poll(); + } + if (notificationForThisThread == null) { + break; + } + runNotification(notificationForThisThread); + } + + // We can not proceed until all of the terminal notifications have executed. + notificationProcessor.doAllWork(); + } + + /** + * Abstract away the details of satisfied notification processing. + */ + interface NotificationProcessor { + + /** + * Submit a satisfied notification for processing. + * + * @param notification The notification + */ + void submit(@NotNull NotificationQueue.Notification notification); + + /** + * Submit a queue of satisfied notification for processing. + * + * @param notifications The queue of notifications to + * {@link IntrusiveDoublyLinkedQueue#transferAfterTailFrom(IntrusiveDoublyLinkedQueue) transfer} from. 
+ * Will become empty as a result of successful completion + */ + void submitAll(@NotNull IntrusiveDoublyLinkedQueue<Notification> notifications); + + /** + * Query the number of outstanding notifications submitted to this processor. + * + * @return The number of outstanding notifications + */ + int outstandingNotificationsCount(); + + /** + * <p>
+ * Do work (or in the multi-threaded case, wait for some work to have happened). + * <p>
    + * Caller must know that work is outstanding. + */ + void doWork(); + + /** + * Do all outstanding work. + */ + void doAllWork(); + + /** + * Shutdown this notification processor (for unit tests). + */ + void shutdown(); + + /** + * Called after a pending notification is added. + */ + void onNotificationAdded(); + + /** + * Called before pending notifications are drained. + */ + void beforeNotificationsDrained(); + } + + void runNotification(@NotNull final Notification notification) { + logDependencies().append(Thread.currentThread().getName()).append(": Executing ").append(notification).endl(); + + final LivenessScope scope; + final boolean releaseScopeOnClose; + if (notification.isTerminal()) { + // Terminal notifications can't create new notifications, so they have no need to participate in a shared + // run scope. + scope = new LivenessScope(); + releaseScopeOnClose = true; + } else { + // Non-terminal notifications must use a shared run scope. + Assert.neqNull(refreshScope, "refreshScope"); + scope = refreshScope == LivenessScopeStack.peek() ? null : refreshScope; + releaseScopeOnClose = false; + } + + try (final SafeCloseable ignored = scope == null ? null : LivenessScopeStack.open(scope, releaseScopeOnClose)) { + notification.run(); + logDependencies().append(Thread.currentThread().getName()).append(": Completed ").append(notification) + .endl(); + } catch (final Exception e) { + log.error().append(Thread.currentThread().getName()) + .append(": Exception while executing UpdateGraph notification: ").append(notification) + .append(": ").append(e).endl(); + ProcessEnvironment.getGlobalFatalErrorReporter() + .report("Exception while processing UpdateGraph notification", e); + } + } + + class QueueNotificationProcessor implements NotificationProcessor { + + final IntrusiveDoublyLinkedQueue satisfiedNotifications = + new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance()); + + @Override + public void submit(@NotNull final Notification notification) { + satisfiedNotifications.offer(notification); + } + + @Override + public void submitAll(@NotNull IntrusiveDoublyLinkedQueue notifications) { + satisfiedNotifications.transferAfterTailFrom(notifications); + } + + @Override + public int outstandingNotificationsCount() { + return satisfiedNotifications.size(); + } + + @Override + public void doWork() { + Notification satisfiedNotification; + while ((satisfiedNotification = satisfiedNotifications.poll()) != null) { + runNotification(satisfiedNotification); + } + } + + @Override + public void doAllWork() { + doWork(); + } + + @Override + public void shutdown() { + satisfiedNotifications.clear(); + } + + @Override + public void onNotificationAdded() {} + + @Override + public void beforeNotificationsDrained() {} + } + + + static LogEntry appendAsMillisFromNanos(final LogEntry entry, final long nanos) { + if (nanos > 0) { + return entry.appendDouble(nanos / 1_000_000.0, 3); + } + return entry.append(0); + } + + /** + * Iterate over all monitored tables and run them. 
+ */ + void refreshTablesAndFlushNotifications() { + final long startTimeNanos = System.nanoTime(); + + currentCycleLockWaitTotalNanos = 0; + jvmIntrospectionContext.startSample(); + + if (sources.isEmpty()) { + exclusiveLock().doLocked(this::flushTerminalNotifications); + } else { + refreshAllTables(); + } + + jvmIntrospectionContext.endSample(); + final long cycleTimeNanos = System.nanoTime() - startTimeNanos; + computeStatsAndLogCycle(cycleTimeNanos); + } + + private void computeStatsAndLogCycle(final long cycleTimeNanos) { + final long safePointPauseTimeMillis = jvmIntrospectionContext.deltaSafePointPausesTimeMillis(); + accumulatedCycleStats.accumulate( + isCycleOnBudget(cycleTimeNanos), + cycleTimeNanos, + jvmIntrospectionContext.deltaSafePointPausesCount(), + safePointPauseTimeMillis); + if (cycleTimeNanos >= minimumCycleDurationToLogNanos) { + if (suppressedCycles > 0) { + logSuppressedCycles(); + } + final double cycleTimeMillis = cycleTimeNanos / 1_000_000.0; + LogEntry entry = log.info().append(getName()) + .append(": Update Graph Processor cycleTime=").appendDouble(cycleTimeMillis, 3); + if (jvmIntrospectionContext.hasSafePointData()) { + final long safePointSyncTimeMillis = jvmIntrospectionContext.deltaSafePointSyncTimeMillis(); + entry = entry + .append("ms, safePointTime=") + .append(safePointPauseTimeMillis) + .append("ms, safePointTimePct="); + if (safePointPauseTimeMillis > 0 && cycleTimeMillis > 0.0) { + final double safePointTimePct = 100.0 * safePointPauseTimeMillis / cycleTimeMillis; + entry = entry.appendDouble(safePointTimePct, 2); + } else { + entry = entry.append("0"); + } + entry = entry.append("%, safePointSyncTime=").append(safePointSyncTimeMillis); + } + entry = entry.append("ms, lockWaitTime="); + entry = appendAsMillisFromNanos(entry, currentCycleLockWaitTotalNanos); + entry.append("ms").endl(); + return; + } + if (cycleTimeNanos > 0) { + ++suppressedCycles; + suppressedCyclesTotalNanos += cycleTimeNanos; + suppressedCyclesTotalSafePointTimeMillis += safePointPauseTimeMillis; + if (suppressedCyclesTotalNanos >= minimumCycleDurationToLogNanos) { + logSuppressedCycles(); + } + } + } + + /** + * Is the provided cycle time on budget? 
+ * + * @param cycleTimeNanos the cycle time, in nanoseconds + * + * @return true if the cycle time is within the desired budget + */ + public boolean isCycleOnBudget(long cycleTimeNanos) { + return true; + } + + private void logSuppressedCycles() { + LogEntry entry = log.info() + .append("Minimal Update Graph Processor cycle times: ") + .appendDouble((double) (suppressedCyclesTotalNanos) / 1_000_000.0, 3).append("ms / ") + .append(suppressedCycles).append(" cycles = ") + .appendDouble( + (double) suppressedCyclesTotalNanos / (double) suppressedCycles / 1_000_000.0, 3) + .append("ms/cycle average)"); + if (jvmIntrospectionContext.hasSafePointData()) { + entry = entry + .append(", safePointTime=") + .append(suppressedCyclesTotalSafePointTimeMillis) + .append("ms"); + } + entry.endl(); + suppressedCycles = suppressedCyclesTotalNanos = 0; + suppressedCyclesTotalSafePointTimeMillis = 0; + } + + + void maybeFlushUpdatePerformance(final long nowNanos, final long checkTime) { + if (checkTime >= nextUpdatePerformanceTrackerFlushTimeNanos) { + nextUpdatePerformanceTrackerFlushTimeNanos = + nowNanos + MILLISECONDS.toNanos(UpdatePerformanceTracker.REPORT_INTERVAL_MILLIS); + try { + updatePerformanceTracker.flush(); + } catch (Exception err) { + log.error().append("Error flushing UpdatePerformanceTracker: ").append(err).endl(); + } + } + } + + /** + * In unit tests it can be convenient to force the update performance tracker to flush, without waiting for the + * complete REPORT_INTERVAL_MILLIS to elapse. + */ + @TestUseOnly + public void resetNextFlushTime() { + nextUpdatePerformanceTrackerFlushTimeNanos = 0; + } + + /** + * Refresh all the update sources within an {@link LogicalClock update cycle} after the UpdateGraph has been locked. + * At the end of the updates all {@link Notification notifications} will be flushed. + */ + void refreshAllTables() { + doRefresh(() -> sources.forEach((final UpdateSourceRefreshNotification updateSourceNotification, + final Runnable unused) -> notificationProcessor.submit(updateSourceNotification))); + } + + /** + * Perform a run cycle, using {@code refreshFunction} to ensure the desired update sources are refreshed at the + * start. + * + * @param refreshFunction Function to submit one or more {@link UpdateSourceRefreshNotification update source + * refresh notifications} to the {@link NotificationProcessor notification processor} or run them directly. + */ + private void doRefresh(@NotNull final Runnable refreshFunction) { + final long lockStartTimeNanos = System.nanoTime(); + exclusiveLock().doLocked(() -> { + currentCycleLockWaitTotalNanos += System.nanoTime() - lockStartTimeNanos; + if (!running) { + return; + } + synchronized (pendingNormalNotifications) { + Assert.eqZero(pendingNormalNotifications.size(), "pendingNormalNotifications.size()"); + } + Assert.eqNull(refreshScope, "refreshScope"); + refreshScope = new LivenessScope(); + final long updatingCycleValue = logicalClock.startUpdateCycle(); + logDependencies().append("Beginning UpdateGraph cycle step=") + .append(logicalClock.currentStep()).endl(); + try (final SafeCloseable ignored = LivenessScopeStack.open(refreshScope, true)) { + refreshFunction.run(); + flushNotificationsAndCompleteCycle(true); + } finally { + logicalClock.ensureUpdateCycleCompleted(updatingCycleValue); + refreshScope = null; + } + logDependencies().append("Completed UpdateGraph cycle step=") + .append(logicalClock.currentStep()).endl(); + }); + } + + /** + * Re-usable class for adapting update sources to {@link Notification}s. 
+ */ + static final class UpdateSourceRefreshNotification extends AbstractNotification + implements SimpleReference { + + private final WeakReference updateSourceRef; + + private UpdateSourceRefreshNotification(@NotNull final Runnable updateSource) { + super(false); + updateSourceRef = new WeakReference<>(updateSource); + } + + @Override + public LogOutput append(@NotNull final LogOutput logOutput) { + return logOutput.append("UpdateSourceRefreshNotification{").append(System.identityHashCode(this)) + .append(", for UpdateSource{").append(System.identityHashCode(get())).append("}}"); + } + + @Override + public boolean canExecute(final long step) { + return true; + } + + @Override + public void run() { + final Runnable updateSource = updateSourceRef.get(); + if (updateSource == null) { + return; + } + updateSource.run(); + } + + @Override + public Runnable get() { + // NB: Arguably we should make get() and clear() synchronized. + return updateSourceRef.get(); + } + + @Override + public void clear() { + updateSourceRef.clear(); + } + } + + public LogEntry logDependencies() { + if (printDependencyInformation) { + return log.info(); + } else { + return LogEntry.NULL; + } + } + + /** + * Ensure the lock is not held by the current thread. + * + * @param callerDescription the description of the caller + * @param errors an optional list to populate with errors when the lock is held. + */ + @TestUseOnly + void ensureUnlocked(@NotNull final String callerDescription, @Nullable final List errors) { + if (exclusiveLock().isHeldByCurrentThread()) { + if (errors != null) { + errors.add(callerDescription + ": UpdateGraph exclusive lock is still held"); + } + while (exclusiveLock().isHeldByCurrentThread()) { + exclusiveLock().unlock(); + } + } + if (sharedLock().isHeldByCurrentThread()) { + if (errors != null) { + errors.add(callerDescription + ": UpdateGraph shared lock is still held"); + } + while (sharedLock().isHeldByCurrentThread()) { + sharedLock().unlock(); + } + } + } + + public void takeAccumulatedCycleStats(AccumulatedCycleStats updateGraphAccumCycleStats) { + accumulatedCycleStats.take(updateGraphAccumCycleStats); + } + + public static UpdateGraph getInstance(final String name) { + return INSTANCES.get(name); + } + + + + /** + * Remove a named UpdateGraph. + * + *
<p> + * In addition to removing the UpdateGraph from the instances, an attempt is made to {@link #stop()} it. + * </p>
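For example (a sketch; the graph name is illustrative):

    // Unregister a named graph; stop() is attempted on it as part of removal.
    if (!BaseUpdateGraph.removeInstance("example")) {
        // no graph was registered under that name
    }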
    + * + * @param name the name of the UpdateGraph to remove + * @return true if the update graph was found + */ + public static boolean removeInstance(final String name) { + final UpdateGraph graph; + synchronized (INSTANCES) { + graph = INSTANCES.removeKey(name); + if (graph == null) { + return false; + } + } + graph.stop(); + return true; + } + + /** + * Builds and caches a new UpdateGraph named {@code name} and provided by {@code construct}. It is an error if there + * is already an UpdateGraph with the same name. + * + * @param name the name of the new update graph + * @param construct A {@link Supplier} to construct an UpdateGraph if no update graph with the name already exists. + * The Supplier must provide an update graph with the given name. + * + * @throws IllegalStateException if an UpdateGraph with the provided name already exists + */ + public static T buildOrThrow(final String name, final Supplier construct) { + synchronized (INSTANCES) { + if (INSTANCES.containsKey(name)) { + throw new IllegalStateException( + String.format("UpdateGraph with name %s already exists", name)); + } + final T newGraph = construct.get(); + Assert.equals(newGraph.getName(), "newGraph.getName()", name, "name"); + INSTANCES.put(name, newGraph); + return newGraph; + } + } + + /** + * Returns an existing UpdateGraph with the provided {@code name} if one exists, else returns a new named + * UpdateGraph supplied by {@code construct}. + * + * @param construct A {@link Supplier} to construct an UpdateGraph if no update graph with the name already exists. + * The Supplier must provide an update graph with the given name. + * + * @return the UpdateGraph + */ + public static T existingOrBuild(final String name, Supplier construct) { + return INSTANCES.putIfAbsent(name, (nameToInsert) -> { + final T newGraph = construct.get(); + Assert.equals(newGraph.getName(), "newGraph.getName()", nameToInsert, "name"); + return newGraph; + }).cast(); + } +} diff --git a/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/EventDrivenUpdateGraph.java b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/EventDrivenUpdateGraph.java new file mode 100644 index 00000000000..701a0d17878 --- /dev/null +++ b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/EventDrivenUpdateGraph.java @@ -0,0 +1,143 @@ +package io.deephaven.engine.updategraph.impl; + +import io.deephaven.base.log.LogOutput; +import io.deephaven.engine.context.ExecutionContext; +import io.deephaven.internal.log.LoggerFactory; +import io.deephaven.io.logger.Logger; +import io.deephaven.util.SafeCloseable; +import org.jetbrains.annotations.NotNull; + +/** + * An EventDrivenUpdateGraph provides an isolated refresh processor. + * + *
<p> + * As with a {@link PeriodicUpdateGraph}, the EventDrivenUpdateGraph contains a set of sources, but it is refreshed only + * when a call to {@link #requestRefresh()} is made. All sources are synchronously refreshed on that thread, and then + * the resultant notifications are also synchronously processed. + * </p>
    + */ +public class EventDrivenUpdateGraph extends BaseUpdateGraph { + private static final Logger log = LoggerFactory.getLogger(EventDrivenUpdateGraph.class); + private boolean started = false; + + /** + * Create a builder for an EventDrivenUpdateGraph with the given name. + * + * @param name the name of the new EventDrivenUpdateGraph + * @return a builder for the EventDrivenUpdateGraph + */ + public static EventDrivenUpdateGraph.Builder newBuilder(final String name) { + return new EventDrivenUpdateGraph.Builder(name); + } + + private EventDrivenUpdateGraph(String name, long minimumCycleDurationToLogNanos) { + super(name, false, log, minimumCycleDurationToLogNanos); + notificationProcessor = new QueueNotificationProcessor(); + } + + @Override + public LogOutput append(@NotNull final LogOutput logOutput) { + return logOutput.append("EventDrivenUpdateGraph-").append(getName()); + } + + @Override + public int parallelismFactor() { + return 1; + } + + /** + * Refresh all sources and execute the resulting notifications synchronously on this thread. + */ + @Override + public void requestRefresh() { + maybeStart(); + // do the work to refresh everything, on this thread + isUpdateThread.set(true); + try (final SafeCloseable ignored = ExecutionContext.newBuilder().setUpdateGraph(this).build().open()) { + refreshAllTables(); + } finally { + isUpdateThread.remove(); + } + final long nowNanos = System.nanoTime(); + synchronized (this) { + maybeFlushUpdatePerformance(nowNanos, nowNanos); + } + } + + /** + * We defer starting the update performance tracker until our first cycle. This is essential when we are the DEFAULT + * graph used for UPT publishing, as the UPT requires the publication graph to be in the BaseUpdateGraph map, which + * is not done until after our constructor completes. + */ + private synchronized void maybeStart() { + if (started) { + return; + } + updatePerformanceTracker.start(); + started = true; + } + + @Override + public void stop() { + running = false; + // if we wait for the lock to be done, then we should have completed our cycle and will not execute again + exclusiveLock().doLocked(() -> { + }); + } + + /** + * Builds or retrieves a new EventDrivenUpdateGraph. + */ + public static class Builder { + private final String name; + private long minimumCycleDurationToLogNanos = DEFAULT_MINIMUM_CYCLE_DURATION_TO_LOG_NANOSECONDS; + + public Builder(String name) { + this.name = name; + } + + /** + * Set the minimum duration of an update cycle that should be logged at the INFO level. + * + * @param minimumCycleDurationToLogNanos threshold to log a slow cycle + * @return this builder + */ + public Builder minimumCycleDurationToLogNanos(long minimumCycleDurationToLogNanos) { + this.minimumCycleDurationToLogNanos = minimumCycleDurationToLogNanos; + return this; + } + + /** + * Constructs and returns an EventDrivenUpdateGraph. It is an error to do so if an UpdateGraph already exists + * with the name provided to this builder. + * + * @return the new EventDrivenUpdateGraph + * @throws IllegalStateException if an UpdateGraph with the provided name already exists + */ + public EventDrivenUpdateGraph build() { + return BaseUpdateGraph.buildOrThrow(name, this::construct); + } + + /** + * Returns an existing EventDrivenUpdateGraph with the name provided to this Builder, if one exists, else + * returns a new EventDrivenUpdateGraph. + * + *
<p> + * If the options for the existing graph are different than the options specified in this Builder, this + * Builder's options are ignored. + * </p>
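Putting the Builder and the synchronous refresh together, typical usage might look like this (a minimal sketch; the graph name and source are illustrative):

    // Build (or fetch) an isolated graph, register a source, and drive one synchronous cycle.
    final EventDrivenUpdateGraph graph = EventDrivenUpdateGraph.newBuilder("example").existingOrBuild();
    graph.addSource(mySource); // mySource: any Runnable update source; marked refreshing if it is a DynamicNode
    graph.requestRefresh();    // refreshes all sources and flushes notifications on this thread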
    + * + * @return the EventDrivenUpdateGraph + * @throws ClassCastException if the existing graph is not an EventDrivenUpdateGraph + */ + public EventDrivenUpdateGraph existingOrBuild() { + return BaseUpdateGraph.existingOrBuild(name, this::construct); + } + + private EventDrivenUpdateGraph construct() { + return new EventDrivenUpdateGraph( + name, + minimumCycleDurationToLogNanos); + } + } +} diff --git a/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java index 34e0462dde7..a86225c9522 100644 --- a/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java +++ b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PeriodicUpdateGraph.java @@ -6,42 +6,29 @@ import io.deephaven.UncheckedDeephavenException; import io.deephaven.base.SleepUtil; import io.deephaven.base.log.LogOutput; -import io.deephaven.base.reference.SimpleReference; import io.deephaven.base.verify.Assert; import io.deephaven.chunk.util.pools.MultiChunkPool; import io.deephaven.configuration.Configuration; import io.deephaven.engine.context.ExecutionContext; -import io.deephaven.engine.liveness.LivenessManager; import io.deephaven.engine.liveness.LivenessScope; import io.deephaven.engine.liveness.LivenessScopeStack; -import io.deephaven.engine.table.impl.perf.PerformanceEntry; -import io.deephaven.engine.table.impl.perf.UpdatePerformanceTracker; -import io.deephaven.engine.table.impl.util.StepUpdater; import io.deephaven.engine.updategraph.*; -import io.deephaven.engine.util.reference.CleanupReferenceProcessorInstance; import io.deephaven.engine.util.systemicmarking.SystemicObjectTracker; -import io.deephaven.hash.KeyedObjectHashMap; -import io.deephaven.hash.KeyedObjectKey; -import io.deephaven.hotspot.JvmIntrospectionContext; import io.deephaven.internal.log.LoggerFactory; -import io.deephaven.io.log.LogEntry; -import io.deephaven.io.log.impl.LogOutputStringImpl; import io.deephaven.io.logger.Logger; import io.deephaven.util.SafeCloseable; import io.deephaven.util.annotations.TestUseOnly; -import io.deephaven.util.datastructures.SimpleReferenceManager; import io.deephaven.util.datastructures.linked.IntrusiveDoublyLinkedNode; import io.deephaven.util.datastructures.linked.IntrusiveDoublyLinkedQueue; import io.deephaven.util.function.ThrowingRunnable; -import io.deephaven.util.locks.AwareFunctionalLock; -import io.deephaven.util.process.ProcessEnvironment; import io.deephaven.util.thread.NamingThreadFactory; import io.deephaven.util.thread.ThreadInitializationFactory; import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; -import java.lang.ref.WeakReference; -import java.util.*; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Random; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -62,9 +49,8 @@ * defined) * */ -public class PeriodicUpdateGraph implements UpdateGraph { +public class PeriodicUpdateGraph extends BaseUpdateGraph { - public static final String DEFAULT_UPDATE_GRAPH_NAME = "DEFAULT"; public static final int NUM_THREADS_DEFAULT_UPDATE_GRAPH = Configuration.getInstance().getIntegerWithDefault("PeriodicUpdateGraph.updateThreads", -1); @@ -72,56 +58,8 @@ public static Builder newBuilder(final String name) { return new Builder(name); } - /** - * If the provided update graph is a {@link 
PeriodicUpdateGraph} then create a PerformanceEntry using the given - * description. Otherwise, return null. - * - * @param updateGraph The update graph to create a performance entry for. - * @param description The description for the performance entry. - * @return The performance entry, or null if the update graph is not a {@link PeriodicUpdateGraph}. - */ - @Nullable - public static PerformanceEntry createUpdatePerformanceEntry( - final UpdateGraph updateGraph, - final String description) { - if (updateGraph instanceof PeriodicUpdateGraph) { - final PeriodicUpdateGraph pug = (PeriodicUpdateGraph) updateGraph; - if (pug.updatePerformanceTracker != null) { - return pug.updatePerformanceTracker.getEntry(description); - } - throw new IllegalStateException("Cannot create a performance entry for a PeriodicUpdateGraph that has " - + "not been completely constructed."); - } - return null; - } - - private static final KeyedObjectHashMap INSTANCES = new KeyedObjectHashMap<>( - new KeyedObjectKey.BasicAdapter<>(PeriodicUpdateGraph::getName)); - - private final Logger log = LoggerFactory.getLogger(PeriodicUpdateGraph.class); - - /** - * Update sources that are part of this PeriodicUpdateGraph. - */ - private final SimpleReferenceManager sources = - new SimpleReferenceManager<>(UpdateSourceRefreshNotification::new); - - /** - * Recorder for updates source satisfaction as a phase of notification processing. - */ - private volatile long sourcesLastSatisfiedStep; - /** - * The queue of non-terminal notifications to process. - */ - private final IntrusiveDoublyLinkedQueue pendingNormalNotifications = - new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance()); - - /** - * The queue of terminal notifications to process. - */ - private final IntrusiveDoublyLinkedQueue terminalNotifications = - new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance()); + private static final Logger log = LoggerFactory.getLogger(PeriodicUpdateGraph.class); /** * A flag indicating that an accelerated cycle has been requested. @@ -129,7 +67,6 @@ public static PerformanceEntry createUpdatePerformanceEntry( private final AtomicBoolean refreshRequested = new AtomicBoolean(); private final Thread refreshThread; - private volatile boolean running = true; /** * {@link ScheduledExecutorService} used for scheduling the {@link #watchDogTimeoutProcedure}. @@ -159,113 +96,8 @@ public static PerformanceEntry createUpdatePerformanceEntry( public static final String DEFAULT_TARGET_CYCLE_DURATION_MILLIS_PROP = "PeriodicUpdateGraph.targetCycleDurationMillis"; - public static final String MINIMUM_CYCLE_DURATION_TO_LOG_MILLIS_PROP = - "PeriodicUpdateGraph.minimumCycleDurationToLogMillis"; private final long defaultTargetCycleDurationMillis; private volatile long targetCycleDurationMillis; - private final long minimumCycleDurationToLogNanos; - - /** when to next flush the performance tracker; initializes to zero to force a flush on start */ - private long nextUpdatePerformanceTrackerFlushTimeNanos; - - /** - * How many cycles we have not logged, but were non-zero. - */ - private long suppressedCycles; - private long suppressedCyclesTotalNanos; - private long suppressedCyclesTotalSafePointTimeMillis; - - /** - * Accumulated UpdateGraph exclusive lock waits for the current cycle (or previous, if idle). - */ - private long currentCycleLockWaitTotalNanos; - /** - * Accumulated delays due to intracycle yields for the current cycle (or previous, if idle). 
- */ - private long currentCycleYieldTotalNanos; - /** - * Accumulated delays due to intracycle sleeps for the current cycle (or previous, if idle). - */ - private long currentCycleSleepTotalNanos; - - public static class AccumulatedCycleStats { - /** - * Number of cycles run. - */ - public int cycles = 0; - /** - * Number of cycles run not exceeding their time budget. - */ - public int cyclesOnBudget = 0; - /** - * Accumulated safepoints over all cycles. - */ - public int safePoints = 0; - /** - * Accumulated safepoint time over all cycles. - */ - public long safePointPauseTimeMillis = 0L; - - public int[] cycleTimesMicros = new int[32]; - public static final int MAX_DOUBLING_LEN = 1024; - - synchronized void accumulate( - final long targetCycleDurationMillis, - final long cycleTimeNanos, - final long safePoints, - final long safePointPauseTimeMillis) { - final boolean onBudget = targetCycleDurationMillis * 1000 * 1000 >= cycleTimeNanos; - if (onBudget) { - ++cyclesOnBudget; - } - this.safePoints += safePoints; - this.safePointPauseTimeMillis += safePointPauseTimeMillis; - if (cycles >= cycleTimesMicros.length) { - final int newLen; - if (cycleTimesMicros.length < MAX_DOUBLING_LEN) { - newLen = cycleTimesMicros.length * 2; - } else { - newLen = cycleTimesMicros.length + MAX_DOUBLING_LEN; - } - cycleTimesMicros = Arrays.copyOf(cycleTimesMicros, newLen); - } - cycleTimesMicros[cycles] = (int) ((cycleTimeNanos + 500) / 1_000); - ++cycles; - } - - public synchronized void take(final AccumulatedCycleStats out) { - out.cycles = cycles; - out.cyclesOnBudget = cyclesOnBudget; - out.safePoints = safePoints; - out.safePointPauseTimeMillis = safePointPauseTimeMillis; - if (out.cycleTimesMicros.length < cycleTimesMicros.length) { - out.cycleTimesMicros = new int[cycleTimesMicros.length]; - } - System.arraycopy(cycleTimesMicros, 0, out.cycleTimesMicros, 0, cycles); - cycles = 0; - cyclesOnBudget = 0; - safePoints = 0; - safePointPauseTimeMillis = 0; - } - } - - public final AccumulatedCycleStats accumulatedCycleStats = new AccumulatedCycleStats(); - - /** - * Abstracts away the processing of non-terminal notifications. - */ - private NotificationProcessor notificationProcessor; - - /** - * Facilitate GC Introspection during refresh cycles. - */ - private final JvmIntrospectionContext jvmIntrospectionContext; - - /** - * The {@link LivenessScope} that should be on top of the {@link LivenessScopeStack} for all run and notification - * processing. Only non-null while some thread is in {@link #doRefresh(Runnable)}. - */ - private volatile LivenessScope refreshScope; /** * The number of threads in our executor service for dispatching notifications. If 1, then we don't actually use the @@ -273,50 +105,21 @@ public synchronized void take(final AccumulatedCycleStats out) { */ private final int updateThreads; - /** - * Is this one of the threads engaged in notification processing? 
(Either the solitary run thread, or one of the - * pooled threads it uses in some configurations) - */ - private final ThreadLocal isUpdateThread = ThreadLocal.withInitial(() -> false); - - private final ThreadLocal serialTableOperationsSafe = ThreadLocal.withInitial(() -> false); - private final long minimumInterCycleSleep = Configuration.getInstance().getIntegerWithDefault("PeriodicUpdateGraph.minimumInterCycleSleep", 0); private final boolean interCycleYield = Configuration.getInstance().getBooleanWithDefault("PeriodicUpdateGraph.interCycleYield", false); - private final LogicalClockImpl logicalClock = new LogicalClockImpl(); - - /** - * Encapsulates locking support. - */ - private final UpdateGraphLock lock; - - /** - * When PeriodicUpdateGraph.printDependencyInformation is set to true, the PeriodicUpdateGraph will print debug - * information for each notification that has dependency information; as well as which notifications have been - * completed and are outstanding. - */ - private final boolean printDependencyInformation = - Configuration.getInstance().getBooleanWithDefault("PeriodicUpdateGraph.printDependencyInformation", false); - - private final String name; - - private final UpdatePerformanceTracker updatePerformanceTracker; - public PeriodicUpdateGraph( final String name, final boolean allowUnitTestMode, final long targetCycleDurationMillis, final long minimumCycleDurationToLogNanos, final int numUpdateThreads) { - this.name = name; + super(name, allowUnitTestMode, log, minimumCycleDurationToLogNanos); this.allowUnitTestMode = allowUnitTestMode; this.defaultTargetCycleDurationMillis = targetCycleDurationMillis; this.targetCycleDurationMillis = targetCycleDurationMillis; - this.minimumCycleDurationToLogNanos = minimumCycleDurationToLogNanos; - this.lock = UpdateGraphLock.create(this, this.allowUnitTestMode); if (numUpdateThreads <= 0) { this.updateThreads = Runtime.getRuntime().availableProcessors(); @@ -324,9 +127,6 @@ public PeriodicUpdateGraph( this.updateThreads = numUpdateThreads; } - notificationProcessor = PoisonedNotificationProcessor.INSTANCE; - jvmIntrospectionContext = new JvmIntrospectionContext(); - refreshThread = new Thread(ThreadInitializationFactory.wrapRunnable(() -> { configureRefreshThread(); while (running) { @@ -343,31 +143,11 @@ public Thread newThread(@NotNull final Runnable r) { return super.newThread(ThreadInitializationFactory.wrapRunnable(r)); } }); - - updatePerformanceTracker = new UpdatePerformanceTracker(this); - } - - public String getName() { - return name; - } - - public UpdateGraph getUpdateGraph() { - return this; } @Override public LogOutput append(@NotNull final LogOutput logOutput) { - return logOutput.append("PeriodicUpdateGraph-").append(name); - } - - @Override - public String toString() { - return new LogOutputStringImpl().append(this).toString(); - } - - @Override - public LogicalClock clock() { - return logicalClock; + return logOutput.append("PeriodicUpdateGraph-").append(getName()); } @NotNull @@ -445,69 +225,6 @@ public int parallelismFactor() { } } - // region Accessors for the shared and exclusive locks - - /** - *
<p> - * Get the shared lock for this {@link PeriodicUpdateGraph}. - * <p>
- * Using this lock will prevent run processing from proceeding concurrently, but will allow other read-only - * processing to proceed. - * <p>
- * The shared lock implementation is expected to support reentrance. - * <p>
- * This lock does not support {@link java.util.concurrent.locks.Lock#newCondition()}. Use the exclusive - * lock if you need to wait on events that are driven by run processing. - * - * @return The shared lock for this {@link PeriodicUpdateGraph} - */ - public AwareFunctionalLock sharedLock() { - return lock.sharedLock(); - } - - /** - * <p>
- * Get the exclusive lock for this {@link PeriodicUpdateGraph}. - * <p>
- * Using this lock will prevent run or read-only processing from proceeding concurrently. - * <p>
- * The exclusive lock implementation is expected to support reentrance. - * <p>
- * Note that using the exclusive lock while the shared lock is held by the current thread will result in exceptions, - * as lock upgrade is not supported. - * <p>
    - * This lock does support {@link java.util.concurrent.locks.Lock#newCondition()}. - * - * @return The exclusive lock for this {@link PeriodicUpdateGraph} - */ - public AwareFunctionalLock exclusiveLock() { - return lock.exclusiveLock(); - } - - // endregion Accessors for the shared and exclusive locks - - /** - * Test if this thread is part of our run thread executor service. - * - * @return whether this is one of our run threads. - */ - @Override - public boolean currentThreadProcessesUpdates() { - return isUpdateThread.get(); - } - - @Override - public boolean serialTableOperationsSafe() { - return serialTableOperationsSafe.get(); - } - - @Override - public boolean setSerialTableOperationsSafe(final boolean newValue) { - final boolean old = serialTableOperationsSafe.get(); - serialTableOperationsSafe.set(newValue); - return old; - } - /** * Set the target duration of an update cycle, including the updating phase and the idle phase. This is also the * target interval between the start of one cycle and the start of the next. @@ -532,6 +249,11 @@ public long getTargetCycleDurationMillis() { return targetCycleDurationMillis; } + @Override + public boolean isCycleOnBudget(long cycleTimeNanos) { + return cycleTimeNanos <= MILLISECONDS.toNanos(targetCycleDurationMillis); + } + /** * Resets the run cycle time to the default target configured via the {@link Builder} setting. * @@ -563,7 +285,7 @@ public void enableUnitTestMode() { if (refreshThread.isAlive()) { throw new IllegalStateException("PeriodicUpdateGraph.refreshThread is executing!"); } - lock.reset(); + resetLock(); unitTestMode = true; unitTestRefreshThreadPool = makeUnitTestRefreshExecutor(); updatePerformanceTracker.enableUnitTestMode(); @@ -631,58 +353,32 @@ public void start() { * Begins the process to stop all processing threads and forces ReferenceCounted sources to a reference count of * zero. */ + @Override public void stop() { running = false; notificationProcessor.shutdown(); + // ensure that any outstanding cycle has completed + exclusiveLock().doLocked(() -> { + }); } /** - * Add a table to the list of tables to run and mark it as {@link DynamicNode#setRefreshing(boolean) refreshing} if - * it was a {@link DynamicNode}. + * {@inheritDoc} * * @implNote This will do nothing in {@link #enableUnitTestMode() unit test} mode other than mark the table as * refreshing. - * @param updateSource The table to be added to the run list */ @Override - public void addSource(@NotNull final Runnable updateSource) { - if (!running) { - throw new IllegalStateException("PeriodicUpdateGraph is no longer running"); - } - - if (updateSource instanceof DynamicNode) { - ((DynamicNode) updateSource).setRefreshing(true); - } - - if (!allowUnitTestMode) { + public void addSource(@NotNull Runnable updateSource) { + if (allowUnitTestMode) { // if we are in unit test mode we never want to start the UpdateGraph - sources.add(updateSource); - start(); + if (updateSource instanceof DynamicNode) { + ((DynamicNode) updateSource).setRefreshing(true); + } + return; } - } - - @Override - public void removeSource(@NotNull final Runnable updateSource) { - sources.remove(updateSource); - } - - /** - * Remove a collection of sources from the list of refreshing sources. - * - * @implNote This will not set the sources as {@link DynamicNode#setRefreshing(boolean) non-refreshing}. 
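With this refactor the cycle budget is adjustable at runtime, and the isCycleOnBudget override above is a simple comparison against the target. For example (a sketch; periodicGraph is illustrative, and it assumes the setter described by the "Set the target duration" javadoc keeps the name setTargetCycleDurationMillis):

    // Target a 500ms cycle; a 400ms cycle is then on budget.
    periodicGraph.setTargetCycleDurationMillis(500);
    final boolean onBudget = periodicGraph.isCycleOnBudget(TimeUnit.MILLISECONDS.toNanos(400)); // true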
- * @param sourcesToRemove The sources to remove from the list of refreshing sources - */ - public void removeSources(final Collection sourcesToRemove) { - sources.removeAll(sourcesToRemove); - } - - /** - * Return the number of valid sources. - * - * @return the number of valid sources - */ - public int sourceCount() { - return sources.size(); + super.addSource(updateSource); + start(); } /** @@ -699,20 +395,7 @@ public void addNotification(@NotNull final Notification notification) { if (notificationAdditionDelay > 0) { SleepUtil.sleep(notificationRandomizer.nextInt(notificationAdditionDelay)); } - if (notification.isTerminal()) { - synchronized (terminalNotifications) { - terminalNotifications.offer(notification); - } - } else { - logDependencies().append(Thread.currentThread().getName()).append(": Adding notification ") - .append(notification).endl(); - synchronized (pendingNormalNotifications) { - Assert.eq(logicalClock.currentState(), "logicalClock.currentState()", - LogicalClock.State.Updating, "LogicalClock.State.Updating"); - pendingNormalNotifications.offer(notification); - } - notificationProcessor.onNotificationAdded(); - } + super.addNotification(notification); } @Override @@ -720,50 +403,7 @@ public boolean maybeAddNotification(@NotNull final Notification notification, fi if (notificationAdditionDelay > 0) { SleepUtil.sleep(notificationRandomizer.nextInt(notificationAdditionDelay)); } - if (notification.isTerminal()) { - throw new IllegalArgumentException("Notification must not be terminal"); - } - logDependencies().append(Thread.currentThread().getName()).append(": Adding notification ").append(notification) - .append(" if step is ").append(deliveryStep).endl(); - final boolean added; - synchronized (pendingNormalNotifications) { - // Note that the clock is advanced to idle under the pendingNormalNotifications lock, after which point no - // further normal notifications will be processed on this cycle. - final long logicalClockValue = logicalClock.currentValue(); - if (LogicalClock.getState(logicalClockValue) == LogicalClock.State.Updating - && LogicalClock.getStep(logicalClockValue) == deliveryStep) { - pendingNormalNotifications.offer(notification); - added = true; - } else { - added = false; - } - } - if (added) { - notificationProcessor.onNotificationAdded(); - } - return added; - } - - @Override - public boolean satisfied(final long step) { - StepUpdater.checkForOlderStep(step, sourcesLastSatisfiedStep); - return sourcesLastSatisfiedStep == step; - } - - /** - * Enqueue a collection of notifications to be flushed. 
- * - * @param notifications The notification to enqueue - * - * @see #addNotification(Notification) - */ - @Override - public void addNotifications(@NotNull final Collection notifications) { - synchronized (pendingNormalNotifications) { - synchronized (terminalNotifications) { - notifications.forEach(this::addNotification); - } - } + return super.maybeAddNotification(notification, deliveryStep); } /** @@ -772,20 +412,15 @@ public void addNotifications(@NotNull final Collection n */ @Override public void requestRefresh() { + if (!running) { + throw new IllegalStateException("Cannot request refresh when UpdateGraph is no longer running."); + } refreshRequested.set(true); synchronized (refreshRequested) { refreshRequested.notify(); } } - /** - * @return Whether this UpdateGraph has a mechanism that supports refreshing - */ - @Override - public boolean supportsRefreshing() { - return true; - } - /** * Clear all monitored tables and enqueued notifications to support {@link #enableUnitTestMode() unit-tests}. * @@ -808,6 +443,7 @@ public void resetForUnitTests(final boolean after) { * @param notificationStartDelay Maximum randomized notification start delay * @param notificationAdditionDelay Maximum randomized notification addition delay */ + @TestUseOnly public void resetForUnitTests(boolean after, final boolean randomizedNotifications, final int seed, final int maxRandomizedThreadCount, final int notificationStartDelay, final int notificationAdditionDelay) { @@ -815,34 +451,15 @@ public void resetForUnitTests(boolean after, this.notificationRandomizer = new Random(seed); this.notificationAdditionDelay = notificationAdditionDelay; Assert.assertion(unitTestMode, "unitTestMode"); - sources.clear(); - notificationProcessor.shutdown(); - synchronized (pendingNormalNotifications) { - pendingNormalNotifications.clear(); - } - isUpdateThread.remove(); + + resetForUnitTests(after, errors); + if (randomizedNotifications) { notificationProcessor = makeRandomizedNotificationProcessor(notificationRandomizer, maxRandomizedThreadCount, notificationStartDelay); } else { notificationProcessor = makeNotificationProcessor(); } - synchronized (terminalNotifications) { - terminalNotifications.clear(); - } - logicalClock.resetForUnitTests(); - sourcesLastSatisfiedStep = logicalClock.currentStep(); - - refreshScope = null; - if (after) { - LivenessManager stackTop; - while ((stackTop = LivenessScopeStack.peek()) instanceof LivenessScope) { - LivenessScopeStack.pop((LivenessScope) stackTop); - } - CleanupReferenceProcessorInstance.resetAllForUnitTests(); - } - - ensureUnlocked("unit test reset thread", errors); if (refreshThread.isAlive()) { errors.add("UpdateGraph refreshThread isAlive"); @@ -872,7 +489,7 @@ public void resetForUnitTests(boolean after, } } - lock.reset(); + resetLock(); } /** @@ -924,10 +541,7 @@ private void startCycleForUnitTestsInternal(final boolean sourcesSatisfied) { @TestUseOnly public void markSourcesRefreshedForUnitTests() { Assert.assertion(unitTestMode, "unitTestMode"); - if (sourcesLastSatisfiedStep >= logicalClock.currentStep()) { - throw new IllegalStateException("Already marked sources as satisfied!"); - } - sourcesLastSatisfiedStep = logicalClock.currentStep(); + updateSourcesLastSatisfiedStep(true); } /** @@ -950,8 +564,9 @@ public void completeCycleForUnitTests() { private void completeCycleForUnitTests(boolean errorCaughtAndInFinallyBlock) { Assert.assertion(unitTestMode, "unitTestMode"); if (!errorCaughtAndInFinallyBlock) { - Assert.eq(sourcesLastSatisfiedStep, 
"sourcesLastSatisfiedStep", logicalClock.currentStep(), - "logicalClock.currentStep()"); + final long currentStep = logicalClock.currentStep(); + final boolean satisfied = satisfied(currentStep); + Assert.assertion(satisfied, "satisfied()", currentStep, "currentStep"); } try { unitTestRefreshThreadPool.submit(this::completeCycleForUnitTestsInternal).get(); @@ -974,7 +589,7 @@ private void completeCycleForUnitTestsInternal() { exclusiveLock().unlock(); isUpdateThread.remove(); }) { - flushNotificationsAndCompleteCycle(); + flushNotificationsAndCompleteCycle(false); } } @@ -1157,7 +772,7 @@ public Runnable flushAllNormalNotificationsForUnitTests(@NotNull final BooleanSu } /** - * If the run thread is waiting in {@link #flushNormalNotificationsAndCompleteCycle()} or + * If the run thread is waiting in flushNormalNotificationsAndCompleteCycle() or * {@link #flushAllNormalNotificationsForUnitTests(BooleanSupplier, long)}, wake it up. */ @TestUseOnly @@ -1166,210 +781,6 @@ public void wakeRefreshThreadForUnitTests() { notificationProcessor.onNotificationAdded(); } - /** - * Flush all non-terminal notifications, complete the logical clock update cycle, then flush all terminal - * notifications. - */ - private void flushNotificationsAndCompleteCycle() { - // We cannot proceed with normal notifications, nor are we satisfied, until all update source refresh - // notifications have been processed. Note that non-update source notifications that require dependency - // satisfaction are delivered first to the pendingNormalNotifications queue, and hence will not be processed - // until we advance to the flush* methods. - // TODO: If and when we properly integrate update sources into the dependency tracking system, we can - // discontinue this distinct phase, along with the requirement to treat the UpdateGraph itself as a Dependency. - // Until then, we must delay the beginning of "normal" notification processing until all update sources are - // done. See IDS-8039. - notificationProcessor.doAllWork(); - sourcesLastSatisfiedStep = logicalClock.currentStep(); - - flushNormalNotificationsAndCompleteCycle(); - flushTerminalNotifications(); - synchronized (pendingNormalNotifications) { - Assert.assertion(pendingNormalNotifications.isEmpty(), "pendingNormalNotifications.isEmpty()"); - } - } - - /** - * Flush all non-terminal {@link Notification notifications} from the queue. - */ - private void flushNormalNotificationsAndCompleteCycle() { - final IntrusiveDoublyLinkedQueue pendingToEvaluate = - new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance()); - while (true) { - final int outstandingCountAtStart = notificationProcessor.outstandingNotificationsCount(); - notificationProcessor.beforeNotificationsDrained(); - synchronized (pendingNormalNotifications) { - pendingToEvaluate.transferAfterTailFrom(pendingNormalNotifications); - if (outstandingCountAtStart == 0 && pendingToEvaluate.isEmpty()) { - // We complete the cycle here before releasing the lock on pendingNotifications, so that - // maybeAddNotification can detect scenarios where the notification cannot be delivered on the - // desired step. 
- logicalClock.completeUpdateCycle(); - break; - } - } - logDependencies().append(Thread.currentThread().getName()) - .append(": Notification queue size=").append(pendingToEvaluate.size()) - .append(", outstanding=").append(outstandingCountAtStart) - .endl(); - - boolean nothingBecameSatisfied = true; - for (final Iterator it = pendingToEvaluate.iterator(); it.hasNext();) { - final Notification notification = it.next(); - - Assert.eqFalse(notification.isTerminal(), "notification.isTerminal()"); - Assert.eqFalse(notification.mustExecuteWithUpdateGraphLock(), - "notification.mustExecuteWithUpdateGraphLock()"); - - final boolean satisfied = notification.canExecute(sourcesLastSatisfiedStep); - if (satisfied) { - nothingBecameSatisfied = false; - it.remove(); - logDependencies().append(Thread.currentThread().getName()) - .append(": Submitting to notification processor ").append(notification).endl(); - notificationProcessor.submit(notification); - } else { - logDependencies().append(Thread.currentThread().getName()).append(": Unmet dependencies for ") - .append(notification).endl(); - } - } - if (outstandingCountAtStart == 0 && nothingBecameSatisfied) { - throw new IllegalStateException( - "No outstanding notifications, yet the notification queue is not empty!"); - } - if (notificationProcessor.outstandingNotificationsCount() > 0) { - notificationProcessor.doWork(); - } - } - synchronized (pendingNormalNotifications) { - Assert.eqZero(pendingNormalNotifications.size() + pendingToEvaluate.size(), - "pendingNormalNotifications.size() + pendingToEvaluate.size()"); - } - } - - /** - * Flush all {@link Notification#isTerminal() terminal} {@link Notification notifications} from the queue. - * - * @implNote Any notification that may have been queued while the clock's state is Updating must be invoked during - * this cycle's Idle phase. - */ - private void flushTerminalNotifications() { - synchronized (terminalNotifications) { - for (final Iterator it = terminalNotifications.iterator(); it.hasNext();) { - final Notification notification = it.next(); - Assert.assertion(notification.isTerminal(), "notification.isTerminal()"); - - if (!notification.mustExecuteWithUpdateGraphLock()) { - it.remove(); - // for the single threaded queue case; this enqueues the notification; - // for the executor service case, this causes the notification to be kicked off - notificationProcessor.submit(notification); - } - } - } - - // run the notifications that must be run on this thread - while (true) { - final Notification notificationForThisThread; - synchronized (terminalNotifications) { - notificationForThisThread = terminalNotifications.poll(); - } - if (notificationForThisThread == null) { - break; - } - runNotification(notificationForThisThread); - } - - // We can not proceed until all of the terminal notifications have executed. - notificationProcessor.doAllWork(); - } - - /** - * Abstract away the details of satisfied notification processing. - */ - private interface NotificationProcessor { - - /** - * Submit a satisfied notification for processing. - * - * @param notification The notification - */ - void submit(@NotNull NotificationQueue.Notification notification); - - /** - * Submit a queue of satisfied notification for processing. - * - * @param notifications The queue of notifications to - * {@link IntrusiveDoublyLinkedQueue#transferAfterTailFrom(IntrusiveDoublyLinkedQueue) transfer} from. 
- * Will become empty as a result of successful completion - */ - void submitAll(@NotNull IntrusiveDoublyLinkedQueue notifications); - - /** - * Query the number of outstanding notifications submitted to this processor. - * - * @return The number of outstanding notifications - */ - int outstandingNotificationsCount(); - - /** - *
    - * Do work (or in the multi-threaded case, wait for some work to have happened). - *
    - * Caller must know that work is outstanding. - */ - void doWork(); - - /** - * Do all outstanding work. - */ - void doAllWork(); - - /** - * Shutdown this notification processor (for unit tests). - */ - void shutdown(); - - /** - * Called after a pending notification is added. - */ - void onNotificationAdded(); - - /** - * Called before pending notifications are drained. - */ - void beforeNotificationsDrained(); - } - - private void runNotification(@NotNull final Notification notification) { - logDependencies().append(Thread.currentThread().getName()).append(": Executing ").append(notification).endl(); - - final LivenessScope scope; - final boolean releaseScopeOnClose; - if (notification.isTerminal()) { - // Terminal notifications can't create new notifications, so they have no need to participate in a shared - // run scope. - scope = new LivenessScope(); - releaseScopeOnClose = true; - } else { - // Non-terminal notifications must use a shared run scope. - Assert.neqNull(refreshScope, "refreshScope"); - scope = refreshScope == LivenessScopeStack.peek() ? null : refreshScope; - releaseScopeOnClose = false; - } - - try (final SafeCloseable ignored = scope == null ? null : LivenessScopeStack.open(scope, releaseScopeOnClose)) { - notification.run(); - logDependencies().append(Thread.currentThread().getName()).append(": Completed ").append(notification) - .endl(); - } catch (final Exception e) { - log.error().append(Thread.currentThread().getName()) - .append(": Exception while executing PeriodicUpdateGraph notification: ").append(notification) - .append(": ").append(e).endl(); - ProcessEnvironment.getGlobalFatalErrorReporter() - .report("Exception while processing PeriodicUpdateGraph notification", e); - } - } private class ConcurrentNotificationProcessor implements NotificationProcessor { @@ -1511,100 +922,6 @@ int threadCount() { } } - private static final class PoisonedNotificationProcessor implements NotificationProcessor { - - private static final NotificationProcessor INSTANCE = new PoisonedNotificationProcessor(); - - private static RuntimeException notYetStarted() { - return new IllegalStateException("PeriodicUpdateGraph has not been started yet"); - } - - private PoisonedNotificationProcessor() {} - - @Override - public void submit(@NotNull Notification notification) { - throw notYetStarted(); - } - - @Override - public void submitAll(@NotNull IntrusiveDoublyLinkedQueue notifications) { - throw notYetStarted(); - } - - @Override - public int outstandingNotificationsCount() { - throw notYetStarted(); - } - - @Override - public void doWork() { - throw notYetStarted(); - } - - @Override - public void doAllWork() { - throw notYetStarted(); - } - - @Override - public void shutdown() {} - - @Override - public void onNotificationAdded() { - throw notYetStarted(); - } - - @Override - public void beforeNotificationsDrained() { - throw notYetStarted(); - } - } - - private class QueueNotificationProcessor implements NotificationProcessor { - - final IntrusiveDoublyLinkedQueue satisfiedNotifications = - new IntrusiveDoublyLinkedQueue<>(IntrusiveDoublyLinkedNode.Adapter.getInstance()); - - @Override - public void submit(@NotNull final Notification notification) { - satisfiedNotifications.offer(notification); - } - - @Override - public void submitAll(@NotNull IntrusiveDoublyLinkedQueue notifications) { - satisfiedNotifications.transferAfterTailFrom(notifications); - } - - @Override - public int outstandingNotificationsCount() { - return satisfiedNotifications.size(); - } - - @Override - 
public void doWork() { - Notification satisfiedNotification; - while ((satisfiedNotification = satisfiedNotifications.poll()) != null) { - runNotification(satisfiedNotification); - } - } - - @Override - public void doAllWork() { - doWork(); - } - - @Override - public void shutdown() { - satisfiedNotifications.clear(); - } - - @Override - public void onNotificationAdded() {} - - @Override - public void beforeNotificationsDrained() {} - } - @TestUseOnly private class ControlledNotificationProcessor implements NotificationProcessor { @@ -1667,44 +984,32 @@ private boolean blockUntilNotificationAdded(final long nanosToWait) { } } - private static LogEntry appendAsMillisFromNanos(final LogEntry entry, final long nanos) { - if (nanos > 0) { - return entry.appendDouble(nanos / 1_000_000.0, 3); - } - return entry.append(0); - } /** - * Iterate over all monitored tables and run them. This method also ensures that the loop runs no faster than - * {@link #getTargetCycleDurationMillis() minimum cycle time}. + * Iterate over all monitored tables and run them. + * + *
    + * This method also ensures that the loop runs no faster than {@link #getTargetCycleDurationMillis() minimum cycle + * time}. + *
    */ - private void refreshTablesAndFlushNotifications() { + @Override + void refreshTablesAndFlushNotifications() { final long startTimeNanos = System.nanoTime(); - jvmIntrospectionContext.startSample(); - if (sources.isEmpty()) { - exclusiveLock().doLocked(this::flushTerminalNotifications); - } else { - currentCycleLockWaitTotalNanos = currentCycleYieldTotalNanos = currentCycleSleepTotalNanos = 0L; - - ScheduledFuture watchdogFuture = null; - - final long localWatchdogMillis = watchDogMillis; - final LongConsumer localWatchdogTimeoutProcedure = watchDogTimeoutProcedure; - if ((localWatchdogMillis > 0) && (localWatchdogTimeoutProcedure != null)) { - watchdogFuture = watchdogScheduler.schedule( - () -> localWatchdogTimeoutProcedure.accept(localWatchdogMillis), - localWatchdogMillis, MILLISECONDS); - } + ScheduledFuture watchdogFuture = null; + final long localWatchdogMillis = watchDogMillis; + final LongConsumer localWatchdogTimeoutProcedure = watchDogTimeoutProcedure; + if ((localWatchdogMillis > 0) && (localWatchdogTimeoutProcedure != null)) { + watchdogFuture = watchdogScheduler.schedule( + () -> localWatchdogTimeoutProcedure.accept(localWatchdogMillis), + localWatchdogMillis, MILLISECONDS); + } - refreshAllTables(); + super.refreshTablesAndFlushNotifications(); - if (watchdogFuture != null) { - watchdogFuture.cancel(true); - } - jvmIntrospectionContext.endSample(); - final long cycleTimeNanos = System.nanoTime() - startTimeNanos; - computeStatsAndLogCycle(cycleTimeNanos); + if (watchdogFuture != null) { + watchdogFuture.cancel(true); } if (interCycleYield) { @@ -1714,72 +1019,6 @@ private void refreshTablesAndFlushNotifications() { waitForNextCycle(startTimeNanos); } - private void computeStatsAndLogCycle(final long cycleTimeNanos) { - final long safePointPauseTimeMillis = jvmIntrospectionContext.deltaSafePointPausesTimeMillis(); - accumulatedCycleStats.accumulate( - getTargetCycleDurationMillis(), - cycleTimeNanos, - jvmIntrospectionContext.deltaSafePointPausesCount(), - safePointPauseTimeMillis); - if (cycleTimeNanos >= minimumCycleDurationToLogNanos) { - if (suppressedCycles > 0) { - logSuppressedCycles(); - } - final double cycleTimeMillis = cycleTimeNanos / 1_000_000.0; - LogEntry entry = log.info() - .append("Update Graph Processor cycleTime=").appendDouble(cycleTimeMillis, 3); - if (jvmIntrospectionContext.hasSafePointData()) { - final long safePointSyncTimeMillis = jvmIntrospectionContext.deltaSafePointSyncTimeMillis(); - entry = entry - .append("ms, safePointTime=") - .append(safePointPauseTimeMillis) - .append("ms, safePointTimePct="); - if (safePointPauseTimeMillis > 0 && cycleTimeMillis > 0.0) { - final double safePointTimePct = 100.0 * safePointPauseTimeMillis / cycleTimeMillis; - entry = entry.appendDouble(safePointTimePct, 2); - } else { - entry = entry.append("0"); - } - entry = entry.append("%, safePointSyncTime=").append(safePointSyncTimeMillis); - } - entry = entry.append("ms, lockWaitTime="); - entry = appendAsMillisFromNanos(entry, currentCycleLockWaitTotalNanos); - entry = entry.append("ms, yieldTime="); - entry = appendAsMillisFromNanos(entry, currentCycleSleepTotalNanos); - entry = entry.append("ms, sleepTime="); - entry = appendAsMillisFromNanos(entry, currentCycleSleepTotalNanos); - entry.append("ms").endl(); - return; - } - if (cycleTimeNanos > 0) { - ++suppressedCycles; - suppressedCyclesTotalNanos += cycleTimeNanos; - suppressedCyclesTotalSafePointTimeMillis += safePointPauseTimeMillis; - if (suppressedCyclesTotalNanos >= minimumCycleDurationToLogNanos) { 
- logSuppressedCycles(); - } - } - } - - private void logSuppressedCycles() { - LogEntry entry = log.info() - .append("Minimal Update Graph Processor cycle times: ") - .appendDouble((double) (suppressedCyclesTotalNanos) / 1_000_000.0, 3).append("ms / ") - .append(suppressedCycles).append(" cycles = ") - .appendDouble( - (double) suppressedCyclesTotalNanos / (double) suppressedCycles / 1_000_000.0, 3) - .append("ms/cycle average)"); - if (jvmIntrospectionContext.hasSafePointData()) { - entry = entry - .append(", safePointTime=") - .append(suppressedCyclesTotalSafePointTimeMillis) - .append("ms"); - } - entry.endl(); - suppressedCycles = suppressedCyclesTotalNanos = 0; - suppressedCyclesTotalSafePointTimeMillis = 0; - } - /** *
    * Ensure that at least {@link #getTargetCycleDurationMillis() minCycleTime} has passed before returning. @@ -1804,15 +1043,7 @@ private void waitForNextCycle(final long startTimeNanos) { expectedEndTimeNanos = Math.max(expectedEndTimeNanos, nowNanos + MILLISECONDS.toNanos(minimumInterCycleSleep)); } - if (expectedEndTimeNanos >= nextUpdatePerformanceTrackerFlushTimeNanos) { - nextUpdatePerformanceTrackerFlushTimeNanos = - nowNanos + MILLISECONDS.toNanos(UpdatePerformanceTracker.REPORT_INTERVAL_MILLIS); - try { - updatePerformanceTracker.flush(); - } catch (Exception err) { - log.error().append("Error flushing UpdatePerformanceTracker: ").append(err).endl(); - } - } + maybeFlushUpdatePerformance(nowNanos, expectedEndTimeNanos); waitForEndTime(expectedEndTimeNanos); } @@ -1848,98 +1079,10 @@ private void waitForEndTime(final long expectedEndTimeNanos) { } } - /** - * Refresh all the update sources within an {@link LogicalClock update cycle} after the UpdateGraph has been locked. - * At the end of the updates all {@link Notification notifications} will be flushed. - */ - private void refreshAllTables() { + @Override + void refreshAllTables() { refreshRequested.set(false); - doRefresh(() -> sources.forEach((final UpdateSourceRefreshNotification updateSourceNotification, - final Runnable unused) -> notificationProcessor.submit(updateSourceNotification))); - } - - /** - * Perform a run cycle, using {@code refreshFunction} to ensure the desired update sources are refreshed at the - * start. - * - * @param refreshFunction Function to submit one or more {@link UpdateSourceRefreshNotification update source - * refresh notifications} to the {@link NotificationProcessor notification processor} or run them directly. - */ - private void doRefresh(@NotNull final Runnable refreshFunction) { - final long lockStartTimeNanos = System.nanoTime(); - exclusiveLock().doLocked(() -> { - currentCycleLockWaitTotalNanos += System.nanoTime() - lockStartTimeNanos; - synchronized (pendingNormalNotifications) { - Assert.eqZero(pendingNormalNotifications.size(), "pendingNormalNotifications.size()"); - } - Assert.eqNull(refreshScope, "refreshScope"); - refreshScope = new LivenessScope(); - final long updatingCycleValue = logicalClock.startUpdateCycle(); - logDependencies().append("Beginning PeriodicUpdateGraph cycle step=") - .append(logicalClock.currentStep()).endl(); - try (final SafeCloseable ignored = LivenessScopeStack.open(refreshScope, true)) { - refreshFunction.run(); - flushNotificationsAndCompleteCycle(); - } finally { - logicalClock.ensureUpdateCycleCompleted(updatingCycleValue); - refreshScope = null; - } - logDependencies().append("Completed PeriodicUpdateGraph cycle step=") - .append(logicalClock.currentStep()).endl(); - }); - } - - /** - * Re-usable class for adapting update sources to {@link Notification}s. 
- */ - private static final class UpdateSourceRefreshNotification extends AbstractNotification - implements SimpleReference { - - private final WeakReference updateSourceRef; - - private UpdateSourceRefreshNotification(@NotNull final Runnable updateSource) { - super(false); - updateSourceRef = new WeakReference<>(updateSource); - } - - @Override - public LogOutput append(@NotNull final LogOutput logOutput) { - return logOutput.append("UpdateSourceRefreshNotification{").append(System.identityHashCode(this)) - .append(", for UpdateSource{").append(System.identityHashCode(get())).append("}}"); - } - - @Override - public boolean canExecute(final long step) { - return true; - } - - @Override - public void run() { - final Runnable updateSource = updateSourceRef.get(); - if (updateSource == null) { - return; - } - updateSource.run(); - } - - @Override - public Runnable get() { - // NB: Arguably we should make get() and clear() synchronized. - return updateSourceRef.get(); - } - - @Override - public void clear() { - updateSourceRef.clear(); - } - } - - public LogEntry logDependencies() { - if (printDependencyInformation) { - return log.info(); - } else { - return LogEntry.NULL; - } + super.refreshAllTables(); } private class NotificationProcessorThreadFactory extends NamingThreadFactory { @@ -1956,26 +1099,6 @@ public Thread newThread(@NotNull final Runnable r) { } } - @TestUseOnly - private void ensureUnlocked(@NotNull final String callerDescription, @Nullable final List errors) { - if (exclusiveLock().isHeldByCurrentThread()) { - if (errors != null) { - errors.add(callerDescription + ": UpdateGraph exclusive lock is still held"); - } - while (exclusiveLock().isHeldByCurrentThread()) { - exclusiveLock().unlock(); - } - } - if (sharedLock().isHeldByCurrentThread()) { - if (errors != null) { - errors.add(callerDescription + ": UpdateGraph shared lock is still held"); - } - while (sharedLock().isHeldByCurrentThread()) { - sharedLock().unlock(); - } - } - } - private ExecutorService makeUnitTestRefreshExecutor() { return Executors.newFixedThreadPool(1, new UnitTestThreadFactory()); } @@ -2024,12 +1147,8 @@ private void configureUnitTestRefreshThread() { ExecutionContext.newBuilder().setUpdateGraph(this).build().open(); } - public void takeAccumulatedCycleStats(AccumulatedCycleStats updateGraphAccumCycleStats) { - accumulatedCycleStats.take(updateGraphAccumCycleStats); - } - public static PeriodicUpdateGraph getInstance(final String name) { - return INSTANCES.get(name); + return BaseUpdateGraph.getInstance(name).cast(); } public static final class Builder { @@ -2037,8 +1156,7 @@ public static final class Builder { Configuration.getInstance().getBooleanWithDefault(ALLOW_UNIT_TEST_MODE_PROP, false); private long targetCycleDurationMillis = Configuration.getInstance().getIntegerWithDefault(DEFAULT_TARGET_CYCLE_DURATION_MILLIS_PROP, 1000); - private long minimumCycleDurationToLogNanos = MILLISECONDS.toNanos( - Configuration.getInstance().getIntegerWithDefault(MINIMUM_CYCLE_DURATION_TO_LOG_MILLIS_PROP, 25)); + private long minimumCycleDurationToLogNanos = DEFAULT_MINIMUM_CYCLE_DURATION_TO_LOG_NANOSECONDS; private String name; private int numUpdateThreads = -1; @@ -2089,18 +1207,10 @@ public Builder numUpdateThreads(int numUpdateThreads) { * name provided to this builder. 
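[Editor's example] As a usage-level illustration of the registry semantics that build() and existingOrBuild() now delegate to BaseUpdateGraph: a hedged sketch, assuming the newBuilder factory this class exposes; the graph name and thread count are invented.

```java
import io.deephaven.engine.updategraph.impl.PeriodicUpdateGraph;

// build() registers a graph under its name (and now throws if any UpdateGraph
// with that name already exists), while existingOrBuild() returns the
// registered instance, constructing it only on first use.
public class UpdateGraphRegistryExample {
    public static void main(String[] args) {
        final PeriodicUpdateGraph graph = PeriodicUpdateGraph.newBuilder("ExampleGraph")
                .numUpdateThreads(2)
                .existingOrBuild();

        // A second lookup under the same name yields the same instance.
        final PeriodicUpdateGraph again = PeriodicUpdateGraph.newBuilder("ExampleGraph")
                .existingOrBuild();
        assert graph == again;
    }
}
```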
* * @return the new PeriodicUpdateGraph - * @throws IllegalStateException if a PeriodicUpdateGraph with the provided name already exists + * @throws IllegalStateException if an UpdateGraph with the provided name already exists */ public PeriodicUpdateGraph build() { - synchronized (INSTANCES) { - if (INSTANCES.containsKey(name)) { - throw new IllegalStateException( - String.format("PeriodicUpdateGraph with name %s already exists", name)); - } - final PeriodicUpdateGraph newUpdateGraph = construct(); - INSTANCES.put(name, newUpdateGraph); - return newUpdateGraph; - } + return BaseUpdateGraph.buildOrThrow(name, this::construct); } /** @@ -2108,9 +1218,10 @@ public PeriodicUpdateGraph build() { * new PeriodicUpdateGraph. * * @return the PeriodicUpdateGraph + * @throws ClassCastException if the existing graph is not a PeriodicUpdateGraph */ public PeriodicUpdateGraph existingOrBuild() { - return INSTANCES.putIfAbsent(name, n -> construct()); + return BaseUpdateGraph.existingOrBuild(name, this::construct).cast(); } private PeriodicUpdateGraph construct() { diff --git a/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PoisonedNotificationProcessor.java b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PoisonedNotificationProcessor.java new file mode 100644 index 00000000000..e6590d9285b --- /dev/null +++ b/engine/table/src/main/java/io/deephaven/engine/updategraph/impl/PoisonedNotificationProcessor.java @@ -0,0 +1,58 @@ +package io.deephaven.engine.updategraph.impl; + +import io.deephaven.engine.updategraph.NotificationQueue; +import io.deephaven.util.datastructures.linked.IntrusiveDoublyLinkedQueue; +import org.jetbrains.annotations.NotNull; + +/** + * The poisoned notification processor is used when an update graph has not yet been started, throwing an + * IllegalStateException on all operations. 
+ */ +final class PoisonedNotificationProcessor implements BaseUpdateGraph.NotificationProcessor { + + static final BaseUpdateGraph.NotificationProcessor INSTANCE = new PoisonedNotificationProcessor(); + + private static RuntimeException notYetStarted() { + return new IllegalStateException("UpdateGraph has not been started yet"); + } + + private PoisonedNotificationProcessor() {} + + @Override + public void submit(@NotNull NotificationQueue.Notification notification) { + throw notYetStarted(); + } + + @Override + public void submitAll(@NotNull IntrusiveDoublyLinkedQueue notifications) { + throw notYetStarted(); + } + + @Override + public int outstandingNotificationsCount() { + throw notYetStarted(); + } + + @Override + public void doWork() { + throw notYetStarted(); + } + + @Override + public void doAllWork() { + throw notYetStarted(); + } + + @Override + public void shutdown() {} + + @Override + public void onNotificationAdded() { + throw notYetStarted(); + } + + @Override + public void beforeNotificationsDrained() { + throw notYetStarted(); + } +} diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/CapturingUpdateGraph.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/CapturingUpdateGraph.java index c70555e5a06..63e98610be9 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/CapturingUpdateGraph.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/CapturingUpdateGraph.java @@ -163,4 +163,9 @@ public void runWithinUnitTestCycle( final boolean satisfied) throws T { delegate.runWithinUnitTestCycle(runnable, satisfied); } + + @Override + public void stop() { + delegate.stop(); + } } diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableTest.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableTest.java index 5648009a55f..1a3fb62bf8b 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableTest.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/QueryTableTest.java @@ -3644,5 +3644,8 @@ public void removeSource(@NotNull Runnable updateSource) {} public void requestRefresh() { throw new UnsupportedOperationException(); } + + @Override + public void stop() {} } } diff --git a/engine/table/src/test/java/io/deephaven/engine/updategraph/impl/TestEventDrivenUpdateGraph.java b/engine/table/src/test/java/io/deephaven/engine/updategraph/impl/TestEventDrivenUpdateGraph.java new file mode 100644 index 00000000000..7ad90534b13 --- /dev/null +++ b/engine/table/src/test/java/io/deephaven/engine/updategraph/impl/TestEventDrivenUpdateGraph.java @@ -0,0 +1,245 @@ +package io.deephaven.engine.updategraph.impl; + +import io.deephaven.api.agg.Aggregation; +import io.deephaven.configuration.DataDir; +import io.deephaven.engine.context.ExecutionContext; +import io.deephaven.engine.context.QueryCompiler; +import io.deephaven.engine.rowset.RowSet; +import io.deephaven.engine.rowset.RowSetFactory; +import io.deephaven.engine.rowset.TrackingRowSet; +import io.deephaven.engine.table.ColumnSource; +import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.impl.QueryTable; +import io.deephaven.engine.table.impl.perf.UpdatePerformanceTracker; +import io.deephaven.engine.table.impl.sources.LongSingleValueSource; +import io.deephaven.engine.testutil.TstUtils; +import io.deephaven.engine.updategraph.UpdateGraph; +import io.deephaven.engine.util.TableTools; +import io.deephaven.util.SafeCloseable; +import io.deephaven.util.annotations.ReflexiveUse; 
+import junit.framework.TestCase; +import org.junit.*; + +import java.nio.file.Path; +import java.util.Collections; + +import static io.deephaven.engine.util.TableTools.*; +import static org.junit.Assert.assertEquals; + +public class TestEventDrivenUpdateGraph { + EventDrivenUpdateGraph defaultUpdateGraph; + + @Before + public void before() { + // the default update is necessary for the update performance tracker + clearUpdateGraphInstances(); + UpdatePerformanceTracker.resetForUnitTests(); + defaultUpdateGraph = EventDrivenUpdateGraph.newBuilder(PeriodicUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME).build(); + } + + @After + public void after() { + clearUpdateGraphInstances(); + UpdatePerformanceTracker.resetForUnitTests(); + } + + private static void clearUpdateGraphInstances() { + BaseUpdateGraph.removeInstance(PeriodicUpdateGraph.DEFAULT_UPDATE_GRAPH_NAME); + BaseUpdateGraph.removeInstance("TestEDUG"); + BaseUpdateGraph.removeInstance("TestEDUG1"); + BaseUpdateGraph.removeInstance("TestEDUG2"); + } + + /** + * QueryTable that adds one row per cycle. + */ + final static class SourceThatRefreshes extends QueryTable implements Runnable { + public SourceThatRefreshes(UpdateGraph updateGraph) { + super(RowSetFactory.empty().toTracking(), Collections.emptyMap()); + setAttribute(Table.APPEND_ONLY_TABLE_ATTRIBUTE, Boolean.TRUE); + updateGraph.addSource(this); + } + + @Override + public void run() { + final RowSet added; + if (getRowSet().isEmpty()) { + added = RowSetFactory.fromKeys(0); + } else { + added = RowSetFactory.fromKeys(getRowSet().lastRowKey() + 1); + } + getRowSet().writableCast().insert(added); + notifyListeners(added, RowSetFactory.empty(), RowSetFactory.empty()); + } + } + + /** + * QueryTable that modifies its single row on each cycle. + */ + final static class SourceThatModifiesItself extends QueryTable implements Runnable { + final LongSingleValueSource svcs; + + public SourceThatModifiesItself(UpdateGraph updateGraph) { + super(RowSetFactory.fromKeys(42).toTracking(), Collections.singletonMap("V", new LongSingleValueSource())); + svcs = (LongSingleValueSource) getColumnSource("V", long.class); + svcs.startTrackingPrevValues(); + updateGraph.addSource(this); + svcs.set(0L); + } + + @Override + public void run() { + svcs.set(svcs.getLong(0) + 1); + notifyListeners(RowSetFactory.empty(), RowSetFactory.empty(), getRowSet().copy()); + } + } + + private QueryCompiler compilerForUnitTests() { + final Path queryCompilerDir = DataDir.get() + .resolve("io.deephaven.engine.updategraph.impl.TestEventDrivenUpdateGraph.compilerForUnitTests"); + + return QueryCompiler.create(queryCompilerDir.toFile(), getClass().getClassLoader()); + } + + @Test + public void testSimpleAdd() { + final EventDrivenUpdateGraph eventDrivenUpdateGraph = EventDrivenUpdateGraph.newBuilder("TestEDUG").build(); + + final ExecutionContext context = ExecutionContext.newBuilder().setUpdateGraph(eventDrivenUpdateGraph) + .emptyQueryScope().newQueryLibrary().setQueryCompiler(compilerForUnitTests()).build(); + try (final SafeCloseable ignored = context.open()) { + final SourceThatRefreshes sourceThatRefreshes = new SourceThatRefreshes(eventDrivenUpdateGraph); + final Table updated = + eventDrivenUpdateGraph.sharedLock().computeLocked(() -> sourceThatRefreshes.update("X=i")); + + int steps = 0; + do { + TestCase.assertEquals(steps, updated.size()); + eventDrivenUpdateGraph.requestRefresh(); + } while (steps++ < 100); + TestCase.assertEquals(steps, updated.size()); + } + } + + @Test + public void testSimpleModify() { + final 
EventDrivenUpdateGraph eventDrivenUpdateGraph = new EventDrivenUpdateGraph.Builder("TestEDUG").build(); + + final ExecutionContext context = ExecutionContext.newBuilder().setUpdateGraph(eventDrivenUpdateGraph) + .emptyQueryScope().newQueryLibrary().setQueryCompiler(compilerForUnitTests()).build(); + try (final SafeCloseable ignored = context.open()) { + final SourceThatModifiesItself modifySource = new SourceThatModifiesItself(eventDrivenUpdateGraph); + final Table updated = + eventDrivenUpdateGraph.sharedLock().computeLocked(() -> modifySource.update("X=2 * V")); + + final ColumnSource xcs = updated.getColumnSource("X"); + + int steps = 0; + do { + TestCase.assertEquals(1, updated.size()); + eventDrivenUpdateGraph.requestRefresh(); + + TableTools.showWithRowSet(modifySource); + + final TrackingRowSet rowSet = updated.getRowSet(); + System.out.println("Step = " + steps); + final long xv = xcs.getLong(rowSet.firstRowKey()); + TestCase.assertEquals(2L * (steps + 1), xv); + } while (steps++ < 100); + TestCase.assertEquals(1, updated.size()); + } + } + + @Test + public void testUpdatePerformanceTracker() { + final Table upt = UpdatePerformanceTracker.getQueryTable(); + + + final EventDrivenUpdateGraph eventDrivenUpdateGraph1 = EventDrivenUpdateGraph.newBuilder("TestEDUG1").build(); + final EventDrivenUpdateGraph eventDrivenUpdateGraph2 = EventDrivenUpdateGraph.newBuilder("TestEDUG2").build(); + + // first empty flush + eventDrivenUpdateGraph1.requestRefresh(); + eventDrivenUpdateGraph2.requestRefresh(); + + final long start = System.currentTimeMillis(); + + final int count1 = 10; + final int count2 = 20; + final int time1 = 10; + final int time2 = 5; + + // the work we care about + final Object ref1 = doWork(eventDrivenUpdateGraph1, time1, count1 - 1); + final Object ref2 = doWork(eventDrivenUpdateGraph2, time2, count2 - 1); + + // force a flush + eventDrivenUpdateGraph1.resetNextFlushTime(); + eventDrivenUpdateGraph2.resetNextFlushTime(); + eventDrivenUpdateGraph1.requestRefresh(); + eventDrivenUpdateGraph2.requestRefresh(); + + defaultUpdateGraph.requestRefresh(); + + final Table inRange; + final ExecutionContext context = ExecutionContext.newBuilder().setUpdateGraph(defaultUpdateGraph) + .emptyQueryScope().newQueryLibrary().setQueryCompiler(compilerForUnitTests()).build(); + try (final SafeCloseable ignored = context.open()) { + final Table uptAgged = upt.where("!isNull(EntryId)").aggBy( + Aggregation.AggSum("UsageNanos", "InvocationCount", "RowsModified"), + "UpdateGraph", "EntryId"); + assertEquals(defaultUpdateGraph, uptAgged.getUpdateGraph()); + inRange = defaultUpdateGraph.sharedLock().computeLocked(() -> uptAgged.update( + "EIUExpectedMillis = UpdateGraph==`TestEDUG1` ? 
" + time1 + " : " + time2, + "TotalExpectedTime=InvocationCount * EIUExpectedMillis * 1_000_000L", + "InRange=(UsageNanos > 0.9 * TotalExpectedTime) && (UsageNanos < 1.5 * TotalExpectedTime)")); + } + TableTools.show(inRange); + + final Table compare = + inRange.dropColumns("EntryId", "UsageNanos", "EIUExpectedMillis", "TotalExpectedTime"); + TableTools.show(compare); + + final Table expect = TableTools.newTable(stringCol("UpdateGraph", "TestEDUG1", "TestEDUG2"), + longCol("InvocationCount", count1, count2), + longCol("RowsModified", count1, count2), booleanCol("InRange", true, true)); + TstUtils.assertTableEquals(expect, compare); + } + + @ReflexiveUse(referrers = "TestEventDrivenUpdateGraph") + static public T sleepValue(long duration, T retVal) { + final Object blech = new Object(); + // noinspection SynchronizationOnLocalVariableOrMethodParameter + synchronized (blech) { + try { + final long milliSeconds = duration / 1_000_000L; + final int nanos = (int) (duration % 1_000_000L); + blech.wait(milliSeconds, nanos); + } catch (InterruptedException ignored) { + } + } + return retVal; + } + + private Object doWork(final EventDrivenUpdateGraph eventDrivenUpdateGraph, final int durationMillis, + final int steps) { + final ExecutionContext context = ExecutionContext.newBuilder().setUpdateGraph(eventDrivenUpdateGraph) + .emptyQueryScope().newQueryLibrary().setQueryCompiler(compilerForUnitTests()).build(); + try (final SafeCloseable ignored = context.open()) { + final SourceThatModifiesItself modifySource = new SourceThatModifiesItself(eventDrivenUpdateGraph); + final Table updated = + eventDrivenUpdateGraph.sharedLock().computeLocked(() -> modifySource.update("X=" + + getClass().getName() + ".sleepValue(" + (1000L * 1000L * durationMillis) + ", 2 * V)")); + + int step = 0; + do { + TestCase.assertEquals(1, updated.size()); + eventDrivenUpdateGraph.requestRefresh(); + } while (++step < steps); + TestCase.assertEquals(1, updated.size()); + + // so that we do not lose the reference + return updated; + } + } +} diff --git a/engine/updategraph/src/main/java/io/deephaven/engine/updategraph/UpdateGraph.java b/engine/updategraph/src/main/java/io/deephaven/engine/updategraph/UpdateGraph.java index e0227c50a8c..67cd6bf3add 100644 --- a/engine/updategraph/src/main/java/io/deephaven/engine/updategraph/UpdateGraph.java +++ b/engine/updategraph/src/main/java/io/deephaven/engine/updategraph/UpdateGraph.java @@ -179,12 +179,18 @@ default void checkInitiateSerialTableOperation() { return; } throw new IllegalStateException(String.format( - "May not initiate serial table operations: exclusiveLockHeld=%s, sharedLockHeld=%s, currentThreadProcessesUpdates=%s", + "May not initiate serial table operations for update graph %s: exclusiveLockHeld=%s, sharedLockHeld=%s, currentThreadProcessesUpdates=%s", + getName(), exclusiveLock().isHeldByCurrentThread(), sharedLock().isHeldByCurrentThread(), currentThreadProcessesUpdates())); } + /** + * Attempt to stop this update graph, and cease processing further notifications. + */ + void stop(); + // endregion thread control // region refresh control From bc352c7984b70906862c81f17e73c33cd59c2ebe Mon Sep 17 00:00:00 2001 From: Cristian Ferretti <37232625+jcferretti@users.noreply.github.com> Date: Thu, 7 Dec 2023 21:55:09 -0500 Subject: [PATCH 08/25] Add a note in C++ client's README.md about build-dependencies.sh version. 
(#4926)
---
 cpp-client/README.md | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/cpp-client/README.md b/cpp-client/README.md
index 0130532bfdd..f44271712ee 100644
--- a/cpp-client/README.md
+++ b/cpp-client/README.md
@@ -34,10 +34,20 @@ on them anymore so we do not guarantee they are current for those platforms.
 6. Build and install dependencies for Deephaven C++ client.
 
-   Get the `build-dependencies.sh` script from Deephaven's base images repository
-   at the correct version.
-   You can download it directly from the link
+   Get the `build-dependencies.sh` script from Deephaven's base images repository.
+
+   ***Note you need the right version of `build-dependencies.sh` matching
+   your sources***.
+
+   The link in the paragraph that follows points to a specific
+   version that works with the code this README.md file accompanies;
+   if you are reading a different version of the README.md than
+   the source version you will be trying to compile, go back
+   to the right `README.md` now.
+
+   Download `build-dependencies.sh` directly from
    https://github.com/deephaven/deephaven-base-images/raw/47f51e769612785c6f320302a3f4f52bc0cff187/cpp-client/build-dependencies.sh
+
   (this script is also used from our automated tools,
   to generate a docker image to support test runs;
   that's why it lives in a separate repo).
  The script downloads, builds and installs the dependent libraries

From 160760690998bb67ebe71e5977c12223a5071d58 Mon Sep 17 00:00:00 2001
From: "Charles P. Wright" 
Date: Fri, 8 Dec 2023 13:35:46 -0500
Subject: [PATCH 09/25] Simplify Input Table Interface. (#4923)

Removes the InputTableEnumGetter and InputTableRowSetter interfaces, which
MutableInputTable extended but which are not used by web.

There have been the following class renames:
io.deephaven.engine.util.config.MutableInputTable -> io.deephaven.engine.util.input.InputTableUpdater
KeyedArrayBackedMutableTable -> io.deephaven.engine.table.impl.util.KeyedArrayBackedInputTable
AppendOnlyArrayBackedMutableTable -> io.deephaven.engine.table.impl.util.AppendOnlyArrayBackedInputTable

Additionally, the MutableInputTable interface, KeyedArrayBackedMutableTable, and AppendOnlyArrayBackedMutableTable no longer have parameters for enum values. The "allowEdits" parameter has been removed from the interface and all implementations. The delete method that takes a rowSet has been removed.
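[Editor's example] To make the renamed surface concrete, here is a hedged sketch of what a caller looks like after this commit; the class name, column names, and values are invented for illustration, and it assumes an ExecutionContext with a running update graph is already installed on the calling thread.

```java
import io.deephaven.engine.table.ColumnDefinition;
import io.deephaven.engine.table.Table;
import io.deephaven.engine.table.TableDefinition;
import io.deephaven.engine.table.impl.util.KeyedArrayBackedInputTable;
import io.deephaven.engine.util.TableTools;
import io.deephaven.engine.util.input.InputTableUpdater;

import java.io.IOException;

// Hypothetical caller of the renamed API.
public class InputTableRenameExample {
    public static void main(String[] args) throws IOException {
        final TableDefinition definition = TableDefinition.of(
                ColumnDefinition.ofString("Sym"),
                ColumnDefinition.ofDouble("Price"));

        // Formerly KeyedArrayBackedMutableTable.make(definition, "Sym")
        final Table inputTable = KeyedArrayBackedInputTable.make(definition, "Sym");

        // Formerly the MutableInputTable attribute; add(...) blocks until the
        // rows have been processed on the update graph.
        final InputTableUpdater updater =
                (InputTableUpdater) inputTable.getAttribute(Table.INPUT_TABLE_ATTRIBUTE);
        updater.add(TableTools.newTable(
                TableTools.stringCol("Sym", "AAPL"),
                TableTools.doubleCol("Price", 191.5)));
    }
}
```

The InputTableUpdater attribute replaces the old MutableInputTable attribute one-for-one; only the enum, allowEdits, and rowSet-based delete affordances are gone.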
--- .../engine/table/impl/TableCreatorImpl.java | 8 +- ...a => AppendOnlyArrayBackedInputTable.java} | 72 +--- ...le.java => BaseArrayBackedInputTable.java} | 183 ++-------- ...e.java => KeyedArrayBackedInputTable.java} | 68 +--- .../util/config/InputTableEnumGetter.java | 11 - .../util/config/InputTableRowSetter.java | 94 ----- .../InputTableStatusListener.java | 4 +- .../InputTableUpdater.java} | 64 ++-- .../TestFunctionGeneratedTableFactory.java | 14 +- .../util/TestKeyedArrayBackedInputTable.java | 202 +++++++++++ .../TestKeyedArrayBackedMutableTable.java | 333 ------------------ .../extensions/barrage/util/BarrageUtil.java | 17 +- py/server/deephaven/table_factory.py | 11 +- .../InputTableServiceGrpcImpl.java | 22 +- .../table/ops/CreateInputTableGrpcImpl.java | 8 +- 15 files changed, 330 insertions(+), 781 deletions(-) rename engine/table/src/main/java/io/deephaven/engine/table/impl/util/{AppendOnlyArrayBackedMutableTable.java => AppendOnlyArrayBackedInputTable.java} (59%) rename engine/table/src/main/java/io/deephaven/engine/table/impl/util/{BaseArrayBackedMutableTable.java => BaseArrayBackedInputTable.java} (62%) rename engine/table/src/main/java/io/deephaven/engine/table/impl/util/{KeyedArrayBackedMutableTable.java => KeyedArrayBackedInputTable.java} (76%) delete mode 100644 engine/table/src/main/java/io/deephaven/engine/util/config/InputTableEnumGetter.java delete mode 100644 engine/table/src/main/java/io/deephaven/engine/util/config/InputTableRowSetter.java rename engine/table/src/main/java/io/deephaven/engine/util/{config => input}/InputTableStatusListener.java (92%) rename engine/table/src/main/java/io/deephaven/engine/util/{config/MutableInputTable.java => input/InputTableUpdater.java} (71%) create mode 100644 engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestKeyedArrayBackedInputTable.java delete mode 100644 engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestKeyedArrayBackedMutableTable.java diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/TableCreatorImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/TableCreatorImpl.java index fe745db0b2a..76e643d4dcd 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/TableCreatorImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/TableCreatorImpl.java @@ -8,8 +8,8 @@ import io.deephaven.engine.table.Table; import io.deephaven.engine.table.TableDefinition; import io.deephaven.engine.table.TableFactory; -import io.deephaven.engine.table.impl.util.AppendOnlyArrayBackedMutableTable; -import io.deephaven.engine.table.impl.util.KeyedArrayBackedMutableTable; +import io.deephaven.engine.table.impl.util.AppendOnlyArrayBackedInputTable; +import io.deephaven.engine.table.impl.util.KeyedArrayBackedInputTable; import io.deephaven.engine.util.TableTools; import io.deephaven.qst.TableCreator; import io.deephaven.qst.table.EmptyTable; @@ -163,14 +163,14 @@ public static UpdatableTable of(InputTable inputTable) { @Override public UpdatableTable visit(InMemoryAppendOnlyInputTable inMemoryAppendOnly) { final TableDefinition definition = DefinitionAdapter.of(inMemoryAppendOnly.schema()); - return AppendOnlyArrayBackedMutableTable.make(definition); + return AppendOnlyArrayBackedInputTable.make(definition); } @Override public UpdatableTable visit(InMemoryKeyBackedInputTable inMemoryKeyBacked) { final TableDefinition definition = DefinitionAdapter.of(inMemoryKeyBacked.schema()); final String[] keyColumnNames = 
inMemoryKeyBacked.keys().toArray(String[]::new); - return KeyedArrayBackedMutableTable.make(definition, keyColumnNames); + return KeyedArrayBackedInputTable.make(definition, keyColumnNames); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AppendOnlyArrayBackedMutableTable.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AppendOnlyArrayBackedInputTable.java similarity index 59% rename from engine/table/src/main/java/io/deephaven/engine/table/impl/util/AppendOnlyArrayBackedMutableTable.java rename to engine/table/src/main/java/io/deephaven/engine/table/impl/util/AppendOnlyArrayBackedInputTable.java index f40908ed679..a65210dc3ea 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AppendOnlyArrayBackedMutableTable.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AppendOnlyArrayBackedInputTable.java @@ -9,7 +9,6 @@ import io.deephaven.engine.rowset.RowSet; import io.deephaven.engine.rowset.RowSetFactory; import io.deephaven.engine.rowset.RowSequenceFactory; -import io.deephaven.engine.util.config.InputTableStatusListener; import io.deephaven.engine.table.impl.QueryTable; import io.deephaven.engine.table.impl.sources.NullValueColumnSource; import io.deephaven.engine.table.ChunkSink; @@ -18,15 +17,13 @@ import java.util.Collections; import java.util.List; -import java.util.Map; -import java.util.function.Consumer; /** * An in-memory table that allows you to add rows as if it were an InputTable, which can be updated on the UGP. *
    * The table is not keyed, all rows are added to the end of the table. Deletions and edits are not permitted. */ -public class AppendOnlyArrayBackedMutableTable extends BaseArrayBackedMutableTable { +public class AppendOnlyArrayBackedInputTable extends BaseArrayBackedInputTable { static final String DEFAULT_DESCRIPTION = "Append Only In-Memory Input Table"; /** @@ -36,64 +33,40 @@ public class AppendOnlyArrayBackedMutableTable extends BaseArrayBackedMutableTab * * @return an empty AppendOnlyArrayBackedMutableTable with the given definition */ - public static AppendOnlyArrayBackedMutableTable make(@NotNull TableDefinition definition) { - return make(definition, Collections.emptyMap()); - } - - /** - * Create an empty AppendOnlyArrayBackedMutableTable with the given definition. - * - * @param definition the definition of the new table. - * @param enumValues a map of column names to enumeration values - * - * @return an empty AppendOnlyArrayBackedMutableTable with the given definition - */ - public static AppendOnlyArrayBackedMutableTable make(@NotNull TableDefinition definition, - final Map enumValues) { + public static AppendOnlyArrayBackedInputTable make( + @NotNull TableDefinition definition) { // noinspection resource return make(new QueryTable(definition, RowSetFactory.empty().toTracking(), - NullValueColumnSource.createColumnSourceMap(definition)), enumValues); - } - - /** - * Create an AppendOnlyArrayBackedMutableTable with the given initial data. - * - * @param initialTable the initial values to copy into the AppendOnlyArrayBackedMutableTable - * - * @return an empty AppendOnlyArrayBackedMutableTable with the given definition - */ - public static AppendOnlyArrayBackedMutableTable make(final Table initialTable) { - return make(initialTable, Collections.emptyMap()); + NullValueColumnSource.createColumnSourceMap(definition))); } /** * Create an AppendOnlyArrayBackedMutableTable with the given initial data. 
* * @param initialTable the initial values to copy into the AppendOnlyArrayBackedMutableTable - * @param enumValues a map of column names to enumeration values * * @return an empty AppendOnlyArrayBackedMutableTable with the given definition */ - public static AppendOnlyArrayBackedMutableTable make(final Table initialTable, - final Map enumValues) { - final AppendOnlyArrayBackedMutableTable result = new AppendOnlyArrayBackedMutableTable( - initialTable.getDefinition(), enumValues, new ProcessPendingUpdater()); + public static AppendOnlyArrayBackedInputTable make(final Table initialTable) { + final AppendOnlyArrayBackedInputTable result = + new AppendOnlyArrayBackedInputTable( + initialTable.getDefinition(), new ProcessPendingUpdater()); result.setAttribute(Table.ADD_ONLY_TABLE_ATTRIBUTE, Boolean.TRUE); + result.setAttribute(Table.APPEND_ONLY_TABLE_ATTRIBUTE, Boolean.TRUE); result.setFlat(); processInitial(initialTable, result); return result; } - private AppendOnlyArrayBackedMutableTable(@NotNull TableDefinition definition, - final Map enumValues, final ProcessPendingUpdater processPendingUpdater) { + private AppendOnlyArrayBackedInputTable(@NotNull TableDefinition definition, + final ProcessPendingUpdater processPendingUpdater) { // noinspection resource super(RowSetFactory.empty().toTracking(), makeColumnSourceMap(definition), - enumValues, processPendingUpdater); + processPendingUpdater); } @Override - protected void processPendingTable(Table table, boolean allowEdits, RowSetChangeRecorder rowSetChangeRecorder, - Consumer errorNotifier) { + protected void processPendingTable(Table table, RowSetChangeRecorder rowSetChangeRecorder) { try (final RowSet addRowSet = table.getRowSet().copy()) { final long firstRow = nextRow; final long lastRow = firstRow + addRowSet.intSize() - 1; @@ -135,28 +108,15 @@ protected List getKeyNames() { } @Override - ArrayBackedMutableInputTable makeHandler() { - return new AppendOnlyArrayBackedMutableInputTable(); + ArrayBackedInputTableUpdater makeUpdater() { + return new Updater(); } - private class AppendOnlyArrayBackedMutableInputTable extends ArrayBackedMutableInputTable { - @Override - public void setRows(@NotNull Table defaultValues, int[] rowArray, Map[] valueArray, - InputTableStatusListener listener) { - throw new UnsupportedOperationException(); - } + private class Updater extends ArrayBackedInputTableUpdater { @Override public void validateDelete(Table tableToDelete) { throw new UnsupportedOperationException("Table doesn't support delete operation"); } - - @Override - public void addRows(Map[] valueArray, boolean allowEdits, InputTableStatusListener listener) { - if (allowEdits) { - throw new UnsupportedOperationException(); - } - super.addRows(valueArray, allowEdits, listener); - } } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/BaseArrayBackedMutableTable.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/BaseArrayBackedInputTable.java similarity index 62% rename from engine/table/src/main/java/io/deephaven/engine/table/impl/util/BaseArrayBackedMutableTable.java rename to engine/table/src/main/java/io/deephaven/engine/table/impl/util/BaseArrayBackedInputTable.java index fc1c75d69df..f74f4b82907 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/BaseArrayBackedMutableTable.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/BaseArrayBackedInputTable.java @@ -4,9 +4,6 @@ package io.deephaven.engine.table.impl.util; import io.deephaven.base.verify.Assert; 
-import io.deephaven.base.verify.Require;
-import io.deephaven.datastructures.util.CollectionUtil;
-import io.deephaven.engine.rowset.RowSet;
 import io.deephaven.engine.rowset.RowSetBuilderSequential;
 import io.deephaven.engine.rowset.RowSetFactory;
 import io.deephaven.engine.rowset.TrackingRowSet;
@@ -15,9 +12,8 @@
 import io.deephaven.engine.table.TableDefinition;
 import io.deephaven.engine.table.WritableColumnSource;
 import io.deephaven.engine.table.impl.sources.ArrayBackedColumnSource;
-import io.deephaven.engine.util.config.InputTableStatusListener;
-import io.deephaven.engine.util.config.MutableInputTable;
-import io.deephaven.engine.table.impl.QueryTable;
+import io.deephaven.engine.util.input.InputTableStatusListener;
+import io.deephaven.engine.util.input.InputTableUpdater;
 import io.deephaven.engine.table.impl.UpdatableTable;
 import io.deephaven.engine.table.ColumnSource;
 import io.deephaven.util.annotations.TestUseOnly;
@@ -26,11 +22,8 @@
 import java.io.IOException;
 import java.util.*;
 import java.util.concurrent.CompletableFuture;
-import java.util.function.Consumer;

-abstract class BaseArrayBackedMutableTable extends UpdatableTable {
-
-    private static final Object[] BOOLEAN_ENUM_ARRAY = new Object[] {true, false, null};
+abstract class BaseArrayBackedInputTable extends UpdatableTable {

     /**
      * Queue of pending changes. Only synchronized access is permitted.
@@ -45,30 +38,27 @@ abstract class BaseArrayBackedMutableTable extends UpdatableTable {
      */
     private long processedSequence = 0L;

-    private final Map enumValues;
-
     private String description = getDefaultDescription();
     private Runnable onPendingChange = updateGraph::requestRefresh;

     long nextRow = 0;
     private long pendingProcessed = -1L;

-    public BaseArrayBackedMutableTable(TrackingRowSet rowSet, Map> nameToColumnSource,
-            Map enumValues, ProcessPendingUpdater processPendingUpdater) {
+    public BaseArrayBackedInputTable(TrackingRowSet rowSet, Map> nameToColumnSource,
+            ProcessPendingUpdater processPendingUpdater) {
         super(rowSet, nameToColumnSource, processPendingUpdater);
-        this.enumValues = enumValues;
-        MutableInputTable mutableInputTable = makeHandler();
-        setAttribute(Table.INPUT_TABLE_ATTRIBUTE, mutableInputTable);
+        InputTableUpdater inputTableUpdater = makeUpdater();
+        setAttribute(Table.INPUT_TABLE_ATTRIBUTE, inputTableUpdater);
         setRefreshing(true);
         processPendingUpdater.setThis(this);
     }

-    public MutableInputTable mutableInputTable() {
-        return (MutableInputTable) getAttribute(Table.INPUT_TABLE_ATTRIBUTE);
+    public InputTableUpdater inputTable() {
+        return (InputTableUpdater) getAttribute(Table.INPUT_TABLE_ATTRIBUTE);
     }

     public Table readOnlyCopy() {
-        return copy(BaseArrayBackedMutableTable::applicableForReadOnly);
+        return copy(BaseArrayBackedInputTable::applicableForReadOnly);
     }

     private static boolean applicableForReadOnly(String attributeName) {
@@ -84,9 +74,9 @@ private static boolean applicableForReadOnly(String attributeName) {
         return resultMap;
     }

-    static void processInitial(Table initialTable, BaseArrayBackedMutableTable result) {
+    static void processInitial(Table initialTable, BaseArrayBackedInputTable result) {
         final RowSetBuilderSequential builder = RowSetFactory.builderSequential();
-        result.processPendingTable(initialTable, true, new RowSetChangeRecorder() {
+        result.processPendingTable(initialTable, new RowSetChangeRecorder() {
             @Override
             public void addRowKey(long key) {
                 builder.appendKey(key);
@@ -101,14 +91,13 @@ public void removeRowKey(long key) {
             public void modifyRowKey(long key) {
                 throw new UnsupportedOperationException();
             }
-        }, (e) -> {
         });
         result.getRowSet().writableCast().insert(builder.build());
         result.getRowSet().writableCast().initializePreviousValue();
         result.getUpdateGraph().addSource(result);
     }

-    public BaseArrayBackedMutableTable setDescription(String newDescription) {
+    public BaseArrayBackedInputTable setDescription(String newDescription) {
         this.description = newDescription;
         return this;
     }
@@ -132,8 +121,7 @@ private void processPending(RowSetChangeRecorder rowSetChangeRecorder) {
             if (pendingChange.delete) {
                 processPendingDelete(pendingChange.table, rowSetChangeRecorder);
             } else {
-                processPendingTable(pendingChange.table, pendingChange.allowEdits, rowSetChangeRecorder,
-                        (e) -> pendingChange.error = e);
+                processPendingTable(pendingChange.table, rowSetChangeRecorder);
             }
             pendingProcessed = pendingChange.sequence;
         }
@@ -154,8 +142,7 @@ public void run() {
         }
     }

-    protected abstract void processPendingTable(Table table, boolean allowEdits,
-            RowSetChangeRecorder rowSetChangeRecorder, Consumer errorNotifier);
+    protected abstract void processPendingTable(Table table, RowSetChangeRecorder rowSetChangeRecorder);

     protected abstract void processPendingDelete(Table table, RowSetChangeRecorder rowSetChangeRecorder);

@@ -164,74 +151,73 @@ protected abstract void processPendingTable(Table table, boolean allowEdits,
     protected abstract List getKeyNames();

     protected static class ProcessPendingUpdater implements Updater {
-        private BaseArrayBackedMutableTable baseArrayBackedMutableTable;
+        private BaseArrayBackedInputTable baseArrayBackedInputTable;

         @Override
         public void accept(RowSetChangeRecorder rowSetChangeRecorder) {
-            baseArrayBackedMutableTable.processPending(rowSetChangeRecorder);
+            baseArrayBackedInputTable.processPending(rowSetChangeRecorder);
         }

-        public void setThis(BaseArrayBackedMutableTable keyedArrayBackedMutableTable) {
-            this.baseArrayBackedMutableTable = keyedArrayBackedMutableTable;
+        public void setThis(BaseArrayBackedInputTable keyedArrayBackedMutableTable) {
+            this.baseArrayBackedInputTable = keyedArrayBackedMutableTable;
         }
     }

     private final class PendingChange {
         final boolean delete;
+        @NotNull
         final Table table;
         final long sequence;
-        final boolean allowEdits;
         String error;

-        private PendingChange(Table table, boolean delete, boolean allowEdits) {
+        private PendingChange(@NotNull Table table, boolean delete) {
             Assert.holdsLock(pendingChanges, "pendingChanges");
+            Assert.neqNull(table, "table");
             this.table = table;
             this.delete = delete;
-            this.allowEdits = allowEdits;
             this.sequence = ++enqueuedSequence;
         }
     }

-    ArrayBackedMutableInputTable makeHandler() {
-        return new ArrayBackedMutableInputTable();
+    ArrayBackedInputTableUpdater makeUpdater() {
+        return new ArrayBackedInputTableUpdater();
     }

-    protected class ArrayBackedMutableInputTable implements MutableInputTable {
+    protected class ArrayBackedInputTableUpdater implements InputTableUpdater {
         @Override
         public List getKeyNames() {
-            return BaseArrayBackedMutableTable.this.getKeyNames();
+            return BaseArrayBackedInputTable.this.getKeyNames();
         }

         @Override
         public TableDefinition getTableDefinition() {
-            return BaseArrayBackedMutableTable.this.getDefinition();
+            return BaseArrayBackedInputTable.this.getDefinition();
         }

         @Override
         public void add(@NotNull final Table newData) throws IOException {
             checkBlockingEditSafety();
-            PendingChange pendingChange = enqueueAddition(newData, true);
+            PendingChange pendingChange = enqueueAddition(newData);
             blockingContinuation(pendingChange);
         }

         @Override
         public void addAsync(
                 @NotNull final Table newData,
-                final boolean allowEdits,
                 @NotNull final InputTableStatusListener listener) {
             checkAsyncEditSafety(newData);
-            final PendingChange pendingChange = enqueueAddition(newData, allowEdits);
+            final PendingChange pendingChange = enqueueAddition(newData);
             asynchronousContinuation(pendingChange, listener);
         }

-        private PendingChange enqueueAddition(@NotNull final Table newData, final boolean allowEdits) {
+        private PendingChange enqueueAddition(@NotNull final Table newData) {
             validateAddOrModify(newData);
             // we want to get a clean copy of the table; that can not change out from under us or result in long reads
             // during our UGP run
             final Table newDataSnapshot = snapshotData(newData);
             final PendingChange pendingChange;
             synchronized (pendingChanges) {
-                pendingChange = new PendingChange(newDataSnapshot, false, allowEdits);
+                pendingChange = new PendingChange(newDataSnapshot, false);
                 pendingChanges.add(pendingChange);
             }
             onPendingChange.run();
@@ -239,38 +225,33 @@ private PendingChange enqueueAddition(@NotNull final Table newData, final boolea
         }

         @Override
-        public void delete(@NotNull final Table table, @NotNull final TrackingRowSet rowsToDelete) throws IOException {
+        public void delete(@NotNull final Table table) throws IOException {
             checkBlockingEditSafety();
-            final PendingChange pendingChange = enqueueDeletion(table, rowsToDelete);
+            final PendingChange pendingChange = enqueueDeletion(table);
             blockingContinuation(pendingChange);
         }

         @Override
         public void deleteAsync(
                 @NotNull final Table table,
-                @NotNull final TrackingRowSet rowsToDelete,
                 @NotNull final InputTableStatusListener listener) {
             checkAsyncEditSafety(table);
-            final PendingChange pendingChange = enqueueDeletion(table, rowsToDelete);
+            final PendingChange pendingChange = enqueueDeletion(table);
             asynchronousContinuation(pendingChange, listener);
         }

-        private PendingChange enqueueDeletion(@NotNull final Table table, @NotNull final TrackingRowSet rowsToDelete) {
+        private PendingChange enqueueDeletion(@NotNull final Table table) {
             validateDelete(table);
-            final Table oldDataSnapshot = snapshotData(table, rowsToDelete);
+            final Table oldDataSnapshot = snapshotData(table);
             final PendingChange pendingChange;
             synchronized (pendingChanges) {
-                pendingChange = new PendingChange(oldDataSnapshot, true, false);
+                pendingChange = new PendingChange(oldDataSnapshot, true);
                 pendingChanges.add(pendingChange);
             }
             onPendingChange.run();
             return pendingChange;
         }

-        private Table snapshotData(@NotNull final Table data, @NotNull final TrackingRowSet rowSet) {
-            return snapshotData(data.getSubTable(rowSet));
-        }
-
         private Table snapshotData(@NotNull final Table data) {
             Table dataSnapshot;
             if (data.isRefreshing()) {
@@ -333,7 +314,7 @@ void waitForSequence(long sequence) {
             // in order to allow updates.
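Both write paths above funnel into the same PendingChange queue: the change is snapshotted, enqueued under the lock, and the update graph is poked via onPendingChange; blocking callers then wait for processedSequence to catch up, while asynchronous callers have their InputTableStatusListener fired once the change is consumed. For callers that prefer futures to callbacks, a thin adapter over the listener contract is enough. A minimal sketch, assuming only the InputTableUpdater and InputTableStatusListener interfaces as changed in this patch (the FutureListener helper itself is hypothetical):

import io.deephaven.engine.table.Table;
import io.deephaven.engine.util.input.InputTableStatusListener;
import io.deephaven.engine.util.input.InputTableUpdater;

import java.util.concurrent.CompletableFuture;

public final class FutureListener implements InputTableStatusListener {
    private final CompletableFuture<Void> future = new CompletableFuture<>();

    @Override
    public void onSuccess() {
        // Fired once the pending change has been processed on the update thread.
        future.complete(null);
    }

    @Override
    public void onError(Throwable t) {
        future.completeExceptionally(t);
    }

    // Enqueue newData without blocking and expose completion as a future.
    public static CompletableFuture<Void> addAsync(InputTableUpdater updater, Table newData) {
        final FutureListener listener = new FutureListener();
        updater.addAsync(newData, listener);
        return listener.future;
    }
}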
             while (processedSequence < sequence) {
                 try {
-                    BaseArrayBackedMutableTable.this.awaitUpdate();
+                    BaseArrayBackedInputTable.this.awaitUpdate();
                 } catch (InterruptedException ignored) {
                 }
             }
@@ -350,84 +331,6 @@ void waitForSequence(long sequence) {
             }
         }

-        @Override
-        public void setRows(@NotNull Table defaultValues, int[] rowArray, Map[] valueArray,
-                InputTableStatusListener listener) {
-            Assert.neqNull(defaultValues, "defaultValues");
-            if (defaultValues.isRefreshing()) {
-                updateGraph.checkInitiateSerialTableOperation();
-            }
-
-            final List> columnDefinitions = getTableDefinition().getColumns();
-            final Map> sources =
-                    buildSourcesMap(valueArray.length, columnDefinitions);
-            final String[] kabmtColumns =
-                    getTableDefinition().getColumnNames().toArray(CollectionUtil.ZERO_LENGTH_STRING_ARRAY);
-            // noinspection unchecked
-            final WritableColumnSource[] sourcesByPosition =
-                    Arrays.stream(kabmtColumns).map(sources::get).toArray(WritableColumnSource[]::new);
-
-            final Set missingColumns = new HashSet<>(getTableDefinition().getColumnNames());
-
-            for (final Map.Entry> entry : defaultValues.getColumnSourceMap()
-                    .entrySet()) {
-                final String colName = entry.getKey();
-                if (!sources.containsKey(colName)) {
-                    continue;
-                }
-                final ColumnSource cs = Require.neqNull(entry.getValue(), "defaultValue column source: " + colName);
-                final WritableColumnSource dest =
-                        Require.neqNull(sources.get(colName), "destination column source: " + colName);
-
-                final RowSet defaultValuesRowSet = defaultValues.getRowSet();
-                for (int rr = 0; rr < rowArray.length; ++rr) {
-                    final long key = defaultValuesRowSet.get(rowArray[rr]);
-                    dest.set(rr, cs.get(key));
-                }
-
-                missingColumns.remove(colName);
-            }
-
-            for (int ii = 0; ii < valueArray.length; ++ii) {
-                final Map passedInValues = valueArray[ii];
-
-                for (int cc = 0; cc < sourcesByPosition.length; cc++) {
-                    final String colName = kabmtColumns[cc];
-                    if (passedInValues.containsKey(colName)) {
-                        sourcesByPosition[cc].set(ii, passedInValues.get(colName));
-                    } else if (missingColumns.contains(colName)) {
-                        throw new IllegalArgumentException("No value specified for " + colName + " row " + ii);
-                    }
-                }
-            }
-
-            // noinspection resource
-            final QueryTable newData = new QueryTable(getTableDefinition(),
-                    RowSetFactory.flat(valueArray.length).toTracking(), sources);
-            addAsync(newData, true, listener);
-        }
-
-        @Override
-        public void addRows(Map[] valueArray, boolean allowEdits, InputTableStatusListener listener) {
-            final List> columnDefinitions = getTableDefinition().getColumns();
-            final Map> sources =
-                    buildSourcesMap(valueArray.length, columnDefinitions);
-
-            for (int rowNumber = 0; rowNumber < valueArray.length; rowNumber++) {
-                final Map values = valueArray[rowNumber];
-                for (final ColumnDefinition columnDefinition : columnDefinitions) {
-                    sources.get(columnDefinition.getName()).set(rowNumber, values.get(columnDefinition.getName()));
-                }
-
-            }
-
-            // noinspection resource
-            final QueryTable newData = new QueryTable(getTableDefinition(),
-                    RowSetFactory.flat(valueArray.length).toTracking(), sources);
-
-            addAsync(newData, allowEdits, listener);
-        }
-
         @NotNull
         private Map> buildSourcesMap(int capacity,
                 List> columnDefinitions) {
@@ -443,17 +346,9 @@ private Map> buildSourcesMap(int capacity,
             return sources;
         }

-        @Override
-        public Object[] getEnumsForColumn(String columnName) {
-            if (getTableDefinition().getColumn(columnName).getDataType().equals(Boolean.class)) {
-                return BOOLEAN_ENUM_ARRAY;
-            }
-            return enumValues.get(columnName);
-        }
-
         @Override
         public Table getTable() {
-            return BaseArrayBackedMutableTable.this;
+            return BaseArrayBackedInputTable.this;
         }

         @Override
diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/KeyedArrayBackedMutableTable.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/KeyedArrayBackedInputTable.java
similarity index 76%
rename from engine/table/src/main/java/io/deephaven/engine/table/impl/util/KeyedArrayBackedMutableTable.java
rename to engine/table/src/main/java/io/deephaven/engine/table/impl/util/KeyedArrayBackedInputTable.java
index ad4221bbb90..1eaeba52a01 100644
--- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/KeyedArrayBackedMutableTable.java
+++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/KeyedArrayBackedInputTable.java
@@ -20,14 +20,13 @@
 import org.jetbrains.annotations.NotNull;

 import java.util.*;
-import java.util.function.Consumer;

 /**
  * An in-memory table that has keys for each row, which can be updated on the UGP.
  *
  * This is used to implement in-memory editable table columns from web plugins.
  */
-public class KeyedArrayBackedMutableTable extends BaseArrayBackedMutableTable {
+public class KeyedArrayBackedInputTable extends BaseArrayBackedInputTable {

     private static final String DEFAULT_DESCRIPTION = "In-Memory Input Table";

@@ -47,44 +46,13 @@ public class KeyedArrayBackedMutableTable extends BaseArrayBackedMutableTable {
      *
      * @return an empty KeyedArrayBackedMutableTable with the given definition and key columns
      */
-    public static KeyedArrayBackedMutableTable make(@NotNull TableDefinition definition,
+    public static KeyedArrayBackedInputTable make(@NotNull TableDefinition definition,
             final String... keyColumnNames) {
         // noinspection resource
         return make(new QueryTable(definition, RowSetFactory.empty().toTracking(),
                 NullValueColumnSource.createColumnSourceMap(definition)), keyColumnNames);
     }

-    /**
-     * Create an empty KeyedArrayBackedMutableTable.
-     *
-     * @param definition the definition of the table to create
-     * @param enumValues a map of column names to enumeration values
-     * @param keyColumnNames the name of the key columns
-     *
-     * @return an empty KeyedArrayBackedMutableTable with the given definition and key columns
-     */
-    public static KeyedArrayBackedMutableTable make(@NotNull TableDefinition definition,
-            final Map enumValues, final String... keyColumnNames) {
-        // noinspection resource
-        return make(new QueryTable(definition, RowSetFactory.empty().toTracking(),
-                NullValueColumnSource.createColumnSourceMap(definition)), enumValues, keyColumnNames);
-    }
-
-    /**
-     * Create an empty KeyedArrayBackedMutableTable.
-     *
-     * The initialTable is processed in order, so if there are duplicate keys only the last row is reflected in the
-     * output.
-     *
-     * @param initialTable the initial values to copy into the KeyedArrayBackedMutableTable
-     * @param keyColumnNames the name of the key columns
-     *
-     * @return an empty KeyedArrayBackedMutableTable with the given definition and key columns
-     */
-    public static KeyedArrayBackedMutableTable make(final Table initialTable, final String... keyColumnNames) {
-        return make(initialTable, Collections.emptyMap(), keyColumnNames);
-    }
-
     /**
      * Create an empty KeyedArrayBackedMutableTable.
      *
@@ -92,25 +60,23 @@ public static KeyedArrayBackedMutableTable make(final Table initialTable, final
      * output.
      *
      * @param initialTable the initial values to copy into the KeyedArrayBackedMutableTable
-     * @param enumValues a map of column names to enumeration values
      * @param keyColumnNames the name of the key columns
      *
      * @return an empty KeyedArrayBackedMutableTable with the given definition and key columns
      */
-    public static KeyedArrayBackedMutableTable make(final Table initialTable, final Map enumValues,
-            final String... keyColumnNames) {
-        final KeyedArrayBackedMutableTable result = new KeyedArrayBackedMutableTable(initialTable.getDefinition(),
-                keyColumnNames, enumValues, new ProcessPendingUpdater());
+    public static KeyedArrayBackedInputTable make(final Table initialTable, final String... keyColumnNames) {
+        final KeyedArrayBackedInputTable result = new KeyedArrayBackedInputTable(initialTable.getDefinition(),
+                keyColumnNames, new ProcessPendingUpdater());
         processInitial(initialTable, result);
         result.startTrackingPrev();
         return result;
     }

-    private KeyedArrayBackedMutableTable(@NotNull TableDefinition definition, final String[] keyColumnNames,
-            final Map enumValues, final ProcessPendingUpdater processPendingUpdater) {
+    private KeyedArrayBackedInputTable(@NotNull TableDefinition definition, final String[] keyColumnNames,
+            final ProcessPendingUpdater processPendingUpdater) {
         // noinspection resource
         super(RowSetFactory.empty().toTracking(), makeColumnSourceMap(definition),
-                enumValues, processPendingUpdater);
+                processPendingUpdater);
         final List missingKeyColumns = new ArrayList<>(Arrays.asList(keyColumnNames));
         missingKeyColumns.removeAll(definition.getColumnNames());
         if (!missingKeyColumns.isEmpty()) {
@@ -135,13 +101,11 @@ private void startTrackingPrev() {
     }

     @Override
-    protected void processPendingTable(Table table, boolean allowEdits, RowSetChangeRecorder rowSetChangeRecorder,
-            Consumer errorNotifier) {
+    protected void processPendingTable(Table table, RowSetChangeRecorder rowSetChangeRecorder) {
         final ChunkSource keySource = makeKeySource(table);
         final int chunkCapacity = table.intSize();
         long rowToInsert = nextRow;
-        final StringBuilder errorBuilder = new StringBuilder();
         try (final RowSet addRowSet = table.getRowSet().copy();
                 final WritableLongChunk destinations = WritableLongChunk.makeWritableChunk(chunkCapacity);
@@ -161,25 +125,13 @@ protected void processPendingTable(Table table, boolean allowEdits, RowSetChange
                         keyToRowMap.put(key, rowNumber);
                         rowSetChangeRecorder.addRowKey(rowNumber);
                         destinations.set(ii, rowNumber);
-                    } else if (allowEdits) {
+                    } else {
                         rowSetChangeRecorder.modifyRowKey(rowNumber);
                         destinations.set(ii, rowNumber);
-                    } else {
-                        // invalid edit
-                        if (errorBuilder.length() > 0) {
-                            errorBuilder.append(", ").append(key);
-                        } else {
-                            errorBuilder.append("Can not edit keys ").append(key);
-                        }
                     }
                 }
             }

-            if (errorBuilder.length() > 0) {
-                errorNotifier.accept(errorBuilder.toString());
-                return;
-            }
-
             for (long ii = nextRow; ii < rowToInsert; ++ii) {
                 rowSetChangeRecorder.addRowKey(ii);
             }
diff --git a/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableEnumGetter.java b/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableEnumGetter.java
deleted file mode 100644
index d861e125377..00000000000
--- a/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableEnumGetter.java
+++ /dev/null
@@ -1,11 +0,0 @@
-/**
- * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
- */
-package io.deephaven.engine.util.config;
-
-/**
- * Accessor interface for enumeration constants for an input table column.
- */
-public interface InputTableEnumGetter {
-    Object[] getEnumsForColumn(String columnName);
-}
diff --git a/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableRowSetter.java b/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableRowSetter.java
deleted file mode 100644
index 1d058ea6567..00000000000
--- a/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableRowSetter.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
- */
-package io.deephaven.engine.util.config;
-
-import io.deephaven.engine.table.Table;
-
-import java.util.Map;
-
-public interface InputTableRowSetter {
-    /**
-     * Set the values of the column specified by the input, filling in missing data using the parameter 'table' as the
-     * previous value source. This method will be invoked asynchronously. Users may use
-     * {@link #setRows(Table, int[], Map[], InputTableStatusListener)} to be notified of asynchronous results.
-     *
-     * @param table The table to use as the previous value source
-     * @param row The row key to set
-     * @param values A map of column name to value to set.
-     */
-    default void setRow(Table table, int row, Map values) {
-        // noinspection unchecked
-        setRows(table, new int[] {row}, new Map[] {values});
-    }
-
-    /**
-     * Set the values of the columns specified by the input, filling in missing data using the parameter 'table' as the
-     * previous value source. This method will be invoked asynchronously. Users may use
-     * {@link #setRows(Table, int[], Map[], InputTableStatusListener)} to be notified of asynchronous results.
-     *
-     * @param table The table to use as the previous value source
-     * @param rowArray The row keys to update.
-     * @param valueArray The new values.
-     */
-    default void setRows(Table table, int[] rowArray, Map[] valueArray) {
-        setRows(table, rowArray, valueArray, InputTableStatusListener.DEFAULT);
-    }
-
-    /**
-     * Set the values of the columns specified by the input, filling in missing data using the parameter 'table' as the
-     * previous value source. This method will be invoked asynchronously. The input listener will be notified on
-     * success/failure
-     *
-     * @param table The table to use as the previous value source
-     * @param rowArray The row keys to update.
-     * @param valueArray The new values.
-     * @param listener The listener to notify on asynchronous results.
-     */
-    void setRows(Table table, int[] rowArray, Map[] valueArray, InputTableStatusListener listener);
-
-    /**
-     * Add the specified row to the table. Duplicate keys will be overwritten. This method will execute asynchronously.
-     * Users may use {@link #addRow(Map, boolean, InputTableStatusListener)} to handle the result of the asynchronous
-     * write.
-     *
-     * @param values The values to write.
-     */
-    default void addRow(Map values) {
-        // noinspection unchecked
-        addRows(new Map[] {values});
-    }
-
-    /**
-     * Add the specified rows to the table. Duplicate keys will be overwritten. This method will execute asynchronously.
-     * Users may use {@link #addRows(Map[], boolean, InputTableStatusListener)} to handle the asynchronous result.
-     *
-     * @param valueArray The values to write.
-     */
-    default void addRows(Map[] valueArray) {
-        addRows(valueArray, true, InputTableStatusListener.DEFAULT);
-    }
-
-    /**
-     * Add the specified row to the table, optionally overwriting existing keys. This method will execute
-     * asynchronously, the input listener will be notified on success/failure.
-     *
-     * @param valueArray The value to write.
-     * @param allowEdits Should pre-existing keys be overwritten?
-     * @param listener The listener to report asynchronous result to.
-     */
-    default void addRow(Map valueArray, boolean allowEdits, InputTableStatusListener listener) {
-        // noinspection unchecked
-        addRows(new Map[] {valueArray}, allowEdits, listener);
-    }
-
-    /**
-     * Add the specified rows to the table, optionally overwriting existing keys. This method will execute
-     * asynchronously, the input listener will be notified on success/failure.
-     *
-     * @param valueArray The values to write.
-     * @param allowEdits Should pre-existing keys be overwritten?
-     * @param listener The listener to report asynchronous results to.
-     */
-    void addRows(Map[] valueArray, boolean allowEdits, InputTableStatusListener listener);
-}
diff --git a/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableStatusListener.java b/engine/table/src/main/java/io/deephaven/engine/util/input/InputTableStatusListener.java
similarity index 92%
rename from engine/table/src/main/java/io/deephaven/engine/util/config/InputTableStatusListener.java
rename to engine/table/src/main/java/io/deephaven/engine/util/input/InputTableStatusListener.java
index 8061f253642..2676d20a11f 100644
--- a/engine/table/src/main/java/io/deephaven/engine/util/config/InputTableStatusListener.java
+++ b/engine/table/src/main/java/io/deephaven/engine/util/input/InputTableStatusListener.java
@@ -1,7 +1,7 @@
 /**
  * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
  */
-package io.deephaven.engine.util.config;
+package io.deephaven.engine.util.input;

 import io.deephaven.internal.log.LoggerFactory;
 import io.deephaven.io.logger.Logger;
@@ -37,7 +37,7 @@ public void onSuccess() {
     }

     /**
-     * Handle an error that occured during an input table write.
+     * Handle an error that occurred during an input table write.
      *
      * @param t the error.
      */
diff --git a/engine/table/src/main/java/io/deephaven/engine/util/config/MutableInputTable.java b/engine/table/src/main/java/io/deephaven/engine/util/input/InputTableUpdater.java
similarity index 71%
rename from engine/table/src/main/java/io/deephaven/engine/util/config/MutableInputTable.java
rename to engine/table/src/main/java/io/deephaven/engine/util/input/InputTableUpdater.java
index 202256ca7ea..e43053d37ef 100644
--- a/engine/table/src/main/java/io/deephaven/engine/util/config/MutableInputTable.java
+++ b/engine/table/src/main/java/io/deephaven/engine/util/input/InputTableUpdater.java
@@ -1,13 +1,12 @@
 /**
  * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
 */
-package io.deephaven.engine.util.config;
+package io.deephaven.engine.util.input;

 import io.deephaven.engine.exceptions.ArgumentException;
 import io.deephaven.engine.table.ColumnDefinition;
 import io.deephaven.engine.table.Table;
 import io.deephaven.engine.table.TableDefinition;
-import io.deephaven.engine.rowset.TrackingRowSet;

 import java.io.IOException;
 import java.util.List;
@@ -15,12 +14,12 @@
 /**
  * A minimal interface for mutable shared tables, providing the ability to write to the table instance this is attached
- * to. MutableInputTable instances are set on the table as an attribute.
+ * to. InputTable instances are set on the table as an attribute.
  *

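The attribute lookup described above is the supported way to reach the updater from any table that carries it. A minimal usage sketch of the renamed API, assuming a running engine ExecutionContext and update graph (the column names and values are illustrative, borrowed from the tests later in this patch):

import io.deephaven.engine.table.Table;
import io.deephaven.engine.table.impl.util.KeyedArrayBackedInputTable;
import io.deephaven.engine.util.TableTools;
import io.deephaven.engine.util.input.InputTableUpdater;

import java.io.IOException;

public class InputTableExample {
    public static void main(String[] args) throws IOException {
        // Seed a keyed input table; "Name" is the key column.
        final Table initial = TableTools.newTable(
                TableTools.stringCol("Name", "Fred"),
                TableTools.stringCol("Employer", "Slate Rock and Gravel"));
        final Table inputTable = KeyedArrayBackedInputTable.make(initial, "Name");

        // The updater is published on the table as an attribute.
        final InputTableUpdater updater =
                (InputTableUpdater) inputTable.getAttribute(Table.INPUT_TABLE_ATTRIBUTE);

        // Writes are expressed as tables; a row with a matching key replaces the existing row.
        updater.add(TableTools.newTable(
                TableTools.stringCol("Name", "Fred"),
                TableTools.stringCol("Employer", "Cogswell")));
    }
}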
  * Implementations of this interface will make their own guarantees about how atomically changes will be applied and
  * what operations they support.
  */
-public interface MutableInputTable extends InputTableRowSetter, InputTableEnumGetter {
+public interface InputTableUpdater {

     /**
      * Gets the names of the key columns.
@@ -85,7 +84,7 @@ default void validateDelete(Table tableToDelete) {
             error.append("Unknown key columns: ").append(extraKeys);
         }
         if (error.length() > 0) {
-            throw new ArgumentException("Invalid Key Table Definition: " + error.toString());
+            throw new ArgumentException("Invalid Key Table Definition: " + error);
         }
     }

     /**
@@ -96,8 +95,8 @@ default void validateDelete(Table tableToDelete) {
      * This method will block until the rows are added. As a result, this method is not suitable for use from a
      * {@link io.deephaven.engine.table.TableListener table listener} or any other
      * {@link io.deephaven.engine.updategraph.NotificationQueue.Notification notification}-dispatched callback
-     * dispatched by this MutableInputTable's {@link io.deephaven.engine.updategraph.UpdateGraph update graph}. It may
-     * be suitable to delete from another update graph if doing so does not introduce any cycles.
+     * dispatched by this InputTable's {@link io.deephaven.engine.updategraph.UpdateGraph update graph}. It may be
+     * suitable to delete from another update graph if doing so does not introduce any cycles.
      *
      * @param newData The data to write to this table
      * @throws IOException If there is an error writing the data
      */
     void add(Table newData) throws IOException;

     /**
-     * Write {@code newData} to this table. Added rows with keys that match existing rows will instead replace those
-     * rows, if supported and {@code allowEdits == true}.
+     * Write {@code newData} to this table. Added rows with keys that match existing rows replace those rows, if
+     * supported.
      *

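Because the blocking add above waits for the change to be applied on the update graph, it is worth validating the incoming definition before enqueueing anything; the input table gRPC service later in this patch follows exactly this pattern. A hedged sketch of the same idea (the SafeAdd helper is hypothetical):

import io.deephaven.engine.table.Table;
import io.deephaven.engine.table.TableDefinition;
import io.deephaven.engine.util.input.InputTableUpdater;

import java.io.IOException;

final class SafeAdd {
    // Fail fast on a mismatched definition instead of surfacing the error from the update cycle.
    static void addChecked(InputTableUpdater updater, Table newData) throws IOException {
        try {
            updater.validateAddOrModify(newData);
        } catch (TableDefinition.IncompatibleTableDefinitionException e) {
            throw new IllegalArgumentException(
                    "Provided table's columns are not compatible: " + e.getMessage(), e);
        }
        updater.add(newData);
    }
}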
      * This method will not block, and can be safely used from a {@link io.deephaven.engine.table.TableListener
      * table listener} or any other {@link io.deephaven.engine.updategraph.NotificationQueue.Notification
      * notification}-dispatched callback
@@ -115,11 +114,9 @@ default void validateDelete(Table tableToDelete) {
      * cycle.
      *
      * @param newData The data to write to this table
-     * @param allowEdits Whether added rows with keys that match existing rows will instead replace those rows, or
-     *        result in an error
      * @param listener The listener for asynchronous results
      */
-    void addAsync(Table newData, boolean allowEdits, InputTableStatusListener listener);
+    void addAsync(Table newData, InputTableStatusListener listener);

     /**
      * Delete the keys contained in {@code table} from this input table.
      *
      * This method will block until the rows are deleted. As a result, this method is not suitable for use from a
      * {@link io.deephaven.engine.table.TableListener table listener} or any other
      * {@link io.deephaven.engine.updategraph.NotificationQueue.Notification notification}-dispatched callback
-     * dispatched by this MutableInputTable's {@link io.deephaven.engine.updategraph.UpdateGraph update graph}. It may
-     * be suitable to delete from another update graph if doing so does not introduce any cycles.
+     * dispatched by this InputTable's {@link io.deephaven.engine.updategraph.UpdateGraph update graph}. It may be
+     * suitable to delete from another update graph if doing so does not introduce any cycles.
      *
      * @param table The rows to delete
      * @throws IOException If a problem occurred while deleting the rows.
      * @throws UnsupportedOperationException If this table does not support deletes
      */
     default void delete(Table table) throws IOException {
-        delete(table, table.getRowSet());
-    }
-
-    /**
-     * Delete the keys contained in {@code table.subTable(rowSet)} from this input table.
-     *

-     * This method will block until the rows are deleted. As a result, this method is not suitable for use from a
-     * {@link io.deephaven.engine.table.TableListener table listener} or any other
-     * {@link io.deephaven.engine.updategraph.NotificationQueue.Notification notification}-dispatched callback
-     * dispatched by this MutableInputTable's {@link io.deephaven.engine.updategraph.UpdateGraph update graph}. It may
-     * be suitable to delete from another update graph if doing so does not introduce any cycles.
-     *
-     * @param table Table containing the rows to delete
-     * @param rowSet The rows to delete
-     * @throws IOException If a problem occurred while deleting the rows
-     * @throws UnsupportedOperationException If this table does not support deletes
-     */
-    default void delete(Table table, TrackingRowSet rowSet) throws IOException {
         throw new UnsupportedOperationException("Table does not support deletes");
     }

     /**
-     * Delete the keys contained in {@code table.subTable(rowSet)} from this input table.
+     * Delete the keys contained in table from this input table.
      *

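Deletion is keyed rather than positional after this change: the caller passes a table of keys, and only the key columns matter, as the tests later in this patch show with delete(input2.view("Name")). A minimal sketch, assuming a target input table keyed on a String "Name" column (the DeleteByKey helper is hypothetical):

import io.deephaven.engine.table.Table;
import io.deephaven.engine.util.TableTools;
import io.deephaven.engine.util.input.InputTableUpdater;

import java.io.IOException;

final class DeleteByKey {
    // Build a one-column key table and issue a blocking delete.
    static void deleteName(InputTableUpdater updater, String name) throws IOException {
        final Table keys = TableTools.newTable(TableTools.stringCol("Name", name));
        updater.delete(keys);
    }
}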
      * This method will not block, and can be safely used from a {@link io.deephaven.engine.table.TableListener
      * table listener} or any other {@link io.deephaven.engine.updategraph.NotificationQueue.Notification
      * notification}-dispatched callback
      * cycle.
      *
      * @param table Table containing the rows to delete
-     * @param rowSet The rows to delete
      * @throws UnsupportedOperationException If this table does not support deletes
      */
-    default void deleteAsync(Table table, TrackingRowSet rowSet, InputTableStatusListener listener) {
+    default void deleteAsync(Table table, InputTableStatusListener listener) {
         throw new UnsupportedOperationException("Table does not support deletes");
     }

     /**
-     * Return a user-readable description of this MutableInputTable.
+     * Return a user-readable description of this InputTable.
      *
      * @return a description of this input table
      */
     String getDescription();

     /**
-     * Returns a Deephaven table that contains the current data for this MutableInputTable.
+     * Returns a Deephaven table that contains the current data for this InputTable.
      *
-     * @return the current data in this MutableInputTable.
+     * @return the current data in this InputTable.
      */
     Table getTable();

@@ -198,20 +176,20 @@ default boolean isKey(String columnName) {
     }

     /**
-     * Returns true if the specified column exists in this MutableInputTable.
+     * Returns true if the specified column exists in this InputTable.
      *
      * @param columnName the column to interrogate
-     * @return true if columnName exists in this MutableInputTable
+     * @return true if columnName exists in this InputTable
      */
     default boolean hasColumn(String columnName) {
         return getTableDefinition().getColumnNames().contains(columnName);
     }

     /**
-     * Queries whether this MutableInputTable is editable in the current context.
+     * Queries whether this InputTable is editable in the current context.
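The remaining methods are introspection hooks that clients such as the web UI can use before offering edits. A minimal sketch combining them (DescribeInputTable is a hypothetical helper; "Employer" is an illustrative column name):

import io.deephaven.engine.util.input.InputTableUpdater;

final class DescribeInputTable {
    // Summarize an updater's shape and editability in one line.
    static String describe(InputTableUpdater updater) {
        return updater.getDescription()
                + ", keys=" + updater.getKeyNames()
                + ", hasEmployer=" + updater.hasColumn("Employer")
                + ", editable=" + updater.canEdit();
    }
}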
* - * @return true if this MutableInputTable may be edited, false otherwise TODO (deephaven/deephaven-core/issues/255): - * Add AuthContext and whatever else is appropriate + * @return true if this InputTable may be edited, false otherwise TODO (deephaven/deephaven-core/issues/255): Add + * AuthContext and whatever else is appropriate */ boolean canEdit(); } diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestFunctionGeneratedTableFactory.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestFunctionGeneratedTableFactory.java index d8cadb6b33f..b44c1d8d864 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestFunctionGeneratedTableFactory.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestFunctionGeneratedTableFactory.java @@ -20,7 +20,7 @@ import java.util.Random; -import static io.deephaven.engine.table.impl.util.TestKeyedArrayBackedMutableTable.handleDelayedRefresh; +import static io.deephaven.engine.table.impl.util.TestKeyedArrayBackedInputTable.handleDelayedRefresh; import static io.deephaven.engine.testutil.TstUtils.*; import static io.deephaven.engine.util.TableTools.*; @@ -68,13 +68,13 @@ public void testNoSources() { } public void testMultipleSources() throws Exception { - final AppendOnlyArrayBackedMutableTable source1 = AppendOnlyArrayBackedMutableTable.make(TableDefinition.of( + final AppendOnlyArrayBackedInputTable source1 = AppendOnlyArrayBackedInputTable.make(TableDefinition.of( ColumnDefinition.of("StringCol", Type.stringType()))); - final BaseArrayBackedMutableTable.ArrayBackedMutableInputTable inputTable1 = source1.makeHandler(); + final BaseArrayBackedInputTable.ArrayBackedInputTableUpdater inputTable1 = source1.makeUpdater(); - final AppendOnlyArrayBackedMutableTable source2 = AppendOnlyArrayBackedMutableTable.make(TableDefinition.of( + final AppendOnlyArrayBackedInputTable source2 = AppendOnlyArrayBackedInputTable.make(TableDefinition.of( ColumnDefinition.of("IntCol", Type.intType()))); - final BaseArrayBackedMutableTable.ArrayBackedMutableInputTable inputTable2 = source2.makeHandler(); + final BaseArrayBackedInputTable.ArrayBackedInputTableUpdater inputTable2 = source2.makeUpdater(); final Table functionBacked = FunctionGeneratedTableFactory.create(() -> source1.lastBy().naturalJoin(source2, ""), source1, source2); @@ -82,9 +82,9 @@ public void testMultipleSources() throws Exception { assertEquals(functionBacked.size(), 0); handleDelayedRefresh(() -> { - inputTable1.addAsync(newTable(stringCol("StringCol", "MyString")), false, t -> { + inputTable1.addAsync(newTable(stringCol("StringCol", "MyString")), t -> { }); - inputTable2.addAsync(newTable(intCol("IntCol", 12345)), false, t -> { + inputTable2.addAsync(newTable(intCol("IntCol", 12345)), t -> { }); }, source1, source2); diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestKeyedArrayBackedInputTable.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestKeyedArrayBackedInputTable.java new file mode 100644 index 00000000000..72d468c4e8b --- /dev/null +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestKeyedArrayBackedInputTable.java @@ -0,0 +1,202 @@ +/** + * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending + */ +package io.deephaven.engine.table.impl.util; + +import io.deephaven.UncheckedDeephavenException; +import io.deephaven.engine.context.ExecutionContext; +import io.deephaven.engine.table.Table; +import 
io.deephaven.engine.table.impl.FailureListener; +import io.deephaven.engine.table.impl.TableUpdateValidator; +import io.deephaven.engine.testutil.ControlledUpdateGraph; +import io.deephaven.engine.testutil.junit4.EngineCleanup; +import io.deephaven.engine.util.TableTools; +import io.deephaven.engine.util.input.InputTableUpdater; +import io.deephaven.util.function.ThrowingRunnable; +import junit.framework.TestCase; +import org.junit.Rule; +import org.junit.Test; + +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.CountDownLatch; + +import static io.deephaven.engine.testutil.TstUtils.assertTableEquals; +import static io.deephaven.engine.util.TableTools.showWithRowSet; +import static io.deephaven.engine.util.TableTools.stringCol; + +public class TestKeyedArrayBackedInputTable { + + @Rule + public final EngineCleanup liveTableTestCase = new EngineCleanup(); + + @Test + public void testSimple() throws Exception { + final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), + stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso")); + + final KeyedArrayBackedInputTable kabut = KeyedArrayBackedInputTable.make(input, "Name"); + final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); + final Table validatorResult = validator.getResultTable(); + final FailureListener failureListener = new FailureListener(); + validatorResult.addUpdateListener(failureListener); + + assertTableEquals(input, kabut); + + final InputTableUpdater inputTableUpdater = (InputTableUpdater) kabut.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); + TestCase.assertNotNull(inputTableUpdater); + + final Table input2 = TableTools.newTable(stringCol("Name", "Randy"), stringCol("Employer", "USGS")); + + handleDelayedRefresh(() -> inputTableUpdater.add(input2), kabut); + assertTableEquals(TableTools.merge(input, input2), kabut); + + final Table input3 = TableTools.newTable(stringCol("Name", "Randy"), stringCol("Employer", "Tegridy")); + handleDelayedRefresh(() -> inputTableUpdater.add(input3), kabut); + assertTableEquals(TableTools.merge(input, input3), kabut); + + + final Table input4 = TableTools.newTable(stringCol("Name", "George"), stringCol("Employer", "Cogswell")); + handleDelayedRefresh(() -> inputTableUpdater.add(input4), kabut); + showWithRowSet(kabut); + + assertTableEquals(TableTools.merge(input, input3, input4).lastBy("Name"), kabut); + + final Table input5 = + TableTools.newTable(stringCol("Name", "George"), stringCol("Employer", "Spacely Sprockets")); + handleDelayedRefresh(() -> inputTableUpdater.add(input5), kabut); + showWithRowSet(kabut); + + assertTableEquals(TableTools.merge(input, input3, input4, input5).lastBy("Name"), kabut); + + final long sizeBeforeDelete = kabut.size(); + System.out.println("KABUT.rowSet before delete: " + kabut.getRowSet()); + final Table delete1 = TableTools.newTable(stringCol("Name", "Earl")); + handleDelayedRefresh(() -> inputTableUpdater.delete(delete1), kabut); + System.out.println("KABUT.rowSet after delete: " + kabut.getRowSet()); + final long sizeAfterDelete = kabut.size(); + TestCase.assertEquals(sizeBeforeDelete - 1, sizeAfterDelete); + + showWithRowSet(kabut); + + final Table expected = TableTools.merge( + TableTools.merge(input, input3, input4, input5).update("Deleted=false"), + delete1.update("Employer=(String)null", "Deleted=true")) + .lastBy("Name").where("Deleted=false").dropColumns("Deleted"); + showWithRowSet(expected); + + assertTableEquals(expected, kabut); + } + + @Test 
+ public void testAppendOnly() throws Exception { + final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), + stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso")); + + final AppendOnlyArrayBackedInputTable aoabmt = AppendOnlyArrayBackedInputTable.make(input); + final TableUpdateValidator validator = TableUpdateValidator.make("aoabmt", aoabmt); + final Table validatorResult = validator.getResultTable(); + final FailureListener failureListener = new FailureListener(); + validatorResult.addUpdateListener(failureListener); + + assertTableEquals(input, aoabmt); + + final InputTableUpdater inputTableUpdater = + (InputTableUpdater) aoabmt.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); + TestCase.assertNotNull(inputTableUpdater); + + final Table input2 = + TableTools.newTable(stringCol("Name", "Randy", "George"), stringCol("Employer", "USGS", "Cogswell")); + + handleDelayedRefresh(() -> inputTableUpdater.add(input2), aoabmt); + assertTableEquals(TableTools.merge(input, input2), aoabmt); + } + + @Test + public void testFilteredAndSorted() throws Exception { + final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), + stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso")); + + final KeyedArrayBackedInputTable kabut = KeyedArrayBackedInputTable.make(input, "Name"); + final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); + final Table validatorResult = validator.getResultTable(); + final FailureListener failureListener = new FailureListener(); + validatorResult.addUpdateListener(failureListener); + + assertTableEquals(input, kabut); + + final Table fs = kabut.where("Name.length() == 4").sort("Name"); + + final InputTableUpdater inputTableUpdater = (InputTableUpdater) fs.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); + TestCase.assertNotNull(inputTableUpdater); + + final Table delete = TableTools.newTable(stringCol("Name", "Fred")); + + handleDelayedRefresh(() -> inputTableUpdater.delete(delete), kabut); + assertTableEquals(input.where("Name != `Fred`"), kabut); + } + + + @Test + public void testAddBack() throws Exception { + final Table input = TableTools.newTable(stringCol("Name"), stringCol("Employer")); + + final KeyedArrayBackedInputTable kabut = KeyedArrayBackedInputTable.make(input, "Name"); + final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); + final Table validatorResult = validator.getResultTable(); + final FailureListener failureListener = new FailureListener(); + validatorResult.addUpdateListener(failureListener); + + assertTableEquals(input, kabut); + + final InputTableUpdater inputTableUpdater = (InputTableUpdater) kabut.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); + TestCase.assertNotNull(inputTableUpdater); + + final Table input2 = + TableTools.newTable(stringCol("Name", "George"), stringCol("Employer", "Spacely Sprockets")); + + handleDelayedRefresh(() -> inputTableUpdater.add(input2), kabut); + assertTableEquals(input2, kabut); + + handleDelayedRefresh(() -> inputTableUpdater.delete(input2.view("Name")), kabut); + assertTableEquals(input, kabut); + + handleDelayedRefresh(() -> inputTableUpdater.add(input2), kabut); + assertTableEquals(input2, kabut); + } + + public static void handleDelayedRefresh(final ThrowingRunnable action, + final BaseArrayBackedInputTable... 
tables) throws Exception { + final Thread refreshThread; + final CountDownLatch gate = new CountDownLatch(tables.length); + + Arrays.stream(tables).forEach(t -> t.setOnPendingChange(gate::countDown)); + try { + final ControlledUpdateGraph updateGraph = ExecutionContext.getContext().getUpdateGraph().cast(); + refreshThread = new Thread(() -> { + // If this unexpected interruption happens, the test thread may hang in action.run() + // indefinitely. Best to hope it's already queued the pending action and proceed with run. + updateGraph.runWithinUnitTestCycle(() -> { + try { + gate.await(); + } catch (InterruptedException ignored) { + // If this unexpected interruption happens, the test thread may hang in action.run() + // indefinitely. Best to hope it's already queued the pending action and proceed with run. + } + Arrays.stream(tables).forEach(BaseArrayBackedInputTable::run); + }); + }); + + refreshThread.start(); + action.run(); + } finally { + Arrays.stream(tables).forEach(t -> t.setOnPendingChange(null)); + } + try { + refreshThread.join(); + } catch (InterruptedException e) { + throw new UncheckedDeephavenException( + "Interrupted unexpectedly while waiting for run cycle to complete", e); + } + } +} diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestKeyedArrayBackedMutableTable.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestKeyedArrayBackedMutableTable.java deleted file mode 100644 index a211071cbe5..00000000000 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/util/TestKeyedArrayBackedMutableTable.java +++ /dev/null @@ -1,333 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.engine.table.impl.util; - -import io.deephaven.UncheckedDeephavenException; -import io.deephaven.base.SleepUtil; -import io.deephaven.datastructures.util.CollectionUtil; -import io.deephaven.engine.context.ExecutionContext; -import io.deephaven.engine.table.Table; -import io.deephaven.engine.table.impl.FailureListener; -import io.deephaven.engine.table.impl.TableUpdateValidator; -import io.deephaven.engine.testutil.ControlledUpdateGraph; -import io.deephaven.engine.testutil.junit4.EngineCleanup; -import io.deephaven.engine.util.TableTools; -import io.deephaven.engine.util.config.InputTableStatusListener; -import io.deephaven.engine.util.config.MutableInputTable; -import io.deephaven.util.function.ThrowingRunnable; -import junit.framework.TestCase; -import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; -import org.junit.Rule; -import org.junit.Test; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Map; -import java.util.concurrent.CountDownLatch; - -import static io.deephaven.engine.testutil.TstUtils.assertTableEquals; -import static io.deephaven.engine.util.TableTools.showWithRowSet; -import static io.deephaven.engine.util.TableTools.stringCol; - -public class TestKeyedArrayBackedMutableTable { - - @Rule - public final EngineCleanup liveTableTestCase = new EngineCleanup(); - - @Test - public void testSimple() throws Exception { - final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), - stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso")); - - final KeyedArrayBackedMutableTable kabut = KeyedArrayBackedMutableTable.make(input, "Name"); - final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); - final Table validatorResult = validator.getResultTable(); - 
final FailureListener failureListener = new FailureListener(); - validatorResult.addUpdateListener(failureListener); - - assertTableEquals(input, kabut); - - final MutableInputTable mutableInputTable = (MutableInputTable) kabut.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - TestCase.assertNotNull(mutableInputTable); - - final Table input2 = TableTools.newTable(stringCol("Name", "Randy"), stringCol("Employer", "USGS")); - - handleDelayedRefresh(() -> mutableInputTable.add(input2), kabut); - assertTableEquals(TableTools.merge(input, input2), kabut); - - final Table input3 = TableTools.newTable(stringCol("Name", "Randy"), stringCol("Employer", "Tegridy")); - handleDelayedRefresh(() -> mutableInputTable.add(input3), kabut); - assertTableEquals(TableTools.merge(input, input3), kabut); - - - final Table input4 = TableTools.newTable(stringCol("Name", "George"), stringCol("Employer", "Cogswell")); - handleDelayedRefresh(() -> mutableInputTable.add(input4), kabut); - showWithRowSet(kabut); - - assertTableEquals(TableTools.merge(input, input3, input4).lastBy("Name"), kabut); - - final Table input5 = - TableTools.newTable(stringCol("Name", "George"), stringCol("Employer", "Spacely Sprockets")); - handleDelayedRefresh(() -> mutableInputTable.add(input5), kabut); - showWithRowSet(kabut); - - assertTableEquals(TableTools.merge(input, input3, input4, input5).lastBy("Name"), kabut); - - final long sizeBeforeDelete = kabut.size(); - System.out.println("KABUT.rowSet before delete: " + kabut.getRowSet()); - final Table delete1 = TableTools.newTable(stringCol("Name", "Earl")); - handleDelayedRefresh(() -> mutableInputTable.delete(delete1), kabut); - System.out.println("KABUT.rowSet after delete: " + kabut.getRowSet()); - final long sizeAfterDelete = kabut.size(); - TestCase.assertEquals(sizeBeforeDelete - 1, sizeAfterDelete); - - showWithRowSet(kabut); - - final Table expected = TableTools.merge( - TableTools.merge(input, input3, input4, input5).update("Deleted=false"), - delete1.update("Employer=(String)null", "Deleted=true")) - .lastBy("Name").where("Deleted=false").dropColumns("Deleted"); - showWithRowSet(expected); - - assertTableEquals(expected, kabut); - } - - @Test - public void testAppendOnly() throws Exception { - final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), - stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso")); - - final AppendOnlyArrayBackedMutableTable aoabmt = AppendOnlyArrayBackedMutableTable.make(input); - final TableUpdateValidator validator = TableUpdateValidator.make("aoabmt", aoabmt); - final Table validatorResult = validator.getResultTable(); - final FailureListener failureListener = new FailureListener(); - validatorResult.addUpdateListener(failureListener); - - assertTableEquals(input, aoabmt); - - final MutableInputTable mutableInputTable = - (MutableInputTable) aoabmt.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - TestCase.assertNotNull(mutableInputTable); - - final Table input2 = - TableTools.newTable(stringCol("Name", "Randy", "George"), stringCol("Employer", "USGS", "Cogswell")); - - handleDelayedRefresh(() -> mutableInputTable.add(input2), aoabmt); - assertTableEquals(TableTools.merge(input, input2), aoabmt); - } - - @Test - public void testFilteredAndSorted() throws Exception { - final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), - stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso")); - - final KeyedArrayBackedMutableTable kabut = 
KeyedArrayBackedMutableTable.make(input, "Name"); - final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); - final Table validatorResult = validator.getResultTable(); - final FailureListener failureListener = new FailureListener(); - validatorResult.addUpdateListener(failureListener); - - assertTableEquals(input, kabut); - - final Table fs = kabut.where("Name.length() == 4").sort("Name"); - - final MutableInputTable mutableInputTable = (MutableInputTable) fs.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - TestCase.assertNotNull(mutableInputTable); - - final Table delete = TableTools.newTable(stringCol("Name", "Fred")); - - handleDelayedRefresh(() -> mutableInputTable.delete(delete), kabut); - assertTableEquals(input.where("Name != `Fred`"), kabut); - } - - @Test - public void testAddRows() throws Throwable { - final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), - stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso")); - - final KeyedArrayBackedMutableTable kabut = KeyedArrayBackedMutableTable.make(input, "Name"); - final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); - final Table validatorResult = validator.getResultTable(); - final FailureListener failureListener = new FailureListener(); - validatorResult.addUpdateListener(failureListener); - - assertTableEquals(input, kabut); - - final MutableInputTable mutableInputTable = (MutableInputTable) kabut.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - TestCase.assertNotNull(mutableInputTable); - - final Table input2 = TableTools.newTable(stringCol("Name", "Randy"), stringCol("Employer", "USGS")); - - final Map randyMap = - CollectionUtil.mapFromArray(String.class, Object.class, "Name", "Randy", "Employer", "USGS"); - final TestStatusListener listener = new TestStatusListener(); - mutableInputTable.addRow(randyMap, true, listener); - SleepUtil.sleep(100); - listener.assertIncomplete(); - final ControlledUpdateGraph updateGraph = ExecutionContext.getContext().getUpdateGraph().cast(); - updateGraph.runWithinUnitTestCycle(kabut::run); - assertTableEquals(TableTools.merge(input, input2), kabut); - listener.waitForCompletion(); - listener.assertSuccess(); - - // TODO: should we throw the exception from the initial palce, should we defer edit checking to the UGP which - // would make it consistent, but also slower to produce errors and uglier for reporting? 
- final TestStatusListener listener2 = new TestStatusListener(); - final Map randyMap2 = - CollectionUtil.mapFromArray(String.class, Object.class, "Name", "Randy", "Employer", "Tegridy"); - mutableInputTable.addRow(randyMap2, false, listener2); - SleepUtil.sleep(100); - listener2.assertIncomplete(); - updateGraph.runWithinUnitTestCycle(kabut::run); - assertTableEquals(TableTools.merge(input, input2), kabut); - listener2.waitForCompletion(); - listener2.assertFailure(IllegalArgumentException.class, "Can not edit keys Randy"); - } - - @Test - public void testAddBack() throws Exception { - final Table input = TableTools.newTable(stringCol("Name"), stringCol("Employer")); - - final KeyedArrayBackedMutableTable kabut = KeyedArrayBackedMutableTable.make(input, "Name"); - final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); - final Table validatorResult = validator.getResultTable(); - final FailureListener failureListener = new FailureListener(); - validatorResult.addUpdateListener(failureListener); - - assertTableEquals(input, kabut); - - final MutableInputTable mutableInputTable = (MutableInputTable) kabut.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - TestCase.assertNotNull(mutableInputTable); - - final Table input2 = - TableTools.newTable(stringCol("Name", "George"), stringCol("Employer", "Spacely Sprockets")); - - handleDelayedRefresh(() -> mutableInputTable.add(input2), kabut); - assertTableEquals(input2, kabut); - - handleDelayedRefresh(() -> mutableInputTable.delete(input2.view("Name")), kabut); - assertTableEquals(input, kabut); - - handleDelayedRefresh(() -> mutableInputTable.add(input2), kabut); - assertTableEquals(input2, kabut); - } - - @Test - public void testSetRows() { - final Table input = TableTools.newTable(stringCol("Name", "Fred", "George", "Earl"), - stringCol("Employer", "Slate Rock and Gravel", "Spacely Sprockets", "Wesayso"), - stringCol("Spouse", "Wilma", "Jane", "Fran")); - - final KeyedArrayBackedMutableTable kabut = KeyedArrayBackedMutableTable.make(input, "Name"); - final TableUpdateValidator validator = TableUpdateValidator.make("kabut", kabut); - final Table validatorResult = validator.getResultTable(); - final FailureListener failureListener = new FailureListener(); - validatorResult.addUpdateListener(failureListener); - - assertTableEquals(input, kabut); - - final MutableInputTable mutableInputTable = (MutableInputTable) kabut.getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - TestCase.assertNotNull(mutableInputTable); - - final Table defaultValues = input.where("Name=`George`"); - final Table ex2 = TableTools.newTable(stringCol("Name", "George"), stringCol("Employer", "Cogswell"), - stringCol("Spouse", "Jane")); - - final Map cogMap = - CollectionUtil.mapFromArray(String.class, Object.class, "Name", "George", "Employer", "Cogswell"); - mutableInputTable.setRow(defaultValues, 0, cogMap); - SleepUtil.sleep(100); - final ControlledUpdateGraph updateGraph = ExecutionContext.getContext().getUpdateGraph().cast(); - updateGraph.runWithinUnitTestCycle(kabut::run); - assertTableEquals(TableTools.merge(input, ex2).lastBy("Name"), kabut); - } - - private static class TestStatusListener implements InputTableStatusListener { - boolean success = false; - Throwable error = null; - - @Override - public synchronized void onError(Throwable t) { - if (success || error != null) { - throw new IllegalStateException("Can not complete listener twice!"); - } - error = t; - notifyAll(); - } - - @Override - public synchronized void onSuccess() { - if (success || error 
!= null) { - throw new IllegalStateException("Can not complete listener twice!"); - } - success = true; - notifyAll(); - } - - private synchronized void assertIncomplete() { - TestCase.assertFalse(success); - TestCase.assertNull(error); - } - - private void waitForCompletion() throws InterruptedException { - synchronized (this) { - while (!success && error == null) { - wait(); - } - } - } - - private synchronized void assertSuccess() throws Throwable { - if (!success) { - throw error; - } - } - - private synchronized void assertFailure(@NotNull final Class errorClass, - @Nullable final String errorMessage) { - TestCase.assertFalse(success); - TestCase.assertNotNull(error); - TestCase.assertTrue(errorClass.isAssignableFrom(error.getClass())); - if (errorMessage != null) { - TestCase.assertEquals(errorMessage, error.getMessage()); - } - } - } - - public static void handleDelayedRefresh(final ThrowingRunnable action, - final BaseArrayBackedMutableTable... tables) throws Exception { - final Thread refreshThread; - final CountDownLatch gate = new CountDownLatch(tables.length); - - Arrays.stream(tables).forEach(t -> t.setOnPendingChange(gate::countDown)); - try { - final ControlledUpdateGraph updateGraph = ExecutionContext.getContext().getUpdateGraph().cast(); - refreshThread = new Thread(() -> { - // If this unexpected interruption happens, the test thread may hang in action.run() - // indefinitely. Best to hope it's already queued the pending action and proceed with run. - updateGraph.runWithinUnitTestCycle(() -> { - try { - gate.await(); - } catch (InterruptedException ignored) { - // If this unexpected interruption happens, the test thread may hang in action.run() - // indefinitely. Best to hope it's already queued the pending action and proceed with run. 
- } - Arrays.stream(tables).forEach(BaseArrayBackedMutableTable::run); - }); - }); - - refreshThread.start(); - action.run(); - } finally { - Arrays.stream(tables).forEach(t -> t.setOnPendingChange(null)); - } - try { - refreshThread.join(); - } catch (InterruptedException e) { - throw new UncheckedDeephavenException( - "Interrupted unexpectedly while waiting for run cycle to complete", e); - } - } -} diff --git a/extensions/barrage/src/main/java/io/deephaven/extensions/barrage/util/BarrageUtil.java b/extensions/barrage/src/main/java/io/deephaven/extensions/barrage/util/BarrageUtil.java index 298e97f988f..3b4a1c11423 100755 --- a/extensions/barrage/src/main/java/io/deephaven/extensions/barrage/util/BarrageUtil.java +++ b/extensions/barrage/src/main/java/io/deephaven/extensions/barrage/util/BarrageUtil.java @@ -36,7 +36,7 @@ import io.deephaven.proto.util.Exceptions; import io.deephaven.api.util.NameValidator; import io.deephaven.engine.util.ColumnFormatting; -import io.deephaven.engine.util.config.MutableInputTable; +import io.deephaven.engine.util.input.InputTableUpdater; import io.deephaven.chunk.ChunkType; import io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse; import io.deephaven.util.type.TypeUtils; @@ -148,9 +148,10 @@ public static int makeTableSchemaPayload( final Map schemaMetadata = attributesToMetadata(attributes); final Map descriptions = GridAttributes.getColumnDescriptions(attributes); - final MutableInputTable inputTable = (MutableInputTable) attributes.get(Table.INPUT_TABLE_ATTRIBUTE); + final InputTableUpdater inputTableUpdater = (InputTableUpdater) attributes.get(Table.INPUT_TABLE_ATTRIBUTE); final List fields = columnDefinitionsToFields( - descriptions, inputTable, tableDefinition, tableDefinition.getColumns(), ignored -> new HashMap<>(), + descriptions, inputTableUpdater, tableDefinition, tableDefinition.getColumns(), + ignored -> new HashMap<>(), attributes, options.columnsAsList()) .collect(Collectors.toList()); @@ -180,12 +181,12 @@ public static Map attributesToMetadata(@NotNull final Map columnDefinitionsToFields( @NotNull final Map columnDescriptions, - @Nullable final MutableInputTable inputTable, + @Nullable final InputTableUpdater inputTableUpdater, @NotNull final TableDefinition tableDefinition, @NotNull final Collection> columnDefinitions, @NotNull final Function> fieldMetadataFactory, @NotNull final Map attributes) { - return columnDefinitionsToFields(columnDescriptions, inputTable, tableDefinition, columnDefinitions, + return columnDefinitionsToFields(columnDescriptions, inputTableUpdater, tableDefinition, columnDefinitions, fieldMetadataFactory, attributes, false); @@ -197,7 +198,7 @@ private static boolean isDataTypeSortable(final Class dataType) { public static Stream columnDefinitionsToFields( @NotNull final Map columnDescriptions, - @Nullable final MutableInputTable inputTable, + @Nullable final InputTableUpdater inputTableUpdater, @NotNull final TableDefinition tableDefinition, @NotNull final Collection> columnDefinitions, @NotNull final Function> fieldMetadataFactory, @@ -274,8 +275,8 @@ public static Stream columnDefinitionsToFields( if (columnDescription != null) { putMetadata(metadata, "description", columnDescription); } - if (inputTable != null) { - putMetadata(metadata, "inputtable.isKey", inputTable.getKeyNames().contains(name) + ""); + if (inputTableUpdater != null) { + putMetadata(metadata, "inputtable.isKey", inputTableUpdater.getKeyNames().contains(name) + ""); } if (columnsAsList) { diff --git 
a/py/server/deephaven/table_factory.py b/py/server/deephaven/table_factory.py index 033d4c7aec2..5dc5e934f17 100644 --- a/py/server/deephaven/table_factory.py +++ b/py/server/deephaven/table_factory.py @@ -24,10 +24,9 @@ _JTableFactory = jpy.get_type("io.deephaven.engine.table.TableFactory") _JTableTools = jpy.get_type("io.deephaven.engine.util.TableTools") _JDynamicTableWriter = jpy.get_type("io.deephaven.engine.table.impl.util.DynamicTableWriter") -_JMutableInputTable = jpy.get_type("io.deephaven.engine.util.config.MutableInputTable") -_JAppendOnlyArrayBackedMutableTable = jpy.get_type( - "io.deephaven.engine.table.impl.util.AppendOnlyArrayBackedMutableTable") -_JKeyedArrayBackedMutableTable = jpy.get_type("io.deephaven.engine.table.impl.util.KeyedArrayBackedMutableTable") +_JAppendOnlyArrayBackedInputTable = jpy.get_type( + "io.deephaven.engine.table.impl.util.AppendOnlyArrayBackedInputTable") +_JKeyedArrayBackedInputTable = jpy.get_type("io.deephaven.engine.table.impl.util.KeyedArrayBackedInputTable") _JTableDefinition = jpy.get_type("io.deephaven.engine.table.TableDefinition") _JTable = jpy.get_type("io.deephaven.engine.table.Table") _J_INPUT_TABLE_ATTRIBUTE = _JTable.INPUT_TABLE_ATTRIBUTE @@ -257,9 +256,9 @@ def __init__(self, col_defs: Dict[str, DType] = None, init_table: Table = None, key_cols = to_sequence(key_cols) if key_cols: - super().__init__(_JKeyedArrayBackedMutableTable.make(j_arg_1, key_cols)) + super().__init__(_JKeyedArrayBackedInputTable.make(j_arg_1, key_cols)) else: - super().__init__(_JAppendOnlyArrayBackedMutableTable.make(j_arg_1)) + super().__init__(_JAppendOnlyArrayBackedInputTable.make(j_arg_1)) self.j_input_table = self.j_table.getAttribute(_J_INPUT_TABLE_ATTRIBUTE) self.key_columns = key_cols except Exception as e: diff --git a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java index 673e39e35b1..1436d7b6af4 100644 --- a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java @@ -10,7 +10,7 @@ import io.deephaven.engine.table.TableDefinition; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; -import io.deephaven.engine.util.config.MutableInputTable; +import io.deephaven.engine.util.input.InputTableUpdater; import io.deephaven.extensions.barrage.util.GrpcUtil; import io.deephaven.internal.log.LoggerFactory; import io.deephaven.io.logger.Logger; @@ -74,13 +74,13 @@ public void addTableToInputTable( .onError(responseObserver) .require(targetTable, tableToAddExport) .submit(() -> { - Object inputTable = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - if (!(inputTable instanceof MutableInputTable)) { + Object inputTableAsObject = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE); + if (!(inputTableAsObject instanceof InputTableUpdater)) { throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "Table can't be used as an input table"); } - MutableInputTable mutableInputTable = (MutableInputTable) inputTable; + final InputTableUpdater inputTableUpdater = (InputTableUpdater) inputTableAsObject; Table tableToAdd = tableToAddExport.get(); authWiring.checkPermissionAddTableToInputTable( @@ -89,7 +89,7 @@ public void addTableToInputTable( // validate that the columns are compatible try { - 
diff --git a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java
index 673e39e35b1..1436d7b6af4 100644
--- a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java
+++ b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java
@@ -10,7 +10,7 @@
 import io.deephaven.engine.table.TableDefinition;
 import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget;
 import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder;
-import io.deephaven.engine.util.config.MutableInputTable;
+import io.deephaven.engine.util.input.InputTableUpdater;
 import io.deephaven.extensions.barrage.util.GrpcUtil;
 import io.deephaven.internal.log.LoggerFactory;
 import io.deephaven.io.logger.Logger;
@@ -74,13 +74,13 @@ public void addTableToInputTable(
                 .onError(responseObserver)
                 .require(targetTable, tableToAddExport)
                 .submit(() -> {
-                    Object inputTable = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE);
-                    if (!(inputTable instanceof MutableInputTable)) {
+                    Object inputTableAsObject = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE);
+                    if (!(inputTableAsObject instanceof InputTableUpdater)) {
                         throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT,
                                 "Table can't be used as an input table");
                     }
-                    MutableInputTable mutableInputTable = (MutableInputTable) inputTable;
+                    final InputTableUpdater inputTableUpdater = (InputTableUpdater) inputTableAsObject;
                     Table tableToAdd = tableToAddExport.get();
                     authWiring.checkPermissionAddTableToInputTable(
@@ -89,7 +89,7 @@ public void addTableToInputTable(
                     // validate that the columns are compatible
                     try {
-                        mutableInputTable.validateAddOrModify(tableToAdd);
+                        inputTableUpdater.validateAddOrModify(tableToAdd);
                     } catch (TableDefinition.IncompatibleTableDefinitionException exception) {
                         throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT,
                                 "Provided tables's columns are not compatible: " + exception.getMessage());
@@ -97,7 +97,7 @@ public void addTableToInputTable(
                     // actually add the tables contents
                     try {
-                        mutableInputTable.add(tableToAdd);
+                        inputTableUpdater.add(tableToAdd);
                         GrpcUtil.safelyComplete(responseObserver, AddTableResponse.getDefaultInstance());
                     } catch (IOException ioException) {
                         throw Exceptions.statusRuntimeException(Code.DATA_LOSS,
@@ -132,13 +132,13 @@ public void deleteTableFromInputTable(
                 .onError(responseObserver)
                 .require(targetTable, tableToRemoveExport)
                 .submit(() -> {
-                    Object inputTable = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE);
-                    if (!(inputTable instanceof MutableInputTable)) {
+                    Object inputTableAsObject = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE);
+                    if (!(inputTableAsObject instanceof InputTableUpdater)) {
                         throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT,
                                 "Table can't be used as an input table");
                     }
-                    MutableInputTable mutableInputTable = (MutableInputTable) inputTable;
+                    final InputTableUpdater inputTableUpdater = (InputTableUpdater) inputTableAsObject;
                     Table tableToRemove = tableToRemoveExport.get();
                     authWiring.checkPermissionDeleteTableFromInputTable(
@@ -147,7 +147,7 @@ public void deleteTableFromInputTable(
                     // validate that the columns are compatible
                     try {
-                        mutableInputTable.validateDelete(tableToRemove);
+                        inputTableUpdater.validateDelete(tableToRemove);
                     } catch (TableDefinition.IncompatibleTableDefinitionException exception) {
                         throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT,
                                 "Provided tables's columns are not compatible: " + exception.getMessage());
@@ -158,7 +158,7 @@ public void deleteTableFromInputTable(
                     // actually delete the table's contents
                     try {
-                        mutableInputTable.delete(tableToRemove);
+                        inputTableUpdater.delete(tableToRemove);
                         GrpcUtil.safelyComplete(responseObserver, DeleteTableResponse.getDefaultInstance());
                     } catch (IOException ioException) {
                         throw Exceptions.statusRuntimeException(Code.DATA_LOSS,
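Note: both gRPC handlers follow the same validate-then-mutate pattern against the InputTableUpdater attribute. A condensed sketch of the add path is below, with the gRPC status mapping elided; the helper name InputTableAddSketch is illustrative and not part of this patch.

import java.io.IOException;

import io.deephaven.engine.table.Table;
import io.deephaven.engine.table.TableDefinition;
import io.deephaven.engine.util.input.InputTableUpdater;

public final class InputTableAddSketch {
    public static void addToInputTable(final InputTableUpdater inputTableUpdater, final Table tableToAdd)
            throws IOException {
        try {
            // Reject incompatible schemas before mutating anything; the service
            // reports this case as INVALID_ARGUMENT
            inputTableUpdater.validateAddOrModify(tableToAdd);
        } catch (TableDefinition.IncompatibleTableDefinitionException exception) {
            throw new IllegalArgumentException(
                    "Provided table's columns are not compatible: " + exception.getMessage(), exception);
        }
        // An IOException from the underlying storage is reported as DATA_LOSS
        inputTableUpdater.add(tableToAdd);
    }
}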
diff --git a/server/src/main/java/io/deephaven/server/table/ops/CreateInputTableGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/ops/CreateInputTableGrpcImpl.java
index 717542465a8..981b7ea76a6 100644
--- a/server/src/main/java/io/deephaven/server/table/ops/CreateInputTableGrpcImpl.java
+++ b/server/src/main/java/io/deephaven/server/table/ops/CreateInputTableGrpcImpl.java
@@ -8,8 +8,8 @@
 import io.deephaven.datastructures.util.CollectionUtil;
 import io.deephaven.engine.table.Table;
 import io.deephaven.engine.table.TableDefinition;
-import io.deephaven.engine.table.impl.util.AppendOnlyArrayBackedMutableTable;
-import io.deephaven.engine.table.impl.util.KeyedArrayBackedMutableTable;
+import io.deephaven.engine.table.impl.util.AppendOnlyArrayBackedInputTable;
+import io.deephaven.engine.table.impl.util.KeyedArrayBackedInputTable;
 import io.deephaven.extensions.barrage.util.BarrageUtil;
 import io.deephaven.proto.backplane.grpc.BatchTableRequest;
 import io.deephaven.proto.backplane.grpc.CreateInputTableRequest;
@@ -71,9 +71,9 @@ public Table create(final CreateInputTableRequest request,
         switch (request.getKind().getKindCase()) {
             case IN_MEMORY_APPEND_ONLY:
-                return AppendOnlyArrayBackedMutableTable.make(tableDefinitionFromSchema);
+                return AppendOnlyArrayBackedInputTable.make(tableDefinitionFromSchema);
             case IN_MEMORY_KEY_BACKED:
-                return KeyedArrayBackedMutableTable.make(tableDefinitionFromSchema,
+                return KeyedArrayBackedInputTable.make(tableDefinitionFromSchema,
                         request.getKind().getInMemoryKeyBacked().getKeyColumnsList()
                                 .toArray(CollectionUtil.ZERO_LENGTH_STRING_ARRAY));
             case KIND_NOT_SET:

From c29aaf8bbe9c6592c304ecda2004186595158a10 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Mon, 11 Dec 2023 10:01:31 -0500
Subject: [PATCH 10/25] Update web version 0.56.0 (#4930)

Release notes https://github.com/deephaven/web-client-ui/releases/tag/v0.56.0

# [0.56.0](https://github.com/deephaven/web-client-ui/compare/v0.55.0...v0.56.0) (2023-12-11)

### Bug Fixes

* add right margin to