From 2450cba567ff7af1591d789b4ebeaef914111454 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Fri, 3 Nov 2023 19:21:48 -0600 Subject: [PATCH 01/31] QueryPerformanceRecorder: Add TableService#Batch Support --- .../table/impl/SelectOrUpdateListener.java | 8 +- .../table/impl/perf/BasePerformanceEntry.java | 2 +- .../impl/perf/QueryPerformanceNugget.java | 135 ++++++++++-------- .../impl/perf/QueryPerformanceRecorder.java | 84 +++++++++-- .../engine/table/impl/perf/QueryState.java | 2 +- .../engine/table/impl/updateby/UpdateBy.java | 8 +- .../engine/table/impl/util/EngineMetrics.java | 34 +++++ ...erationInitializationPoolJobScheduler.java | 4 +- .../util/QueryOperationPerformanceImpl.java | 2 +- ...ryOperationPerformanceStreamPublisher.java | 50 +++---- .../table/impl/util/QueryPerformanceImpl.java | 2 +- .../util/QueryPerformanceStreamPublisher.java | 47 +++--- .../impl/util/UpdateGraphJobScheduler.java | 4 +- .../QueryPerformanceLogLogger.java | 4 +- .../main/resources/defaultPackageFilters.qpr | 10 +- .../barrage/BarrageMessageProducer.java | 2 +- .../server/session/SessionState.java | 73 ++++++---- .../table/ops/TableServiceGrpcImpl.java | 125 +++++++++++----- 18 files changed, 379 insertions(+), 217 deletions(-) diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/SelectOrUpdateListener.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/SelectOrUpdateListener.java index 724fe1ff0cc..1cab4f19722 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/SelectOrUpdateListener.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/SelectOrUpdateListener.java @@ -134,11 +134,9 @@ private void completionRoutine(TableUpdate upstream, JobScheduler jobScheduler, getUpdateGraph().addNotification(new TerminalNotification() { @Override public void run() { - synchronized (accumulated) { - final PerformanceEntry entry = getEntry(); - if (entry != null) { - entry.accumulate(accumulated); - } + final 
PerformanceEntry entry = getEntry(); + if (entry != null) { + entry.accumulate(accumulated); } } }); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java index d63ce199cac..a838a7f63bc 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java @@ -114,7 +114,7 @@ LogOutput appendStart(LogOutput logOutput) { .append(", startPoolAllocatedBytes=").append(startPoolAllocatedBytes); } - public void accumulate(BasePerformanceEntry entry) { + public synchronized void accumulate(BasePerformanceEntry entry) { this.intervalUsageNanos += entry.intervalUsageNanos; this.intervalCpuNanos = plus(this.intervalCpuNanos, entry.intervalCpuNanos); this.intervalUserCpuNanos = plus(this.intervalUserCpuNanos, entry.intervalUserCpuNanos); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java index 76761e8b561..5de006678c6 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java @@ -8,11 +8,10 @@ import io.deephaven.time.DateTimeUtils; import io.deephaven.engine.table.impl.util.RuntimeMemory; import io.deephaven.util.QueryConstants; -import io.deephaven.util.profiling.ThreadProfiler; +import io.deephaven.util.SafeCloseable; import java.io.Serializable; -import static io.deephaven.engine.table.impl.lang.QueryLanguageFunctionUtils.minus; import static io.deephaven.util.QueryConstants.*; /** @@ -20,7 +19,7 @@ * intimate relationship with another class, {@link QueryPerformanceRecorder}. 
Changes to either should take this lack * of encapsulation into account. */ -public class QueryPerformanceNugget implements Serializable, AutoCloseable { +public class QueryPerformanceNugget extends BasePerformanceEntry implements Serializable, SafeCloseable { private static final QueryPerformanceLogThreshold LOG_THRESHOLD = new QueryPerformanceLogThreshold("", 1_000_000); private static final QueryPerformanceLogThreshold UNINSTRUMENTED_LOG_THRESHOLD = new QueryPerformanceLogThreshold("Uninstrumented", 1_000_000_000); @@ -34,28 +33,23 @@ public class QueryPerformanceNugget implements Serializable, AutoCloseable { static final QueryPerformanceNugget DUMMY_NUGGET = new QueryPerformanceNugget(); private final int evaluationNumber; + private final int parentEvaluationNumber; + private final int operationNumber; + private final int parentOperationNumber; private final int depth; private final String description; private final boolean isUser; + private final boolean isQueryLevel; private final long inputSize; private final AuthContext authContext; private final String callerLine; private final long startClockTime; + private long endClockTime; - private final long startTimeNanos; - private final long startCpuNanos; - private final long startUserCpuNanos; - private final long startAllocatedBytes; - private final long startPoolAllocatedBytes; private volatile QueryState state; - private Long totalTimeNanos; - private long diffCpuNanos; - private long diffUserCpuNanos; - private long diffAllocatedBytes; - private long diffPoolAllocatedBytes; private final RuntimeMemory.Sample startMemorySample; private final RuntimeMemory.Sample endMemorySample; @@ -74,24 +68,41 @@ public class QueryPerformanceNugget implements Serializable, AutoCloseable { * @param evaluationNumber A unique identifier for the query evaluation that triggered this nugget creation * @param description The operation description */ - QueryPerformanceNugget(final int evaluationNumber, final String description) { 
- this(evaluationNumber, NULL_INT, description, false, NULL_LONG); + QueryPerformanceNugget(final int evaluationNumber, final int parentEvaluationNumber, final String description) { + this(evaluationNumber, parentEvaluationNumber, NULL_INT, NULL_INT, NULL_INT, description, false, true, + NULL_LONG); } /** * Full constructor for nuggets. * * @param evaluationNumber A unique identifier for the query evaluation that triggered this nugget creation + * @param parentEvaluationNumber The unique identifier of the parent evaluation or {@link QueryConstants#NULL_INT} + * if none + * @param operationNumber A unique identifier for the operation within a query evaluation + * @param parentOperationNumber The unique identifier of the parent operation or {@link QueryConstants#NULL_INT} if + * none * @param depth Depth in the evaluation chain for the respective operation * @param description The operation description * @param isUser Whether this is a "user" nugget or one created by the system * @param inputSize The size of the input data */ - QueryPerformanceNugget(final int evaluationNumber, final int depth, - final String description, final boolean isUser, final long inputSize) { + QueryPerformanceNugget( + final int evaluationNumber, + final int parentEvaluationNumber, + final int operationNumber, + final int parentOperationNumber, + final int depth, + final String description, + final boolean isUser, + final boolean isQueryLevel, + final long inputSize) { startMemorySample = new RuntimeMemory.Sample(); endMemorySample = new RuntimeMemory.Sample(); this.evaluationNumber = evaluationNumber; + this.parentEvaluationNumber = parentEvaluationNumber; + this.operationNumber = operationNumber; + this.parentOperationNumber = parentOperationNumber; this.depth = depth; if (description.length() > MAX_DESCRIPTION_LENGTH) { this.description = description.substring(0, MAX_DESCRIPTION_LENGTH) + " ... 
[truncated " @@ -100,6 +111,7 @@ public class QueryPerformanceNugget implements Serializable, AutoCloseable { this.description = description; } this.isUser = isUser; + this.isQueryLevel = isQueryLevel; this.inputSize = inputSize; authContext = ExecutionContext.getContext().getAuthContext(); @@ -108,14 +120,8 @@ public class QueryPerformanceNugget implements Serializable, AutoCloseable { final RuntimeMemory runtimeMemory = RuntimeMemory.getInstance(); runtimeMemory.read(startMemorySample); - startAllocatedBytes = ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(); - startPoolAllocatedBytes = QueryPerformanceRecorder.getPoolAllocatedBytesForCurrentThread(); - startClockTime = System.currentTimeMillis(); - startTimeNanos = System.nanoTime(); - - startCpuNanos = ThreadProfiler.DEFAULT.getCurrentThreadCpuTime(); - startUserCpuNanos = ThreadProfiler.DEFAULT.getCurrentThreadUserTime(); + onBaseEntryStart(); state = QueryState.RUNNING; shouldLogMeAndStackParents = false; @@ -128,22 +134,19 @@ private QueryPerformanceNugget() { startMemorySample = null; endMemorySample = null; evaluationNumber = NULL_INT; + parentEvaluationNumber = NULL_INT; + operationNumber = NULL_INT; + parentOperationNumber = NULL_INT; depth = 0; description = null; isUser = false; + isQueryLevel = false; inputSize = NULL_LONG; authContext = null; callerLine = null; - startAllocatedBytes = NULL_LONG; - startPoolAllocatedBytes = NULL_LONG; - startClockTime = NULL_LONG; - startTimeNanos = NULL_LONG; - - startCpuNanos = NULL_LONG; - startUserCpuNanos = NULL_LONG; basePerformanceEntry = null; @@ -190,8 +193,6 @@ public boolean abort(final QueryPerformanceRecorder recorder) { * @return If the nugget passes criteria for logging. 
*/ private boolean close(final QueryState closingState, final QueryPerformanceRecorder recorderToNotify) { - final long currentThreadUserTime = ThreadProfiler.DEFAULT.getCurrentThreadUserTime(); - final long currentThreadCpuTime = ThreadProfiler.DEFAULT.getCurrentThreadCpuTime(); if (state != QueryState.RUNNING) { return false; } @@ -201,24 +202,14 @@ private boolean close(final QueryState closingState, final QueryPerformanceRecor return false; } - diffUserCpuNanos = minus(currentThreadUserTime, startUserCpuNanos); - diffCpuNanos = minus(currentThreadCpuTime, startCpuNanos); - - totalTimeNanos = System.nanoTime() - startTimeNanos; + endClockTime = System.currentTimeMillis(); + onBaseEntryEnd(); final RuntimeMemory runtimeMemory = RuntimeMemory.getInstance(); runtimeMemory.read(endMemorySample); - diffPoolAllocatedBytes = - minus(QueryPerformanceRecorder.getPoolAllocatedBytesForCurrentThread(), startPoolAllocatedBytes); - diffAllocatedBytes = minus(ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(), startAllocatedBytes); - if (basePerformanceEntry != null) { - diffUserCpuNanos += basePerformanceEntry.getIntervalUserCpuNanos(); - diffCpuNanos += basePerformanceEntry.getIntervalCpuNanos(); - - diffAllocatedBytes += basePerformanceEntry.getIntervalAllocatedBytes(); - diffPoolAllocatedBytes += basePerformanceEntry.getIntervalPoolAllocatedBytes(); + accumulate(basePerformanceEntry); } state = closingState; @@ -229,6 +220,7 @@ private boolean close(final QueryState closingState, final QueryPerformanceRecor @Override public String toString() { return evaluationNumber + + ":" + operationNumber + ":" + description + ":" + callerLine; } @@ -237,6 +229,18 @@ public int getEvaluationNumber() { return evaluationNumber; } + public int getParentEvaluationNumber() { + return parentEvaluationNumber; + } + + public int getOperationNumber() { + return operationNumber; + } + + public int getParentOperationNumber() { + return parentOperationNumber; + } + public int getDepth() { 
return depth; } @@ -249,6 +253,10 @@ public boolean isUser() { return isUser; } + public boolean isQueryLevel() { + return isQueryLevel; + } + public boolean isTopLevel() { return depth == 0; } @@ -271,17 +279,25 @@ public String getCallerLine() { /** * @return nanoseconds elapsed, once state != QueryState.RUNNING() has been called. */ - public Long getTotalTimeNanos() { - return totalTimeNanos; + public long getTotalTimeNanos() { + return getIntervalUsageNanos(); } /** - * @return wall clock time in milliseconds from the epoch + * @return wall clock start time in nanoseconds from the epoch */ public long getStartClockTime() { - return startClockTime; + return DateTimeUtils.millisToNanos(startClockTime); } + /** + * @return wall clock end time in nanoseconds from the epoch + */ + public long getEndClockTime() { + return DateTimeUtils.millisToNanos(endClockTime); + } + + /** * Get nanoseconds of CPU time attributed to the instrumented operation. * @@ -289,7 +305,7 @@ public long getStartClockTime() { * if not enabled/supported. */ public long getCpuNanos() { - return diffCpuNanos; + return getIntervalCpuNanos(); } /** @@ -299,7 +315,7 @@ public long getCpuNanos() { * {@link QueryConstants#NULL_LONG} if not enabled/supported. */ public long getUserCpuNanos() { - return diffUserCpuNanos; + return getIntervalUserCpuNanos(); } /** @@ -352,7 +368,7 @@ public long getDiffCollectionTimeNanos() { * {@link QueryConstants#NULL_LONG} if not enabled/supported. */ public long getAllocatedBytes() { - return diffAllocatedBytes; + return getIntervalAllocatedBytes(); } /** @@ -362,7 +378,7 @@ public long getAllocatedBytes() { * {@link QueryConstants#NULL_LONG} if not enabled/supported. 
*/ public long getPoolAllocatedBytes() { - return diffPoolAllocatedBytes; + return getIntervalPoolAllocatedBytes(); } /** @@ -383,17 +399,17 @@ public void setShouldLogMeAndStackParents() { * @return true if this nugget triggers the logging of itself and every other nugget in its stack of nesting * operations. */ - public boolean shouldLogMenAndStackParents() { + public boolean shouldLogMeAndStackParents() { return shouldLogMeAndStackParents; } /** * When we track data from other threads that should be attributed to this operation, we tack extra * BasePerformanceEntry values onto this nugget when it is closed. - * + *

* The CPU time, reads, and allocations are counted against this nugget. Wall clock time is ignored. */ - public void addBaseEntry(BasePerformanceEntry baseEntry) { + public synchronized void addBaseEntry(BasePerformanceEntry baseEntry) { if (this.basePerformanceEntry == null) { this.basePerformanceEntry = baseEntry; } else { @@ -413,11 +429,6 @@ boolean shouldLogNugget(final boolean isUninstrumented) { if (shouldLogMeAndStackParents) { return true; } - // Nuggets will have a null value for total time if they weren't closed for a RUNNING query; this is an abnormal - // condition and the nugget should be logged - if (getTotalTimeNanos() == null) { - return true; - } if (isUninstrumented) { return UNINSTRUMENTED_LOG_THRESHOLD.shouldLog(getTotalTimeNanos()); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java index f92c2b15a98..2739f1e7d2f 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java @@ -40,6 +40,7 @@ public class QueryPerformanceRecorder implements Serializable { private static final long serialVersionUID = 2L; private static final String[] packageFilters; + private volatile boolean hasSubQuery; private QueryPerformanceNugget queryNugget; private final ArrayList operationNuggets = new ArrayList<>(); @@ -91,20 +92,31 @@ public static void resetInstance() { theLocal.remove(); } + private QueryPerformanceRecorder() { + // private default constructor to prevent direct instantiation + } + + /** + * Start a query. + * + * @param description A description for the query. + */ + public void startQuery(final String description) { + startQuery(description, QueryConstants.NULL_INT); + } + /** * Start a query. * * @param description A description for the query. 
- * - * @return a unique evaluation number to identify this query execution. + * @param parentEvaluationNumber The evaluation number of the parent query. */ - public synchronized int startQuery(final String description) { + public synchronized void startQuery(final String description, final int parentEvaluationNumber) { clear(); final int evaluationNumber = queriesProcessed.getAndIncrement(); - queryNugget = new QueryPerformanceNugget(evaluationNumber, description); + queryNugget = new QueryPerformanceNugget(evaluationNumber, parentEvaluationNumber, description); state = QueryState.RUNNING; - startCatchAll(evaluationNumber); - return evaluationNumber; + startCatchAll(); } /** @@ -149,9 +161,35 @@ public synchronized boolean endQuery() { return queryNugget.done(this); } - private void startCatchAll(final int evaluationNumber) { + public synchronized void suspendQuery() { + if (state != QueryState.RUNNING) { + throw new IllegalStateException("Can't suspend a query that isn't running"); + } + + state = QueryState.SUSPENDED; + Assert.neqNull(catchAllNugget, "catchAllNugget"); + stopCatchAll(false); + queryNugget.onBaseEntryEnd(); + } + + public synchronized void resumeQuery() { + if (state != QueryState.SUSPENDED) { + throw new IllegalStateException("Can't resume a query that isn't suspended"); + } + + queryNugget.onBaseEntryStart(); + state = QueryState.RUNNING; + Assert.eqNull(catchAllNugget, "catchAllNugget"); + startCatchAll(); + } + + private void startCatchAll() { catchAllNugget = new QueryPerformanceNugget( - evaluationNumber, 0, UNINSTRUMENTED_CODE_DESCRIPTION, false, QueryConstants.NULL_LONG); + queryNugget.getEvaluationNumber(), + queryNugget.getParentEvaluationNumber(), + operationNuggets.size(), + QueryConstants.NULL_INT, 0, + UNINSTRUMENTED_CODE_DESCRIPTION, false, false, QueryConstants.NULL_LONG); } private void stopCatchAll(final boolean abort) { @@ -162,6 +200,8 @@ private void stopCatchAll(final boolean abort) { shouldLog = catchAllNugget.done(this); } 
if (shouldLog) { + Assert.eq(operationNuggets.size(), "operationsNuggets.size()", + catchAllNugget.getOperationNumber(), "catchAllNugget.getOperationNumber()"); operationNuggets.add(catchAllNugget); } catchAllNugget = null; @@ -190,9 +230,12 @@ public synchronized QueryPerformanceNugget getNugget(final String name, final lo if (catchAllNugget != null) { stopCatchAll(false); } + final int parentOperationNumber = userNuggetStack.isEmpty() ? QueryConstants.NULL_INT + : userNuggetStack.getLast().getOperationNumber(); final QueryPerformanceNugget nugget = new QueryPerformanceNugget( - queryNugget.getEvaluationNumber(), userNuggetStack.size(), - name, true, inputSize); + queryNugget.getEvaluationNumber(), queryNugget.getParentEvaluationNumber(), + operationNuggets.size(), parentOperationNumber, userNuggetStack.size(), + name, true, false, inputSize); operationNuggets.add(nugget); userNuggetStack.addLast(nugget); return nugget; @@ -221,9 +264,9 @@ synchronized boolean releaseNugget(QueryPerformanceNugget nugget) { ") - did you follow the correct try/finally pattern?"); } - if (removed.shouldLogMenAndStackParents()) { + if (removed.shouldLogMeAndStackParents()) { shouldLog = true; - if (userNuggetStack.size() > 0) { + if (!userNuggetStack.isEmpty()) { userNuggetStack.getLast().setShouldLogMeAndStackParents(); } } @@ -241,7 +284,7 @@ synchronized boolean releaseNugget(QueryPerformanceNugget nugget) { } if (userNuggetStack.isEmpty() && queryNugget != null && state == QueryState.RUNNING) { - startCatchAll(queryNugget.getEvaluationNumber()); + startCatchAll(); } return shouldLog; @@ -269,7 +312,7 @@ public void setQueryData(final EntrySetter setter) { operationNumber = operationNuggets.size(); if (operationNumber > 0) { // ensure UPL and QOPL are consistent/joinable. 
- if (userNuggetStack.size() > 0) { + if (!userNuggetStack.isEmpty()) { userNuggetStack.getLast().setShouldLogMeAndStackParents(); } else { uninstrumented = true; @@ -282,6 +325,11 @@ public void setQueryData(final EntrySetter setter) { setter.set(evaluationNumber, operationNumber, uninstrumented); } + public void accumulate(@NotNull final QueryPerformanceRecorder subQuery) { + hasSubQuery = true; + queryNugget.addBaseEntry(subQuery.queryNugget); + } + private void clear() { queryNugget = null; catchAllNugget = null; @@ -289,6 +337,14 @@ private void clear() { userNuggetStack.clear(); } + public int getEvaluationNumber() { + return queryNugget.getEvaluationNumber(); + } + + public boolean hasSubQuery() { + return hasSubQuery; + } + public synchronized QueryPerformanceNugget getQueryLevelPerformanceData() { return queryNugget; } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryState.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryState.java index baa9341e116..ebf3df1ab58 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryState.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryState.java @@ -5,5 +5,5 @@ public enum QueryState { - RUNNING, FINISHED, INTERRUPTED + RUNNING, FINISHED, SUSPENDED, INTERRUPTED } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java index c5af29411f1..5e380fee2c9 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java @@ -915,11 +915,9 @@ private void cleanUpAndNotify(final Runnable onCleanupComplete) { source.getUpdateGraph().addNotification(new TerminalNotification() { @Override public void run() { - synchronized (accumulated) { - final PerformanceEntry entry = 
sourceListener().getEntry(); - if (entry != null) { - entry.accumulate(accumulated); - } + final PerformanceEntry entry = sourceListener().getEntry(); + if (entry != null) { + entry.accumulate(accumulated); } } }); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java index ee32cea7f69..18bf178cb56 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java @@ -4,9 +4,12 @@ package io.deephaven.engine.table.impl.util; import io.deephaven.base.clock.Clock; +import io.deephaven.base.verify.Require; import io.deephaven.configuration.Configuration; import io.deephaven.engine.table.impl.BlinkTableTools; import io.deephaven.engine.table.impl.QueryTable; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.engine.tablelogger.EngineTableLoggers; import io.deephaven.engine.tablelogger.QueryOperationPerformanceLogLogger; import io.deephaven.engine.tablelogger.QueryPerformanceLogLogger; @@ -16,10 +19,14 @@ import io.deephaven.process.ProcessInfoConfig; import io.deephaven.stats.Driver; import io.deephaven.stats.StatsIntradayLogger; +import io.deephaven.util.QueryConstants; +import org.jetbrains.annotations.NotNull; import java.io.IOException; +import java.util.List; public class EngineMetrics { + private static final Logger log = LoggerFactory.getLogger(EngineMetrics.class); private static final boolean STATS_LOGGING_ENABLED = Configuration.getInstance().getBooleanWithDefault( "statsLoggingEnabled", true); private static volatile ProcessInfo PROCESS_INFO; @@ -106,6 +113,33 @@ private StatsIntradayLogger getStatsLogger() { return statsImpl; } + public void logQueryProcessingResults(@NotNull final QueryProcessingResults results) { + 
final QueryPerformanceLogLogger qplLogger = getQplLogger(); + final QueryOperationPerformanceLogLogger qoplLogger = getQoplLogger(); + try { + final QueryPerformanceNugget queryNugget = Require.neqNull( + results.getRecorder().getQueryLevelPerformanceData(), + "queryProcessingResults.getRecorder().getQueryLevelPerformanceData()"); + + synchronized (qplLogger) { + qplLogger.log(results.getRecorder().getEvaluationNumber(), results, queryNugget); + } + final List nuggets = + results.getRecorder().getOperationLevelPerformanceData(); + synchronized (qoplLogger) { + if (results.getRecorder().hasSubQuery() || !nuggets.isEmpty()) { + // if this query has sub queries or op nuggets log an entry to enable hierarchical consistency + qoplLogger.log(queryNugget.getOperationNumber(), queryNugget); + } + for (QueryPerformanceNugget n : nuggets) { + qoplLogger.log(n.getOperationNumber(), n); + } + } + } catch (final Exception e) { + log.error().append("Failed to log query performance data: ").append(e).endl(); + } + } + public static boolean maybeStartStatsCollection() { if (!EngineMetrics.STATS_LOGGING_ENABLED) { return false; diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/OperationInitializationPoolJobScheduler.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/OperationInitializationPoolJobScheduler.java index 7037dd34811..2722d61fd35 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/OperationInitializationPoolJobScheduler.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/OperationInitializationPoolJobScheduler.java @@ -32,9 +32,7 @@ public void submit( throw e; } finally { basePerformanceEntry.onBaseEntryEnd(); - synchronized (accumulatedBaseEntry) { - accumulatedBaseEntry.accumulate(basePerformanceEntry); - } + accumulatedBaseEntry.accumulate(basePerformanceEntry); } }); } diff --git 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java index c95c4b0a236..d3bca54f85b 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java @@ -40,7 +40,7 @@ public Table blinkTable() { @Override public void log(Flags flags, int operationNumber, QueryPerformanceNugget nugget) throws IOException { - publisher.add(id.value(), operationNumber, nugget); + publisher.add(id.value(), nugget); qoplLogger.log(flags, operationNumber, nugget); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java index 223549a6fc9..a588c63df97 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java @@ -24,10 +24,13 @@ class QueryOperationPerformanceStreamPublisher implements StreamPublisher { private static final TableDefinition DEFINITION = TableDefinition.of( ColumnDefinition.ofString("ProcessUniqueId"), ColumnDefinition.ofInt("EvaluationNumber"), + ColumnDefinition.ofInt("ParentEvaluationNumber"), ColumnDefinition.ofInt("OperationNumber"), + ColumnDefinition.ofInt("ParentOperationNumber"), ColumnDefinition.ofInt("Depth"), ColumnDefinition.ofString("Description"), ColumnDefinition.ofString("CallerLine"), + ColumnDefinition.ofBoolean("IsQueryLevel"), ColumnDefinition.ofBoolean("IsTopLevel"), ColumnDefinition.ofBoolean("IsCompilation"), ColumnDefinition.ofTime("StartTime"), @@ -67,34 +70,33 @@ public void register(@NotNull 
StreamConsumer consumer) { public synchronized void add( final String id, - final int operationNumber, final QueryPerformanceNugget nugget) { chunks[0].asWritableObjectChunk().add(id); chunks[1].asWritableIntChunk().add(nugget.getEvaluationNumber()); - chunks[2].asWritableIntChunk().add(operationNumber); - chunks[3].asWritableIntChunk().add(nugget.getDepth()); - chunks[4].asWritableObjectChunk().add(nugget.getName()); - chunks[5].asWritableObjectChunk().add(nugget.getCallerLine()); - chunks[6].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.isTopLevel())); - chunks[7].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.getName().startsWith("Compile:"))); - chunks[8].asWritableLongChunk().add(DateTimeUtils.millisToNanos(nugget.getStartClockTime())); - // this is a lie; timestamps should _NOT_ be created based on adding nano time durations to timestamps. - chunks[9].asWritableLongChunk().add(nugget.getTotalTimeNanos() == null ? QueryConstants.NULL_LONG - : DateTimeUtils.millisToNanos(nugget.getStartClockTime()) + nugget.getTotalTimeNanos()); - chunks[10].asWritableLongChunk() - .add(nugget.getTotalTimeNanos() == null ? 
QueryConstants.NULL_LONG : nugget.getTotalTimeNanos()); - chunks[11].asWritableLongChunk().add(nugget.getCpuNanos()); - chunks[12].asWritableLongChunk().add(nugget.getUserCpuNanos()); - chunks[13].asWritableLongChunk().add(nugget.getEndFreeMemory()); - chunks[14].asWritableLongChunk().add(nugget.getEndTotalMemory()); - chunks[15].asWritableLongChunk().add(nugget.getDiffFreeMemory()); - chunks[16].asWritableLongChunk().add(nugget.getDiffTotalMemory()); - chunks[17].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); - chunks[18].asWritableLongChunk().add(nugget.getAllocatedBytes()); - chunks[19].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); - chunks[20].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); - chunks[21].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); + chunks[2].asWritableIntChunk().add(nugget.getParentEvaluationNumber()); + chunks[3].asWritableIntChunk().add(nugget.getOperationNumber()); + chunks[4].asWritableIntChunk().add(nugget.getParentOperationNumber()); + chunks[5].asWritableIntChunk().add(nugget.getDepth()); + chunks[6].asWritableObjectChunk().add(nugget.getName()); + chunks[7].asWritableObjectChunk().add(nugget.getCallerLine()); + chunks[8].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.isQueryLevel())); + chunks[9].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.isTopLevel())); + chunks[10].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.getName().startsWith("Compile:"))); + chunks[11].asWritableLongChunk().add(nugget.getStartClockTime()); + chunks[12].asWritableLongChunk().add(nugget.getEndClockTime()); + chunks[13].asWritableLongChunk().add(nugget.getTotalTimeNanos()); + chunks[14].asWritableLongChunk().add(nugget.getCpuNanos()); + chunks[15].asWritableLongChunk().add(nugget.getUserCpuNanos()); + chunks[16].asWritableLongChunk().add(nugget.getEndFreeMemory()); + chunks[17].asWritableLongChunk().add(nugget.getEndTotalMemory()); + 
chunks[18].asWritableLongChunk().add(nugget.getDiffFreeMemory()); + chunks[19].asWritableLongChunk().add(nugget.getDiffTotalMemory()); + chunks[20].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); + chunks[21].asWritableLongChunk().add(nugget.getAllocatedBytes()); + chunks[22].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); + chunks[23].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); + chunks[24].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); if (chunks[0].size() == CHUNK_SIZE) { flushInternal(); } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java index b092073921b..c65de4f5a7c 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java @@ -42,7 +42,7 @@ public Table blinkTable() { @Override public void log(Flags flags, long evaluationNumber, QueryProcessingResults queryProcessingResults, QueryPerformanceNugget nugget) throws IOException { - publisher.add(id.value(), evaluationNumber, queryProcessingResults, nugget); + publisher.add(id.value(), queryProcessingResults, nugget); qplLogger.log(flags, evaluationNumber, queryProcessingResults, nugget); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java index adb4511a71e..0f3e11bf1a4 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java @@ -13,7 +13,6 @@ import io.deephaven.stream.StreamChunkUtils; import 
io.deephaven.stream.StreamConsumer; import io.deephaven.stream.StreamPublisher; -import io.deephaven.time.DateTimeUtils; import io.deephaven.util.BooleanUtils; import io.deephaven.util.QueryConstants; import org.jetbrains.annotations.NotNull; @@ -25,6 +24,7 @@ class QueryPerformanceStreamPublisher implements StreamPublisher { private static final TableDefinition DEFINITION = TableDefinition.of( ColumnDefinition.ofString("ProcessUniqueId"), ColumnDefinition.ofLong("EvaluationNumber"), + ColumnDefinition.ofLong("ParentEvaluationNumber"), ColumnDefinition.ofTime("StartTime"), ColumnDefinition.ofTime("EndTime"), ColumnDefinition.ofLong("DurationNanos"), @@ -65,68 +65,69 @@ public void register(@NotNull StreamConsumer consumer) { public synchronized void add( final String id, - final long evaluationNumber, final QueryProcessingResults queryProcessingResults, final QueryPerformanceNugget nugget) { // ColumnDefinition.ofString("ProcessUniqueId"), chunks[0].asWritableObjectChunk().add(id); // ColumnDefinition.ofLong("EvaluationNumber") - chunks[1].asWritableLongChunk().add(evaluationNumber); + final int en = nugget.getEvaluationNumber(); + chunks[1].asWritableLongChunk().add(en == QueryConstants.NULL_INT ? QueryConstants.NULL_LONG : en); + + // ColumnDefinition.ofLong("ParentEvaluationNumber") + final int pen = nugget.getParentEvaluationNumber(); + chunks[2].asWritableLongChunk().add(pen == QueryConstants.NULL_INT ? QueryConstants.NULL_LONG : pen); // ColumnDefinition.ofTime("StartTime"); - chunks[2].asWritableLongChunk().add(DateTimeUtils.millisToNanos(nugget.getStartClockTime())); + chunks[3].asWritableLongChunk().add(nugget.getStartClockTime()); // ColumnDefinition.ofTime("EndTime") - // this is a lie; timestamps should _NOT_ be created based on adding nano time durations to timestamps. - chunks[3].asWritableLongChunk().add(nugget.getTotalTimeNanos() == null ? 
QueryConstants.NULL_LONG - : DateTimeUtils.millisToNanos(nugget.getStartClockTime()) + nugget.getTotalTimeNanos()); + chunks[4].asWritableLongChunk().add(nugget.getEndClockTime()); // ColumnDefinition.ofLong("DurationNanos") - chunks[4].asWritableLongChunk() - .add(nugget.getTotalTimeNanos() == null ? QueryConstants.NULL_LONG : nugget.getTotalTimeNanos()); + chunks[5].asWritableLongChunk().add(nugget.getTotalTimeNanos()); // ColumnDefinition.ofLong("CpuNanos") - chunks[5].asWritableLongChunk().add(nugget.getCpuNanos()); + chunks[6].asWritableLongChunk().add(nugget.getCpuNanos()); // ColumnDefinition.ofLong("UserCpuNanos") - chunks[6].asWritableLongChunk().add(nugget.getUserCpuNanos()); + chunks[7].asWritableLongChunk().add(nugget.getUserCpuNanos()); // ColumnDefinition.ofLong("FreeMemory") - chunks[7].asWritableLongChunk().add(nugget.getEndFreeMemory()); + chunks[8].asWritableLongChunk().add(nugget.getEndFreeMemory()); // ColumnDefinition.ofLong("TotalMemory") - chunks[8].asWritableLongChunk().add(nugget.getEndTotalMemory()); + chunks[9].asWritableLongChunk().add(nugget.getEndTotalMemory()); // ColumnDefinition.ofLong("FreeMemoryChange") - chunks[9].asWritableLongChunk().add(nugget.getDiffFreeMemory()); + chunks[10].asWritableLongChunk().add(nugget.getDiffFreeMemory()); // ColumnDefinition.ofLong("TotalMemoryChange") - chunks[10].asWritableLongChunk().add(nugget.getDiffTotalMemory()); + chunks[11].asWritableLongChunk().add(nugget.getDiffTotalMemory()); // ColumnDefinition.ofLong("Collections") - chunks[11].asWritableLongChunk().add(nugget.getDiffCollections()); + chunks[12].asWritableLongChunk().add(nugget.getDiffCollections()); // ColumnDefinition.ofLong("CollectionTimeNanos") - chunks[12].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); + chunks[13].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); // ColumnDefinition.ofLong("AllocatedBytes") - chunks[13].asWritableLongChunk().add(nugget.getAllocatedBytes()); + 
chunks[14].asWritableLongChunk().add(nugget.getAllocatedBytes()); // ColumnDefinition.ofLong("PoolAllocatedBytes") - chunks[14].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); + chunks[15].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); // ColumnDefinition.ofBoolean("WasInterrupted") - chunks[15].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); + chunks[16].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); // ColumnDefinition.ofBoolean("IsReplayer") - chunks[16].asWritableByteChunk().add(BooleanUtils.booleanAsByte(queryProcessingResults.isReplayer())); + chunks[17].asWritableByteChunk().add(BooleanUtils.booleanAsByte(queryProcessingResults.isReplayer())); // ColumnDefinition.ofString("Exception") - chunks[17].asWritableObjectChunk().add(queryProcessingResults.getException()); + chunks[18].asWritableObjectChunk().add(queryProcessingResults.getException()); // ColumnDefinition.ofString("AuthContext") - chunks[18].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); + chunks[19].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); if (chunks[0].size() == CHUNK_SIZE) { flushInternal(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/UpdateGraphJobScheduler.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/UpdateGraphJobScheduler.java index 2d799e0582b..345b08aa24e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/UpdateGraphJobScheduler.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/UpdateGraphJobScheduler.java @@ -47,9 +47,7 @@ public void run() { throw e; } finally { baseEntry.onBaseEntryEnd(); - synchronized (accumulatedBaseEntry) { - accumulatedBaseEntry.accumulate(baseEntry); - } + accumulatedBaseEntry.accumulate(baseEntry); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java 
b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java index 28a46f61a61..1d534683a44 100644 --- a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java +++ b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java @@ -7,12 +7,12 @@ import java.io.IOException; +import static io.deephaven.tablelogger.TableLogger.DEFAULT_INTRADAY_LOGGER_FLAGS; + /** * Logs data that describes the query-level performance for each worker. A given worker may be running multiple queries; * each will have its own set of query performance log entries. */ -import static io.deephaven.tablelogger.TableLogger.DEFAULT_INTRADAY_LOGGER_FLAGS; - public interface QueryPerformanceLogLogger { default void log(final long evaluationNumber, final QueryProcessingResults queryProcessingResults, final QueryPerformanceNugget nugget) throws IOException { diff --git a/props/configs/src/main/resources/defaultPackageFilters.qpr b/props/configs/src/main/resources/defaultPackageFilters.qpr index 866e95bcd36..577076bdf81 100644 --- a/props/configs/src/main/resources/defaultPackageFilters.qpr +++ b/props/configs/src/main/resources/defaultPackageFilters.qpr @@ -1,6 +1,10 @@ java. sun. -groovy.lang -org.codehaus.groovy +groovy.lang. +org.codehaus.groovy. io.deephaven. -io.deephaven.engine +io.deephaven.engine. +io.grpc. +com.google.common. +org.eclipse. +jdk.internal. 
diff --git a/server/src/main/java/io/deephaven/server/barrage/BarrageMessageProducer.java b/server/src/main/java/io/deephaven/server/barrage/BarrageMessageProducer.java index d06c189d4be..c8ad8192fed 100644 --- a/server/src/main/java/io/deephaven/server/barrage/BarrageMessageProducer.java +++ b/server/src/main/java/io/deephaven/server/barrage/BarrageMessageProducer.java @@ -151,7 +151,7 @@ public Operation( @Override public String getDescription() { - return "BarrageMessageProducer(" + updateIntervalMs + ")"; + return "BarrageMessageProducer(" + updateIntervalMs + "," + System.identityHashCode(parent) + ")"; } @Override diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index 2ba3045be61..1eb34a8bcfd 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -34,6 +34,7 @@ import io.deephaven.proto.util.ExportTicketHelper; import io.deephaven.server.util.Scheduler; import io.deephaven.engine.context.ExecutionContext; +import io.deephaven.util.QueryConstants; import io.deephaven.util.SafeCloseable; import io.deephaven.util.annotations.VisibleForTesting; import io.deephaven.auth.AuthContext; @@ -219,6 +220,13 @@ protected void updateExpiration(@NotNull final SessionService.TokenExpiration ex .append(MILLIS_FROM_EPOCH_FORMATTER, expiration.deadlineMillis).append(".").endl(); } + /** + * @return the session id + */ + public String getSessionId() { + return sessionId; + } + /** * @return the current expiration token for this session */ @@ -531,6 +539,9 @@ public final static class ExportObject extends LivenessArtifact { private final SessionService.ErrorTransformer errorTransformer; private final SessionState session; + /** used to keep track of performance details if caller needs to aggregate across multiple exports */ + private QueryPerformanceRecorder 
queryPerformanceRecorder; + /** final result of export */ private volatile T result; private volatile ExportNotification.State state = ExportNotification.State.UNKNOWN; @@ -620,6 +631,14 @@ private boolean isNonExport() { return exportId == NON_EXPORT_ID; } + private synchronized void setQueryPerformanceRecorder(final QueryPerformanceRecorder queryPerformanceRecorder) { + if (this.queryPerformanceRecorder != null) { + throw new IllegalStateException( + "performance query recorder can only be set once on an exportable object"); + } + this.queryPerformanceRecorder = queryPerformanceRecorder; + } + /** * Sets the dependencies and tracks liveness dependencies. * @@ -959,16 +978,17 @@ private void doExport() { T localResult = null; boolean shouldLog = false; - int evaluationNumber = -1; QueryProcessingResults queryProcessingResults = null; try (final SafeCloseable ignored1 = session.executionContext.open(); final SafeCloseable ignored2 = LivenessScopeStack.open()) { try { - queryProcessingResults = new QueryProcessingResults( - QueryPerformanceRecorder.getInstance()); - - evaluationNumber = QueryPerformanceRecorder.getInstance() - .startQuery("session=" + session.sessionId + ",exportId=" + logIdentity); + queryProcessingResults = new QueryProcessingResults(QueryPerformanceRecorder.getInstance()); + final int parentEvaluationNumber = queryPerformanceRecorder != null + ? 
queryPerformanceRecorder.getEvaluationNumber() + : QueryConstants.NULL_INT; + QueryPerformanceRecorder.getInstance().startQuery( + "ExportObject#doWork(session=" + session.sessionId + ",exportId=" + logIdentity + ")", + parentEvaluationNumber); try { localResult = capturedExport.call(); @@ -993,32 +1013,10 @@ private void doExport() { QueryPerformanceRecorder.resetInstance(); } if ((shouldLog || caughtException != null) && queryProcessingResults != null) { - final EngineMetrics memLoggers = EngineMetrics.getInstance(); - final QueryPerformanceLogLogger qplLogger = memLoggers.getQplLogger(); - final QueryOperationPerformanceLogLogger qoplLogger = memLoggers.getQoplLogger(); - try { - final QueryPerformanceNugget nugget = Require.neqNull( - queryProcessingResults.getRecorder().getQueryLevelPerformanceData(), - "queryProcessingResults.getRecorder().getQueryLevelPerformanceData()"); - - // noinspection SynchronizationOnLocalVariableOrMethodParameter - synchronized (qplLogger) { - qplLogger.log(evaluationNumber, - queryProcessingResults, - nugget); - } - final List nuggets = - queryProcessingResults.getRecorder().getOperationLevelPerformanceData(); - // noinspection SynchronizationOnLocalVariableOrMethodParameter - synchronized (qoplLogger) { - int opNo = 0; - for (QueryPerformanceNugget n : nuggets) { - qoplLogger.log(opNo++, n); - } - } - } catch (final Exception e) { - log.error().append("Failed to log query performance data: ").append(e).endl(); + if (queryPerformanceRecorder != null) { + queryPerformanceRecorder.accumulate(queryProcessingResults.getRecorder()); } + EngineMetrics.getInstance().logQueryProcessingResults(queryProcessingResults); } if (caughtException == null) { setResult(localResult); @@ -1306,6 +1304,19 @@ public class ExportBuilder { } } + /** + * Set the performance recorder to aggregate performance data across exports. If set, instrumentation logging is + * the responsibility of the caller. 
+ * + * @param queryPerformanceRecorder the performance recorder to aggregate into + * @return this builder + */ + public ExportBuilder queryPerformanceRecorder( + @NotNull final QueryPerformanceRecorder queryPerformanceRecorder) { + export.setQueryPerformanceRecorder(queryPerformanceRecorder); + return this; + } + /** * Some exports must happen serially w.r.t. other exports. For example, an export that acquires the exclusive * UGP lock. We enqueue these dependencies independently of the otherwise regularly concurrent exports. diff --git a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java index db19d235805..0d6ffe24d70 100644 --- a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java @@ -7,6 +7,9 @@ import io.deephaven.clientsupport.gotorow.SeekRow; import io.deephaven.auth.codegen.impl.TableServiceContextualAuthWiring; import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; +import io.deephaven.engine.table.impl.perf.QueryProcessingResults; +import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.extensions.barrage.util.ExportUtil; import io.deephaven.internal.log.LoggerFactory; import io.deephaven.io.logger.Logger; @@ -64,6 +67,7 @@ import io.deephaven.server.session.TicketRouter; import io.deephaven.server.table.ExportedTableUpdateListener; import io.deephaven.time.DateTimeUtils; +import io.deephaven.util.SafeCloseable; import io.grpc.StatusRuntimeException; import io.grpc.stub.ServerCallStreamObserver; import io.grpc.stub.StreamObserver; @@ -313,62 +317,76 @@ public void exactJoinTables( } @Override - public void leftJoinTables(LeftJoinTablesRequest request, - StreamObserver responseObserver) { + public void leftJoinTables( + @NotNull final LeftJoinTablesRequest request, + 
@NotNull final StreamObserver responseObserver) { oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.LEFT_JOIN, request, responseObserver); } @Override - public void asOfJoinTables(AsOfJoinTablesRequest request, - StreamObserver responseObserver) { + public void asOfJoinTables( + @NotNull final AsOfJoinTablesRequest request, + @NotNull final StreamObserver responseObserver) { oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.AS_OF_JOIN, request, responseObserver); } @Override - public void ajTables(AjRajTablesRequest request, StreamObserver responseObserver) { + public void ajTables( + @NotNull final AjRajTablesRequest request, + @NotNull final StreamObserver responseObserver) { oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.AJ, request, responseObserver); } @Override - public void rajTables(AjRajTablesRequest request, StreamObserver responseObserver) { + public void rajTables( + @NotNull final AjRajTablesRequest request, + @NotNull final StreamObserver responseObserver) { oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.RAJ, request, responseObserver); } @Override - public void rangeJoinTables(RangeJoinTablesRequest request, - StreamObserver responseObserver) { + public void rangeJoinTables( + @NotNull final RangeJoinTablesRequest request, + @NotNull final StreamObserver responseObserver) { oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.RANGE_JOIN, request, responseObserver); } @Override - public void runChartDownsample(RunChartDownsampleRequest request, - StreamObserver responseObserver) { + public void runChartDownsample( + @NotNull final RunChartDownsampleRequest request, + @NotNull final StreamObserver responseObserver) { oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.RUN_CHART_DOWNSAMPLE, request, responseObserver); } @Override - public void fetchTable(FetchTableRequest request, StreamObserver responseObserver) { + public void fetchTable( + @NotNull final FetchTableRequest request, + @NotNull 
final StreamObserver responseObserver) { oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.FETCH_TABLE, request, responseObserver); } @Override - public void applyPreviewColumns(ApplyPreviewColumnsRequest request, - StreamObserver responseObserver) { + public void applyPreviewColumns( + @NotNull final ApplyPreviewColumnsRequest request, + @NotNull final StreamObserver responseObserver) { oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.APPLY_PREVIEW_COLUMNS, request, responseObserver); } @Override - public void createInputTable(CreateInputTableRequest request, - StreamObserver responseObserver) { + public void createInputTable( + @NotNull final CreateInputTableRequest request, + @NotNull final StreamObserver responseObserver) { oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.CREATE_INPUT_TABLE, request, responseObserver); } @Override - public void updateBy(UpdateByRequest request, StreamObserver responseObserver) { + public void updateBy( + @NotNull final UpdateByRequest request, + @NotNull final StreamObserver responseObserver) { oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.UPDATE_BY, request, responseObserver); } - private Object getSeekValue(Literal literal, Class dataType) { + private Object getSeekValue(@NotNull final Literal literal, @NotNull final Class dataType) { if (literal.hasStringValue()) { if (BigDecimal.class.isAssignableFrom(dataType)) { return new BigDecimal(literal.getStringValue()); @@ -474,8 +492,9 @@ public void seekRow( } @Override - public void computeColumnStatistics(ColumnStatisticsRequest request, - StreamObserver responseObserver) { + public void computeColumnStatistics( + @NotNull final ColumnStatisticsRequest request, + @NotNull final StreamObserver responseObserver) { oneShotOperationWrapper(BatchTableRequest.Operation.OpCase.COLUMN_STATISTICS, request, responseObserver); } @@ -491,9 +510,12 @@ public void batch( } final SessionState session = sessionService.getCurrentSession(); + final 
QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.getInstance(); + queryPerformanceRecorder.startQuery("TableService#batch(session=" + session.getSessionId() + ")"); + // step 1: initialize exports final List> exportBuilders = request.getOpsList().stream() - .map(op -> createBatchExportBuilder(session, op)) + .map(op -> createBatchExportBuilder(session, queryPerformanceRecorder, op)) .collect(Collectors.toList()); // step 2: resolve dependencies @@ -503,18 +525,25 @@ public void batch( // TODO: check for cycles // step 4: submit the batched operations - final AtomicInteger remaining = new AtomicInteger(exportBuilders.size()); + final AtomicInteger remaining = new AtomicInteger(1 + exportBuilders.size()); final AtomicReference firstFailure = new AtomicReference<>(); final Runnable onOneResolved = () -> { - if (remaining.decrementAndGet() == 0) { - final StatusRuntimeException failure = firstFailure.get(); - if (failure != null) { - safelyError(responseObserver, failure); - } else { - safelyComplete(responseObserver); - } + if (remaining.decrementAndGet() > 0) { + return; } + + queryPerformanceRecorder.resumeQuery(); + final QueryProcessingResults results = new QueryProcessingResults(queryPerformanceRecorder); + final StatusRuntimeException failure = firstFailure.get(); + if (failure != null) { + results.setException(failure.getMessage()); + safelyError(responseObserver, failure); + } else { + safelyComplete(responseObserver); + } + queryPerformanceRecorder.endQuery(); + EngineMetrics.getInstance().logQueryProcessingResults(results); }; for (int i = 0; i < exportBuilders.size(); ++i) { @@ -551,6 +580,11 @@ public void batch( onOneResolved.run(); }).submit(exportBuilder::doExport); } + + // now that we've submitted everything we'll suspend the query and release our refcount + queryPerformanceRecorder.suspendQuery(); + QueryPerformanceRecorder.resetInstance(); + onOneResolved.run(); } @Override @@ -606,8 +640,8 @@ public void 
getExportedTableCreationResponse( */ private void oneShotOperationWrapper( final BatchTableRequest.Operation.OpCase op, - final T request, - final StreamObserver responseObserver) { + @NotNull final T request, + @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); final GrpcTableOperation operation = getOp(op); operation.validateRequest(request); @@ -634,7 +668,9 @@ private void oneShotOperationWrapper( }); } - private SessionState.ExportObject resolveOneShotReference(SessionState session, TableReference ref) { + private SessionState.ExportObject
resolveOneShotReference( + @NotNull final SessionState session, + @NotNull final TableReference ref) { if (!ref.hasTicket()) { throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "One-shot operations must use ticket references"); @@ -642,11 +678,17 @@ private SessionState.ExportObject
resolveOneShotReference(SessionState se return ticketRouter.resolve(session, ref.getTicket(), "sourceId"); } - private SessionState.ExportObject
resolveBatchReference(SessionState session, - List> exportBuilders, TableReference ref) { + private SessionState.ExportObject
resolveBatchReference( + @NotNull final SessionState session, + @NotNull final List> exportBuilders, + @NotNull final TableReference ref) { switch (ref.getRefCase()) { case TICKET: - return ticketRouter.resolve(session, ref.getTicket(), "sourceId"); + final String ticketName = ticketRouter.getLogNameFor(ref.getTicket(), "TableServiceGrpcImpl"); + try (final SafeCloseable ignored = + QueryPerformanceRecorder.getInstance().getNugget("resolveBatchReference:" + ticketName)) { + return ticketRouter.resolve(session, ref.getTicket(), "sourceId"); + } case BATCH_OFFSET: final int offset = ref.getBatchOffset(); if (offset < 0 || offset >= exportBuilders.size()) { @@ -658,7 +700,10 @@ private SessionState.ExportObject
resolveBatchReference(SessionState sess } } - private BatchExportBuilder createBatchExportBuilder(SessionState session, BatchTableRequest.Operation op) { + private BatchExportBuilder createBatchExportBuilder( + @NotNull final SessionState session, + @NotNull final QueryPerformanceRecorder queryPerformanceRecorder, + final BatchTableRequest.Operation op) { final GrpcTableOperation operation = getOp(op.getOpCase()); final T request = operation.getRequestFromOperation(op); operation.validateRequest(request); @@ -666,6 +711,7 @@ private BatchExportBuilder createBatchExportBuilder(SessionState session, final Ticket resultId = operation.getResultTicket(request); final ExportBuilder
exportBuilder = resultId.getTicket().isEmpty() ? session.nonExport() : session.newExport(resultId, "resultId"); + exportBuilder.queryPerformanceRecorder(queryPerformanceRecorder); return new BatchExportBuilder<>(operation, request, exportBuilder); } @@ -676,13 +722,18 @@ private class BatchExportBuilder { List> dependencies; - BatchExportBuilder(GrpcTableOperation operation, T request, ExportBuilder
exportBuilder) { + BatchExportBuilder( + @NotNull final GrpcTableOperation operation, + @NotNull final T request, + @NotNull final ExportBuilder
exportBuilder) { this.operation = Objects.requireNonNull(operation); this.request = Objects.requireNonNull(request); this.exportBuilder = Objects.requireNonNull(exportBuilder); } - void resolveDependencies(SessionState session, List> exportBuilders) { + void resolveDependencies( + @NotNull final SessionState session, + @NotNull final List> exportBuilders) { dependencies = operation.getTableReferences(request).stream() .map(ref -> resolveBatchReference(session, exportBuilders, ref)) .collect(Collectors.toList()); From 5bc13daf50d3ee92f5bd77b972906e9f0d96e846 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Fri, 3 Nov 2023 19:48:43 -0600 Subject: [PATCH 02/31] Use Long for EvaluationNumber in all Places --- .../table/impl/perf/PerformanceEntry.java | 6 +++--- .../impl/perf/QueryPerformanceNugget.java | 20 +++++++++---------- .../impl/perf/QueryPerformanceRecorder.java | 20 ++++++++++--------- .../UpdatePerformanceStreamPublisher.java | 4 ++-- .../table/impl/util/AsyncErrorImpl.java | 4 ++-- .../impl/util/AsyncErrorStreamPublisher.java | 12 +++++------ .../engine/table/impl/util/EngineMetrics.java | 11 +++++----- .../util/QueryOperationPerformanceImpl.java | 4 ++-- ...ryOperationPerformanceStreamPublisher.java | 10 ++++------ .../table/impl/util/QueryPerformanceImpl.java | 4 ++-- .../util/QueryPerformanceStreamPublisher.java | 7 ++----- .../QueryOperationPerformanceLogLogger.java | 8 ++++---- .../QueryPerformanceLogLogger.java | 13 ++++++------ .../server/session/SessionState.java | 4 ++-- 14 files changed, 62 insertions(+), 65 deletions(-) diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java index fb03b488ffb..46e9bddc86c 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java @@ -19,7 +19,7 @@ */ 
public class PerformanceEntry extends BasePerformanceEntry implements TableListener.Entry { private final int id; - private final int evaluationNumber; + private final long evaluationNumber; private final int operationNumber; private final String description; private final String callerLine; @@ -42,7 +42,7 @@ public class PerformanceEntry extends BasePerformanceEntry implements TableListe private final RuntimeMemory.Sample startSample; private final RuntimeMemory.Sample endSample; - PerformanceEntry(final int id, final int evaluationNumber, final int operationNumber, + PerformanceEntry(final int id, final long evaluationNumber, final int operationNumber, final String description, final String callerLine, final String updateGraphName) { this.id = id; this.evaluationNumber = evaluationNumber; @@ -144,7 +144,7 @@ public int getId() { return id; } - public int getEvaluationNumber() { + public long getEvaluationNumber() { return evaluationNumber; } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java index 5de006678c6..f50f53917a2 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java @@ -32,8 +32,8 @@ public class QueryPerformanceNugget extends BasePerformanceEntry implements Seri */ static final QueryPerformanceNugget DUMMY_NUGGET = new QueryPerformanceNugget(); - private final int evaluationNumber; - private final int parentEvaluationNumber; + private final long evaluationNumber; + private final long parentEvaluationNumber; private final int operationNumber; private final int parentOperationNumber; private final int depth; @@ -68,7 +68,7 @@ public class QueryPerformanceNugget extends BasePerformanceEntry implements Seri * @param evaluationNumber A unique identifier for the query 
evaluation that triggered this nugget creation * @param description The operation description */ - QueryPerformanceNugget(final int evaluationNumber, final int parentEvaluationNumber, final String description) { + QueryPerformanceNugget(final long evaluationNumber, final long parentEvaluationNumber, final String description) { this(evaluationNumber, parentEvaluationNumber, NULL_INT, NULL_INT, NULL_INT, description, false, true, NULL_LONG); } @@ -77,7 +77,7 @@ public class QueryPerformanceNugget extends BasePerformanceEntry implements Seri * Full constructor for nuggets. * * @param evaluationNumber A unique identifier for the query evaluation that triggered this nugget creation - * @param parentEvaluationNumber The unique identifier of the parent evaluation or {@link QueryConstants#NULL_INT} + * @param parentEvaluationNumber The unique identifier of the parent evaluation or {@link QueryConstants#NULL_LONG} * if none * @param operationNumber A unique identifier for the operation within a query evaluation * @param parentOperationNumber The unique identifier of the parent operation or {@link QueryConstants#NULL_INT} if @@ -88,8 +88,8 @@ public class QueryPerformanceNugget extends BasePerformanceEntry implements Seri * @param inputSize The size of the input data */ QueryPerformanceNugget( - final int evaluationNumber, - final int parentEvaluationNumber, + final long evaluationNumber, + final long parentEvaluationNumber, final int operationNumber, final int parentOperationNumber, final int depth, @@ -133,8 +133,8 @@ public class QueryPerformanceNugget extends BasePerformanceEntry implements Seri private QueryPerformanceNugget() { startMemorySample = null; endMemorySample = null; - evaluationNumber = NULL_INT; - parentEvaluationNumber = NULL_INT; + evaluationNumber = NULL_LONG; + parentEvaluationNumber = NULL_LONG; operationNumber = NULL_INT; parentOperationNumber = NULL_INT; depth = 0; @@ -225,11 +225,11 @@ public String toString() { + ":" + callerLine; } - public int 
getEvaluationNumber() { + public long getEvaluationNumber() { return evaluationNumber; } - public int getParentEvaluationNumber() { + public long getParentEvaluationNumber() { return parentEvaluationNumber; } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java index 2739f1e7d2f..037f202dafe 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java @@ -40,7 +40,7 @@ public class QueryPerformanceRecorder implements Serializable { private static final long serialVersionUID = 2L; private static final String[] packageFilters; - private volatile boolean hasSubQuery; + private volatile boolean mustLogForHierarchicalConsistency; private QueryPerformanceNugget queryNugget; private final ArrayList operationNuggets = new ArrayList<>(); @@ -102,7 +102,7 @@ private QueryPerformanceRecorder() { * @param description A description for the query. */ public void startQuery(final String description) { - startQuery(description, QueryConstants.NULL_INT); + startQuery(description, QueryConstants.NULL_LONG); } /** @@ -111,7 +111,7 @@ public void startQuery(final String description) { * @param description A description for the query. * @param parentEvaluationNumber The evaluation number of the parent query. 
*/ - public synchronized void startQuery(final String description, final int parentEvaluationNumber) { + public synchronized void startQuery(final String description, final long parentEvaluationNumber) { clear(); final int evaluationNumber = queriesProcessed.getAndIncrement(); queryNugget = new QueryPerformanceNugget(evaluationNumber, parentEvaluationNumber, description); @@ -291,7 +291,7 @@ synchronized boolean releaseNugget(QueryPerformanceNugget nugget) { } public interface EntrySetter { - void set(int evaluationNumber, int operationNumber, boolean uninstrumented); + void set(long evaluationNumber, int operationNumber, boolean uninstrumented); } public synchronized QueryPerformanceNugget getOuterNugget() { @@ -300,7 +300,7 @@ public synchronized QueryPerformanceNugget getOuterNugget() { // returns true if uninstrumented code data was captured. public void setQueryData(final EntrySetter setter) { - final int evaluationNumber; + final long evaluationNumber; final int operationNumber; boolean uninstrumented = false; synchronized (this) { @@ -326,7 +326,9 @@ public void setQueryData(final EntrySetter setter) { } public void accumulate(@NotNull final QueryPerformanceRecorder subQuery) { - hasSubQuery = true; + if (subQuery.mustLogForHierarchicalConsistency()) { + mustLogForHierarchicalConsistency = true; + } queryNugget.addBaseEntry(subQuery.queryNugget); } @@ -337,12 +339,12 @@ private void clear() { userNuggetStack.clear(); } - public int getEvaluationNumber() { + public long getEvaluationNumber() { return queryNugget.getEvaluationNumber(); } - public boolean hasSubQuery() { - return hasSubQuery; + public boolean mustLogForHierarchicalConsistency() { + return mustLogForHierarchicalConsistency || !operationNuggets.isEmpty(); } public synchronized QueryPerformanceNugget getQueryLevelPerformanceData() { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java 
b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java index 268ac0ba3ec..5c794e77070 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java @@ -22,7 +22,7 @@ class UpdatePerformanceStreamPublisher implements StreamPublisher { private static final TableDefinition DEFINITION = TableDefinition.of( ColumnDefinition.ofString("ProcessUniqueId"), ColumnDefinition.ofInt("EntryId"), - ColumnDefinition.ofInt("EvaluationNumber"), + ColumnDefinition.ofLong("EvaluationNumber"), ColumnDefinition.ofInt("OperationNumber"), ColumnDefinition.ofString("EntryDescription"), ColumnDefinition.ofString("EntryCallerLine"), @@ -70,7 +70,7 @@ public void register(@NotNull StreamConsumer consumer) { public synchronized void add(IntervalLevelDetails intervalLevelDetails, PerformanceEntry performanceEntry) { chunks[0].asWritableObjectChunk().add(EngineMetrics.getProcessInfo().getId().value()); chunks[1].asWritableIntChunk().add(performanceEntry.getId()); - chunks[2].asWritableIntChunk().add(performanceEntry.getEvaluationNumber()); + chunks[2].asWritableLongChunk().add(performanceEntry.getEvaluationNumber()); chunks[3].asWritableIntChunk().add(performanceEntry.getOperationNumber()); chunks[4].asWritableObjectChunk().add(performanceEntry.getDescription()); chunks[5].asWritableObjectChunk().add(performanceEntry.getCallerLine()); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorImpl.java index f13d3799136..45cf2166e00 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorImpl.java @@ -39,7 +39,7 @@ public void add( @Nullable TableListener.Entry 
entry, @Nullable TableListener.Entry sourceEntry, Throwable originalException) { - final int evaluationNumber; + final long evaluationNumber; final int operationNumber; final String description; if (entry instanceof PerformanceEntry) { @@ -52,7 +52,7 @@ public void add( operationNumber = QueryConstants.NULL_INT; description = null; } - final int sourceEvaluationNumber; + final long sourceEvaluationNumber; final int sourceOperationNumber; final String sourceDescription; if (sourceEntry instanceof PerformanceEntry) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorStreamPublisher.java index f719fe2d623..d891df9e041 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/AsyncErrorStreamPublisher.java @@ -20,10 +20,10 @@ class AsyncErrorStreamPublisher implements StreamPublisher { private static final TableDefinition DEFINITION = TableDefinition.of( ColumnDefinition.ofTime("Time"), - ColumnDefinition.ofInt("EvaluationNumber"), + ColumnDefinition.ofLong("EvaluationNumber"), ColumnDefinition.ofInt("OperationNumber"), ColumnDefinition.ofString("Description"), - ColumnDefinition.ofInt("SourceQueryEvaluationNumber"), + ColumnDefinition.ofLong("SourceQueryEvaluationNumber"), ColumnDefinition.ofInt("SourceQueryOperationNumber"), ColumnDefinition.ofString("SourceQueryDescription"), ColumnDefinition.of("Cause", Type.ofCustom(Throwable.class))); @@ -51,18 +51,18 @@ public void register(@NotNull StreamConsumer consumer) { public synchronized void add( long timeNanos, - int evaluationNumber, + long evaluationNumber, int operationNumber, String description, - int sourceQueryEvaluationNumber, + long sourceQueryEvaluationNumber, int sourceQueryOperationNumber, String sourceQueryDescription, Throwable cause) { 
chunks[0].asWritableLongChunk().add(timeNanos); - chunks[1].asWritableIntChunk().add(evaluationNumber); + chunks[1].asWritableLongChunk().add(evaluationNumber); chunks[2].asWritableIntChunk().add(operationNumber); chunks[3].asWritableObjectChunk().add(description); - chunks[4].asWritableIntChunk().add(sourceQueryEvaluationNumber); + chunks[4].asWritableLongChunk().add(sourceQueryEvaluationNumber); chunks[5].asWritableIntChunk().add(sourceQueryOperationNumber); chunks[6].asWritableObjectChunk().add(sourceQueryDescription); chunks[7].asWritableObjectChunk().add(cause); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java index 18bf178cb56..2e5d3d0c88b 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java @@ -19,7 +19,6 @@ import io.deephaven.process.ProcessInfoConfig; import io.deephaven.stats.Driver; import io.deephaven.stats.StatsIntradayLogger; -import io.deephaven.util.QueryConstants; import org.jetbrains.annotations.NotNull; import java.io.IOException; @@ -122,17 +121,17 @@ public void logQueryProcessingResults(@NotNull final QueryProcessingResults resu "queryProcessingResults.getRecorder().getQueryLevelPerformanceData()"); synchronized (qplLogger) { - qplLogger.log(results.getRecorder().getEvaluationNumber(), results, queryNugget); + qplLogger.log(results, queryNugget); } final List nuggets = results.getRecorder().getOperationLevelPerformanceData(); synchronized (qoplLogger) { - if (results.getRecorder().hasSubQuery() || !nuggets.isEmpty()) { + if (results.getRecorder().mustLogForHierarchicalConsistency()) { // if this query has sub queries or op nuggets log an entry to enable hierarchical consistency - qoplLogger.log(queryNugget.getOperationNumber(), queryNugget); + qoplLogger.log(queryNugget); } - for 
(QueryPerformanceNugget n : nuggets) { - qoplLogger.log(n.getOperationNumber(), n); + for (QueryPerformanceNugget nugget : nuggets) { + qoplLogger.log(nugget); } } } catch (final Exception e) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java index d3bca54f85b..814fdeca0c1 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java @@ -39,8 +39,8 @@ public Table blinkTable() { } @Override - public void log(Flags flags, int operationNumber, QueryPerformanceNugget nugget) throws IOException { + public void log(Flags flags, QueryPerformanceNugget nugget) throws IOException { publisher.add(id.value(), nugget); - qoplLogger.log(flags, operationNumber, nugget); + qoplLogger.log(flags, nugget); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java index a588c63df97..c46f3d3910d 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java @@ -12,9 +12,7 @@ import io.deephaven.stream.StreamChunkUtils; import io.deephaven.stream.StreamConsumer; import io.deephaven.stream.StreamPublisher; -import io.deephaven.time.DateTimeUtils; import io.deephaven.util.BooleanUtils; -import io.deephaven.util.QueryConstants; import org.jetbrains.annotations.NotNull; import java.util.Objects; @@ -23,8 +21,8 @@ class QueryOperationPerformanceStreamPublisher implements StreamPublisher { private static final TableDefinition 
DEFINITION = TableDefinition.of( ColumnDefinition.ofString("ProcessUniqueId"), - ColumnDefinition.ofInt("EvaluationNumber"), - ColumnDefinition.ofInt("ParentEvaluationNumber"), + ColumnDefinition.ofLong("EvaluationNumber"), + ColumnDefinition.ofLong("ParentEvaluationNumber"), ColumnDefinition.ofInt("OperationNumber"), ColumnDefinition.ofInt("ParentOperationNumber"), ColumnDefinition.ofInt("Depth"), @@ -73,8 +71,8 @@ public synchronized void add( final QueryPerformanceNugget nugget) { chunks[0].asWritableObjectChunk().add(id); - chunks[1].asWritableIntChunk().add(nugget.getEvaluationNumber()); - chunks[2].asWritableIntChunk().add(nugget.getParentEvaluationNumber()); + chunks[1].asWritableLongChunk().add(nugget.getEvaluationNumber()); + chunks[2].asWritableLongChunk().add(nugget.getParentEvaluationNumber()); chunks[3].asWritableIntChunk().add(nugget.getOperationNumber()); chunks[4].asWritableIntChunk().add(nugget.getParentOperationNumber()); chunks[5].asWritableIntChunk().add(nugget.getDepth()); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java index c65de4f5a7c..311cbb906b0 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java @@ -40,9 +40,9 @@ public Table blinkTable() { } @Override - public void log(Flags flags, long evaluationNumber, QueryProcessingResults queryProcessingResults, + public void log(Flags flags, QueryProcessingResults queryProcessingResults, QueryPerformanceNugget nugget) throws IOException { publisher.add(id.value(), queryProcessingResults, nugget); - qplLogger.log(flags, evaluationNumber, queryProcessingResults, nugget); + qplLogger.log(flags, queryProcessingResults, nugget); } } diff --git 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java index 0f3e11bf1a4..7551f98498c 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java @@ -14,7 +14,6 @@ import io.deephaven.stream.StreamConsumer; import io.deephaven.stream.StreamPublisher; import io.deephaven.util.BooleanUtils; -import io.deephaven.util.QueryConstants; import org.jetbrains.annotations.NotNull; import java.util.Objects; @@ -71,12 +70,10 @@ public synchronized void add( chunks[0].asWritableObjectChunk().add(id); // ColumnDefinition.ofLong("EvaluationNumber") - final int en = nugget.getEvaluationNumber(); - chunks[1].asWritableLongChunk().add(en == QueryConstants.NULL_INT ? QueryConstants.NULL_LONG : en); + chunks[1].asWritableLongChunk().add(nugget.getEvaluationNumber()); // ColumnDefinition.ofLong("ParentEvaluationNumber") - final int pen = nugget.getParentEvaluationNumber(); - chunks[2].asWritableLongChunk().add(pen == QueryConstants.NULL_INT ? QueryConstants.NULL_LONG : pen); + chunks[2].asWritableLongChunk().add(nugget.getParentEvaluationNumber()); // ColumnDefinition.ofTime("StartTime"); chunks[3].asWritableLongChunk().add(nugget.getStartClockTime()); diff --git a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java index a981c646b09..b9ba707353e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java +++ b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java @@ -13,17 +13,17 @@ * queries. 
*/ public interface QueryOperationPerformanceLogLogger { - default void log(final int operationNumber, final QueryPerformanceNugget nugget) throws IOException { - log(DEFAULT_INTRADAY_LOGGER_FLAGS, operationNumber, nugget); + default void log(final QueryPerformanceNugget nugget) throws IOException { + log(DEFAULT_INTRADAY_LOGGER_FLAGS, nugget); } - void log(final Row.Flags flags, final int operationNumber, final QueryPerformanceNugget nugget) throws IOException; + void log(Row.Flags flags, QueryPerformanceNugget nugget) throws IOException; enum Noop implements QueryOperationPerformanceLogLogger { INSTANCE; @Override - public void log(Flags flags, int operationNumber, QueryPerformanceNugget nugget) throws IOException { + public void log(Flags flags, QueryPerformanceNugget nugget) throws IOException { } } diff --git a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java index 1d534683a44..082dff4c2f9 100644 --- a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java +++ b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java @@ -14,20 +14,21 @@ * each will have its own set of query performance log entries. 
*/ public interface QueryPerformanceLogLogger { - default void log(final long evaluationNumber, final QueryProcessingResults queryProcessingResults, + default void log( + final QueryProcessingResults queryProcessingResults, final QueryPerformanceNugget nugget) throws IOException { - log(DEFAULT_INTRADAY_LOGGER_FLAGS, evaluationNumber, queryProcessingResults, nugget); + log(DEFAULT_INTRADAY_LOGGER_FLAGS, queryProcessingResults, nugget); } - void log(final Row.Flags flags, final long evaluationNumber, final QueryProcessingResults queryProcessingResults, - final QueryPerformanceNugget nugget) throws IOException; + void log(Row.Flags flags, QueryProcessingResults queryProcessingResults, QueryPerformanceNugget nugget) + throws IOException; enum Noop implements QueryPerformanceLogLogger { INSTANCE; @Override - public void log(Flags flags, long evaluationNumber, QueryProcessingResults queryProcessingResults, - QueryPerformanceNugget nugget) throws IOException { + public void log(Flags flags, QueryProcessingResults queryProcessingResults, QueryPerformanceNugget nugget) + throws IOException { } } diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index 1eb34a8bcfd..0c373fc5c60 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -983,9 +983,9 @@ private void doExport() { final SafeCloseable ignored2 = LivenessScopeStack.open()) { try { queryProcessingResults = new QueryProcessingResults(QueryPerformanceRecorder.getInstance()); - final int parentEvaluationNumber = queryPerformanceRecorder != null + final long parentEvaluationNumber = queryPerformanceRecorder != null ? 
queryPerformanceRecorder.getEvaluationNumber() - : QueryConstants.NULL_INT; + : QueryConstants.NULL_LONG; QueryPerformanceRecorder.getInstance().startQuery( "ExportObject#doWork(session=" + session.sessionId + ",exportId=" + logIdentity + ")", parentEvaluationNumber); From c5cf8088b65c7bed18a3d7f1d5a87627f52ccd12 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Fri, 3 Nov 2023 21:36:53 -0600 Subject: [PATCH 03/31] Fix QOPL Column Mismatches --- ...ryOperationPerformanceStreamPublisher.java | 68 +++++++++++++++++-- 1 file changed, 63 insertions(+), 5 deletions(-) diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java index c46f3d3910d..d21e2b72b08 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java @@ -36,6 +36,8 @@ class QueryOperationPerformanceStreamPublisher implements StreamPublisher { ColumnDefinition.ofLong("DurationNanos"), ColumnDefinition.ofLong("CpuNanos"), ColumnDefinition.ofLong("UserCpuNanos"), + ColumnDefinition.ofLong("FreeMemory"), + ColumnDefinition.ofLong("TotalMemory"), ColumnDefinition.ofLong("FreeMemoryChange"), ColumnDefinition.ofLong("TotalMemoryChange"), ColumnDefinition.ofLong("Collections"), @@ -70,31 +72,87 @@ public synchronized void add( final String id, final QueryPerformanceNugget nugget) { + // ColumnDefinition.ofString("ProcessUniqueId"), chunks[0].asWritableObjectChunk().add(id); + + // ColumnDefinition.ofLong("EvaluationNumber"), chunks[1].asWritableLongChunk().add(nugget.getEvaluationNumber()); + + // ColumnDefinition.ofLong("ParentEvaluationNumber"), chunks[2].asWritableLongChunk().add(nugget.getParentEvaluationNumber()); + + // 
ColumnDefinition.ofInt("OperationNumber"), chunks[3].asWritableIntChunk().add(nugget.getOperationNumber()); + + // ColumnDefinition.ofInt("ParentOperationNumber"), chunks[4].asWritableIntChunk().add(nugget.getParentOperationNumber()); + + // ColumnDefinition.ofInt("Depth"), chunks[5].asWritableIntChunk().add(nugget.getDepth()); + + // ColumnDefinition.ofString("Description"), chunks[6].asWritableObjectChunk().add(nugget.getName()); + + // ColumnDefinition.ofString("CallerLine"), chunks[7].asWritableObjectChunk().add(nugget.getCallerLine()); + + // ColumnDefinition.ofBoolean("IsQueryLevel"), chunks[8].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.isQueryLevel())); + + // ColumnDefinition.ofBoolean("IsTopLevel"), chunks[9].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.isTopLevel())); + + // ColumnDefinition.ofBoolean("IsCompilation"), chunks[10].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.getName().startsWith("Compile:"))); + + // ColumnDefinition.ofTime("StartTime"), chunks[11].asWritableLongChunk().add(nugget.getStartClockTime()); + + // ColumnDefinition.ofTime("EndTime"), chunks[12].asWritableLongChunk().add(nugget.getEndClockTime()); + + // ColumnDefinition.ofLong("DurationNanos"), chunks[13].asWritableLongChunk().add(nugget.getTotalTimeNanos()); + + // ColumnDefinition.ofLong("CpuNanos"), chunks[14].asWritableLongChunk().add(nugget.getCpuNanos()); + + // ColumnDefinition.ofLong("UserCpuNanos"), chunks[15].asWritableLongChunk().add(nugget.getUserCpuNanos()); + + // ColumnDefinition.ofLong("FreeMemory"), chunks[16].asWritableLongChunk().add(nugget.getEndFreeMemory()); + + // ColumnDefinition.ofLong("TotalMemory"), chunks[17].asWritableLongChunk().add(nugget.getEndTotalMemory()); + + // ColumnDefinition.ofLong("FreeMemoryChange"), chunks[18].asWritableLongChunk().add(nugget.getDiffFreeMemory()); + + // ColumnDefinition.ofLong("TotalMemoryChange"), chunks[19].asWritableLongChunk().add(nugget.getDiffTotalMemory()); - 
chunks[20].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); - chunks[21].asWritableLongChunk().add(nugget.getAllocatedBytes()); - chunks[22].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); - chunks[23].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); - chunks[24].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); + + // ColumnDefinition.ofLong("Collections") + chunks[20].asWritableLongChunk().add(nugget.getDiffCollections()); + + // ColumnDefinition.ofLong("CollectionTimeNanos"), + chunks[21].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); + + // ColumnDefinition.ofLong("AllocatedBytes"), + chunks[22].asWritableLongChunk().add(nugget.getAllocatedBytes()); + + // ColumnDefinition.ofLong("PoolAllocatedBytes"), + chunks[23].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); + + // ColumnDefinition.ofLong("InputSizeLong"), + chunks[24].asWritableLongChunk().add(nugget.getInputSize()); + + // ColumnDefinition.ofBoolean("WasInterrupted") + chunks[25].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); + + // ColumnDefinition.ofString("AuthContext") + chunks[26].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); + if (chunks[0].size() == CHUNK_SIZE) { flushInternal(); } From 95eae63d3b28e976e0454d0600ed9a37f9492cdd Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Sat, 4 Nov 2023 08:11:30 -0600 Subject: [PATCH 04/31] unused imports --- .../main/java/io/deephaven/server/session/SessionState.java | 4 ---- 1 file changed, 4 deletions(-) diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index 0c373fc5c60..d3e1d8e4365 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -10,16 +10,12 @@ import 
dagger.assisted.AssistedInject; import io.deephaven.base.reference.WeakSimpleReference; import io.deephaven.base.verify.Assert; -import io.deephaven.base.verify.Require; import io.deephaven.engine.liveness.LivenessArtifact; import io.deephaven.engine.liveness.LivenessReferent; import io.deephaven.engine.liveness.LivenessScopeStack; -import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.engine.table.impl.util.EngineMetrics; -import io.deephaven.engine.tablelogger.QueryOperationPerformanceLogLogger; -import io.deephaven.engine.tablelogger.QueryPerformanceLogLogger; import io.deephaven.engine.updategraph.DynamicNode; import io.deephaven.hash.KeyedIntObjectHash; import io.deephaven.hash.KeyedIntObjectHashMap; From dfa80dedf69eb8eaf5cf12688a2de5ee0f845d3c Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Sat, 4 Nov 2023 12:33:34 -0600 Subject: [PATCH 05/31] Performance Tracking for One-Shot Ticket Resolution --- .../impl/perf/QueryPerformanceRecorder.java | 47 ++++++++++++++++--- .../util/QueryOperationPerformanceImpl.java | 6 ++- .../table/impl/util/QueryPerformanceImpl.java | 8 +++- .../QueryOperationPerformanceLogLogger.java | 17 +++++-- .../QueryPerformanceLogLogger.java | 25 ++++++++-- .../server/session/SessionState.java | 44 ++++++++++++----- .../table/ops/TableServiceGrpcImpl.java | 30 +++++++++--- 7 files changed, 141 insertions(+), 36 deletions(-) diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java index 037f202dafe..06ac6c32562 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java @@ -21,7 
+21,7 @@ import java.io.*; import java.net.URL; import java.util.*; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; import static io.deephaven.engine.table.impl.lang.QueryLanguageFunctionUtils.minus; @@ -48,7 +48,7 @@ public class QueryPerformanceRecorder implements Serializable { private transient QueryPerformanceNugget catchAllNugget; private final transient Deque userNuggetStack = new ArrayDeque<>(); - private static final AtomicInteger queriesProcessed = new AtomicInteger(0); + private static final AtomicLong queriesProcessed = new AtomicLong(0); private static final ThreadLocal theLocal = ThreadLocal.withInitial(QueryPerformanceRecorder::new); @@ -100,9 +100,10 @@ private QueryPerformanceRecorder() { * Start a query. * * @param description A description for the query. + * @return this */ - public void startQuery(final String description) { - startQuery(description, QueryConstants.NULL_LONG); + public QueryPerformanceRecorder startQuery(final String description) { + return startQuery(description, QueryConstants.NULL_LONG); } /** @@ -110,13 +111,16 @@ public void startQuery(final String description) { * * @param description A description for the query. * @param parentEvaluationNumber The evaluation number of the parent query. 
+ * @return this */ - public synchronized void startQuery(final String description, final long parentEvaluationNumber) { + public synchronized QueryPerformanceRecorder startQuery(final String description, + final long parentEvaluationNumber) { clear(); - final int evaluationNumber = queriesProcessed.getAndIncrement(); + final long evaluationNumber = queriesProcessed.getAndIncrement(); queryNugget = new QueryPerformanceNugget(evaluationNumber, parentEvaluationNumber, description); state = QueryState.RUNNING; startCatchAll(); + return this; } /** @@ -161,26 +165,55 @@ public synchronized boolean endQuery() { return queryNugget.done(this); } + /** + * Suspends a query. + *

+ * This resets the thread local and assumes that this performance nugget may be resumed on another thread. + */ public synchronized void suspendQuery() { if (state != QueryState.RUNNING) { throw new IllegalStateException("Can't suspend a query that isn't running"); } + final QueryPerformanceRecorder threadLocalInstance = getInstance(); + if (threadLocalInstance != this) { + throw new IllegalStateException("Can't suspend a query that doesn't belong to this thread"); + } + state = QueryState.SUSPENDED; Assert.neqNull(catchAllNugget, "catchAllNugget"); stopCatchAll(false); queryNugget.onBaseEntryEnd(); + + // Very likely this QPR is being passed to another thread, be safe and reset the thread local instance. + resetInstance(); } - public synchronized void resumeQuery() { + /** + * Resumes a suspended query. + *

+ * It is an error to resume a query while another query is running on this thread. + * + * @return this + */ + public synchronized QueryPerformanceRecorder resumeQuery() { if (state != QueryState.SUSPENDED) { throw new IllegalStateException("Can't resume a query that isn't suspended"); } + final QueryPerformanceRecorder threadLocalInstance = getInstance(); + synchronized (threadLocalInstance) { + if (threadLocalInstance.state == QueryState.RUNNING) { + throw new IllegalStateException("Can't resume a query while another query is in operation"); + } + } + theLocal.set(this); + queryNugget.onBaseEntryStart(); state = QueryState.RUNNING; Assert.eqNull(catchAllNugget, "catchAllNugget"); startCatchAll(); + return this; } private void startCatchAll() { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java index 814fdeca0c1..1e10b2de73b 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java @@ -10,6 +10,7 @@ import io.deephaven.process.ProcessUniqueId; import io.deephaven.stream.StreamToBlinkTableAdapter; import io.deephaven.tablelogger.Row.Flags; +import org.jetbrains.annotations.NotNull; import java.io.IOException; import java.util.Objects; @@ -39,7 +40,10 @@ public Table blinkTable() { } @Override - public void log(Flags flags, QueryPerformanceNugget nugget) throws IOException { + public void log( + @NotNull final Flags flags, + final int deprecatedArgument, + @NotNull final QueryPerformanceNugget nugget) throws IOException { publisher.add(id.value(), nugget); qoplLogger.log(flags, nugget); } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java 
b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java index 311cbb906b0..b08496d33c5 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java @@ -11,6 +11,7 @@ import io.deephaven.process.ProcessUniqueId; import io.deephaven.stream.StreamToBlinkTableAdapter; import io.deephaven.tablelogger.Row.Flags; +import org.jetbrains.annotations.NotNull; import java.io.IOException; import java.util.Objects; @@ -40,8 +41,11 @@ public Table blinkTable() { } @Override - public void log(Flags flags, QueryProcessingResults queryProcessingResults, - QueryPerformanceNugget nugget) throws IOException { + public void log( + @NotNull final Flags flags, + final long deprecatedField, + @NotNull final QueryProcessingResults queryProcessingResults, + @NotNull final QueryPerformanceNugget nugget) throws IOException { publisher.add(id.value(), queryProcessingResults, nugget); qplLogger.log(flags, queryProcessingResults, nugget); } diff --git a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java index b9ba707353e..dfc961175c1 100644 --- a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java +++ b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java @@ -3,6 +3,7 @@ import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.tablelogger.Row; import io.deephaven.tablelogger.Row.Flags; +import org.jetbrains.annotations.NotNull; import java.io.IOException; @@ -13,17 +14,27 @@ * queries. 
*/ public interface QueryOperationPerformanceLogLogger { - default void log(final QueryPerformanceNugget nugget) throws IOException { + default void log(@NotNull final QueryPerformanceNugget nugget) throws IOException { log(DEFAULT_INTRADAY_LOGGER_FLAGS, nugget); } - void log(Row.Flags flags, QueryPerformanceNugget nugget) throws IOException; + default void log( + @NotNull final Row.Flags flags, + @NotNull final QueryPerformanceNugget nugget) throws IOException { + log(flags, nugget.getOperationNumber(), nugget); + } + + // This prototype is going to be deprecated in 0.31 in favor of the one above. + void log(Row.Flags flags, int operationNumber, QueryPerformanceNugget nugget) throws IOException; enum Noop implements QueryOperationPerformanceLogLogger { INSTANCE; @Override - public void log(Flags flags, QueryPerformanceNugget nugget) throws IOException { + public void log( + @NotNull final Flags flags, + final int operationNumber, + @NotNull final QueryPerformanceNugget nugget) throws IOException { } } diff --git a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java index 082dff4c2f9..339e4e3346f 100644 --- a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java +++ b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java @@ -4,6 +4,7 @@ import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.tablelogger.Row; import io.deephaven.tablelogger.Row.Flags; +import org.jetbrains.annotations.NotNull; import java.io.IOException; @@ -15,19 +16,35 @@ */ public interface QueryPerformanceLogLogger { default void log( - final QueryProcessingResults queryProcessingResults, - final QueryPerformanceNugget nugget) throws IOException { + @NotNull final QueryProcessingResults queryProcessingResults, + @NotNull final QueryPerformanceNugget nugget) throws 
IOException { log(DEFAULT_INTRADAY_LOGGER_FLAGS, queryProcessingResults, nugget); } - void log(Row.Flags flags, QueryProcessingResults queryProcessingResults, QueryPerformanceNugget nugget) + default void log( + @NotNull final Row.Flags flags, + @NotNull final QueryProcessingResults queryProcessingResults, + @NotNull final QueryPerformanceNugget nugget) throws IOException { + log(flags, nugget.getEvaluationNumber(), queryProcessingResults, nugget); + } + + // This prototype is going to be deprecated in 0.31 in favor of the one above. + void log( + Row.Flags flags, + final long evaluationNumber, + QueryProcessingResults queryProcessingResults, + QueryPerformanceNugget nugget) throws IOException; enum Noop implements QueryPerformanceLogLogger { INSTANCE; @Override - public void log(Flags flags, QueryProcessingResults queryProcessingResults, QueryPerformanceNugget nugget) + public void log( + @NotNull final Flags flags, + final long evaluationNumber, + @NotNull final QueryProcessingResults queryProcessingResults, + @NotNull final QueryPerformanceNugget nugget) throws IOException { } diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index d3e1d8e4365..16e34eecd7c 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -535,7 +535,9 @@ public final static class ExportObject extends LivenessArtifact { private final SessionService.ErrorTransformer errorTransformer; private final SessionState session; - /** used to keep track of performance details if caller needs to aggregate across multiple exports */ + /** if true the queryPerformanceRecorder belongs to a batch; otherwise if it exists it belong to the export */ + private boolean qprIsForBatch; + /** used to keep track of performance details either for aggregation or for the async ticket resolution */ private 
QueryPerformanceRecorder queryPerformanceRecorder; /** final result of export */ @@ -627,11 +629,14 @@ private boolean isNonExport() { return exportId == NON_EXPORT_ID; } - private synchronized void setQueryPerformanceRecorder(final QueryPerformanceRecorder queryPerformanceRecorder) { + private synchronized void setQueryPerformanceRecorder( + final QueryPerformanceRecorder queryPerformanceRecorder, + final boolean qprIsForBatch) { if (this.queryPerformanceRecorder != null) { throw new IllegalStateException( "performance query recorder can only be set once on an exportable object"); } + this.qprIsForBatch = qprIsForBatch; this.queryPerformanceRecorder = queryPerformanceRecorder; } @@ -978,19 +983,26 @@ private void doExport() { try (final SafeCloseable ignored1 = session.executionContext.open(); final SafeCloseable ignored2 = LivenessScopeStack.open()) { try { - queryProcessingResults = new QueryProcessingResults(QueryPerformanceRecorder.getInstance()); - final long parentEvaluationNumber = queryPerformanceRecorder != null - ? 
queryPerformanceRecorder.getEvaluationNumber() - : QueryConstants.NULL_LONG; - QueryPerformanceRecorder.getInstance().startQuery( - "ExportObject#doWork(session=" + session.sessionId + ",exportId=" + logIdentity + ")", - parentEvaluationNumber); + final QueryPerformanceRecorder exportRecorder; + if (queryPerformanceRecorder != null && !qprIsForBatch) { + exportRecorder = queryPerformanceRecorder.resumeQuery(); + } else if (queryPerformanceRecorder != null) { + // this is a sub-query; no need to re-log the session id + exportRecorder = QueryPerformanceRecorder.getInstance().startQuery( + "ExportObject#doWork(exportId=" + logIdentity + ")", + queryPerformanceRecorder.getEvaluationNumber()); + } else { + exportRecorder = QueryPerformanceRecorder.getInstance().startQuery( + "ExportObject#doWork(session=" + session.sessionId + ",exportId=" + logIdentity + ")"); + } + queryProcessingResults = new QueryProcessingResults(exportRecorder); try { localResult = capturedExport.call(); } finally { shouldLog = QueryPerformanceRecorder.getInstance().endQuery(); } + } catch (final Exception err) { caughtException = err; synchronized (this) { @@ -1009,12 +1021,14 @@ private void doExport() { QueryPerformanceRecorder.resetInstance(); } if ((shouldLog || caughtException != null) && queryProcessingResults != null) { - if (queryPerformanceRecorder != null) { + if (queryPerformanceRecorder != null && qprIsForBatch) { queryPerformanceRecorder.accumulate(queryProcessingResults.getRecorder()); } EngineMetrics.getInstance().logQueryProcessingResults(queryProcessingResults); } if (caughtException == null) { + // must set result after ending the query and accumulating into the parent so that onSuccess + // may resume and/or finalize a parent query setResult(localResult); } } @@ -1305,11 +1319,13 @@ public class ExportBuilder { * the responsibility of the caller. 
* * @param queryPerformanceRecorder the performance recorder to aggregate into + * @param qprIsForBatch true if a sub-query should be created for the export and aggregated into the qpr * @return this builder */ public ExportBuilder queryPerformanceRecorder( - @NotNull final QueryPerformanceRecorder queryPerformanceRecorder) { - export.setQueryPerformanceRecorder(queryPerformanceRecorder); + @NotNull final QueryPerformanceRecorder queryPerformanceRecorder, + final boolean qprIsForBatch) { + export.setQueryPerformanceRecorder(queryPerformanceRecorder, qprIsForBatch); return this; } @@ -1462,6 +1478,10 @@ public ExportBuilder onSuccess(final Runnable successHandler) { */ public ExportObject submit(final Callable exportMain) { export.setWork(exportMain, errorHandler, successHandler, requiresSerialQueue); + if (export.queryPerformanceRecorder != null && !export.qprIsForBatch) { + // transfer ownership of the qpr to the export + export.queryPerformanceRecorder.suspendQuery(); + } return export; } diff --git a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java index 0d6ffe24d70..42722609caf 100644 --- a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java @@ -4,6 +4,7 @@ package io.deephaven.server.table.ops; import com.google.rpc.Code; +import io.deephaven.base.verify.Assert; import io.deephaven.clientsupport.gotorow.SeekRow; import io.deephaven.auth.codegen.impl.TableServiceContextualAuthWiring; import io.deephaven.engine.table.Table; @@ -510,8 +511,8 @@ public void batch( } final SessionState session = sessionService.getCurrentSession(); - final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.getInstance(); - queryPerformanceRecorder.startQuery("TableService#batch(session=" + session.getSessionId() + ")"); + final 
QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.getInstance().startQuery( + "TableService#batch(session=" + session.getSessionId() + ")"); // step 1: initialize exports final List> exportBuilders = request.getOpsList().stream() @@ -529,9 +530,11 @@ public void batch( final AtomicReference firstFailure = new AtomicReference<>(); final Runnable onOneResolved = () -> { - if (remaining.decrementAndGet() > 0) { + int numRemaining = remaining.decrementAndGet(); + if (numRemaining > 0) { return; } + Assert.geqZero(numRemaining, "numRemaining"); queryPerformanceRecorder.resumeQuery(); final QueryProcessingResults results = new QueryProcessingResults(queryPerformanceRecorder); @@ -644,13 +647,20 @@ private void oneShotOperationWrapper( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); final GrpcTableOperation operation = getOp(op); - operation.validateRequest(request); final Ticket resultId = operation.getResultTicket(request); if (resultId.getTicket().isEmpty()) { throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No result ticket supplied"); } + final String description = "TableService#" + op.name() + "(session=" + session.getSessionId() + ", resultId=" + + ticketRouter.getLogNameFor(resultId, "TableServiceGrpcImpl") + ")"; + + final QueryPerformanceRecorder queryPerformanceRecorder = + QueryPerformanceRecorder.getInstance().startQuery(description); + + operation.validateRequest(request); + final List> dependencies = operation.getTableReferences(request).stream() .map(ref -> resolveOneShotReference(session, ref)) .collect(Collectors.toList()); @@ -658,6 +668,7 @@ private void oneShotOperationWrapper( session.newExport(resultId, "resultId") .require(dependencies) .onError(responseObserver) + .queryPerformanceRecorder(queryPerformanceRecorder, false) .submit(() -> { operation.checkPermission(request, dependencies); final Table result = operation.create(request, 
dependencies); @@ -675,7 +686,12 @@ private SessionState.ExportObject

resolveOneShotReference( throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "One-shot operations must use ticket references"); } - return ticketRouter.resolve(session, ref.getTicket(), "sourceId"); + + final String ticketName = ticketRouter.getLogNameFor(ref.getTicket(), "TableServiceGrpcImpl"); + try (final SafeCloseable ignored = + QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" + ticketName)) { + return ticketRouter.resolve(session, ref.getTicket(), "sourceId"); + } } private SessionState.ExportObject
resolveBatchReference( @@ -686,7 +702,7 @@ private SessionState.ExportObject
resolveBatchReference( case TICKET: final String ticketName = ticketRouter.getLogNameFor(ref.getTicket(), "TableServiceGrpcImpl"); try (final SafeCloseable ignored = - QueryPerformanceRecorder.getInstance().getNugget("resolveBatchReference:" + ticketName)) { + QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" + ticketName)) { return ticketRouter.resolve(session, ref.getTicket(), "sourceId"); } case BATCH_OFFSET: @@ -711,7 +727,7 @@ private BatchExportBuilder createBatchExportBuilder( final Ticket resultId = operation.getResultTicket(request); final ExportBuilder
exportBuilder = resultId.getTicket().isEmpty() ? session.nonExport() : session.newExport(resultId, "resultId"); - exportBuilder.queryPerformanceRecorder(queryPerformanceRecorder); + exportBuilder.queryPerformanceRecorder(queryPerformanceRecorder, true); return new BatchExportBuilder<>(operation, request, exportBuilder); } From d4c8e2ac73f8cf26d9c1fa40512c425fe3c830d3 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Tue, 7 Nov 2023 21:37:25 -0700 Subject: [PATCH 06/31] Majority of Rnd2 Feedback --- .../engine/table/impl/QueryTable.java | 4 +- .../table/impl/perf/BasePerformanceEntry.java | 54 ++++- .../table/impl/perf/PerformanceEntry.java | 21 +- .../impl/perf/QueryPerformanceNugget.java | 224 +++++++++--------- .../impl/perf/QueryPerformanceRecorder.java | 85 +++---- .../impl/perf/QueryProcessingResults.java | 11 - .../UpdatePerformanceStreamPublisher.java | 78 +++--- .../impl/perf/UpdatePerformanceTracker.java | 8 +- .../engine/table/impl/updateby/UpdateBy.java | 2 +- .../engine/table/impl/util/EngineMetrics.java | 6 +- .../util/QueryOperationPerformanceImpl.java | 1 - ...ryOperationPerformanceStreamPublisher.java | 60 ++--- .../table/impl/util/QueryPerformanceImpl.java | 8 +- .../util/QueryPerformanceStreamPublisher.java | 24 +- .../QueryOperationPerformanceLogLogger.java | 14 +- .../QueryPerformanceLogLogger.java | 15 +- .../main/resources/defaultPackageFilters.qpr | 2 +- .../server/session/SessionState.java | 4 +- 18 files changed, 311 insertions(+), 310 deletions(-) diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java index 1b468be1e76..b43d6531d61 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java @@ -1271,7 +1271,7 @@ void handleUncaughtException(Exception throwable) { final QueryPerformanceNugget outerNugget = 
QueryPerformanceRecorder.getInstance().getOuterNugget(); if (outerNugget != null) { - outerNugget.addBaseEntry(basePerformanceEntry); + outerNugget.accumulate(basePerformanceEntry); } } } @@ -1519,7 +1519,7 @@ this, mode, columns, rowSet, getModifiedColumnSetForUpdates(), publishTheseSourc final QueryPerformanceNugget outerNugget = QueryPerformanceRecorder.getInstance().getOuterNugget(); if (outerNugget != null) { - outerNugget.addBaseEntry(baseEntry); + outerNugget.accumulate(baseEntry); } } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java index a838a7f63bc..d7f61d94dd4 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java @@ -7,6 +7,7 @@ import io.deephaven.base.log.LogOutputAppendable; import io.deephaven.base.verify.Assert; import io.deephaven.util.profiling.ThreadProfiler; +import org.jetbrains.annotations.NotNull; import static io.deephaven.engine.table.impl.lang.QueryLanguageFunctionUtils.minus; import static io.deephaven.engine.table.impl.lang.QueryLanguageFunctionUtils.plus; @@ -31,7 +32,7 @@ public class BasePerformanceEntry implements LogOutputAppendable { private long startAllocatedBytes; private long startPoolAllocatedBytes; - public void onBaseEntryStart() { + public synchronized void onBaseEntryStart() { startAllocatedBytes = ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(); startPoolAllocatedBytes = QueryPerformanceRecorder.getPoolAllocatedBytesForCurrentThread(); @@ -40,7 +41,7 @@ public void onBaseEntryStart() { startTimeNanos = System.nanoTime(); } - public void onBaseEntryEnd() { + public synchronized void onBaseEntryEnd() { intervalUserCpuNanos = plus(intervalUserCpuNanos, minus(ThreadProfiler.DEFAULT.getCurrentThreadUserTime(), startUserCpuNanos)); 
intervalCpuNanos = @@ -61,7 +62,7 @@ public void onBaseEntryEnd() { startTimeNanos = 0; } - void baseEntryReset() { + synchronized void baseEntryReset() { Assert.eqZero(startTimeNanos, "startTimeNanos"); intervalUsageNanos = 0; @@ -73,28 +74,56 @@ void baseEntryReset() { intervalPoolAllocatedBytes = 0; } - public long getIntervalUsageNanos() { + /** + * Get the aggregate usage in nanoseconds. Invoking this getter is valid iff the entry will no longer be mutated. + * + * @return total wall clock time in nanos + */ + public long getTotalTimeNanos() { return intervalUsageNanos; } - public long getIntervalCpuNanos() { + /** + * Get the aggregate cpu time in nanoseconds. Invoking this getter is valid iff the entry will no longer be mutated. + * + * @return total cpu time in nanos + */ + public long getCpuNanos() { return intervalCpuNanos; } - public long getIntervalUserCpuNanos() { + /** + * Get the aggregate cpu user time in nanoseconds. Invoking this getter is valid iff the entry will no longer be + * mutated. + * + * @return total cpu user time in nanos + */ + public long getUserCpuNanos() { return intervalUserCpuNanos; } - public long getIntervalAllocatedBytes() { + /** + * Get the aggregate allocated memory in bytes. Invoking this getter is valid iff the entry will no longer be + * mutated. + * + * @return The bytes of allocated memory attributed to the instrumented operation. + */ + public long getAllocatedBytes() { return intervalAllocatedBytes; } - public long getIntervalPoolAllocatedBytes() { + /** + * Get allocated pooled/reusable memory attributed to the instrumented operation in bytes. Invoking this getter is + * valid iff the entry will no longer be mutated. 
+ * + * @return total pool allocated memory in bytes + */ + public long getPoolAllocatedBytes() { return intervalPoolAllocatedBytes; } @Override - public LogOutput append(LogOutput logOutput) { + public LogOutput append(@NotNull final LogOutput logOutput) { final LogOutput currentValues = logOutput.append("BasePerformanceEntry{") .append(", intervalUsageNanos=").append(intervalUsageNanos) .append(", intervalCpuNanos=").append(intervalCpuNanos) @@ -114,7 +143,12 @@ LogOutput appendStart(LogOutput logOutput) { .append(", startPoolAllocatedBytes=").append(startPoolAllocatedBytes); } - public synchronized void accumulate(BasePerformanceEntry entry) { + /** + * Accumulate the values from another entry into this one. The provided entry will not be mutated. + * + * @param entry the entry to accumulate + */ + public synchronized void accumulate(@NotNull final BasePerformanceEntry entry) { this.intervalUsageNanos += entry.intervalUsageNanos; this.intervalCpuNanos = plus(this.intervalCpuNanos, entry.intervalCpuNanos); this.intervalUserCpuNanos = plus(this.intervalUserCpuNanos, entry.intervalUserCpuNanos); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java index 46e9bddc86c..6d42ee2b1d3 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java @@ -13,12 +13,13 @@ import io.deephaven.io.log.impl.LogOutputStringImpl; import io.deephaven.time.DateTimeUtils; import io.deephaven.util.QueryConstants; +import org.jetbrains.annotations.NotNull; /** * Entry class for tracking the performance characteristics of a single recurring update event. 
*/ public class PerformanceEntry extends BasePerformanceEntry implements TableListener.Entry { - private final int id; + private final long id; private final long evaluationNumber; private final int operationNumber; private final String description; @@ -42,7 +43,7 @@ public class PerformanceEntry extends BasePerformanceEntry implements TableListe private final RuntimeMemory.Sample startSample; private final RuntimeMemory.Sample endSample; - PerformanceEntry(final int id, final long evaluationNumber, final int operationNumber, + PerformanceEntry(final long id, final long evaluationNumber, final int operationNumber, final String description, final String callerLine, final String updateGraphName) { this.id = id; this.evaluationNumber = evaluationNumber; @@ -114,7 +115,7 @@ public String toString() { } @Override - public LogOutput append(final LogOutput logOutput) { + public LogOutput append(@NotNull final LogOutput logOutput) { final LogOutput beginning = logOutput.append("PerformanceEntry{") .append(", id=").append(id) .append(", evaluationNumber=").append(evaluationNumber) @@ -122,16 +123,16 @@ public LogOutput append(final LogOutput logOutput) { .append(", description='").append(description).append('\'') .append(", callerLine='").append(callerLine).append('\'') .append(", authContext=").append(authContext) - .append(", intervalUsageNanos=").append(getIntervalUsageNanos()) - .append(", intervalCpuNanos=").append(getIntervalCpuNanos()) - .append(", intervalUserCpuNanos=").append(getIntervalUserCpuNanos()) + .append(", intervalUsageNanos=").append(getTotalTimeNanos()) + .append(", intervalCpuNanos=").append(getCpuNanos()) + .append(", intervalUserCpuNanos=").append(getUserCpuNanos()) .append(", intervalInvocationCount=").append(intervalInvocationCount) .append(", intervalAdded=").append(intervalAdded) .append(", intervalRemoved=").append(intervalRemoved) .append(", intervalModified=").append(intervalModified) .append(", intervalShifted=").append(intervalShifted) - 
.append(", intervalAllocatedBytes=").append(getIntervalAllocatedBytes()) - .append(", intervalPoolAllocatedBytes=").append(getIntervalPoolAllocatedBytes()) + .append(", intervalAllocatedBytes=").append(getAllocatedBytes()) + .append(", intervalPoolAllocatedBytes=").append(getPoolAllocatedBytes()) .append(", maxTotalMemory=").append(maxTotalMemory) .append(", minFreeMemory=").append(minFreeMemory) .append(", collections=").append(collections) @@ -140,7 +141,7 @@ public LogOutput append(final LogOutput logOutput) { .append('}'); } - public int getId() { + public long getId() { return id; } @@ -217,7 +218,7 @@ public long getIntervalInvocationCount() { */ boolean shouldLogEntryInterval() { return intervalInvocationCount > 0 && - UpdatePerformanceTracker.LOG_THRESHOLD.shouldLog(getIntervalUsageNanos()); + UpdatePerformanceTracker.LOG_THRESHOLD.shouldLog(getTotalTimeNanos()); } public void accumulate(PerformanceEntry entry) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java index f50f53917a2..e33474a0e8a 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java @@ -4,13 +4,14 @@ package io.deephaven.engine.table.impl.perf; import io.deephaven.auth.AuthContext; +import io.deephaven.base.log.LogOutput; +import io.deephaven.base.verify.Assert; import io.deephaven.engine.context.ExecutionContext; import io.deephaven.time.DateTimeUtils; import io.deephaven.engine.table.impl.util.RuntimeMemory; import io.deephaven.util.QueryConstants; import io.deephaven.util.SafeCloseable; - -import java.io.Serializable; +import org.jetbrains.annotations.NotNull; import static io.deephaven.util.QueryConstants.*; @@ -19,19 +20,103 @@ * intimate relationship with another class, {@link 
QueryPerformanceRecorder}. Changes to either should take this lack * of encapsulation into account. */ -public class QueryPerformanceNugget extends BasePerformanceEntry implements Serializable, SafeCloseable { +public class QueryPerformanceNugget extends BasePerformanceEntry implements SafeCloseable { private static final QueryPerformanceLogThreshold LOG_THRESHOLD = new QueryPerformanceLogThreshold("", 1_000_000); private static final QueryPerformanceLogThreshold UNINSTRUMENTED_LOG_THRESHOLD = new QueryPerformanceLogThreshold("Uninstrumented", 1_000_000_000); private static final int MAX_DESCRIPTION_LENGTH = 16 << 10; - private static final long serialVersionUID = 2L; - /** * A re-usable "dummy" nugget which will never collect any information or be recorded. */ static final QueryPerformanceNugget DUMMY_NUGGET = new QueryPerformanceNugget(); + public interface Factory { + /** + * Factory method for query-level nuggets. + * + * @param evaluationNumber A unique identifier for the query evaluation that triggered this nugget creation + * @param description The operation description + * @return A new nugget + */ + default QueryPerformanceNugget createForQuery(final long evaluationNumber, @NotNull final String description) { + return new QueryPerformanceNugget(evaluationNumber, NULL_LONG, NULL_INT, NULL_INT, NULL_INT, + description, false, NULL_LONG); + } + + /** + * Factory method for sub-query-level nuggets. 
+ * + * @param parentQuery The parent query nugget + * @param evaluationNumber A unique identifier for the sub-query evaluation that triggered this nugget creation + * @param description The operation description + * @return A new nugget + */ + default QueryPerformanceNugget createForSubQuery( + @NotNull final QueryPerformanceNugget parentQuery, + final long evaluationNumber, + @NotNull final String description) { + Assert.eqTrue(parentQuery.isQueryLevel(), "parentQuery.isQueryLevel()"); + return new QueryPerformanceNugget(evaluationNumber, parentQuery.getEvaluationNumber(), + NULL_INT, NULL_INT, NULL_INT, description, false, NULL_LONG); + } + + /** + * Factory method for operation-level nuggets. + * + * @param parentQueryOrOperation The parent query / operation nugget + * @param operationNumber A query-unique identifier for the operation + * @param description The operation description + * @return A new nugget + */ + default QueryPerformanceNugget createForOperation( + @NotNull final QueryPerformanceNugget parentQueryOrOperation, + final int operationNumber, + final String description, + final long inputSize) { + int depth = parentQueryOrOperation.getDepth(); + if (depth == NULL_INT) { + depth = 0; + } else { + ++depth; + } + + return new QueryPerformanceNugget( + parentQueryOrOperation.getEvaluationNumber(), + parentQueryOrOperation.getParentEvaluationNumber(), + operationNumber, + parentQueryOrOperation.getOperationNumber(), + depth, + description, + true, // operations are always user + inputSize); + } + + /** + * Factory method for catch-all nuggets. 
+ * + * @param parentQuery The parent query nugget + * @param operationNumber A query-unique identifier for the operation + * @return A new nugget + */ + default QueryPerformanceNugget createForCatchAll( + @NotNull final QueryPerformanceNugget parentQuery, + final int operationNumber) { + Assert.eqTrue(parentQuery.isQueryLevel(), "parentQuery.isQueryLevel()"); + return new QueryPerformanceNugget( + parentQuery.getEvaluationNumber(), + parentQuery.getParentEvaluationNumber(), + operationNumber, + NULL_INT, // catch all has no parent operation + 0, // catch all is a root operation + QueryPerformanceRecorder.UNINSTRUMENTED_CODE_DESCRIPTION, + false, // catch all is not user + NULL_LONG); // catch all has no input size + } + } + + public static final Factory DEFAULT_FACTORY = new Factory() {}; + private final long evaluationNumber; private final long parentEvaluationNumber; private final int operationNumber; @@ -39,14 +124,13 @@ public class QueryPerformanceNugget extends BasePerformanceEntry implements Seri private final int depth; private final String description; private final boolean isUser; - private final boolean isQueryLevel; private final long inputSize; private final AuthContext authContext; private final String callerLine; - private final long startClockTime; - private long endClockTime; + private final long startClockEpochNanos; + private long endClockEpochNanos = NULL_LONG; private volatile QueryState state; @@ -56,23 +140,6 @@ public class QueryPerformanceNugget extends BasePerformanceEntry implements Seri private boolean shouldLogMeAndStackParents; - /** - * For threaded operations we want to accumulate the CPU time, allocations, and read operations to the enclosing - * nugget of the main operation. For the initialization we ignore the wall clock time taken in the thread pool. - */ - private BasePerformanceEntry basePerformanceEntry; - - /** - * Constructor for query-level nuggets. 
- * - * @param evaluationNumber A unique identifier for the query evaluation that triggered this nugget creation - * @param description The operation description - */ - QueryPerformanceNugget(final long evaluationNumber, final long parentEvaluationNumber, final String description) { - this(evaluationNumber, parentEvaluationNumber, NULL_INT, NULL_INT, NULL_INT, description, false, true, - NULL_LONG); - } - /** * Full constructor for nuggets. * @@ -87,7 +154,7 @@ public class QueryPerformanceNugget extends BasePerformanceEntry implements Seri * @param isUser Whether this is a "user" nugget or one created by the system * @param inputSize The size of the input data */ - QueryPerformanceNugget( + protected QueryPerformanceNugget( final long evaluationNumber, final long parentEvaluationNumber, final int operationNumber, @@ -95,7 +162,6 @@ public class QueryPerformanceNugget extends BasePerformanceEntry implements Seri final int depth, final String description, final boolean isUser, - final boolean isQueryLevel, final long inputSize) { startMemorySample = new RuntimeMemory.Sample(); endMemorySample = new RuntimeMemory.Sample(); @@ -111,7 +177,6 @@ public class QueryPerformanceNugget extends BasePerformanceEntry implements Seri this.description = description; } this.isUser = isUser; - this.isQueryLevel = isQueryLevel; this.inputSize = inputSize; authContext = ExecutionContext.getContext().getAuthContext(); @@ -120,7 +185,7 @@ public class QueryPerformanceNugget extends BasePerformanceEntry implements Seri final RuntimeMemory runtimeMemory = RuntimeMemory.getInstance(); runtimeMemory.read(startMemorySample); - startClockTime = System.currentTimeMillis(); + startClockEpochNanos = DateTimeUtils.millisToNanos(System.currentTimeMillis()); onBaseEntryStart(); state = QueryState.RUNNING; @@ -140,15 +205,12 @@ private QueryPerformanceNugget() { depth = 0; description = null; isUser = false; - isQueryLevel = false; inputSize = NULL_LONG; authContext = null; callerLine = null; - 
startClockTime = NULL_LONG; - - basePerformanceEntry = null; + startClockEpochNanos = NULL_LONG; state = null; // This turns close into a no-op. shouldLogMeAndStackParents = false; @@ -202,16 +264,12 @@ private boolean close(final QueryState closingState, final QueryPerformanceRecor return false; } - endClockTime = System.currentTimeMillis(); onBaseEntryEnd(); + endClockEpochNanos = DateTimeUtils.millisToNanos(System.currentTimeMillis()); final RuntimeMemory runtimeMemory = RuntimeMemory.getInstance(); runtimeMemory.read(endMemorySample); - if (basePerformanceEntry != null) { - accumulate(basePerformanceEntry); - } - state = closingState; return recorderToNotify.releaseNugget(this); } @@ -220,11 +278,17 @@ private boolean close(final QueryState closingState, final QueryPerformanceRecor @Override public String toString() { return evaluationNumber - + ":" + operationNumber + + ":" + (isQueryLevel() ? "query_level" : operationNumber) + ":" + description + ":" + callerLine; } + @Override + public LogOutput append(@NotNull final LogOutput logOutput) { + // override BasePerformanceEntry's impl and match toString() + return logOutput.append(toString()); + } + public long getEvaluationNumber() { return evaluationNumber; } @@ -253,8 +317,12 @@ public boolean isUser() { return isUser; } + public boolean isBatchLevel() { + return isQueryLevel() && parentEvaluationNumber == NULL_LONG; + } + public boolean isQueryLevel() { - return isQueryLevel; + return operationNumber == NULL_INT; } public boolean isTopLevel() { @@ -276,46 +344,18 @@ public String getCallerLine() { return callerLine; } - /** - * @return nanoseconds elapsed, once state != QueryState.RUNNING() has been called. 
- */ - public long getTotalTimeNanos() { - return getIntervalUsageNanos(); - } - /** * @return wall clock start time in nanoseconds from the epoch */ - public long getStartClockTime() { - return DateTimeUtils.millisToNanos(startClockTime); + public long getStartClockEpochNanos() { + return startClockEpochNanos; } /** * @return wall clock end time in nanoseconds from the epoch */ - public long getEndClockTime() { - return DateTimeUtils.millisToNanos(endClockTime); - } - - - /** - * Get nanoseconds of CPU time attributed to the instrumented operation. - * - * @return The nanoseconds of CPU time attributed to the instrumented operation, or {@link QueryConstants#NULL_LONG} - * if not enabled/supported. - */ - public long getCpuNanos() { - return getIntervalCpuNanos(); - } - - /** - * Get nanoseconds of user mode CPU time attributed to the instrumented operation. - * - * @return The nanoseconds of user mode CPU time attributed to the instrumented operation, or - * {@link QueryConstants#NULL_LONG} if not enabled/supported. - */ - public long getUserCpuNanos() { - return getIntervalUserCpuNanos(); + public long getEndClockEpochNanos() { + return endClockEpochNanos; } /** @@ -361,26 +401,6 @@ public long getDiffCollectionTimeNanos() { .millisToNanos(endMemorySample.totalCollectionTimeMs - startMemorySample.totalCollectionTimeMs); } - /** - * Get bytes of allocated memory attributed to the instrumented operation. - * - * @return The bytes of allocated memory attributed to the instrumented operation, or - * {@link QueryConstants#NULL_LONG} if not enabled/supported. - */ - public long getAllocatedBytes() { - return getIntervalAllocatedBytes(); - } - - /** - * Get bytes of allocated pooled/reusable memory attributed to the instrumented operation. - * - * @return The bytes of allocated pooled/reusable memory attributed to the instrumented operation, or - * {@link QueryConstants#NULL_LONG} if not enabled/supported. 
- */ - public long getPoolAllocatedBytes() { - return getIntervalPoolAllocatedBytes(); - } - /** * @return true if this nugget was interrupted by an abort() call. */ @@ -403,20 +423,6 @@ public boolean shouldLogMeAndStackParents() { return shouldLogMeAndStackParents; } - /** - * When we track data from other threads that should be attributed to this operation, we tack extra - * BasePerformanceEntry values onto this nugget when it is closed. - *

- * The CPU time, reads, and allocations are counted against this nugget. Wall clock time is ignored. - */ - public synchronized void addBaseEntry(BasePerformanceEntry baseEntry) { - if (this.basePerformanceEntry == null) { - this.basePerformanceEntry = baseEntry; - } else { - this.basePerformanceEntry.accumulate(baseEntry); - } - } - /** * Suppress de minimus performance nuggets using the properties defined above. * @@ -430,6 +436,12 @@ boolean shouldLogNugget(final boolean isUninstrumented) { return true; } + // Nuggets will have a null value for end time if they weren't closed for a RUNNING query; this is an abnormal + // condition and the nugget should be logged + if (endClockEpochNanos == NULL_LONG) { + return true; + } + if (isUninstrumented) { return UNINSTRUMENTED_LOG_THRESHOLD.shouldLog(getTotalTimeNanos()); } else { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java index 06ac6c32562..9ef937cd8d1 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java @@ -37,16 +37,15 @@ public class QueryPerformanceRecorder implements Serializable { public static final String UNINSTRUMENTED_CODE_DESCRIPTION = "Uninstrumented code"; - private static final long serialVersionUID = 2L; private static final String[] packageFilters; - private volatile boolean mustLogForHierarchicalConsistency; private QueryPerformanceNugget queryNugget; private final ArrayList operationNuggets = new ArrayList<>(); - private QueryState state; - private transient QueryPerformanceNugget catchAllNugget; - private final transient Deque userNuggetStack = new ArrayDeque<>(); + private volatile QueryState state; + private QueryPerformanceNugget catchAllNugget; + private final Deque userNuggetStack = new 
ArrayDeque<>(); + private final QueryPerformanceNugget.Factory nuggetFactory = QueryPerformanceNugget.DEFAULT_FACTORY; private static final AtomicLong queriesProcessed = new AtomicLong(0); @@ -102,22 +101,33 @@ private QueryPerformanceRecorder() { * @param description A description for the query. * @return this */ - public QueryPerformanceRecorder startQuery(final String description) { - return startQuery(description, QueryConstants.NULL_LONG); + public QueryPerformanceRecorder startQuery(@NotNull final String description) { + return startQuery(nuggetFactory.createForQuery(queriesProcessed.getAndIncrement(), description)); } /** - * Start a query. - * + * Start a sub-query. + * + * @param parent The parent query. * @param description A description for the query. - * @param parentEvaluationNumber The evaluation number of the parent query. * @return this */ - public synchronized QueryPerformanceRecorder startQuery(final String description, - final long parentEvaluationNumber) { + public QueryPerformanceRecorder startQuery( + @NotNull final QueryPerformanceRecorder parent, + @NotNull final String description) { + return startQuery(nuggetFactory.createForSubQuery( + parent.queryNugget, queriesProcessed.getAndIncrement(), description)); + } + + /** + * Start a query. + * + * @param nugget The newly constructed query level nugget. 
+ * @return this + */ + private synchronized QueryPerformanceRecorder startQuery(final QueryPerformanceNugget nugget) { clear(); - final long evaluationNumber = queriesProcessed.getAndIncrement(); - queryNugget = new QueryPerformanceNugget(evaluationNumber, parentEvaluationNumber, description); + queryNugget = nugget; state = QueryState.RUNNING; startCatchAll(); return this; @@ -143,7 +153,7 @@ public synchronized void abortQuery() { /** * Return the query's current state - * + * * @return the query's state or null if it isn't initialized yet */ public synchronized QueryState getState() { @@ -168,7 +178,7 @@ public synchronized boolean endQuery() { /** * Suspends a query. *

- * This resets the thread local and assumes that this performance nugget may be resumed on another thread. This + * This resets the thread local and assumes that this performance nugget may be resumed on another thread. */ public synchronized void suspendQuery() { if (state != QueryState.RUNNING) { @@ -193,7 +203,7 @@ public synchronized void suspendQuery() { * Resumes a suspend query. *

* It is an error to resume a query while another query is running on this thread. - * + * * @return this */ public synchronized QueryPerformanceRecorder resumeQuery() { @@ -202,10 +212,8 @@ public synchronized QueryPerformanceRecorder resumeQuery() { } final QueryPerformanceRecorder threadLocalInstance = getInstance(); - synchronized (threadLocalInstance) { - if (threadLocalInstance.state == QueryState.RUNNING) { - throw new IllegalStateException("Can't resume a query while another query is in operation"); - } + if (threadLocalInstance.state == QueryState.RUNNING) { + throw new IllegalStateException("Can't resume a query while another query is in operation"); } theLocal.set(this); @@ -217,12 +225,7 @@ public synchronized QueryPerformanceRecorder resumeQuery() { } private void startCatchAll() { - catchAllNugget = new QueryPerformanceNugget( - queryNugget.getEvaluationNumber(), - queryNugget.getParentEvaluationNumber(), - operationNuggets.size(), - QueryConstants.NULL_INT, 0, - UNINSTRUMENTED_CODE_DESCRIPTION, false, false, QueryConstants.NULL_LONG); + catchAllNugget = nuggetFactory.createForCatchAll(queryNugget, operationNuggets.size()); } private void stopCatchAll(final boolean abort) { @@ -263,12 +266,9 @@ public synchronized QueryPerformanceNugget getNugget(final String name, final lo if (catchAllNugget != null) { stopCatchAll(false); } - final int parentOperationNumber = userNuggetStack.isEmpty() ? QueryConstants.NULL_INT - : userNuggetStack.getLast().getOperationNumber(); - final QueryPerformanceNugget nugget = new QueryPerformanceNugget( - queryNugget.getEvaluationNumber(), queryNugget.getParentEvaluationNumber(), - operationNuggets.size(), parentOperationNumber, userNuggetStack.size(), - name, true, false, inputSize); + final QueryPerformanceNugget parent = userNuggetStack.isEmpty() ? 
queryNugget : userNuggetStack.getLast(); + final QueryPerformanceNugget nugget = nuggetFactory.createForOperation( + parent, operationNuggets.size(), name, inputSize); operationNuggets.add(nugget); userNuggetStack.addLast(nugget); return nugget; @@ -278,7 +278,7 @@ public synchronized QueryPerformanceNugget getNugget(final String name, final lo * Note: Do not call this directly - it's for nugget use only. Call nugget.done(), instead. TODO: Reverse the * disclaimer above - I think it's much better for the recorder to support done/abort(nugget), rather than * continuing to have the nugget support done/abort(recorder). - * + * * @param nugget the nugget to be released * @return If the nugget passes criteria for logging. */ @@ -359,10 +359,7 @@ public void setQueryData(final EntrySetter setter) { } public void accumulate(@NotNull final QueryPerformanceRecorder subQuery) { - if (subQuery.mustLogForHierarchicalConsistency()) { - mustLogForHierarchicalConsistency = true; - } - queryNugget.addBaseEntry(subQuery.queryNugget); + queryNugget.accumulate(subQuery.queryNugget); } private void clear() { @@ -372,14 +369,6 @@ private void clear() { userNuggetStack.clear(); } - public long getEvaluationNumber() { - return queryNugget.getEvaluationNumber(); - } - - public boolean mustLogForHierarchicalConsistency() { - return mustLogForHierarchicalConsistency || !operationNuggets.isEmpty(); - } - public synchronized QueryPerformanceNugget getQueryLevelPerformanceData() { return queryNugget; } @@ -503,7 +492,7 @@ private static void finishAndClear(QueryPerformanceNugget nugget, boolean needCl /** * Surround the given code with a Performance Nugget - * + * * @param name the nugget name * @param r the stuff to run */ @@ -580,7 +569,7 @@ public static R withNuggetThrowing( /** * Surround the given code with a Performance Nugget - * + * * @param name the nugget name * @param r the stuff to run */ diff --git 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryProcessingResults.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryProcessingResults.java index afb475ec9ce..6bdc697b598 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryProcessingResults.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryProcessingResults.java @@ -3,8 +3,6 @@ */ package io.deephaven.engine.table.impl.perf; -import io.deephaven.util.QueryConstants; - import java.io.Serializable; public class QueryProcessingResults implements Serializable { @@ -13,7 +11,6 @@ public class QueryProcessingResults implements Serializable { private final QueryPerformanceRecorder recorder; - private volatile Boolean isReplayer = QueryConstants.NULL_BOOLEAN; private volatile String exception = null; @@ -21,14 +18,6 @@ public QueryProcessingResults(final QueryPerformanceRecorder recorder) { this.recorder = recorder; } - public Boolean isReplayer() { - return isReplayer; - } - - public void setReplayer(Boolean replayer) { - isReplayer = replayer; - } - public String getException() { return exception; } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java index 5c794e77070..69432d519a9 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java @@ -9,7 +9,6 @@ import io.deephaven.engine.table.TableDefinition; import io.deephaven.engine.table.impl.perf.UpdatePerformanceTracker.IntervalLevelDetails; import io.deephaven.engine.table.impl.sources.ArrayBackedColumnSource; -import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.stream.StreamChunkUtils; import 
io.deephaven.stream.StreamConsumer; import io.deephaven.stream.StreamPublisher; @@ -20,8 +19,7 @@ class UpdatePerformanceStreamPublisher implements StreamPublisher { private static final TableDefinition DEFINITION = TableDefinition.of( - ColumnDefinition.ofString("ProcessUniqueId"), - ColumnDefinition.ofInt("EntryId"), + ColumnDefinition.ofLong("EntryId"), ColumnDefinition.ofLong("EvaluationNumber"), ColumnDefinition.ofInt("OperationNumber"), ColumnDefinition.ofString("EntryDescription"), @@ -68,33 +66,57 @@ public void register(@NotNull StreamConsumer consumer) { } public synchronized void add(IntervalLevelDetails intervalLevelDetails, PerformanceEntry performanceEntry) { - chunks[0].asWritableObjectChunk().add(EngineMetrics.getProcessInfo().getId().value()); - chunks[1].asWritableIntChunk().add(performanceEntry.getId()); - chunks[2].asWritableLongChunk().add(performanceEntry.getEvaluationNumber()); - chunks[3].asWritableIntChunk().add(performanceEntry.getOperationNumber()); - chunks[4].asWritableObjectChunk().add(performanceEntry.getDescription()); - chunks[5].asWritableObjectChunk().add(performanceEntry.getCallerLine()); - chunks[6].asWritableLongChunk() + // ColumnDefinition.ofInt("EntryId"), + chunks[0].asWritableLongChunk().add(performanceEntry.getId()); + // ColumnDefinition.ofLong("EvaluationNumber"), + chunks[1].asWritableLongChunk().add(performanceEntry.getEvaluationNumber()); + // ColumnDefinition.ofInt("OperationNumber"), + chunks[2].asWritableIntChunk().add(performanceEntry.getOperationNumber()); + // ColumnDefinition.ofString("EntryDescription"), + chunks[3].asWritableObjectChunk().add(performanceEntry.getDescription()); + // ColumnDefinition.ofString("EntryCallerLine"), + chunks[4].asWritableObjectChunk().add(performanceEntry.getCallerLine()); + // ColumnDefinition.ofTime("IntervalStartTime"), + chunks[5].asWritableLongChunk() .add(DateTimeUtils.millisToNanos(intervalLevelDetails.getIntervalStartTimeMillis())); - chunks[7].asWritableLongChunk() + // 
ColumnDefinition.ofTime("IntervalEndTime"), + chunks[6].asWritableLongChunk() .add(DateTimeUtils.millisToNanos(intervalLevelDetails.getIntervalEndTimeMillis())); - chunks[8].asWritableLongChunk().add(intervalLevelDetails.getIntervalDurationNanos()); - chunks[9].asWritableLongChunk().add(performanceEntry.getIntervalUsageNanos()); - chunks[10].asWritableLongChunk().add(performanceEntry.getIntervalCpuNanos()); - chunks[11].asWritableLongChunk().add(performanceEntry.getIntervalUserCpuNanos()); - chunks[12].asWritableLongChunk().add(performanceEntry.getIntervalAdded()); - chunks[13].asWritableLongChunk().add(performanceEntry.getIntervalRemoved()); - chunks[14].asWritableLongChunk().add(performanceEntry.getIntervalModified()); - chunks[15].asWritableLongChunk().add(performanceEntry.getIntervalShifted()); - chunks[16].asWritableLongChunk().add(performanceEntry.getIntervalInvocationCount()); - chunks[17].asWritableLongChunk().add(performanceEntry.getMinFreeMemory()); - chunks[18].asWritableLongChunk().add(performanceEntry.getMaxTotalMemory()); - chunks[19].asWritableLongChunk().add(performanceEntry.getCollections()); - chunks[20].asWritableLongChunk().add(performanceEntry.getCollectionTimeNanos()); - chunks[21].asWritableLongChunk().add(performanceEntry.getIntervalAllocatedBytes()); - chunks[22].asWritableLongChunk().add(performanceEntry.getIntervalPoolAllocatedBytes()); - chunks[23].asWritableObjectChunk().add(Objects.toString(performanceEntry.getAuthContext())); - chunks[24].asWritableObjectChunk().add(Objects.toString(performanceEntry.getUpdateGraphName())); + // ColumnDefinition.ofLong("IntervalDurationNanos"), + chunks[7].asWritableLongChunk().add(intervalLevelDetails.getIntervalDurationNanos()); + // ColumnDefinition.ofLong("EntryIntervalUsage"), + chunks[8].asWritableLongChunk().add(performanceEntry.getTotalTimeNanos()); + // ColumnDefinition.ofLong("EntryIntervalCpuNanos"), + chunks[9].asWritableLongChunk().add(performanceEntry.getCpuNanos()); + // 
ColumnDefinition.ofLong("EntryIntervalUserCpuNanos"), + chunks[10].asWritableLongChunk().add(performanceEntry.getUserCpuNanos()); + // ColumnDefinition.ofLong("EntryIntervalAdded"), + chunks[11].asWritableLongChunk().add(performanceEntry.getIntervalAdded()); + // ColumnDefinition.ofLong("EntryIntervalRemoved"), + chunks[12].asWritableLongChunk().add(performanceEntry.getIntervalRemoved()); + // ColumnDefinition.ofLong("EntryIntervalModified"), + chunks[13].asWritableLongChunk().add(performanceEntry.getIntervalModified()); + // ColumnDefinition.ofLong("EntryIntervalShifted"), + chunks[14].asWritableLongChunk().add(performanceEntry.getIntervalShifted()); + // ColumnDefinition.ofLong("EntryIntervalInvocationCount"), + chunks[15].asWritableLongChunk().add(performanceEntry.getIntervalInvocationCount()); + // ColumnDefinition.ofLong("MinFreeMemory"), + chunks[16].asWritableLongChunk().add(performanceEntry.getMinFreeMemory()); + // ColumnDefinition.ofLong("MaxTotalMemory"), + chunks[17].asWritableLongChunk().add(performanceEntry.getMaxTotalMemory()); + // ColumnDefinition.ofLong("Collections"), + chunks[18].asWritableLongChunk().add(performanceEntry.getCollections()); + // ColumnDefinition.ofLong("CollectionTimeNanos"), + chunks[19].asWritableLongChunk().add(performanceEntry.getCollectionTimeNanos()); + // ColumnDefinition.ofLong("EntryIntervalAllocatedBytes"), + chunks[20].asWritableLongChunk().add(performanceEntry.getAllocatedBytes()); + // ColumnDefinition.ofLong("EntryIntervalPoolAllocatedBytes"), + chunks[21].asWritableLongChunk().add(performanceEntry.getPoolAllocatedBytes()); + // ColumnDefinition.ofString("AuthContext"), + chunks[22].asWritableObjectChunk().add(Objects.toString(performanceEntry.getAuthContext())); + // ColumnDefinition.ofString("UpdateGraph")); + chunks[23].asWritableObjectChunk().add(Objects.toString(performanceEntry.getUpdateGraphName())); + if (chunks[0].size() == CHUNK_SIZE) { flushInternal(); } diff --git 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java index df7dff85b61..e97c53b3b86 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java @@ -32,7 +32,7 @@ import java.util.Objects; import java.util.Queue; import java.util.concurrent.LinkedBlockingDeque; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; /** *

@@ -122,7 +122,7 @@ private synchronized void publish( } } - private static final AtomicInteger entryIdCounter = new AtomicInteger(1); + private static final AtomicLong entryIdCounter = new AtomicLong(1); private final UpdateGraph updateGraph; private final PerformanceEntry aggregatedSmallUpdatesEntry; @@ -137,10 +137,10 @@ private synchronized void publish( public UpdatePerformanceTracker(final UpdateGraph updateGraph) { this.updateGraph = Objects.requireNonNull(updateGraph); this.aggregatedSmallUpdatesEntry = new PerformanceEntry( - QueryConstants.NULL_INT, QueryConstants.NULL_INT, QueryConstants.NULL_INT, + QueryConstants.NULL_LONG, QueryConstants.NULL_INT, QueryConstants.NULL_INT, "Aggregated Small Updates", null, updateGraph.getName()); this.flushEntry = new PerformanceEntry( - QueryConstants.NULL_INT, QueryConstants.NULL_INT, QueryConstants.NULL_INT, + QueryConstants.NULL_LONG, QueryConstants.NULL_INT, QueryConstants.NULL_INT, "UpdatePerformanceTracker Flush", null, updateGraph.getName()); } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java index 5e380fee2c9..e603905e5c7 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java @@ -909,7 +909,7 @@ private void cleanUpAndNotify(final Runnable onCleanupComplete) { if (initialStep) { final QueryPerformanceNugget outerNugget = QueryPerformanceRecorder.getInstance().getOuterNugget(); if (outerNugget != null) { - outerNugget.addBaseEntry(accumulated); + outerNugget.accumulate(accumulated); } } else { source.getUpdateGraph().addNotification(new TerminalNotification() { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java index 
2e5d3d0c88b..bb749fbf712 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java @@ -74,7 +74,7 @@ private EngineMetrics() { } catch (IOException e) { log.fatal().append("Failed to configure process info: ").append(e.toString()).endl(); } - qpImpl = new QueryPerformanceImpl(pInfo.getId(), tableLoggerFactory.queryPerformanceLogLogger()); + qpImpl = new QueryPerformanceImpl(tableLoggerFactory.queryPerformanceLogLogger()); qoplImpl = new QueryOperationPerformanceImpl(pInfo.getId(), tableLoggerFactory.queryOperationPerformanceLogLogger()); if (STATS_LOGGING_ENABLED) { @@ -126,10 +126,6 @@ public void logQueryProcessingResults(@NotNull final QueryProcessingResults resu final List nuggets = results.getRecorder().getOperationLevelPerformanceData(); synchronized (qoplLogger) { - if (results.getRecorder().mustLogForHierarchicalConsistency()) { - // if this query has sub queries or op nuggets log an entry to enable hierarchical consistency - qoplLogger.log(queryNugget); - } for (QueryPerformanceNugget nugget : nuggets) { qoplLogger.log(nugget); } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java index 1e10b2de73b..3765f8fe01a 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java @@ -42,7 +42,6 @@ public Table blinkTable() { @Override public void log( @NotNull final Flags flags, - final int deprecatedArgument, @NotNull final QueryPerformanceNugget nugget) throws IOException { publisher.add(id.value(), nugget); qoplLogger.log(flags, nugget); diff --git 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java index d21e2b72b08..cbdd0fa1917 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java @@ -20,7 +20,6 @@ class QueryOperationPerformanceStreamPublisher implements StreamPublisher { private static final TableDefinition DEFINITION = TableDefinition.of( - ColumnDefinition.ofString("ProcessUniqueId"), ColumnDefinition.ofLong("EvaluationNumber"), ColumnDefinition.ofLong("ParentEvaluationNumber"), ColumnDefinition.ofInt("OperationNumber"), @@ -28,8 +27,6 @@ class QueryOperationPerformanceStreamPublisher implements StreamPublisher { ColumnDefinition.ofInt("Depth"), ColumnDefinition.ofString("Description"), ColumnDefinition.ofString("CallerLine"), - ColumnDefinition.ofBoolean("IsQueryLevel"), - ColumnDefinition.ofBoolean("IsTopLevel"), ColumnDefinition.ofBoolean("IsCompilation"), ColumnDefinition.ofTime("StartTime"), ColumnDefinition.ofTime("EndTime"), @@ -72,86 +69,77 @@ public synchronized void add( final String id, final QueryPerformanceNugget nugget) { - // ColumnDefinition.ofString("ProcessUniqueId"), - chunks[0].asWritableObjectChunk().add(id); - // ColumnDefinition.ofLong("EvaluationNumber"), - chunks[1].asWritableLongChunk().add(nugget.getEvaluationNumber()); + chunks[0].asWritableLongChunk().add(nugget.getEvaluationNumber()); // ColumnDefinition.ofLong("ParentEvaluationNumber"), - chunks[2].asWritableLongChunk().add(nugget.getParentEvaluationNumber()); + chunks[1].asWritableLongChunk().add(nugget.getParentEvaluationNumber()); // ColumnDefinition.ofInt("OperationNumber"), - chunks[3].asWritableIntChunk().add(nugget.getOperationNumber()); + 
chunks[2].asWritableIntChunk().add(nugget.getOperationNumber()); // ColumnDefinition.ofInt("ParentOperationNumber"), - chunks[4].asWritableIntChunk().add(nugget.getParentOperationNumber()); + chunks[3].asWritableIntChunk().add(nugget.getParentOperationNumber()); // ColumnDefinition.ofInt("Depth"), - chunks[5].asWritableIntChunk().add(nugget.getDepth()); + chunks[4].asWritableIntChunk().add(nugget.getDepth()); // ColumnDefinition.ofString("Description"), - chunks[6].asWritableObjectChunk().add(nugget.getName()); + chunks[5].asWritableObjectChunk().add(nugget.getName()); // ColumnDefinition.ofString("CallerLine"), - chunks[7].asWritableObjectChunk().add(nugget.getCallerLine()); - - // ColumnDefinition.ofBoolean("IsQueryLevel"), - chunks[8].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.isQueryLevel())); - - // ColumnDefinition.ofBoolean("IsTopLevel"), - chunks[9].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.isTopLevel())); + chunks[6].asWritableObjectChunk().add(nugget.getCallerLine()); // ColumnDefinition.ofBoolean("IsCompilation"), - chunks[10].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.getName().startsWith("Compile:"))); + chunks[7].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.getName().startsWith("Compile:"))); // ColumnDefinition.ofTime("StartTime"), - chunks[11].asWritableLongChunk().add(nugget.getStartClockTime()); + chunks[8].asWritableLongChunk().add(nugget.getStartClockEpochNanos()); // ColumnDefinition.ofTime("EndTime"), - chunks[12].asWritableLongChunk().add(nugget.getEndClockTime()); + chunks[9].asWritableLongChunk().add(nugget.getEndClockEpochNanos()); // ColumnDefinition.ofLong("DurationNanos"), - chunks[13].asWritableLongChunk().add(nugget.getTotalTimeNanos()); + chunks[10].asWritableLongChunk().add(nugget.getTotalTimeNanos()); // ColumnDefinition.ofLong("CpuNanos"), - chunks[14].asWritableLongChunk().add(nugget.getCpuNanos()); + chunks[11].asWritableLongChunk().add(nugget.getCpuNanos()); // 
ColumnDefinition.ofLong("UserCpuNanos"), - chunks[15].asWritableLongChunk().add(nugget.getUserCpuNanos()); + chunks[12].asWritableLongChunk().add(nugget.getUserCpuNanos()); // ColumnDefinition.ofLong("FreeMemory"), - chunks[16].asWritableLongChunk().add(nugget.getEndFreeMemory()); + chunks[13].asWritableLongChunk().add(nugget.getEndFreeMemory()); // ColumnDefinition.ofLong("TotalMemory"), - chunks[17].asWritableLongChunk().add(nugget.getEndTotalMemory()); + chunks[14].asWritableLongChunk().add(nugget.getEndTotalMemory()); // ColumnDefinition.ofLong("FreeMemoryChange"), - chunks[18].asWritableLongChunk().add(nugget.getDiffFreeMemory()); + chunks[15].asWritableLongChunk().add(nugget.getDiffFreeMemory()); // ColumnDefinition.ofLong("TotalMemoryChange"), - chunks[19].asWritableLongChunk().add(nugget.getDiffTotalMemory()); + chunks[16].asWritableLongChunk().add(nugget.getDiffTotalMemory()); // ColumnDefinition.ofLong("Collections") - chunks[20].asWritableLongChunk().add(nugget.getDiffCollections()); + chunks[17].asWritableLongChunk().add(nugget.getDiffCollections()); // ColumnDefinition.ofLong("CollectionTimeNanos"), - chunks[21].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); + chunks[18].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); // ColumnDefinition.ofLong("AllocatedBytes"), - chunks[22].asWritableLongChunk().add(nugget.getAllocatedBytes()); + chunks[19].asWritableLongChunk().add(nugget.getAllocatedBytes()); // ColumnDefinition.ofLong("PoolAllocatedBytes"), - chunks[23].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); + chunks[20].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); // ColumnDefinition.ofLong("InputSizeLong"), - chunks[24].asWritableLongChunk().add(nugget.getInputSize()); + chunks[21].asWritableLongChunk().add(nugget.getInputSize()); // ColumnDefinition.ofBoolean("WasInterrupted") - chunks[25].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); + 
chunks[22].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); // ColumnDefinition.ofString("AuthContext") - chunks[26].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); + chunks[23].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); if (chunks[0].size() == CHUNK_SIZE) { flushInternal(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java index b08496d33c5..db3b045080e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java @@ -8,7 +8,6 @@ import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.engine.tablelogger.QueryPerformanceLogLogger; -import io.deephaven.process.ProcessUniqueId; import io.deephaven.stream.StreamToBlinkTableAdapter; import io.deephaven.tablelogger.Row.Flags; import org.jetbrains.annotations.NotNull; @@ -17,15 +16,13 @@ import java.util.Objects; class QueryPerformanceImpl implements QueryPerformanceLogLogger { - private final ProcessUniqueId id; private final QueryPerformanceLogLogger qplLogger; private final QueryPerformanceStreamPublisher publisher; @SuppressWarnings("FieldCanBeLocal") private final StreamToBlinkTableAdapter adapter; private final Table blink; - public QueryPerformanceImpl(ProcessUniqueId id, QueryPerformanceLogLogger qplLogger) { - this.id = Objects.requireNonNull(id); + public QueryPerformanceImpl(QueryPerformanceLogLogger qplLogger) { this.qplLogger = Objects.requireNonNull(qplLogger); this.publisher = new QueryPerformanceStreamPublisher(); this.adapter = new StreamToBlinkTableAdapter( @@ -43,10 +40,9 @@ public Table blinkTable() { @Override public void log( @NotNull final Flags 
flags, - final long deprecatedField, @NotNull final QueryProcessingResults queryProcessingResults, @NotNull final QueryPerformanceNugget nugget) throws IOException { - publisher.add(id.value(), queryProcessingResults, nugget); + publisher.add(queryProcessingResults, nugget); qplLogger.log(flags, queryProcessingResults, nugget); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java index 7551f98498c..4c04bbd5078 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java @@ -21,9 +21,9 @@ class QueryPerformanceStreamPublisher implements StreamPublisher { private static final TableDefinition DEFINITION = TableDefinition.of( - ColumnDefinition.ofString("ProcessUniqueId"), ColumnDefinition.ofLong("EvaluationNumber"), ColumnDefinition.ofLong("ParentEvaluationNumber"), + ColumnDefinition.ofString("Description"), ColumnDefinition.ofTime("StartTime"), ColumnDefinition.ofTime("EndTime"), ColumnDefinition.ofLong("DurationNanos"), @@ -38,7 +38,6 @@ class QueryPerformanceStreamPublisher implements StreamPublisher { ColumnDefinition.ofLong("AllocatedBytes"), ColumnDefinition.ofLong("PoolAllocatedBytes"), ColumnDefinition.ofBoolean("WasInterrupted"), - ColumnDefinition.ofBoolean("IsReplayer"), ColumnDefinition.ofString("Exception"), ColumnDefinition.ofString("AuthContext")); private static final int CHUNK_SIZE = ArrayBackedColumnSource.BLOCK_SIZE; @@ -63,23 +62,23 @@ public void register(@NotNull StreamConsumer consumer) { } public synchronized void add( - final String id, final QueryProcessingResults queryProcessingResults, final QueryPerformanceNugget nugget) { - // ColumnDefinition.ofString("ProcessUniqueId"), - chunks[0].asWritableObjectChunk().add(id); // 
ColumnDefinition.ofLong("EvaluationNumber") - chunks[1].asWritableLongChunk().add(nugget.getEvaluationNumber()); + chunks[0].asWritableLongChunk().add(nugget.getEvaluationNumber()); // ColumnDefinition.ofLong("ParentEvaluationNumber") - chunks[2].asWritableLongChunk().add(nugget.getParentEvaluationNumber()); + chunks[1].asWritableLongChunk().add(nugget.getParentEvaluationNumber()); + + // ColumnDefinition.ofString("Description") + chunks[2].asWritableObjectChunk().add(nugget.getName()); // ColumnDefinition.ofTime("StartTime"); - chunks[3].asWritableLongChunk().add(nugget.getStartClockTime()); + chunks[3].asWritableLongChunk().add(nugget.getStartClockEpochNanos()); // ColumnDefinition.ofTime("EndTime") - chunks[4].asWritableLongChunk().add(nugget.getEndClockTime()); + chunks[4].asWritableLongChunk().add(nugget.getEndClockEpochNanos()); // ColumnDefinition.ofLong("DurationNanos") chunks[5].asWritableLongChunk().add(nugget.getTotalTimeNanos()); @@ -117,14 +116,11 @@ public synchronized void add( // ColumnDefinition.ofBoolean("WasInterrupted") chunks[16].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); - // ColumnDefinition.ofBoolean("IsReplayer") - chunks[17].asWritableByteChunk().add(BooleanUtils.booleanAsByte(queryProcessingResults.isReplayer())); - // ColumnDefinition.ofString("Exception") - chunks[18].asWritableObjectChunk().add(queryProcessingResults.getException()); + chunks[17].asWritableObjectChunk().add(queryProcessingResults.getException()); // ColumnDefinition.ofString("AuthContext") - chunks[19].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); + chunks[18].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); if (chunks[0].size() == CHUNK_SIZE) { flushInternal(); diff --git a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java index 
dfc961175c1..1b828022a00 100644 --- a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java +++ b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryOperationPerformanceLogLogger.java @@ -18,14 +18,7 @@ default void log(@NotNull final QueryPerformanceNugget nugget) throws IOExceptio log(DEFAULT_INTRADAY_LOGGER_FLAGS, nugget); } - default void log( - @NotNull final Row.Flags flags, - @NotNull final QueryPerformanceNugget nugget) throws IOException { - log(flags, nugget.getOperationNumber(), nugget); - } - - // This prototype is going to be deprecated in 0.31 in favor of the one above. - void log(Row.Flags flags, int operationNumber, QueryPerformanceNugget nugget) throws IOException; + void log(@NotNull Row.Flags flags, @NotNull QueryPerformanceNugget nugget) throws IOException; enum Noop implements QueryOperationPerformanceLogLogger { INSTANCE; @@ -33,9 +26,6 @@ enum Noop implements QueryOperationPerformanceLogLogger { @Override public void log( @NotNull final Flags flags, - final int operationNumber, - @NotNull final QueryPerformanceNugget nugget) throws IOException { - - } + @NotNull final QueryPerformanceNugget nugget) throws IOException {} } } diff --git a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java index 339e4e3346f..9d6959930dd 100644 --- a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java +++ b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java @@ -21,20 +21,10 @@ default void log( log(DEFAULT_INTRADAY_LOGGER_FLAGS, queryProcessingResults, nugget); } - default void log( + void log( @NotNull final Row.Flags flags, @NotNull final QueryProcessingResults queryProcessingResults, - @NotNull final QueryPerformanceNugget nugget) throws IOException { - log(flags, nugget.getEvaluationNumber(), 
queryProcessingResults, nugget); - } - - // This prototype is going to be deprecated in 0.31 in favor of the one above. - void log( - Row.Flags flags, - final long evaluationNumber, - QueryProcessingResults queryProcessingResults, - QueryPerformanceNugget nugget) - throws IOException; + @NotNull final QueryPerformanceNugget nugget) throws IOException; enum Noop implements QueryPerformanceLogLogger { INSTANCE; @@ -42,7 +32,6 @@ enum Noop implements QueryPerformanceLogLogger { @Override public void log( @NotNull final Flags flags, - final long evaluationNumber, @NotNull final QueryProcessingResults queryProcessingResults, @NotNull final QueryPerformanceNugget nugget) throws IOException { diff --git a/props/configs/src/main/resources/defaultPackageFilters.qpr b/props/configs/src/main/resources/defaultPackageFilters.qpr index 577076bdf81..df0b4c41167 100644 --- a/props/configs/src/main/resources/defaultPackageFilters.qpr +++ b/props/configs/src/main/resources/defaultPackageFilters.qpr @@ -3,8 +3,8 @@ sun. groovy.lang. org.codehaus.groovy. io.deephaven. -io.deephaven.engine. io.grpc. com.google.common. org.eclipse. jdk.internal. +org.jpy. 
diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index 16e34eecd7c..52319088c75 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -989,8 +989,8 @@ private void doExport() { } else if (queryPerformanceRecorder != null) { // this is a sub-query; no need to re-log the session id exportRecorder = QueryPerformanceRecorder.getInstance().startQuery( - "ExportObject#doWork(exportId=" + logIdentity + ")", - queryPerformanceRecorder.getEvaluationNumber()); + queryPerformanceRecorder, + "ExportObject#doWork(exportId=" + logIdentity + ")"); } else { exportRecorder = QueryPerformanceRecorder.getInstance().startQuery( "ExportObject#doWork(session=" + session.sessionId + ",exportId=" + logIdentity + ")"); From 81dd004c4682f7b3fc363c7be7a52116397ff61d Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Wed, 8 Nov 2023 14:27:39 -0700 Subject: [PATCH 07/31] Split QueryPerformanceRecorder in Two --- .../impl/perf/QueryPerformanceNugget.java | 7 +- .../impl/perf/QueryPerformanceRecorder.java | 410 ++++-------------- .../perf/QueryPerformanceRecorderImpl.java | 332 ++++++++++++++ .../server/session/SessionState.java | 33 +- .../table/ops/TableServiceGrpcImpl.java | 37 +- 5 files changed, 472 insertions(+), 347 deletions(-) create mode 100644 engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java index e33474a0e8a..fdf02e0d64c 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java @@ -29,7 +29,12 @@ 
public class QueryPerformanceNugget extends BasePerformanceEntry implements Safe /** * A re-usable "dummy" nugget which will never collect any information or be recorded. */ - static final QueryPerformanceNugget DUMMY_NUGGET = new QueryPerformanceNugget(); + static final QueryPerformanceNugget DUMMY_NUGGET = new QueryPerformanceNugget() { + @Override + public void accumulate(@NotNull BasePerformanceEntry entry) { + // non-synchronized no-op override + } + }; public interface Factory { /** diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java index 9ef937cd8d1..e417585bd15 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java @@ -7,9 +7,6 @@ import io.deephaven.configuration.Configuration; import io.deephaven.datastructures.util.CollectionUtil; import io.deephaven.chunk.util.pools.ChunkPoolInstrumentation; -import io.deephaven.engine.exceptions.CancellationException; -import io.deephaven.engine.table.Table; -import io.deephaven.engine.util.TableTools; import io.deephaven.engine.updategraph.UpdateGraphLock; import io.deephaven.util.QueryConstants; import io.deephaven.util.function.ThrowingRunnable; @@ -17,6 +14,7 @@ import io.deephaven.util.profiling.ThreadProfiler; import org.apache.commons.lang3.mutable.MutableLong; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import java.io.*; import java.net.URL; @@ -29,28 +27,20 @@ /** * Query performance instrumentation tools. Manages a hierarchy of {@link QueryPerformanceNugget} instances. - *

- * Thread-safety note: This used to be thread-safe only by virtue of using a thread-local instance. Now it's - * aggressively synchronized so we can abort it from outside the "owner" thread. */ -public class QueryPerformanceRecorder implements Serializable { +public abstract class QueryPerformanceRecorder { public static final String UNINSTRUMENTED_CODE_DESCRIPTION = "Uninstrumented code"; private static final String[] packageFilters; - private QueryPerformanceNugget queryNugget; - private final ArrayList operationNuggets = new ArrayList<>(); + protected static final AtomicLong queriesProcessed = new AtomicLong(0); - private volatile QueryState state; - private QueryPerformanceNugget catchAllNugget; - private final Deque userNuggetStack = new ArrayDeque<>(); - private final QueryPerformanceNugget.Factory nuggetFactory = QueryPerformanceNugget.DEFAULT_FACTORY; + static final QueryPerformanceRecorder DUMMY_RECORDER = new DummyQueryPerformanceRecorder(); - private static final AtomicLong queriesProcessed = new AtomicLong(0); - - private static final ThreadLocal theLocal = - ThreadLocal.withInitial(QueryPerformanceRecorder::new); + /** thread local is package private to enable query resumption */ + static final ThreadLocal theLocal = + ThreadLocal.withInitial(() -> DUMMY_RECORDER); private static final ThreadLocal poolAllocatedBytes = ThreadLocal.withInitial( () -> new MutableLong(ThreadProfiler.DEFAULT.memoryProfilingAvailable() ? 0L : io.deephaven.util.QueryConstants.NULL_LONG)); @@ -91,315 +81,54 @@ public static void resetInstance() { theLocal.remove(); } - private QueryPerformanceRecorder() { - // private default constructor to prevent direct instantiation - } - - /** - * Start a query. - * - * @param description A description for the query. 
- * @return this - */ - public QueryPerformanceRecorder startQuery(@NotNull final String description) { - return startQuery(nuggetFactory.createForQuery(queriesProcessed.getAndIncrement(), description)); - } - - /** - * Start a sub-query. - * - * @param parent The parent query. - * @param description A description for the query. - * @return this - */ - public QueryPerformanceRecorder startQuery( - @NotNull final QueryPerformanceRecorder parent, - @NotNull final String description) { - return startQuery(nuggetFactory.createForSubQuery( - parent.queryNugget, queriesProcessed.getAndIncrement(), description)); - } - - /** - * Start a query. - * - * @param nugget The newly constructed query level nugget. - * @return this - */ - private synchronized QueryPerformanceRecorder startQuery(final QueryPerformanceNugget nugget) { - clear(); - queryNugget = nugget; - state = QueryState.RUNNING; - startCatchAll(); - return this; - } - - /** - * Abort a query. - */ - public synchronized void abortQuery() { - if (state != QueryState.RUNNING) { - return; - } - state = QueryState.INTERRUPTED; - if (catchAllNugget != null) { - stopCatchAll(true); - } else { - while (!userNuggetStack.isEmpty()) { - userNuggetStack.peekLast().abort(this); - } - } - queryNugget.abort(this); - } - - /** - * Return the query's current state - * - * @return the query's state or null if it isn't initialized yet - */ - public synchronized QueryState getState() { - return state; - } - - /** - * End a query. - */ - public synchronized boolean endQuery() { - if (state != QueryState.RUNNING) { - return false; - } - - state = QueryState.FINISHED; - Assert.neqNull(catchAllNugget, "catchAllNugget"); - Assert.neqNull(queryNugget, "queryNugget"); - stopCatchAll(false); - return queryNugget.done(this); - } - - /** - * Suspends a query. - *

- * This resets the thread local and assumes that this performance nugget may be resumed on another thread. - */ - public synchronized void suspendQuery() { - if (state != QueryState.RUNNING) { - throw new IllegalStateException("Can't suspend a query that isn't running"); - } - - final QueryPerformanceRecorder threadLocalInstance = getInstance(); - if (threadLocalInstance != this) { - throw new IllegalStateException("Can't suspend a query that doesn't belong to this thread"); - } - - state = QueryState.SUSPENDED; - Assert.neqNull(catchAllNugget, "catchAllNugget"); - stopCatchAll(false); - queryNugget.onBaseEntryEnd(); - - // Very likely this QPR is being passed to another thread, be safe and reset the thread local instance. - resetInstance(); - } - - /** - * Resumes a suspend query. - *

- * It is an error to resume a query while another query is running on this thread. - * - * @return this - */ - public synchronized QueryPerformanceRecorder resumeQuery() { - if (state != QueryState.SUSPENDED) { - throw new IllegalStateException("Can't resume a query that isn't suspended"); - } - - final QueryPerformanceRecorder threadLocalInstance = getInstance(); - if (threadLocalInstance.state == QueryState.RUNNING) { - throw new IllegalStateException("Can't resume a query while another query is in operation"); - } - theLocal.set(this); - - queryNugget.onBaseEntryStart(); - state = QueryState.RUNNING; - Assert.eqNull(catchAllNugget, "catchAllNugget"); - startCatchAll(); - return this; - } - - private void startCatchAll() { - catchAllNugget = nuggetFactory.createForCatchAll(queryNugget, operationNuggets.size()); - } - - private void stopCatchAll(final boolean abort) { - final boolean shouldLog; - if (abort) { - shouldLog = catchAllNugget.abort(this); - } else { - shouldLog = catchAllNugget.done(this); - } - if (shouldLog) { - Assert.eq(operationNuggets.size(), "operationsNuggets.size()", - catchAllNugget.getOperationNumber(), "catchAllNugget.getOperationNumber()"); - operationNuggets.add(catchAllNugget); - } - catchAllNugget = null; - } - /** * @param name the nugget name * @return A new QueryPerformanceNugget to encapsulate user query operations. done() must be called on the nugget. */ - public QueryPerformanceNugget getNugget(String name) { - return getNugget(name, QueryConstants.NULL_LONG); - } + public abstract QueryPerformanceNugget getNugget(@NotNull String name); /** * @param name the nugget name * @param inputSize the nugget's input size * @return A new QueryPerformanceNugget to encapsulate user query operations. done() must be called on the nugget. 
*/ - public synchronized QueryPerformanceNugget getNugget(final String name, final long inputSize) { - if (state != QueryState.RUNNING) { - return QueryPerformanceNugget.DUMMY_NUGGET; - } - if (Thread.interrupted()) { - throw new CancellationException("interrupted in QueryPerformanceNugget"); - } - if (catchAllNugget != null) { - stopCatchAll(false); - } - final QueryPerformanceNugget parent = userNuggetStack.isEmpty() ? queryNugget : userNuggetStack.getLast(); - final QueryPerformanceNugget nugget = nuggetFactory.createForOperation( - parent, operationNuggets.size(), name, inputSize); - operationNuggets.add(nugget); - userNuggetStack.addLast(nugget); - return nugget; - } + public abstract QueryPerformanceNugget getNugget(@NotNull String name, long inputSize); + + /** + * @return The nugget currently in effect or else a dummy nugget if no nugget is in effect. + */ + public abstract QueryPerformanceNugget getOuterNugget(); /** - * Note: Do not call this directly - it's for nugget use only. Call nugget.done(), instead. TODO: Reverse the - * disclaimer above - I think it's much better for the recorder to support done/abort(nugget), rather than - * continuing to have the nugget support done/abort(recorder). + * Note: Do not call this directly - it's for nugget use only. Call {@link QueryPerformanceNugget#done()} or + * {@link QueryPerformanceNugget#close()} instead. * + * @implNote This method is package private to limit visibility. * @param nugget the nugget to be released * @return If the nugget passes criteria for logging. 
*/ - synchronized boolean releaseNugget(QueryPerformanceNugget nugget) { - boolean shouldLog = nugget.shouldLogNugget(nugget == catchAllNugget); - if (!nugget.isUser()) { - return shouldLog; - } - - final QueryPerformanceNugget removed = userNuggetStack.removeLast(); - if (nugget != removed) { - throw new IllegalStateException( - "Released query performance nugget " + nugget + " (" + System.identityHashCode(nugget) + - ") didn't match the top of the user nugget stack " + removed + " (" - + System.identityHashCode(removed) + - ") - did you follow the correct try/finally pattern?"); - } - - if (removed.shouldLogMeAndStackParents()) { - shouldLog = true; - if (!userNuggetStack.isEmpty()) { - userNuggetStack.getLast().setShouldLogMeAndStackParents(); - } - } - if (!shouldLog) { - // If we have filtered this nugget, by our filter design we will also have filtered any nuggets it encloses. - // This means it *must* be the last entry in operationNuggets, so we can safely remove it in O(1). - final QueryPerformanceNugget lastNugget = operationNuggets.remove(operationNuggets.size() - 1); - if (nugget != lastNugget) { - throw new IllegalStateException( - "Filtered query performance nugget " + nugget + " (" + System.identityHashCode(nugget) + - ") didn't match the last operation nugget " + lastNugget + " (" - + System.identityHashCode(lastNugget) + - ")"); - } - } + abstract boolean releaseNugget(QueryPerformanceNugget nugget); - if (userNuggetStack.isEmpty() && queryNugget != null && state == QueryState.RUNNING) { - startCatchAll(); - } + /** + * @return the query level performance data + */ + public abstract QueryPerformanceNugget getQueryLevelPerformanceData(); - return shouldLog; - } + /** + * @return A list of loggable operation performance data. 
+ */ + public abstract List getOperationLevelPerformanceData(); public interface EntrySetter { void set(long evaluationNumber, int operationNumber, boolean uninstrumented); } - public synchronized QueryPerformanceNugget getOuterNugget() { - return userNuggetStack.peekLast(); - } - - // returns true if uninstrumented code data was captured. - public void setQueryData(final EntrySetter setter) { - final long evaluationNumber; - final int operationNumber; - boolean uninstrumented = false; - synchronized (this) { - if (state != QueryState.RUNNING) { - setter.set(QueryConstants.NULL_INT, QueryConstants.NULL_INT, false); - return; - } - evaluationNumber = queryNugget.getEvaluationNumber(); - operationNumber = operationNuggets.size(); - if (operationNumber > 0) { - // ensure UPL and QOPL are consistent/joinable. - if (!userNuggetStack.isEmpty()) { - userNuggetStack.getLast().setShouldLogMeAndStackParents(); - } else { - uninstrumented = true; - if (catchAllNugget != null) { - catchAllNugget.setShouldLogMeAndStackParents(); - } - } - } - } - setter.set(evaluationNumber, operationNumber, uninstrumented); - } - - public void accumulate(@NotNull final QueryPerformanceRecorder subQuery) { - queryNugget.accumulate(subQuery.queryNugget); - } - - private void clear() { - queryNugget = null; - catchAllNugget = null; - operationNuggets.clear(); - userNuggetStack.clear(); - } - - public synchronized QueryPerformanceNugget getQueryLevelPerformanceData() { - return queryNugget; - } - - public synchronized List getOperationLevelPerformanceData() { - return operationNuggets; - } - - @SuppressWarnings("unused") - public synchronized Table getTimingResultsAsTable() { - final int count = operationNuggets.size(); - final String[] names = new String[count]; - final Long[] timeNanos = new Long[count]; - final String[] callerLine = new String[count]; - final Boolean[] isTopLevel = new Boolean[count]; - final Boolean[] isCompileTime = new Boolean[count]; - - for (int i = 0; i < 
operationNuggets.size(); i++) { - timeNanos[i] = operationNuggets.get(i).getTotalTimeNanos(); - names[i] = operationNuggets.get(i).getName(); - callerLine[i] = operationNuggets.get(i).getCallerLine(); - isTopLevel[i] = operationNuggets.get(i).isTopLevel(); - isCompileTime[i] = operationNuggets.get(i).getName().startsWith("Compile:"); - } - return TableTools.newTable( - TableTools.col("names", names), - TableTools.col("line", callerLine), - TableTools.col("timeNanos", timeNanos), - TableTools.col("isTopLevel", isTopLevel), - TableTools.col("isCompileTime", isCompileTime)); - } + /** + * TODO NATE NOCOMMIT WRITE JAVADOC + * + * @param setter + */ + public abstract void setQueryData(final EntrySetter setter); /** * Install {@link QueryPerformanceRecorder#recordPoolAllocation(java.util.function.Supplier)} as the allocation @@ -417,7 +146,7 @@ public static void installUpdateGraphLockInstrumentation() { @Override public void recordAction(@NotNull String description, @NotNull Runnable action) { - QueryPerformanceRecorder.withNugget(description, action::run); + QueryPerformanceRecorder.withNugget(description, action); } @Override @@ -476,20 +205,6 @@ public static String getCallerLine() { return callerLineCandidate == null ? "Internal" : callerLineCandidate; } - /*------------------------------------------------------------------------------------------------------------------ - * TODO: the following execute-around methods might be better in a separate class or interface - */ - - private static void finishAndClear(QueryPerformanceNugget nugget, boolean needClear) { - if (nugget != null) { - nugget.done(); - } - - if (needClear) { - clearCallsite(); - } - } - /** * Surround the given code with a Performance Nugget * @@ -714,4 +429,63 @@ public static boolean setCallsite() { public static void clearCallsite() { cachedCallsite.remove(); } + + /** + * Finish the nugget and clear the callsite if needed. 
+ * + * @param nugget an optional nugget + * @param needClear true if the callsite needs to be cleared + */ + private static void finishAndClear(@Nullable final QueryPerformanceNugget nugget, final boolean needClear) { + if (nugget != null) { + nugget.done(); + } + + if (needClear) { + clearCallsite(); + } + } + + /** + * Dummy recorder for use when no recorder is installed. + */ + private static class DummyQueryPerformanceRecorder extends QueryPerformanceRecorder { + + @Override + public QueryPerformanceNugget getNugget(@NotNull final String name) { + return QueryPerformanceNugget.DUMMY_NUGGET; + } + + @Override + public QueryPerformanceNugget getNugget(@NotNull final String name, long inputSize) { + return QueryPerformanceNugget.DUMMY_NUGGET; + } + + @Override + public QueryPerformanceNugget getOuterNugget() { + return QueryPerformanceNugget.DUMMY_NUGGET; + } + + @Override + boolean releaseNugget(@NotNull final QueryPerformanceNugget nugget) { + Assert.eqTrue(nugget == QueryPerformanceNugget.DUMMY_NUGGET, + "nugget == QueryPerformanceNugget.DUMMY_NUGGET"); + return false; + } + + @Override + public QueryPerformanceNugget getQueryLevelPerformanceData() { + return QueryPerformanceNugget.DUMMY_NUGGET; + } + + @Override + public List getOperationLevelPerformanceData() { + return Collections.emptyList(); + } + + @Override + public void setQueryData(EntrySetter setter) { + setter.set(QueryConstants.NULL_LONG, QueryConstants.NULL_INT, false); + } + } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java new file mode 100644 index 00000000000..407f5d91415 --- /dev/null +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java @@ -0,0 +1,332 @@ +/** + * Copyright (c) 2016-2023 Deephaven Data Labs and Patent Pending + */ +package io.deephaven.engine.table.impl.perf; + 
+import io.deephaven.base.verify.Assert; +import io.deephaven.engine.exceptions.CancellationException; +import io.deephaven.engine.table.Table; +import io.deephaven.engine.util.TableTools; +import io.deephaven.util.QueryConstants; +import org.jetbrains.annotations.NotNull; + +import java.util.*; + +/** + * Query performance instrumentation implementation. Manages a hierarchy of {@link QueryPerformanceNugget} instances. + *

+ * Many methods are synchronized to 1) support external abortion of query and 2) for scenarios where the query is + * suspended and resumed on another thread. + */ +public class QueryPerformanceRecorderImpl extends QueryPerformanceRecorder { + + private final QueryPerformanceNugget queryNugget; + private final ArrayList operationNuggets = new ArrayList<>(); + + private QueryState state; + private QueryPerformanceNugget catchAllNugget; + private final Deque userNuggetStack = new ArrayDeque<>(); + private final QueryPerformanceNugget.Factory nuggetFactory; + + /** + * Creates a new QueryPerformanceRecorderImpl and starts the query. + * + * @param description a description for the query + * @param nuggetFactory the factory to use for creating new nuggets + */ + public QueryPerformanceRecorderImpl( + @NotNull final String description, + @NotNull final QueryPerformanceNugget.Factory nuggetFactory) { + this(nuggetFactory.createForQuery(queriesProcessed.getAndIncrement(), description), nuggetFactory); + } + + /** + * Constructor for a sub-query. + * + * @param description a description for the query + * @param parent the parent query + * @param nuggetFactory the factory to use for creating new nuggets + */ + public QueryPerformanceRecorderImpl( + @NotNull final String description, + @NotNull final QueryPerformanceRecorderImpl parent, + @NotNull final QueryPerformanceNugget.Factory nuggetFactory) { + this(nuggetFactory.createForSubQuery( + parent.queryNugget, queriesProcessed.getAndIncrement(), description), nuggetFactory); + } + + /** + * @param queryNugget The newly constructed query level queryNugget. + * @param nuggetFactory The factory to use for creating new nuggets. 
+ */ + private QueryPerformanceRecorderImpl( + @NotNull final QueryPerformanceNugget queryNugget, + @NotNull final QueryPerformanceNugget.Factory nuggetFactory) { + this.queryNugget = queryNugget; + this.nuggetFactory = nuggetFactory; + state = QueryState.RUNNING; + startCatchAll(); + Assert.eqTrue(QueryPerformanceRecorder.getInstance() == DUMMY_RECORDER, + "QueryPerformanceRecorder.getInstance() == DUMMY_RECORDER"); + QueryPerformanceRecorder.theLocal.set(this); + } + + /** + * Abort a query. + */ + public synchronized void abortQuery() { + if (state != QueryState.RUNNING) { + return; + } + state = QueryState.INTERRUPTED; + if (catchAllNugget != null) { + stopCatchAll(true); + } else { + while (!userNuggetStack.isEmpty()) { + userNuggetStack.peekLast().abort(this); + } + } + queryNugget.abort(this); + } + + /** + * Return the query's current state + * + * @return the query's state or null if it isn't initialized yet + */ + public synchronized QueryState getState() { + return state; + } + + /** + * End a query. + */ + public synchronized boolean endQuery() { + if (state != QueryState.RUNNING) { + return false; + } + + state = QueryState.FINISHED; + Assert.neqNull(catchAllNugget, "catchAllNugget"); + Assert.neqNull(queryNugget, "queryNugget"); + stopCatchAll(false); + + // note that we do not resetInstance in here as that should be done from a finally-block + return queryNugget.done(this); + } + + /** + * Suspends a query. + *

+ * This resets the thread local and assumes that this performance nugget may be resumed on another thread. + */ + public synchronized void suspendQuery() { + if (state != QueryState.RUNNING) { + throw new IllegalStateException("Can't suspend a query that isn't running"); + } + + final QueryPerformanceRecorder threadLocalInstance = getInstance(); + if (threadLocalInstance != this) { + throw new IllegalStateException("Can't suspend a query that doesn't belong to this thread"); + } + + state = QueryState.SUSPENDED; + Assert.neqNull(catchAllNugget, "catchAllNugget"); + stopCatchAll(false); + queryNugget.onBaseEntryEnd(); + + // uninstall this instance from the thread local + resetInstance(); + } + + /** + * Resumes a suspend query. + *

+ * It is an error to resume a query while another query is running on this thread. + * + * @return this + */ + public synchronized QueryPerformanceRecorderImpl resumeQuery() { + if (state != QueryState.SUSPENDED) { + throw new IllegalStateException("Can't resume a query that isn't suspended"); + } + + final QueryPerformanceRecorder threadLocalInstance = getInstance(); + if (threadLocalInstance != DUMMY_RECORDER) { + throw new IllegalStateException("Can't resume a query while another query is in operation"); + } + QueryPerformanceRecorder.theLocal.set(this); + + queryNugget.onBaseEntryStart(); + state = QueryState.RUNNING; + Assert.eqNull(catchAllNugget, "catchAllNugget"); + startCatchAll(); + return this; + } + + private void startCatchAll() { + catchAllNugget = nuggetFactory.createForCatchAll(queryNugget, operationNuggets.size()); + } + + private void stopCatchAll(final boolean abort) { + final boolean shouldLog; + if (abort) { + shouldLog = catchAllNugget.abort(this); + } else { + shouldLog = catchAllNugget.done(this); + } + if (shouldLog) { + Assert.eq(operationNuggets.size(), "operationsNuggets.size()", + catchAllNugget.getOperationNumber(), "catchAllNugget.getOperationNumber()"); + operationNuggets.add(catchAllNugget); + } + catchAllNugget = null; + } + + /** + * @param name the nugget name + * @return A new QueryPerformanceNugget to encapsulate user query operations. done() must be called on the nugget. + */ + public QueryPerformanceNugget getNugget(@NotNull final String name) { + return getNugget(name, QueryConstants.NULL_LONG); + } + + /** + * @param name the nugget name + * @param inputSize the nugget's input size + * @return A new QueryPerformanceNugget to encapsulate user query operations. done() must be called on the nugget. 
+ */ + public synchronized QueryPerformanceNugget getNugget(@NotNull final String name, final long inputSize) { + if (state != QueryState.RUNNING) { + return QueryPerformanceNugget.DUMMY_NUGGET; + } + if (Thread.interrupted()) { + throw new CancellationException("interrupted in QueryPerformanceNugget"); + } + if (catchAllNugget != null) { + stopCatchAll(false); + } + final QueryPerformanceNugget parent = userNuggetStack.isEmpty() ? queryNugget : userNuggetStack.getLast(); + final QueryPerformanceNugget nugget = nuggetFactory.createForOperation( + parent, operationNuggets.size(), name, inputSize); + operationNuggets.add(nugget); + userNuggetStack.addLast(nugget); + return nugget; + } + + /** + * Note: Do not call this directly - it's for nugget use only. Call {@link QueryPerformanceNugget#done()} or + * {@link QueryPerformanceNugget#close()} instead. + * + * @param nugget the nugget to be released + * @return If the nugget passes criteria for logging. + */ + synchronized boolean releaseNugget(QueryPerformanceNugget nugget) { + boolean shouldLog = nugget.shouldLogNugget(nugget == catchAllNugget); + if (!nugget.isUser()) { + return shouldLog; + } + + final QueryPerformanceNugget removed = userNuggetStack.removeLast(); + if (nugget != removed) { + throw new IllegalStateException( + "Released query performance nugget " + nugget + " (" + System.identityHashCode(nugget) + + ") didn't match the top of the user nugget stack " + removed + " (" + + System.identityHashCode(removed) + + ") - did you follow the correct try/finally pattern?"); + } + + if (removed.shouldLogMeAndStackParents()) { + shouldLog = true; + if (!userNuggetStack.isEmpty()) { + userNuggetStack.getLast().setShouldLogMeAndStackParents(); + } + } + if (!shouldLog) { + // If we have filtered this nugget, by our filter design we will also have filtered any nuggets it encloses. + // This means it *must* be the last entry in operationNuggets, so we can safely remove it in O(1). 
+ final QueryPerformanceNugget lastNugget = operationNuggets.remove(operationNuggets.size() - 1); + if (nugget != lastNugget) { + throw new IllegalStateException( + "Filtered query performance nugget " + nugget + " (" + System.identityHashCode(nugget) + + ") didn't match the last operation nugget " + lastNugget + " (" + + System.identityHashCode(lastNugget) + + ")"); + } + } + + if (userNuggetStack.isEmpty() && queryNugget != null && state == QueryState.RUNNING) { + startCatchAll(); + } + + return shouldLog; + } + + @Override + public synchronized QueryPerformanceNugget getOuterNugget() { + return userNuggetStack.peekLast(); + } + + @Override + public void setQueryData(final EntrySetter setter) { + final long evaluationNumber; + final int operationNumber; + boolean uninstrumented = false; + synchronized (this) { + // we should never be called if we're not running + Assert.eq(state, "state", QueryState.RUNNING, "QueryState.RUNNING"); + evaluationNumber = queryNugget.getEvaluationNumber(); + operationNumber = operationNuggets.size(); + if (operationNumber > 0) { + // ensure UPL and QOPL are consistent/joinable. 
+ if (!userNuggetStack.isEmpty()) { + userNuggetStack.getLast().setShouldLogMeAndStackParents(); + } else { + uninstrumented = true; + if (catchAllNugget != null) { + catchAllNugget.setShouldLogMeAndStackParents(); + } + } + } + } + setter.set(evaluationNumber, operationNumber, uninstrumented); + } + + @Override + public synchronized QueryPerformanceNugget getQueryLevelPerformanceData() { + return queryNugget; + } + + @Override + public synchronized List getOperationLevelPerformanceData() { + return operationNuggets; + } + + public void accumulate(@NotNull final QueryPerformanceRecorderImpl subQuery) { + queryNugget.accumulate(subQuery.queryNugget); + } + + @SuppressWarnings("unused") + public synchronized Table getTimingResultsAsTable() { + final int count = operationNuggets.size(); + final String[] names = new String[count]; + final Long[] timeNanos = new Long[count]; + final String[] callerLine = new String[count]; + final Boolean[] isTopLevel = new Boolean[count]; + final Boolean[] isCompileTime = new Boolean[count]; + + for (int i = 0; i < operationNuggets.size(); i++) { + timeNanos[i] = operationNuggets.get(i).getTotalTimeNanos(); + names[i] = operationNuggets.get(i).getName(); + callerLine[i] = operationNuggets.get(i).getCallerLine(); + isTopLevel[i] = operationNuggets.get(i).isTopLevel(); + isCompileTime[i] = operationNuggets.get(i).getName().startsWith("Compile:"); + } + return TableTools.newTable( + TableTools.col("names", names), + TableTools.col("line", callerLine), + TableTools.col("timeNanos", timeNanos), + TableTools.col("isTopLevel", isTopLevel), + TableTools.col("isCompileTime", isCompileTime)); + } +} diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index 52319088c75..37513c61ba5 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -13,7 +13,9 @@ 
import io.deephaven.engine.liveness.LivenessArtifact; import io.deephaven.engine.liveness.LivenessReferent; import io.deephaven.engine.liveness.LivenessScopeStack; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorderImpl; import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.engine.updategraph.DynamicNode; @@ -30,7 +32,6 @@ import io.deephaven.proto.util.ExportTicketHelper; import io.deephaven.server.util.Scheduler; import io.deephaven.engine.context.ExecutionContext; -import io.deephaven.util.QueryConstants; import io.deephaven.util.SafeCloseable; import io.deephaven.util.annotations.VisibleForTesting; import io.deephaven.auth.AuthContext; @@ -538,7 +539,7 @@ public final static class ExportObject extends LivenessArtifact { /** if true the queryPerformanceRecorder belongs to a batch; otherwise if it exists it belong to the export */ private boolean qprIsForBatch; /** used to keep track of performance details either for aggregation or for the async ticket resolution */ - private QueryPerformanceRecorder queryPerformanceRecorder; + private QueryPerformanceRecorderImpl queryPerformanceRecorder; /** final result of export */ private volatile T result; @@ -630,7 +631,7 @@ private boolean isNonExport() { } private synchronized void setQueryPerformanceRecorder( - final QueryPerformanceRecorder queryPerformanceRecorder, + final QueryPerformanceRecorderImpl queryPerformanceRecorder, final boolean qprIsForBatch) { if (this.queryPerformanceRecorder != null) { throw new IllegalStateException( @@ -979,28 +980,30 @@ private void doExport() { T localResult = null; boolean shouldLog = false; + QueryPerformanceRecorderImpl exportRecorder = null; QueryProcessingResults queryProcessingResults = null; try (final SafeCloseable ignored1 = 
session.executionContext.open(); final SafeCloseable ignored2 = LivenessScopeStack.open()) { try { - final QueryPerformanceRecorder exportRecorder; if (queryPerformanceRecorder != null && !qprIsForBatch) { exportRecorder = queryPerformanceRecorder.resumeQuery(); } else if (queryPerformanceRecorder != null) { // this is a sub-query; no need to re-log the session id - exportRecorder = QueryPerformanceRecorder.getInstance().startQuery( + exportRecorder = new QueryPerformanceRecorderImpl( + "ExportObject#doWork(exportId=" + logIdentity + ")", queryPerformanceRecorder, - "ExportObject#doWork(exportId=" + logIdentity + ")"); + QueryPerformanceNugget.DEFAULT_FACTORY); } else { - exportRecorder = QueryPerformanceRecorder.getInstance().startQuery( - "ExportObject#doWork(session=" + session.sessionId + ",exportId=" + logIdentity + ")"); + exportRecorder = new QueryPerformanceRecorderImpl( + "ExportObject#doWork(session=" + session.sessionId + ",exportId=" + logIdentity + ")", + QueryPerformanceNugget.DEFAULT_FACTORY); } queryProcessingResults = new QueryProcessingResults(exportRecorder); try { localResult = capturedExport.call(); } finally { - shouldLog = QueryPerformanceRecorder.getInstance().endQuery(); + shouldLog = exportRecorder.endQuery(); } } catch (final Exception err) { @@ -1022,7 +1025,8 @@ private void doExport() { } if ((shouldLog || caughtException != null) && queryProcessingResults != null) { if (queryPerformanceRecorder != null && qprIsForBatch) { - queryPerformanceRecorder.accumulate(queryProcessingResults.getRecorder()); + Assert.neqNull(exportRecorder, "exportRecorder"); + queryPerformanceRecorder.accumulate(exportRecorder); } EngineMetrics.getInstance().logQueryProcessingResults(queryProcessingResults); } @@ -1315,15 +1319,18 @@ public class ExportBuilder { } /** - * Set the performance recorder to aggregate performance data across exports. If set, instrumentation logging is - * the responsibility of the caller. 
+ * Set the performance recorder to aggregate performance data across exports. + *

+ * When {@code qprIsForBatch}: - is {@code false}: The provided queryPerformanceRecorder is suspended and + * assumed by the export object - is {@code true}: Instrumentation logging is the responsibility of the caller + * and should not be performed until all sub-queries have completed. * * @param queryPerformanceRecorder the performance recorder to aggregate into * @param qprIsForBatch true if a sub-query should be created for the export and aggregated into the qpr * @return this builder */ public ExportBuilder queryPerformanceRecorder( - @NotNull final QueryPerformanceRecorder queryPerformanceRecorder, + @NotNull final QueryPerformanceRecorderImpl queryPerformanceRecorder, final boolean qprIsForBatch) { export.setQueryPerformanceRecorder(queryPerformanceRecorder, qprIsForBatch); return this; diff --git a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java index 42722609caf..d045046b0a3 100644 --- a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java @@ -8,7 +8,9 @@ import io.deephaven.clientsupport.gotorow.SeekRow; import io.deephaven.auth.codegen.impl.TableServiceContextualAuthWiring; import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorderImpl; import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.extensions.barrage.util.ExportUtil; @@ -511,8 +513,9 @@ public void batch( } final SessionState session = sessionService.getCurrentSession(); - final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.getInstance().startQuery( - "TableService#batch(session=" 
+ session.getSessionId() + ")"); + final QueryPerformanceRecorderImpl queryPerformanceRecorder = new QueryPerformanceRecorderImpl( + "TableService#batch(session=" + session.getSessionId() + ")", + QueryPerformanceNugget.DEFAULT_FACTORY); // step 1: initialize exports final List> exportBuilders = request.getOpsList().stream() @@ -536,17 +539,21 @@ public void batch( } Assert.geqZero(numRemaining, "numRemaining"); - queryPerformanceRecorder.resumeQuery(); - final QueryProcessingResults results = new QueryProcessingResults(queryPerformanceRecorder); - final StatusRuntimeException failure = firstFailure.get(); - if (failure != null) { - results.setException(failure.getMessage()); - safelyError(responseObserver, failure); - } else { - safelyComplete(responseObserver); + try { + queryPerformanceRecorder.resumeQuery(); + final QueryProcessingResults results = new QueryProcessingResults(queryPerformanceRecorder); + final StatusRuntimeException failure = firstFailure.get(); + if (failure != null) { + results.setException(failure.getMessage()); + safelyError(responseObserver, failure); + } else { + safelyComplete(responseObserver); + } + queryPerformanceRecorder.endQuery(); + EngineMetrics.getInstance().logQueryProcessingResults(results); + } finally { + QueryPerformanceRecorder.resetInstance(); } - queryPerformanceRecorder.endQuery(); - EngineMetrics.getInstance().logQueryProcessingResults(results); }; for (int i = 0; i < exportBuilders.size(); ++i) { @@ -656,8 +663,8 @@ private void oneShotOperationWrapper( final String description = "TableService#" + op.name() + "(session=" + session.getSessionId() + ", resultId=" + ticketRouter.getLogNameFor(resultId, "TableServiceGrpcImpl") + ")"; - final QueryPerformanceRecorder queryPerformanceRecorder = - QueryPerformanceRecorder.getInstance().startQuery(description); + final QueryPerformanceRecorderImpl queryPerformanceRecorder = new QueryPerformanceRecorderImpl( + description, QueryPerformanceNugget.DEFAULT_FACTORY); 
operation.validateRequest(request); @@ -718,7 +725,7 @@ private SessionState.ExportObject

resolveBatchReference( private BatchExportBuilder createBatchExportBuilder( @NotNull final SessionState session, - @NotNull final QueryPerformanceRecorder queryPerformanceRecorder, + @NotNull final QueryPerformanceRecorderImpl queryPerformanceRecorder, final BatchTableRequest.Operation op) { final GrpcTableOperation operation = getOp(op.getOpCase()); final T request = operation.getRequestFromOperation(op); From 02d48388773c3a1e099c795dbe37778a2432ad85 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Wed, 8 Nov 2023 18:30:28 -0700 Subject: [PATCH 08/31] PerformanceQueries Changes --- cpp-client/build.gradle | 2 +- .../impl/perf/UpdatePerformanceTracker.java | 4 +- .../table/impl/util/PerformanceQueries.java | 22 +++ .../impl/util/PerformanceQueriesGeneral.java | 131 +++++++++++------- 4 files changed, 107 insertions(+), 52 deletions(-) diff --git a/cpp-client/build.gradle b/cpp-client/build.gradle index 0d1dfe07b72..25669240859 100644 --- a/cpp-client/build.gradle +++ b/cpp-client/build.gradle @@ -130,7 +130,7 @@ def testCppClient = Docker.registerDockerTask(project, 'testCppClient') { // // Setup for test run. 
// - environmentVariable 'DH_HOST', deephavenDocker.containerName.get() + environmentVariable 'DH_HOST', '10.10.0.17' environmentVariable 'DH_PORT', '10000' } containerDependencies.dependsOn = [deephavenDocker.healthyTask] diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java index e97c53b3b86..01be9b8b686 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java @@ -137,10 +137,10 @@ private synchronized void publish( public UpdatePerformanceTracker(final UpdateGraph updateGraph) { this.updateGraph = Objects.requireNonNull(updateGraph); this.aggregatedSmallUpdatesEntry = new PerformanceEntry( - QueryConstants.NULL_LONG, QueryConstants.NULL_INT, QueryConstants.NULL_INT, + QueryConstants.NULL_LONG, QueryConstants.NULL_LONG, QueryConstants.NULL_INT, "Aggregated Small Updates", null, updateGraph.getName()); this.flushEntry = new PerformanceEntry( - QueryConstants.NULL_LONG, QueryConstants.NULL_INT, QueryConstants.NULL_INT, + QueryConstants.NULL_LONG, QueryConstants.NULL_LONG, QueryConstants.NULL_INT, "UpdatePerformanceTracker Flush", null, updateGraph.getName()); } diff --git a/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueries.java b/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueries.java index 14e6177f8be..0ec22a8b8bb 100644 --- a/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueries.java +++ b/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueries.java @@ -6,6 +6,7 @@ import com.google.auto.service.AutoService; import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.hierarchical.TreeTable; import 
io.deephaven.engine.util.GroovyDeephavenSession; import io.deephaven.util.annotations.ScriptApi; @@ -68,6 +69,27 @@ public static Table queryOperationPerformance(final long evaluationNumber) { evaluationNumber); } + /** + * Converts the query performance table into a tree table. + * + * @return query performance tree table. + */ + @ScriptApi + public static TreeTable queryPerformanceAsTreeTable() { + return PerformanceQueriesGeneral.queryPerformanceAsTreeTable(TableLoggers.queryPerformanceLog()); + } + + /** + * Merges the query performance and query operation performance tables into a single tree table. + * + * @return query operation performance tree table. + */ + @ScriptApi + public static TreeTable queryOperationPerformanceAsTreeTable() { + return PerformanceQueriesGeneral.queryOperationPerformanceAsTreeTable( + TableLoggers.queryPerformanceLog(), TableLoggers.queryOperationPerformanceLog()); + } + /** * Gets the information for a process. * diff --git a/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java b/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java index 43024345ed2..e3d659c9d00 100644 --- a/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java +++ b/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java @@ -4,15 +4,17 @@ package io.deephaven.engine.table.impl.util; import io.deephaven.engine.table.Table; -import io.deephaven.engine.table.impl.DataAccessHelpers; +import io.deephaven.engine.table.hierarchical.TreeTable; +import io.deephaven.engine.util.TableTools; import io.deephaven.plot.Figure; import io.deephaven.plot.PlottingConvenience; import io.deephaven.util.QueryConstants; +import org.jetbrains.annotations.NotNull; import java.util.Arrays; import java.util.HashMap; import java.util.Map; -import java.util.OptionalLong; +import 
java.util.stream.Stream; import static io.deephaven.api.agg.Aggregation.AggFirst; import static io.deephaven.api.agg.Aggregation.AggMax; @@ -35,20 +37,21 @@ public static Table queryPerformance(Table queryPerformanceLog, final long evalu queryPerformanceLog = queryPerformanceLog .updateView( "WorkerHeapSize = " + workerHeapSizeBytes + "L", - "TimeSecs = nanosToMillis(EndTime - StartTime) / 1000d", // How long this query ran for, in - // seconds + // How long this query ran for, in seconds + "TimeSecs = nanosToMillis(EndTime - StartTime) / 1000d", "NetMemoryChange = FreeMemoryChange - TotalMemoryChange", - "QueryMemUsed = TotalMemory - FreeMemory", // Memory in use by the query. (Only - // includes active heap memory.) - "QueryMemUsedPct = QueryMemUsed / WorkerHeapSize", // Memory usage as a percenage of max heap - // size (-Xmx) - "QueryMemFree = WorkerHeapSize - QueryMemUsed" // Remaining memory until the query runs into the - // max heap size - ) - .moveColumnsUp( - "ProcessUniqueId", "EvaluationNumber", - "QueryMemUsed", "QueryMemFree", "QueryMemUsedPct", - "EndTime", "TimeSecs", "NetMemoryChange"); + // Memory in use by the query. (Only includes active heap memory.) 
+ "QueryMemUsed = TotalMemory - FreeMemory", + // Memory usage as a percenage of max heap size (-Xmx) + "QueryMemUsedPct = QueryMemUsed / WorkerHeapSize", + // Remaining memory until the query runs into the max heap size + "QueryMemFree = WorkerHeapSize - QueryMemUsed"); + + queryPerformanceLog = maybeMoveColumnsUp(queryPerformanceLog, + "ProcessUniqueId", "EvaluationNumber", "ParentEvaluationNumber", + "QueryMemUsed", "QueryMemFree", "QueryMemUsedPct", + "EndTime", "TimeSecs", "NetMemoryChange"); + if (formatPctColumns) { queryPerformanceLog = formatColumnsAsPct(queryPerformanceLog, "QueryMemUsedPct"); } @@ -64,15 +67,16 @@ public static Table queryOperationPerformance(Table queryOps, final long evaluat queryOps = queryOps.where(whereConditionForEvaluationNumber(evaluationNumber)); } - return queryOps + queryOps = queryOps .updateView( "TimeSecs = nanosToMillis(EndTime - StartTime) / 1000d", - "NetMemoryChange = FreeMemoryChange - TotalMemoryChange" // Change in memory usage delta while - // this query was executing - ) - .moveColumnsUp( - "ProcessUniqueId", "EvaluationNumber", "OperationNumber", - "EndTime", "TimeSecs", "NetMemoryChange"); + // Change in memory usage delta while this query was executing + "NetMemoryChange = FreeMemoryChange - TotalMemoryChange"); + + return maybeMoveColumnsUp(queryOps, + "ProcessUniqueId", "EvaluationNumber", "ParentEvaluationNumber", + "OperationNumber", "ParentOperationNumber", + "EndTime", "TimeSecs", "NetMemoryChange"); } public static Table queryOperationPerformance(final Table queryOps) { @@ -84,11 +88,7 @@ public static String processInfo(Table processInfo, final String processInfoId, processInfo = processInfo .where("Id = `" + processInfoId + "`", "Type = `" + type + "`", "Key = `" + key + "`") .select("Value"); - try { - return (String) DataAccessHelpers.getColumn(processInfo, 0).get(0); - } catch (Exception e) { - return null; - } + return processInfo.getColumnSource("Value").get(processInfo.getRowSet().firstRowKey()); 
} public static Table queryUpdatePerformance(Table queryUpdatePerformance, final long evaluationNumber, @@ -101,24 +101,26 @@ public static Table queryUpdatePerformance(Table queryUpdatePerformance, final l queryUpdatePerformance = queryUpdatePerformance .updateView( "WorkerHeapSize = " + workerHeapSizeBytes + "L", - "Ratio = EntryIntervalUsage / IntervalDurationNanos", // % of time during this interval that the - // operation was using CPU - "QueryMemUsed = MaxTotalMemory - MinFreeMemory", // Memory in use by the query. (Only - // includes active heap memory.) - "QueryMemUsedPct = QueryMemUsed / WorkerHeapSize", // Memory usage as a percenage of the max - // heap size (-Xmx) - "QueryMemFree = WorkerHeapSize - QueryMemUsed", // Remaining memory until the query runs into - // the max heap size - "NRows = EntryIntervalAdded + EntryIntervalRemoved + EntryIntervalModified", // Total number of - // changed rows - "RowsPerSec = round(NRows / IntervalDurationNanos * 1.0e9)", // Average rate data is ticking at - "RowsPerCPUSec = round(NRows / EntryIntervalUsage * 1.0e9)" // Approximation of how fast CPU - // handles row changes - ) - .moveColumnsUp( - "ProcessUniqueId", "EvaluationNumber", "OperationNumber", - "Ratio", "QueryMemUsed", "QueryMemUsedPct", "IntervalEndTime", - "RowsPerSec", "RowsPerCPUSec", "EntryDescription"); + // % of time during this interval that the operation was using CPU + "Ratio = EntryIntervalUsage / IntervalDurationNanos", + // Memory in use by the query. (Only includes active heap memory.) 
+ "QueryMemUsed = MaxTotalMemory - MinFreeMemory", + // Memory usage as a percentage of the max heap size (-Xmx) + "QueryMemUsedPct = QueryMemUsed / WorkerHeapSize", + // Remaining memory until the query runs into the max heap size + "QueryMemFree = WorkerHeapSize - QueryMemUsed", + // Total number of changed rows + "NRows = EntryIntervalAdded + EntryIntervalRemoved + EntryIntervalModified", + // Average rate data is ticking at + "RowsPerSec = round(NRows / IntervalDurationNanos * 1.0e9)", + // Approximation of how fast CPU handles row changes + "RowsPerCPUSec = round(NRows / EntryIntervalUsage * 1.0e9)"); + + queryUpdatePerformance = maybeMoveColumnsUp(queryUpdatePerformance, + "ProcessUniqueId", "EvaluationNumber", "OperationNumber", + "Ratio", "QueryMemUsed", "QueryMemUsedPct", "IntervalEndTime", + "RowsPerSec", "RowsPerCPUSec", "EntryDescription"); + if (formatPctColumnsLocal && formatPctColumns) { queryUpdatePerformance = formatColumnsAsPctUpdatePerformance(queryUpdatePerformance); } @@ -149,6 +151,7 @@ public static Map queryUpdatePerformanceMap(final Table queryUpda "EntryIntervalAdded", "EntryIntervalRemoved", "EntryIntervalModified", + "EntryIntervalShifted", "NRows"); // Create a table showing the 'worst' updates, i.e. 
the operations with the greatest 'Ratio' @@ -270,7 +273,7 @@ public static Map serverStateWithPlots(final Table pml) { final Table pm = serverState(pml); resultMap.put("ServerState", pm); - int maxMemMiB = DataAccessHelpers.getColumn(pm, "MaxMemMiB").getInt(0); + int maxMemMiB = pm.getColumnSource("MaxMemMiB").getInt(pm.getRowSet().firstRowKey()); if (maxMemMiB == QueryConstants.NULL_INT) { maxMemMiB = 4096; } @@ -310,6 +313,33 @@ public static Map serverStateWithPlots(final Table pml) { return resultMap; } + public static TreeTable queryPerformanceAsTreeTable(@NotNull final Table qpl) { + return qpl.tree("EvaluationNumber", "ParentEvaluationNumber"); + } + + public static TreeTable queryOperationPerformanceAsTreeTable( + @NotNull final Table qpl, @NotNull final Table qopl) { + Table mergeWithAggKeys = TableTools.merge( + qpl.updateView( + "EvalKey = `` + EvaluationNumber", + "ParentEvalKey = ParentEvaluationNumber == null ? null : (`` + ParentEvaluationNumber)", + "OperationNumber = NULL_INT", + "ParentOperationNumber = NULL_INT", + "Depth = NULL_INT", + "CallerLine = (String) null", + "IsCompilation = NULL_BOOLEAN", + "InputSizeLong = NULL_LONG"), + qopl.updateView( + "EvalKey = EvaluationNumber + `:` + OperationNumber", + "ParentEvalKey = EvaluationNumber + (ParentOperationNumber == null ? `` : (`:` + ParentOperationNumber))", + "Exception = (String) null")) + .moveColumnsUp("EvalKey", "ParentEvalKey") + .moveColumnsDown("EvaluationNumber", "ParentEvaluationNumber", "OperationNumber", + "ParentOperationNumber"); + + return mergeWithAggKeys.tree("EvalKey", "ParentEvalKey"); + } + private static Table formatColumnsAsPct(final Table t, final String... 
cols) { final String[] formats = new String[cols.length]; for (int i = 0; i < cols.length; ++i) { @@ -323,11 +353,14 @@ private static Table formatColumnsAsPctUpdatePerformance(final Table updatePerfo } private static long getWorkerHeapSizeBytes() { - final OptionalLong opt = EngineMetrics.getProcessInfo().getMemoryInfo().heap().max(); - return opt.orElse(0); + return EngineMetrics.getProcessInfo().getMemoryInfo().heap().max().orElse(0); } private static String whereConditionForEvaluationNumber(final long evaluationNumber) { - return "EvaluationNumber = " + evaluationNumber + ""; + return "EvaluationNumber = " + evaluationNumber; + } + + private static Table maybeMoveColumnsUp(final Table source, final String... cols) { + return source.moveColumnsUp(Stream.of(cols).filter(source::hasColumns).toArray(String[]::new)); } } From 5b9fdc0d2bc106a4ed656299ab1b3c7befc8eba8 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Wed, 8 Nov 2023 18:48:58 -0700 Subject: [PATCH 09/31] Personal Review --- .../engine/table/impl/perf/QueryPerformanceRecorder.java | 6 +++--- .../table/impl/perf/QueryPerformanceRecorderImpl.java | 8 +++----- .../deephaven/engine/table/impl/util/EngineMetrics.java | 3 +-- .../table/impl/util/QueryOperationPerformanceImpl.java | 7 ++----- .../util/QueryOperationPerformanceStreamPublisher.java | 4 +--- .../java/io/deephaven/server/session/SessionState.java | 9 ++++++--- .../deephaven/server/table/ops/TableServiceGrpcImpl.java | 1 - 7 files changed, 16 insertions(+), 22 deletions(-) diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java index e417585bd15..856d695d21a 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java @@ -124,9 +124,9 @@ public interface 
EntrySetter { } /** - * TODO NATE NOCOMMIT WRITE JAVADOC - * - * @param setter + * Provide current query data via the setter. + * + * @param setter a callback to receive query data */ public abstract void setQueryData(final EntrySetter setter); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java index 407f5d91415..74715d0483b 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java @@ -21,12 +21,12 @@ public class QueryPerformanceRecorderImpl extends QueryPerformanceRecorder { private final QueryPerformanceNugget queryNugget; + private final QueryPerformanceNugget.Factory nuggetFactory; private final ArrayList operationNuggets = new ArrayList<>(); + private final Deque userNuggetStack = new ArrayDeque<>(); private QueryState state; private QueryPerformanceNugget catchAllNugget; - private final Deque userNuggetStack = new ArrayDeque<>(); - private final QueryPerformanceNugget.Factory nuggetFactory; /** * Creates a new QueryPerformanceRecorderImpl and starts the query. @@ -197,9 +197,7 @@ public QueryPerformanceNugget getNugget(@NotNull final String name) { * @return A new QueryPerformanceNugget to encapsulate user query operations. done() must be called on the nugget. 
*/ public synchronized QueryPerformanceNugget getNugget(@NotNull final String name, final long inputSize) { - if (state != QueryState.RUNNING) { - return QueryPerformanceNugget.DUMMY_NUGGET; - } + Assert.eq(state, "state", QueryState.RUNNING, "QueryState.RUNNING"); if (Thread.interrupted()) { throw new CancellationException("interrupted in QueryPerformanceNugget"); } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java index bb749fbf712..0bf5c36fe22 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java @@ -75,8 +75,7 @@ private EngineMetrics() { log.fatal().append("Failed to configure process info: ").append(e.toString()).endl(); } qpImpl = new QueryPerformanceImpl(tableLoggerFactory.queryPerformanceLogLogger()); - qoplImpl = new QueryOperationPerformanceImpl(pInfo.getId(), - tableLoggerFactory.queryOperationPerformanceLogLogger()); + qoplImpl = new QueryOperationPerformanceImpl(tableLoggerFactory.queryOperationPerformanceLogLogger()); if (STATS_LOGGING_ENABLED) { statsImpl = new StatsImpl(pInfo.getId(), tableLoggerFactory.processMetricsLogLogger()); } else { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java index 3765f8fe01a..d43d8ab1ce2 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceImpl.java @@ -7,7 +7,6 @@ import io.deephaven.engine.table.Table; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.tablelogger.QueryOperationPerformanceLogLogger; -import 
io.deephaven.process.ProcessUniqueId; import io.deephaven.stream.StreamToBlinkTableAdapter; import io.deephaven.tablelogger.Row.Flags; import org.jetbrains.annotations.NotNull; @@ -16,15 +15,13 @@ import java.util.Objects; class QueryOperationPerformanceImpl implements QueryOperationPerformanceLogLogger { - private final ProcessUniqueId id; private final QueryOperationPerformanceLogLogger qoplLogger; private final QueryOperationPerformanceStreamPublisher publisher; @SuppressWarnings("FieldCanBeLocal") private final StreamToBlinkTableAdapter adapter; private final Table blink; - public QueryOperationPerformanceImpl(ProcessUniqueId id, QueryOperationPerformanceLogLogger qoplLogger) { - this.id = Objects.requireNonNull(id); + public QueryOperationPerformanceImpl(QueryOperationPerformanceLogLogger qoplLogger) { this.qoplLogger = Objects.requireNonNull(qoplLogger); this.publisher = new QueryOperationPerformanceStreamPublisher(); this.adapter = new StreamToBlinkTableAdapter( @@ -43,7 +40,7 @@ public Table blinkTable() { public void log( @NotNull final Flags flags, @NotNull final QueryPerformanceNugget nugget) throws IOException { - publisher.add(id.value(), nugget); + publisher.add(nugget); qoplLogger.log(flags, nugget); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java index cbdd0fa1917..9d19ee9eb77 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java @@ -65,9 +65,7 @@ public void register(@NotNull StreamConsumer consumer) { this.consumer = Objects.requireNonNull(consumer); } - public synchronized void add( - final String id, - final QueryPerformanceNugget nugget) { + public synchronized void add(final 
QueryPerformanceNugget nugget) { // ColumnDefinition.ofLong("EvaluationNumber"), chunks[0].asWritableLongChunk().add(nugget.getEvaluationNumber()); diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index 37513c61ba5..6d219d0a1c2 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -1321,9 +1321,12 @@ public class ExportBuilder { /** * Set the performance recorder to aggregate performance data across exports. *

- * When {@code qprIsForBatch}: - is {@code false}: The provided queryPerformanceRecorder is suspended and - * assumed by the export object - is {@code true}: Instrumentation logging is the responsibility of the caller - * and should not be performed until all sub-queries have completed. + * When {@code qprIsForBatch}: + *

    + *
  • is {@code false}: The provided queryPerformanceRecorder is suspended and assumed by the export object + *
  • is {@code true}: Instrumentation logging is the responsibility of the caller and should not be performed + * until all sub-queries have completed. + *
* * @param queryPerformanceRecorder the performance recorder to aggregate into * @param qprIsForBatch true if a sub-query should be created for the export and aggregated into the qpr diff --git a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java index d045046b0a3..2cd6536ccde 100644 --- a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java @@ -593,7 +593,6 @@ public void batch( // now that we've submitted everything we'll suspend the query and release our refcount queryPerformanceRecorder.suspendQuery(); - QueryPerformanceRecorder.resetInstance(); onOneResolved.run(); } From 567dc983c70914d32eb9bc76e89a23450a2cb8b0 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Wed, 8 Nov 2023 20:39:50 -0700 Subject: [PATCH 10/31] Bug Fix for CI --- .../table/ops/TableServiceGrpcImpl.java | 175 +++++++++--------- 1 file changed, 91 insertions(+), 84 deletions(-) diff --git a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java index 2cd6536ccde..20890529091 100644 --- a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java @@ -517,83 +517,87 @@ public void batch( "TableService#batch(session=" + session.getSessionId() + ")", QueryPerformanceNugget.DEFAULT_FACTORY); - // step 1: initialize exports - final List> exportBuilders = request.getOpsList().stream() - .map(op -> createBatchExportBuilder(session, queryPerformanceRecorder, op)) - .collect(Collectors.toList()); + try { + // step 1: initialize exports + final List> exportBuilders = request.getOpsList().stream() + .map(op -> createBatchExportBuilder(session, queryPerformanceRecorder, op)) + 
.collect(Collectors.toList()); - // step 2: resolve dependencies - exportBuilders.forEach(export -> export.resolveDependencies(session, exportBuilders)); + // step 2: resolve dependencies + exportBuilders.forEach(export -> export.resolveDependencies(session, exportBuilders)); - // step 3: check for cyclical dependencies; this is our only opportunity to check non-export cycles - // TODO: check for cycles + // step 3: check for cyclical dependencies; this is our only opportunity to check non-export cycles + // TODO: check for cycles - // step 4: submit the batched operations - final AtomicInteger remaining = new AtomicInteger(1 + exportBuilders.size()); - final AtomicReference firstFailure = new AtomicReference<>(); + // step 4: submit the batched operations + final AtomicInteger remaining = new AtomicInteger(1 + exportBuilders.size()); + final AtomicReference firstFailure = new AtomicReference<>(); - final Runnable onOneResolved = () -> { - int numRemaining = remaining.decrementAndGet(); - if (numRemaining > 0) { - return; - } - Assert.geqZero(numRemaining, "numRemaining"); - - try { - queryPerformanceRecorder.resumeQuery(); - final QueryProcessingResults results = new QueryProcessingResults(queryPerformanceRecorder); - final StatusRuntimeException failure = firstFailure.get(); - if (failure != null) { - results.setException(failure.getMessage()); - safelyError(responseObserver, failure); - } else { - safelyComplete(responseObserver); + final Runnable onOneResolved = () -> { + int numRemaining = remaining.decrementAndGet(); + if (numRemaining > 0) { + return; } - queryPerformanceRecorder.endQuery(); - EngineMetrics.getInstance().logQueryProcessingResults(results); - } finally { - QueryPerformanceRecorder.resetInstance(); - } - }; + Assert.geqZero(numRemaining, "numRemaining"); + + try { + queryPerformanceRecorder.resumeQuery(); + final QueryProcessingResults results = new QueryProcessingResults(queryPerformanceRecorder); + final StatusRuntimeException failure = 
firstFailure.get(); + if (failure != null) { + results.setException(failure.getMessage()); + safelyError(responseObserver, failure); + } else { + safelyComplete(responseObserver); + } + queryPerformanceRecorder.endQuery(); + EngineMetrics.getInstance().logQueryProcessingResults(results); + } finally { + QueryPerformanceRecorder.resetInstance(); + } + }; - for (int i = 0; i < exportBuilders.size(); ++i) { - final BatchExportBuilder exportBuilder = exportBuilders.get(i); - final int exportId = exportBuilder.exportBuilder.getExportId(); + for (int i = 0; i < exportBuilders.size(); ++i) { + final BatchExportBuilder exportBuilder = exportBuilders.get(i); + final int exportId = exportBuilder.exportBuilder.getExportId(); - final TableReference resultId; - if (exportId == SessionState.NON_EXPORT_ID) { - resultId = TableReference.newBuilder().setBatchOffset(i).build(); - } else { - resultId = ExportTicketHelper.tableReference(exportId); + final TableReference resultId; + if (exportId == SessionState.NON_EXPORT_ID) { + resultId = TableReference.newBuilder().setBatchOffset(i).build(); + } else { + resultId = ExportTicketHelper.tableReference(exportId); + } + + exportBuilder.exportBuilder.onError((result, errorContext, cause, dependentId) -> { + String errorInfo = errorContext; + if (dependentId != null) { + errorInfo += " dependency: " + dependentId; + } + if (cause instanceof StatusRuntimeException) { + errorInfo += " cause: " + cause.getMessage(); + firstFailure.compareAndSet(null, (StatusRuntimeException) cause); + } + final ExportedTableCreationResponse response = ExportedTableCreationResponse.newBuilder() + .setResultId(resultId) + .setSuccess(false) + .setErrorInfo(errorInfo) + .build(); + safelyOnNext(responseObserver, response); + onOneResolved.run(); + }).onSuccess(table -> { + final ExportedTableCreationResponse response = + ExportUtil.buildTableCreationResponse(resultId, table); + safelyOnNext(responseObserver, response); + onOneResolved.run(); + 
}).submit(exportBuilder::doExport); } - exportBuilder.exportBuilder.onError((result, errorContext, cause, dependentId) -> { - String errorInfo = errorContext; - if (dependentId != null) { - errorInfo += " dependency: " + dependentId; - } - if (cause instanceof StatusRuntimeException) { - errorInfo += " cause: " + cause.getMessage(); - firstFailure.compareAndSet(null, (StatusRuntimeException) cause); - } - final ExportedTableCreationResponse response = ExportedTableCreationResponse.newBuilder() - .setResultId(resultId) - .setSuccess(false) - .setErrorInfo(errorInfo) - .build(); - safelyOnNext(responseObserver, response); - onOneResolved.run(); - }).onSuccess(table -> { - final ExportedTableCreationResponse response = - ExportUtil.buildTableCreationResponse(resultId, table); - safelyOnNext(responseObserver, response); - onOneResolved.run(); - }).submit(exportBuilder::doExport); + // now that we've submitted everything we'll suspend the query and release our refcount + queryPerformanceRecorder.suspendQuery(); + onOneResolved.run(); + } finally { + QueryPerformanceRecorder.resetInstance(); } - - // now that we've submitted everything we'll suspend the query and release our refcount - queryPerformanceRecorder.suspendQuery(); - onOneResolved.run(); } @Override @@ -664,25 +668,28 @@ private void oneShotOperationWrapper( final QueryPerformanceRecorderImpl queryPerformanceRecorder = new QueryPerformanceRecorderImpl( description, QueryPerformanceNugget.DEFAULT_FACTORY); + try { + operation.validateRequest(request); - operation.validateRequest(request); - - final List> dependencies = operation.getTableReferences(request).stream() - .map(ref -> resolveOneShotReference(session, ref)) - .collect(Collectors.toList()); + final List> dependencies = operation.getTableReferences(request).stream() + .map(ref -> resolveOneShotReference(session, ref)) + .collect(Collectors.toList()); - session.newExport(resultId, "resultId") - .require(dependencies) - .onError(responseObserver) - 
.queryPerformanceRecorder(queryPerformanceRecorder, false) - .submit(() -> { - operation.checkPermission(request, dependencies); - final Table result = operation.create(request, dependencies); - final ExportedTableCreationResponse response = - ExportUtil.buildTableCreationResponse(resultId, result); - safelyComplete(responseObserver, response); - return result; - }); + session.newExport(resultId, "resultId") + .require(dependencies) + .onError(responseObserver) + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .submit(() -> { + operation.checkPermission(request, dependencies); + final Table result = operation.create(request, dependencies); + final ExportedTableCreationResponse response = + ExportUtil.buildTableCreationResponse(resultId, result); + safelyComplete(responseObserver, response); + return result; + }); + } finally { + QueryPerformanceRecorder.resetInstance(); + } } private SessionState.ExportObject
resolveOneShotReference( From 2695c077a93cde8bdc540f79e4527f2255eb9843 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Wed, 8 Nov 2023 22:16:06 -0700 Subject: [PATCH 11/31] revert cpp-test host change --- cpp-client/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpp-client/build.gradle b/cpp-client/build.gradle index 25669240859..0d1dfe07b72 100644 --- a/cpp-client/build.gradle +++ b/cpp-client/build.gradle @@ -130,7 +130,7 @@ def testCppClient = Docker.registerDockerTask(project, 'testCppClient') { // // Setup for test run. // - environmentVariable 'DH_HOST', '10.10.0.17' + environmentVariable 'DH_HOST', deephavenDocker.containerName.get() environmentVariable 'DH_PORT', '10000' } containerDependencies.dependsOn = [deephavenDocker.healthyTask] From be34bb220189b327ebc2b0e23d60d156309e42e4 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Wed, 8 Nov 2023 22:27:56 -0700 Subject: [PATCH 12/31] bugfix SessionState where resetInstance occurs --- .../src/main/java/io/deephaven/server/session/SessionState.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index 6d219d0a1c2..e4466bf52f8 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -1004,6 +1004,7 @@ private void doExport() { localResult = capturedExport.call(); } finally { shouldLog = exportRecorder.endQuery(); + QueryPerformanceRecorder.resetInstance(); } } catch (final Exception err) { @@ -1021,7 +1022,6 @@ private void doExport() { if (caughtException != null && queryProcessingResults != null) { queryProcessingResults.setException(caughtException.toString()); } - QueryPerformanceRecorder.resetInstance(); } if ((shouldLog || caughtException != null) && queryProcessingResults != null) { if 
(queryPerformanceRecorder != null && qprIsForBatch) { From 4563ed90006e545d5895f3c81f730669cfb3b98e Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Thu, 9 Nov 2023 08:24:19 -0700 Subject: [PATCH 13/31] Fix suspend query ordering in SessionState for one shot --- .../main/java/io/deephaven/server/session/SessionState.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index e4466bf52f8..adf76c5089e 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -1487,11 +1487,11 @@ public ExportBuilder onSuccess(final Runnable successHandler) { * @return the submitted export object */ public ExportObject submit(final Callable exportMain) { - export.setWork(exportMain, errorHandler, successHandler, requiresSerialQueue); if (export.queryPerformanceRecorder != null && !export.qprIsForBatch) { - // transfer ownership of the qpr to the export + // transfer ownership of the qpr to the export before it can be resumed by the scheduler export.queryPerformanceRecorder.suspendQuery(); } + export.setWork(exportMain, errorHandler, successHandler, requiresSerialQueue); return export; } From 9d00dd2894d61402f2204defa2cfb6aaf3bbc4fa Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Thu, 9 Nov 2023 12:59:22 -0700 Subject: [PATCH 14/31] Bug Fixes + Inline Review Changes --- .../table/impl/perf/BasePerformanceEntry.java | 66 +++++++++---------- .../table/impl/perf/PerformanceEntry.java | 4 +- .../impl/perf/QueryPerformanceNugget.java | 20 +++--- .../perf/QueryPerformanceRecorderImpl.java | 22 ++++--- .../UpdatePerformanceStreamPublisher.java | 2 +- ...ryOperationPerformanceStreamPublisher.java | 2 +- .../util/QueryPerformanceStreamPublisher.java | 2 +- .../server/session/SessionState.java | 57 +++++++++------- 8 files 
changed, 93 insertions(+), 82 deletions(-) diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java index d7f61d94dd4..e9aae8c9a4a 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java @@ -16,13 +16,13 @@ * A smaller entry that simply records usage data, meant for aggregating into the larger entry. */ public class BasePerformanceEntry implements LogOutputAppendable { - private long intervalUsageNanos; + private long usageNanos; - private long intervalCpuNanos; - private long intervalUserCpuNanos; + private long cpuNanos; + private long userCpuNanos; - private long intervalAllocatedBytes; - private long intervalPoolAllocatedBytes; + private long allocatedBytes; + private long poolAllocatedBytes; private long startTimeNanos; @@ -42,16 +42,16 @@ public synchronized void onBaseEntryStart() { } public synchronized void onBaseEntryEnd() { - intervalUserCpuNanos = plus(intervalUserCpuNanos, + userCpuNanos = plus(userCpuNanos, minus(ThreadProfiler.DEFAULT.getCurrentThreadUserTime(), startUserCpuNanos)); - intervalCpuNanos = - plus(intervalCpuNanos, minus(ThreadProfiler.DEFAULT.getCurrentThreadCpuTime(), startCpuNanos)); + cpuNanos = + plus(cpuNanos, minus(ThreadProfiler.DEFAULT.getCurrentThreadCpuTime(), startCpuNanos)); - intervalUsageNanos += System.nanoTime() - startTimeNanos; + usageNanos += System.nanoTime() - startTimeNanos; - intervalPoolAllocatedBytes = plus(intervalPoolAllocatedBytes, + poolAllocatedBytes = plus(poolAllocatedBytes, minus(QueryPerformanceRecorder.getPoolAllocatedBytesForCurrentThread(), startPoolAllocatedBytes)); - intervalAllocatedBytes = plus(intervalAllocatedBytes, + allocatedBytes = plus(allocatedBytes, minus(ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(), 
startAllocatedBytes)); startAllocatedBytes = 0; @@ -65,13 +65,13 @@ public synchronized void onBaseEntryEnd() { synchronized void baseEntryReset() { Assert.eqZero(startTimeNanos, "startTimeNanos"); - intervalUsageNanos = 0; + usageNanos = 0; - intervalCpuNanos = 0; - intervalUserCpuNanos = 0; + cpuNanos = 0; + userCpuNanos = 0; - intervalAllocatedBytes = 0; - intervalPoolAllocatedBytes = 0; + allocatedBytes = 0; + poolAllocatedBytes = 0; } /** @@ -79,17 +79,17 @@ synchronized void baseEntryReset() { * * @return total wall clock time in nanos */ - public long getTotalTimeNanos() { - return intervalUsageNanos; + public long getUsageNanos() { + return usageNanos; } /** * Get the aggregate cpu time in nanoseconds. Invoking this getter is valid iff the entry will no longer be mutated. - * + * * @return total cpu time in nanos */ public long getCpuNanos() { - return intervalCpuNanos; + return cpuNanos; } /** @@ -99,7 +99,7 @@ public long getCpuNanos() { * @return total cpu user time in nanos */ public long getUserCpuNanos() { - return intervalUserCpuNanos; + return userCpuNanos; } /** @@ -109,7 +109,7 @@ public long getUserCpuNanos() { * @return The bytes of allocated memory attributed to the instrumented operation. 
*/ public long getAllocatedBytes() { - return intervalAllocatedBytes; + return allocatedBytes; } /** @@ -119,17 +119,17 @@ public long getAllocatedBytes() { * @return total pool allocated memory in bytes */ public long getPoolAllocatedBytes() { - return intervalPoolAllocatedBytes; + return poolAllocatedBytes; } @Override public LogOutput append(@NotNull final LogOutput logOutput) { final LogOutput currentValues = logOutput.append("BasePerformanceEntry{") - .append(", intervalUsageNanos=").append(intervalUsageNanos) - .append(", intervalCpuNanos=").append(intervalCpuNanos) - .append(", intervalUserCpuNanos=").append(intervalUserCpuNanos) - .append(", intervalAllocatedBytes=").append(intervalAllocatedBytes) - .append(", intervalPoolAllocatedBytes=").append(intervalPoolAllocatedBytes); + .append(", intervalUsageNanos=").append(usageNanos) + .append(", intervalCpuNanos=").append(cpuNanos) + .append(", intervalUserCpuNanos=").append(userCpuNanos) + .append(", intervalAllocatedBytes=").append(allocatedBytes) + .append(", intervalPoolAllocatedBytes=").append(poolAllocatedBytes); return appendStart(currentValues) .append('}'); } @@ -149,11 +149,11 @@ LogOutput appendStart(LogOutput logOutput) { * @param entry the entry to accumulate */ public synchronized void accumulate(@NotNull final BasePerformanceEntry entry) { - this.intervalUsageNanos += entry.intervalUsageNanos; - this.intervalCpuNanos = plus(this.intervalCpuNanos, entry.intervalCpuNanos); - this.intervalUserCpuNanos = plus(this.intervalUserCpuNanos, entry.intervalUserCpuNanos); + this.usageNanos += entry.usageNanos; + this.cpuNanos = plus(this.cpuNanos, entry.cpuNanos); + this.userCpuNanos = plus(this.userCpuNanos, entry.userCpuNanos); - this.intervalAllocatedBytes = plus(this.intervalAllocatedBytes, entry.intervalAllocatedBytes); - this.intervalPoolAllocatedBytes = plus(this.intervalPoolAllocatedBytes, entry.intervalPoolAllocatedBytes); + this.allocatedBytes = plus(this.allocatedBytes, entry.allocatedBytes); + 
this.poolAllocatedBytes = plus(this.poolAllocatedBytes, entry.poolAllocatedBytes); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java index 6d42ee2b1d3..889581ab928 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/PerformanceEntry.java @@ -123,7 +123,7 @@ public LogOutput append(@NotNull final LogOutput logOutput) { .append(", description='").append(description).append('\'') .append(", callerLine='").append(callerLine).append('\'') .append(", authContext=").append(authContext) - .append(", intervalUsageNanos=").append(getTotalTimeNanos()) + .append(", intervalUsageNanos=").append(getUsageNanos()) .append(", intervalCpuNanos=").append(getCpuNanos()) .append(", intervalUserCpuNanos=").append(getUserCpuNanos()) .append(", intervalInvocationCount=").append(intervalInvocationCount) @@ -218,7 +218,7 @@ public long getIntervalInvocationCount() { */ boolean shouldLogEntryInterval() { return intervalInvocationCount > 0 && - UpdatePerformanceTracker.LOG_THRESHOLD.shouldLog(getTotalTimeNanos()); + UpdatePerformanceTracker.LOG_THRESHOLD.shouldLog(getUsageNanos()); } public void accumulate(PerformanceEntry entry) { diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java index fdf02e0d64c..35eb3b63d41 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java @@ -143,7 +143,7 @@ default QueryPerformanceNugget createForCatchAll( private final RuntimeMemory.Sample startMemorySample; private final RuntimeMemory.Sample endMemorySample; - private boolean 
shouldLogMeAndStackParents; + private boolean shouldLogThisAndStackParents; /** * Full constructor for nuggets. @@ -194,7 +194,7 @@ protected QueryPerformanceNugget( onBaseEntryStart(); state = QueryState.RUNNING; - shouldLogMeAndStackParents = false; + shouldLogThisAndStackParents = false; } /** @@ -218,7 +218,7 @@ private QueryPerformanceNugget() { startClockEpochNanos = NULL_LONG; state = null; // This turns close into a no-op. - shouldLogMeAndStackParents = false; + shouldLogThisAndStackParents = false; } public void done() { @@ -416,16 +416,16 @@ public boolean wasInterrupted() { /** * Ensure this nugget gets logged, alongside its stack of nesting operations. */ - public void setShouldLogMeAndStackParents() { - shouldLogMeAndStackParents = true; + public void setShouldLogThisAndStackParents() { + shouldLogThisAndStackParents = true; } /** * @return true if this nugget triggers the logging of itself and every other nugget in its stack of nesting * operations. */ - public boolean shouldLogMeAndStackParents() { - return shouldLogMeAndStackParents; + public boolean shouldLogThisAndStackParents() { + return shouldLogThisAndStackParents; } /** @@ -437,7 +437,7 @@ public boolean shouldLogMeAndStackParents() { * @return if this nugget is significant enough to be logged. 
*/ boolean shouldLogNugget(final boolean isUninstrumented) { - if (shouldLogMeAndStackParents) { + if (shouldLogThisAndStackParents) { return true; } @@ -448,9 +448,9 @@ boolean shouldLogNugget(final boolean isUninstrumented) { } if (isUninstrumented) { - return UNINSTRUMENTED_LOG_THRESHOLD.shouldLog(getTotalTimeNanos()); + return UNINSTRUMENTED_LOG_THRESHOLD.shouldLog(getUsageNanos()); } else { - return LOG_THRESHOLD.shouldLog(getTotalTimeNanos()); + return LOG_THRESHOLD.shouldLog(getUsageNanos()); } } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java index 74715d0483b..7b1f868023e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java @@ -219,7 +219,7 @@ public synchronized QueryPerformanceNugget getNugget(@NotNull final String name, * @param nugget the nugget to be released * @return If the nugget passes criteria for logging. */ - synchronized boolean releaseNugget(QueryPerformanceNugget nugget) { + synchronized boolean releaseNugget(@NotNull final QueryPerformanceNugget nugget) { boolean shouldLog = nugget.shouldLogNugget(nugget == catchAllNugget); if (!nugget.isUser()) { return shouldLog; @@ -234,13 +234,17 @@ synchronized boolean releaseNugget(QueryPerformanceNugget nugget) { ") - did you follow the correct try/finally pattern?"); } - if (removed.shouldLogMeAndStackParents()) { - shouldLog = true; + shouldLog |= removed.shouldLogThisAndStackParents(); + + if (shouldLog) { + // It is entirely possible, with parallelization, that this nugget should be logged while the outer nugget + // has a wall clock time less than the threshold for logging. 
If we ever want to log this nugget, we must + // log + // all of its parents as well regardless of the shouldLogNugget call result. if (!userNuggetStack.isEmpty()) { - userNuggetStack.getLast().setShouldLogMeAndStackParents(); + userNuggetStack.getLast().setShouldLogThisAndStackParents(); } - } - if (!shouldLog) { + } else { // If we have filtered this nugget, by our filter design we will also have filtered any nuggets it encloses. // This means it *must* be the last entry in operationNuggets, so we can safely remove it in O(1). final QueryPerformanceNugget lastNugget = operationNuggets.remove(operationNuggets.size() - 1); @@ -278,11 +282,11 @@ public void setQueryData(final EntrySetter setter) { if (operationNumber > 0) { // ensure UPL and QOPL are consistent/joinable. if (!userNuggetStack.isEmpty()) { - userNuggetStack.getLast().setShouldLogMeAndStackParents(); + userNuggetStack.getLast().setShouldLogThisAndStackParents(); } else { uninstrumented = true; if (catchAllNugget != null) { - catchAllNugget.setShouldLogMeAndStackParents(); + catchAllNugget.setShouldLogThisAndStackParents(); } } } @@ -314,7 +318,7 @@ public synchronized Table getTimingResultsAsTable() { final Boolean[] isCompileTime = new Boolean[count]; for (int i = 0; i < operationNuggets.size(); i++) { - timeNanos[i] = operationNuggets.get(i).getTotalTimeNanos(); + timeNanos[i] = operationNuggets.get(i).getUsageNanos(); names[i] = operationNuggets.get(i).getName(); callerLine[i] = operationNuggets.get(i).getCallerLine(); isTopLevel[i] = operationNuggets.get(i).isTopLevel(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java index 69432d519a9..4139aefabdf 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java +++ 
b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceStreamPublisher.java @@ -85,7 +85,7 @@ public synchronized void add(IntervalLevelDetails intervalLevelDetails, Performa // ColumnDefinition.ofLong("IntervalDurationNanos"), chunks[7].asWritableLongChunk().add(intervalLevelDetails.getIntervalDurationNanos()); // ColumnDefinition.ofLong("EntryIntervalUsage"), - chunks[8].asWritableLongChunk().add(performanceEntry.getTotalTimeNanos()); + chunks[8].asWritableLongChunk().add(performanceEntry.getUsageNanos()); // ColumnDefinition.ofLong("EntryIntervalCpuNanos"), chunks[9].asWritableLongChunk().add(performanceEntry.getCpuNanos()); // ColumnDefinition.ofLong("EntryIntervalUserCpuNanos"), diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java index 9d19ee9eb77..5f0316bbfd3 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java @@ -98,7 +98,7 @@ public synchronized void add(final QueryPerformanceNugget nugget) { chunks[9].asWritableLongChunk().add(nugget.getEndClockEpochNanos()); // ColumnDefinition.ofLong("DurationNanos"), - chunks[10].asWritableLongChunk().add(nugget.getTotalTimeNanos()); + chunks[10].asWritableLongChunk().add(nugget.getUsageNanos()); // ColumnDefinition.ofLong("CpuNanos"), chunks[11].asWritableLongChunk().add(nugget.getCpuNanos()); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java index 4c04bbd5078..9ef45b5b06f 100644 --- 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java @@ -81,7 +81,7 @@ public synchronized void add( chunks[4].asWritableLongChunk().add(nugget.getEndClockEpochNanos()); // ColumnDefinition.ofLong("DurationNanos") - chunks[5].asWritableLongChunk().add(nugget.getTotalTimeNanos()); + chunks[5].asWritableLongChunk().add(nugget.getUsageNanos()); // ColumnDefinition.ofLong("CpuNanos") chunks[6].asWritableLongChunk().add(nugget.getCpuNanos()); diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index adf76c5089e..49454d099cb 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -984,46 +984,53 @@ private void doExport() { QueryProcessingResults queryProcessingResults = null; try (final SafeCloseable ignored1 = session.executionContext.open(); final SafeCloseable ignored2 = LivenessScopeStack.open()) { - try { - if (queryPerformanceRecorder != null && !qprIsForBatch) { - exportRecorder = queryPerformanceRecorder.resumeQuery(); - } else if (queryPerformanceRecorder != null) { - // this is a sub-query; no need to re-log the session id - exportRecorder = new QueryPerformanceRecorderImpl( - "ExportObject#doWork(exportId=" + logIdentity + ")", - queryPerformanceRecorder, - QueryPerformanceNugget.DEFAULT_FACTORY); - } else { - exportRecorder = new QueryPerformanceRecorderImpl( - "ExportObject#doWork(session=" + session.sessionId + ",exportId=" + logIdentity + ")", - QueryPerformanceNugget.DEFAULT_FACTORY); - } - queryProcessingResults = new QueryProcessingResults(exportRecorder); + if (queryPerformanceRecorder != null && !qprIsForBatch) { + exportRecorder = queryPerformanceRecorder.resumeQuery(); + } else if 
(queryPerformanceRecorder != null) { + // this is a sub-query; no need to re-log the session id + exportRecorder = new QueryPerformanceRecorderImpl( + "ExportObject#doWork(exportId=" + logIdentity + ")", + queryPerformanceRecorder, + QueryPerformanceNugget.DEFAULT_FACTORY); + } else { + exportRecorder = new QueryPerformanceRecorderImpl( + "ExportObject#doWork(session=" + session.sessionId + ",exportId=" + logIdentity + ")", + QueryPerformanceNugget.DEFAULT_FACTORY); + } + queryProcessingResults = new QueryProcessingResults(exportRecorder); + + try { + localResult = capturedExport.call(); + } catch (final Exception err) { + caughtException = err; + } finally { try { - localResult = capturedExport.call(); - } finally { shouldLog = exportRecorder.endQuery(); + } catch (final Exception err) { + // end query will throw if the export runner left the QPR in a bad state + if (caughtException == null) { + caughtException = err; + } + } finally { QueryPerformanceRecorder.resetInstance(); } + } - } catch (final Exception err) { - caughtException = err; + if (caughtException != null) { + queryProcessingResults.setException(caughtException.toString()); synchronized (this) { if (!isExportStateTerminal(state)) { maybeAssignErrorId(); if (!(caughtException instanceof StatusRuntimeException)) { - log.error().append("Internal Error '").append(errorId).append("' ").append(err).endl(); + log.error().append("Internal Error '").append(errorId).append("' ") + .append(caughtException).endl(); } setState(ExportNotification.State.FAILED); } } - } finally { - if (caughtException != null && queryProcessingResults != null) { - queryProcessingResults.setException(caughtException.toString()); - } } - if ((shouldLog || caughtException != null) && queryProcessingResults != null) { + if (shouldLog || caughtException != null) { if (queryPerformanceRecorder != null && qprIsForBatch) { Assert.neqNull(exportRecorder, "exportRecorder"); queryPerformanceRecorder.accumulate(exportRecorder); From 
2ddb4409188e819b6fa8df64862a1faa16a10880 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Mon, 13 Nov 2023 11:10:23 -0700 Subject: [PATCH 15/31] Non-invasive Rnd3 Feedback --- .../engine/table/impl/QueryTable.java | 4 +-- .../table/impl/perf/BasePerformanceEntry.java | 18 +++++++------ .../impl/perf/QueryPerformanceNugget.java | 25 +++++++++++++------ .../impl/perf/QueryPerformanceRecorder.java | 20 +++++++++++---- .../perf/QueryPerformanceRecorderImpl.java | 4 +-- .../impl/perf/QueryProcessingResults.java | 6 +---- .../engine/table/impl/updateby/UpdateBy.java | 3 ++- .../table/ops/TableServiceGrpcImpl.java | 5 ++-- 8 files changed, 53 insertions(+), 32 deletions(-) diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java index b43d6531d61..8d78c5ef5d4 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java @@ -1269,7 +1269,7 @@ void handleUncaughtException(Exception throwable) { initialFilterExecution.getBasePerformanceEntry(); if (basePerformanceEntry != null) { final QueryPerformanceNugget outerNugget = - QueryPerformanceRecorder.getInstance().getOuterNugget(); + QueryPerformanceRecorder.getInstance().getEnclosingNugget(); if (outerNugget != null) { outerNugget.accumulate(basePerformanceEntry); } @@ -1517,7 +1517,7 @@ this, mode, columns, rowSet, getModifiedColumnSetForUpdates(), publishTheseSourc final BasePerformanceEntry baseEntry = jobScheduler.getAccumulatedPerformance(); if (baseEntry != null) { final QueryPerformanceNugget outerNugget = - QueryPerformanceRecorder.getInstance().getOuterNugget(); + QueryPerformanceRecorder.getInstance().getEnclosingNugget(); if (outerNugget != null) { outerNugget.accumulate(baseEntry); } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java 
b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java index e9aae8c9a4a..40586105d58 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java @@ -75,7 +75,8 @@ synchronized void baseEntryReset() { } /** - * Get the aggregate usage in nanoseconds. Invoking this getter is valid iff the entry will no longer be mutated. + * Get the aggregate usage in nanoseconds. This getter should be called by exclusive owners of the entry, and never + * concurrently with mutators. * * @return total wall clock time in nanos */ @@ -84,7 +85,8 @@ public long getUsageNanos() { } /** - * Get the aggregate cpu time in nanoseconds. Invoking this getter is valid iff the entry will no longer be mutated. + * Get the aggregate cpu time in nanoseconds. This getter should be called by exclusive owners of the entry, and + * never concurrently with mutators. * * @return total cpu time in nanos */ @@ -93,8 +95,8 @@ public long getCpuNanos() { } /** - * Get the aggregate cpu user time in nanoseconds. Invoking this getter is valid iff the entry will no longer be - * mutated. + * Get the aggregate cpu user time in nanoseconds. This getter should be called by exclusive owners of the entry, + * and never concurrently with mutators. * * @return total cpu user time in nanos */ @@ -103,8 +105,8 @@ public long getUserCpuNanos() { } /** - * Get the aggregate allocated memory in bytes. Invoking this getter is valid iff the entry will no longer be - * mutated. + * Get the aggregate allocated memory in bytes. This getter should be called by exclusive owners of the entry, and + * never concurrently with mutators. * * @return The bytes of allocated memory attributed to the instrumented operation. 
*/ @@ -113,8 +115,8 @@ public long getAllocatedBytes() { } /** - * Get allocated pooled/reusable memory attributed to the instrumented operation in bytes. Invoking this getter is - * valid iff the entry will no longer be mutated. + * Get allocated pooled/reusable memory attributed to the instrumented operation in bytes. This getter should be + * called by exclusive owners of the entry, and never concurrently with mutators. * * @return total pool allocated memory in bytes */ diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java index 35eb3b63d41..92aa7d8889b 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java @@ -34,6 +34,16 @@ public class QueryPerformanceNugget extends BasePerformanceEntry implements Safe public void accumulate(@NotNull BasePerformanceEntry entry) { // non-synchronized no-op override } + + @Override + public boolean shouldLogThisAndStackParents() { + return false; + } + + @Override + boolean shouldLogNugget(boolean isUninstrumented) { + return false; + } }; public interface Factory { @@ -322,15 +332,16 @@ public boolean isUser() { return isUser; } - public boolean isBatchLevel() { - return isQueryLevel() && parentEvaluationNumber == NULL_LONG; - } - public boolean isQueryLevel() { return operationNumber == NULL_INT; } - public boolean isTopLevel() { + public boolean isTopLevelQuery() { + return isQueryLevel() && parentEvaluationNumber == NULL_LONG; + } + + public boolean isTopLevelOperation() { + // note that query level nuggets have depth == NULL_INT return depth == 0; } @@ -416,7 +427,7 @@ public boolean wasInterrupted() { /** * Ensure this nugget gets logged, alongside its stack of nesting operations. 
*/ - public void setShouldLogThisAndStackParents() { + void setShouldLogThisAndStackParents() { shouldLogThisAndStackParents = true; } @@ -424,7 +435,7 @@ public void setShouldLogThisAndStackParents() { * @return true if this nugget triggers the logging of itself and every other nugget in its stack of nesting * operations. */ - public boolean shouldLogThisAndStackParents() { + boolean shouldLogThisAndStackParents() { return shouldLogThisAndStackParents; } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java index 856d695d21a..549dfb2c9a0 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java @@ -82,22 +82,32 @@ public static void resetInstance() { } /** + * Create a nugget at the top of the user stack. May return a {@link QueryPerformanceNugget#DUMMY_NUGGET} if no + * recorder is installed. + * * @param name the nugget name - * @return A new QueryPerformanceNugget to encapsulate user query operations. done() must be called on the nugget. + * @return A new QueryPerformanceNugget to encapsulate user query operations. {@link QueryPerformanceNugget#done()} + * or {@link QueryPerformanceNugget#close()} must be called on the nugget. */ public abstract QueryPerformanceNugget getNugget(@NotNull String name); /** + * Create a nugget at the top of the user stack. May return a {@link QueryPerformanceNugget#DUMMY_NUGGET} if no + * recorder is installed. + * * @param name the nugget name * @param inputSize the nugget's input size - * @return A new QueryPerformanceNugget to encapsulate user query operations. done() must be called on the nugget. + * @return A new QueryPerformanceNugget to encapsulate user query operations. 
{@link QueryPerformanceNugget#done()} + * or {@link QueryPerformanceNugget#close()} must be called on the nugget. */ public abstract QueryPerformanceNugget getNugget(@NotNull String name, long inputSize); /** - * @return The nugget currently in effect or else a dummy nugget if no nugget is in effect. + * This is the nugget enclosing the current operation. It may belong to the dummy recorder, or a real one. + * + * @return Either a "catch-all" nugget, or the top of the user nugget stack. */ - public abstract QueryPerformanceNugget getOuterNugget(); + public abstract QueryPerformanceNugget getEnclosingNugget(); /** * Note: Do not call this directly - it's for nugget use only. Call {@link QueryPerformanceNugget#done()} or @@ -462,7 +472,7 @@ public QueryPerformanceNugget getNugget(@NotNull final String name, long inputSi } @Override - public QueryPerformanceNugget getOuterNugget() { + public QueryPerformanceNugget getEnclosingNugget() { return QueryPerformanceNugget.DUMMY_NUGGET; } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java index 7b1f868023e..3c70db1d0e3 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java @@ -265,7 +265,7 @@ synchronized boolean releaseNugget(@NotNull final QueryPerformanceNugget nugget) } @Override - public synchronized QueryPerformanceNugget getOuterNugget() { + public synchronized QueryPerformanceNugget getEnclosingNugget() { return userNuggetStack.peekLast(); } @@ -321,7 +321,7 @@ public synchronized Table getTimingResultsAsTable() { timeNanos[i] = operationNuggets.get(i).getUsageNanos(); names[i] = operationNuggets.get(i).getName(); callerLine[i] = operationNuggets.get(i).getCallerLine(); - isTopLevel[i] = 
operationNuggets.get(i).isTopLevel(); + isTopLevel[i] = operationNuggets.get(i).isTopLevelOperation(); isCompileTime[i] = operationNuggets.get(i).getName().startsWith("Compile:"); } return TableTools.newTable( diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryProcessingResults.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryProcessingResults.java index 6bdc697b598..41b04ff7a8f 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryProcessingResults.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryProcessingResults.java @@ -3,11 +3,7 @@ */ package io.deephaven.engine.table.impl.perf; -import java.io.Serializable; - -public class QueryProcessingResults implements Serializable { - - private static final long serialVersionUID = 2L; +public class QueryProcessingResults { private final QueryPerformanceRecorder recorder; diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java index e603905e5c7..a9f1a4561ae 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java @@ -907,7 +907,8 @@ private void cleanUpAndNotify(final Runnable onCleanupComplete) { final BasePerformanceEntry accumulated = jobScheduler.getAccumulatedPerformance(); if (accumulated != null) { if (initialStep) { - final QueryPerformanceNugget outerNugget = QueryPerformanceRecorder.getInstance().getOuterNugget(); + final QueryPerformanceNugget outerNugget = + QueryPerformanceRecorder.getInstance().getEnclosingNugget(); if (outerNugget != null) { outerNugget.accumulate(accumulated); } diff --git a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java index 
20890529091..e7784812e9e 100644 --- a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java @@ -550,8 +550,9 @@ public void batch( } else { safelyComplete(responseObserver); } - queryPerformanceRecorder.endQuery(); - EngineMetrics.getInstance().logQueryProcessingResults(results); + if (queryPerformanceRecorder.endQuery()) { + EngineMetrics.getInstance().logQueryProcessingResults(results); + } } finally { QueryPerformanceRecorder.resetInstance(); } From 8d30cc60f9a11229c496e1f5c0de5349668be8ec Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Mon, 13 Nov 2023 16:17:37 -0700 Subject: [PATCH 16/31] The invasive changes of rnd3 feedback --- .../table/impl/perf/BasePerformanceEntry.java | 4 +- .../impl/perf/QueryPerformanceNugget.java | 88 ++-- .../impl/perf/QueryPerformanceRecorder.java | 402 +++++++----------- .../perf/QueryPerformanceRecorderImpl.java | 198 ++++----- .../perf/QueryPerformanceRecorderState.java | 263 ++++++++++++ .../engine/table/impl/perf/QueryState.java | 2 +- py/server/deephaven/perfmon.py | 32 ++ .../server/runner/DeephavenApiServer.java | 6 +- .../server/session/SessionState.java | 51 +-- .../table/ops/TableServiceGrpcImpl.java | 21 +- 10 files changed, 630 insertions(+), 437 deletions(-) create mode 100644 engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java index 40586105d58..4da70a34572 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/BasePerformanceEntry.java @@ -34,7 +34,7 @@ public class BasePerformanceEntry implements LogOutputAppendable { public synchronized void 
onBaseEntryStart() { startAllocatedBytes = ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(); - startPoolAllocatedBytes = QueryPerformanceRecorder.getPoolAllocatedBytesForCurrentThread(); + startPoolAllocatedBytes = QueryPerformanceRecorderState.getPoolAllocatedBytesForCurrentThread(); startUserCpuNanos = ThreadProfiler.DEFAULT.getCurrentThreadUserTime(); startCpuNanos = ThreadProfiler.DEFAULT.getCurrentThreadCpuTime(); @@ -50,7 +50,7 @@ public synchronized void onBaseEntryEnd() { usageNanos += System.nanoTime() - startTimeNanos; poolAllocatedBytes = plus(poolAllocatedBytes, - minus(QueryPerformanceRecorder.getPoolAllocatedBytesForCurrentThread(), startPoolAllocatedBytes)); + minus(QueryPerformanceRecorderState.getPoolAllocatedBytesForCurrentThread(), startPoolAllocatedBytes)); allocatedBytes = plus(allocatedBytes, minus(ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(), startAllocatedBytes)); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java index 92aa7d8889b..160fa90d1d2 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java @@ -13,6 +13,8 @@ import io.deephaven.util.SafeCloseable; import org.jetbrains.annotations.NotNull; +import java.util.function.Predicate; + import static io.deephaven.util.QueryConstants.*; /** @@ -52,11 +54,16 @@ public interface Factory { * * @param evaluationNumber A unique identifier for the query evaluation that triggered this nugget creation * @param description The operation description + * @param onCloseCallback A callback that is invoked when the nugget is closed. It returns whether the nugget + * should be logged. 
* @return A new nugget */ - default QueryPerformanceNugget createForQuery(final long evaluationNumber, @NotNull final String description) { + default QueryPerformanceNugget createForQuery( + final long evaluationNumber, + @NotNull final String description, + @NotNull final Predicate onCloseCallback) { return new QueryPerformanceNugget(evaluationNumber, NULL_LONG, NULL_INT, NULL_INT, NULL_INT, - description, false, NULL_LONG); + description, false, NULL_LONG, onCloseCallback); } /** @@ -65,15 +72,18 @@ default QueryPerformanceNugget createForQuery(final long evaluationNumber, @NotN * @param parentQuery The parent query nugget * @param evaluationNumber A unique identifier for the sub-query evaluation that triggered this nugget creation * @param description The operation description + * @param onCloseCallback A callback that is invoked when the nugget is closed. It returns whether the nugget + * should be logged. * @return A new nugget */ default QueryPerformanceNugget createForSubQuery( @NotNull final QueryPerformanceNugget parentQuery, final long evaluationNumber, - @NotNull final String description) { + @NotNull final String description, + @NotNull final Predicate onCloseCallback) { Assert.eqTrue(parentQuery.isQueryLevel(), "parentQuery.isQueryLevel()"); return new QueryPerformanceNugget(evaluationNumber, parentQuery.getEvaluationNumber(), - NULL_INT, NULL_INT, NULL_INT, description, false, NULL_LONG); + NULL_INT, NULL_INT, NULL_INT, description, false, NULL_LONG, onCloseCallback); } /** @@ -82,13 +92,16 @@ default QueryPerformanceNugget createForSubQuery( * @param parentQueryOrOperation The parent query / operation nugget * @param operationNumber A query-unique identifier for the operation * @param description The operation description + * @param onCloseCallback A callback that is invoked when the nugget is closed. It returns whether the nugget + * should be logged. 
* @return A new nugget */ default QueryPerformanceNugget createForOperation( @NotNull final QueryPerformanceNugget parentQueryOrOperation, final int operationNumber, final String description, - final long inputSize) { + final long inputSize, + @NotNull final Predicate onCloseCallback) { int depth = parentQueryOrOperation.getDepth(); if (depth == NULL_INT) { depth = 0; @@ -104,7 +117,8 @@ default QueryPerformanceNugget createForOperation( depth, description, true, // operations are always user - inputSize); + inputSize, + onCloseCallback); } /** @@ -112,11 +126,14 @@ default QueryPerformanceNugget createForOperation( * * @param parentQuery The parent query nugget * @param operationNumber A query-unique identifier for the operation + * @param onCloseCallback A callback that is invoked when the nugget is closed. It returns whether the nugget + * should be logged. * @return A new nugget */ default QueryPerformanceNugget createForCatchAll( @NotNull final QueryPerformanceNugget parentQuery, - final int operationNumber) { + final int operationNumber, + @NotNull final Predicate onCloseCallback) { Assert.eqTrue(parentQuery.isQueryLevel(), "parentQuery.isQueryLevel()"); return new QueryPerformanceNugget( parentQuery.getEvaluationNumber(), @@ -126,7 +143,8 @@ default QueryPerformanceNugget createForCatchAll( 0, // catch all is a root operation QueryPerformanceRecorder.UNINSTRUMENTED_CODE_DESCRIPTION, false, // catch all is not user - NULL_LONG); // catch all has no input size + NULL_LONG, + onCloseCallback); // catch all has no input size } } @@ -140,11 +158,11 @@ default QueryPerformanceNugget createForCatchAll( private final String description; private final boolean isUser; private final long inputSize; - + private final Predicate onCloseCallback; private final AuthContext authContext; private final String callerLine; - private final long startClockEpochNanos; + private long startClockEpochNanos = NULL_LONG; private long endClockEpochNanos = NULL_LONG; private volatile 
QueryState state; @@ -168,6 +186,8 @@ default QueryPerformanceNugget createForCatchAll( * @param description The operation description * @param isUser Whether this is a "user" nugget or one created by the system * @param inputSize The size of the input data + * @param onCloseCallback A callback that is invoked when the nugget is closed. It returns whether the nugget should + * be logged. */ protected QueryPerformanceNugget( final long evaluationNumber, @@ -175,9 +195,10 @@ protected QueryPerformanceNugget( final int operationNumber, final int parentOperationNumber, final int depth, - final String description, + @NotNull final String description, final boolean isUser, - final long inputSize) { + final long inputSize, + @NotNull final Predicate onCloseCallback) { startMemorySample = new RuntimeMemory.Sample(); endMemorySample = new RuntimeMemory.Sample(); this.evaluationNumber = evaluationNumber; @@ -193,6 +214,7 @@ protected QueryPerformanceNugget( } this.isUser = isUser; this.inputSize = inputSize; + this.onCloseCallback = onCloseCallback; authContext = ExecutionContext.getContext().getAuthContext(); callerLine = QueryPerformanceRecorder.getCallerLine(); @@ -221,43 +243,44 @@ private QueryPerformanceNugget() { description = null; isUser = false; inputSize = NULL_LONG; + onCloseCallback = null; authContext = null; callerLine = null; - startClockEpochNanos = NULL_LONG; - state = null; // This turns close into a no-op. shouldLogThisAndStackParents = false; } - public void done() { - done(QueryPerformanceRecorder.getInstance()); + public void markStartTime() { + if (startClockEpochNanos != NULL_LONG) { + throw new IllegalStateException("Nugget was already started"); + } + + startClockEpochNanos = DateTimeUtils.millisToNanos(System.currentTimeMillis()); } /** * Mark this nugget {@link QueryState#FINISHED} and notify the recorder. * - * @param recorder The recorder to notify * @return if the nugget passes logging thresholds. 
*/ - public boolean done(final QueryPerformanceRecorder recorder) { - return close(QueryState.FINISHED, recorder); + public boolean done() { + return close(QueryState.FINISHED); } /** - * AutoCloseable implementation - wraps the no-argument version of done() used by query code outside of the - * QueryPerformance(Recorder/Nugget), reporting successful completion to the thread-local QueryPerformanceRecorder - * instance. + * Mark this nugget {@link QueryState#FINISHED} and notify the recorder. Is an alias for {@link #done()}. + *

+ * {@link SafeCloseable} implementation for try-with-resources. */ @Override public void close() { - done(); + close(QueryState.FINISHED); } - @SuppressWarnings("WeakerAccess") - public boolean abort(final QueryPerformanceRecorder recorder) { - return close(QueryState.INTERRUPTED, recorder); + public boolean abort() { + return close(QueryState.INTERRUPTED); } /** @@ -266,10 +289,9 @@ public boolean abort(final QueryPerformanceRecorder recorder) { * @param closingState The current query state. If it is anything other than {@link QueryState#RUNNING} nothing will * happen and it will return false; * - * @param recorderToNotify The {@link QueryPerformanceRecorder} to notify this nugget is closing. * @return If the nugget passes criteria for logging. */ - private boolean close(final QueryState closingState, final QueryPerformanceRecorder recorderToNotify) { + private boolean close(final QueryState closingState) { if (state != QueryState.RUNNING) { return false; } @@ -279,6 +301,10 @@ private boolean close(final QueryState closingState, final QueryPerformanceRecor return false; } + if (startClockEpochNanos == NULL_LONG) { + throw new IllegalStateException("Nugget was never started"); + } + onBaseEntryEnd(); endClockEpochNanos = DateTimeUtils.millisToNanos(System.currentTimeMillis()); @@ -286,7 +312,7 @@ private boolean close(final QueryState closingState, final QueryPerformanceRecor runtimeMemory.read(endMemorySample); state = closingState; - return recorderToNotify.releaseNugget(this); + return onCloseCallback.test(this); } } @@ -336,10 +362,12 @@ public boolean isQueryLevel() { return operationNumber == NULL_INT; } + @SuppressWarnings("unused") public boolean isTopLevelQuery() { return isQueryLevel() && parentEvaluationNumber == NULL_LONG; } + @SuppressWarnings("unused") public boolean isTopLevelOperation() { // note that query level nuggets have depth == NULL_INT return depth == 0; @@ -396,7 +424,7 @@ public long getDiffFreeMemory() { } /** - * @return total 
(allocated high water mark) memory difference between time of completion and creation + * @return total (allocated high watermark) memory difference between time of completion and creation */ public long getDiffTotalMemory() { return endMemorySample.totalMemory - startMemorySample.totalMemory; diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java index 549dfb2c9a0..de8a00f6022 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java @@ -3,82 +3,30 @@ */ package io.deephaven.engine.table.impl.perf; -import io.deephaven.base.verify.Assert; -import io.deephaven.configuration.Configuration; -import io.deephaven.datastructures.util.CollectionUtil; -import io.deephaven.chunk.util.pools.ChunkPoolInstrumentation; -import io.deephaven.engine.updategraph.UpdateGraphLock; import io.deephaven.util.QueryConstants; +import io.deephaven.util.SafeCloseable; +import io.deephaven.util.annotations.FinalDefault; import io.deephaven.util.function.ThrowingRunnable; import io.deephaven.util.function.ThrowingSupplier; -import io.deephaven.util.profiling.ThreadProfiler; -import org.apache.commons.lang3.mutable.MutableLong; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import java.io.*; -import java.net.URL; import java.util.*; -import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; -import static io.deephaven.engine.table.impl.lang.QueryLanguageFunctionUtils.minus; -import static io.deephaven.engine.table.impl.lang.QueryLanguageFunctionUtils.plus; - /** * Query performance instrumentation tools. Manages a hierarchy of {@link QueryPerformanceNugget} instances. 
*/ -public abstract class QueryPerformanceRecorder { - - public static final String UNINSTRUMENTED_CODE_DESCRIPTION = "Uninstrumented code"; - - private static final String[] packageFilters; - - protected static final AtomicLong queriesProcessed = new AtomicLong(0); - - static final QueryPerformanceRecorder DUMMY_RECORDER = new DummyQueryPerformanceRecorder(); - - /** thread local is package private to enable query resumption */ - static final ThreadLocal theLocal = - ThreadLocal.withInitial(() -> DUMMY_RECORDER); - private static final ThreadLocal poolAllocatedBytes = ThreadLocal.withInitial( - () -> new MutableLong(ThreadProfiler.DEFAULT.memoryProfilingAvailable() ? 0L - : io.deephaven.util.QueryConstants.NULL_LONG)); - private static final ThreadLocal cachedCallsite = new ThreadLocal<>(); - - static { - final Configuration config = Configuration.getInstance(); - final Set filters = new HashSet<>(); - - final String propVal = config.getProperty("QueryPerformanceRecorder.packageFilter.internal"); - final URL path = QueryPerformanceRecorder.class.getResource("/" + propVal); - if (path == null) { - throw new RuntimeException("Can not locate package filter file " + propVal + " in classpath"); - } - - try (final BufferedReader reader = new BufferedReader(new InputStreamReader(path.openStream()))) { - String line; - while ((line = reader.readLine()) != null) { - if (!line.isEmpty()) { - filters.add(line); - } - } - } catch (IOException e) { - throw new UncheckedIOException("Error reading file " + propVal, e); - } +public interface QueryPerformanceRecorder { - packageFilters = filters.toArray(CollectionUtil.ZERO_LENGTH_STRING_ARRAY); - } + String UNINSTRUMENTED_CODE_DESCRIPTION = "Uninstrumented code"; - public static QueryPerformanceRecorder getInstance() { - return theLocal.get(); - } + ///////////////////////////////////// + // Core Engine Instrumentation API // + ///////////////////////////////////// - public static void resetInstance() { - // clear interrupted - 
because this is a good place to do it - no cancellation exception here though - // noinspection ResultOfMethodCallIgnored - Thread.interrupted(); - theLocal.remove(); + static QueryPerformanceRecorder getInstance() { + return QueryPerformanceRecorderState.getInstance(); } /** @@ -89,7 +37,10 @@ public static void resetInstance() { * @return A new QueryPerformanceNugget to encapsulate user query operations. {@link QueryPerformanceNugget#done()} * or {@link QueryPerformanceNugget#close()} must be called on the nugget. */ - public abstract QueryPerformanceNugget getNugget(@NotNull String name); + @FinalDefault + default QueryPerformanceNugget getNugget(@NotNull String name) { + return getNugget(name, QueryConstants.NULL_LONG); + } /** * Create a nugget at the top of the user stack. May return a {@link QueryPerformanceNugget#DUMMY_NUGGET} if no @@ -100,120 +51,176 @@ public static void resetInstance() { * @return A new QueryPerformanceNugget to encapsulate user query operations. {@link QueryPerformanceNugget#done()} * or {@link QueryPerformanceNugget#close()} must be called on the nugget. */ - public abstract QueryPerformanceNugget getNugget(@NotNull String name, long inputSize); + QueryPerformanceNugget getNugget(@NotNull String name, long inputSize); /** * This is the nugget enclosing the current operation. It may belong to the dummy recorder, or a real one. * * @return Either a "catch-all" nugget, or the top of the user nugget stack. */ - public abstract QueryPerformanceNugget getEnclosingNugget(); + QueryPerformanceNugget getEnclosingNugget(); + + + interface EntrySetter { + void set(long evaluationNumber, int operationNumber, boolean uninstrumented); + } /** - * Note: Do not call this directly - it's for nugget use only. Call {@link QueryPerformanceNugget#done()} or - * {@link QueryPerformanceNugget#close()} instead. + * Provide current query data via the setter. * - * @implNote This method is package private to limit visibility. 
- * @param nugget the nugget to be released - * @return If the nugget passes criteria for logging. + * @param setter a callback to receive query data */ - abstract boolean releaseNugget(QueryPerformanceNugget nugget); + void setQueryData(final EntrySetter setter); /** - * @return the query level performance data + * @return The current callsite. This is the last set callsite or the line number of the user's detected callsite. */ - public abstract QueryPerformanceNugget getQueryLevelPerformanceData(); + static String getCallerLine() { + return QueryPerformanceRecorderState.getCallerLine(); + } /** - * @return A list of loggable operation performance data. + * Attempt to set the thread local callsite so that invocations of {@link #getCallerLine()} will not spend time + * trying to recompute. + *

+ * This method returns a boolean if the value was successfully set. In the event this returns true, it's the + * responsibility of the caller to invoke {@link #clearCallsite()} when the operation is complete. + *

+ * It is good practice to do this with try{} finally{} block + * + *

+     * final boolean shouldClear = QueryPerformanceRecorder.setCallsite("CALLSITE");
+     * try {
+     *     // Do work
+     * } finally {
+     *     if (shouldClear) {
+     *         QueryPerformanceRecorder.clearCallsite();
+     *     }
+     * }
+     * 
+ * + * @param callsite The call site to use. + * + * @return true if successfully set, false otherwise */ - public abstract List getOperationLevelPerformanceData(); - - public interface EntrySetter { - void set(long evaluationNumber, int operationNumber, boolean uninstrumented); + static boolean setCallsite(@NotNull final String callsite) { + return QueryPerformanceRecorderState.setCallsite(callsite); } /** - * Provide current query data via the setter. + * Attempt to compute and set the thread local callsite so that invocations of {@link #getCallerLine()} will not + * spend time trying to recompute. + *

+ * Users should follow the best practice as described by {@link #setCallsite(String)} * - * @param setter a callback to receive query data + * @return true if the callsite was computed and set. */ - public abstract void setQueryData(final EntrySetter setter); + static boolean setCallsite() { + return QueryPerformanceRecorderState.setCallsite(); + } /** - * Install {@link QueryPerformanceRecorder#recordPoolAllocation(java.util.function.Supplier)} as the allocation - * recorder for {@link io.deephaven.chunk.util.pools.ChunkPool chunk pools}. + * Clear any previously set callsite. See {@link #setCallsite(String)} */ - public static void installPoolAllocationRecorder() { - ChunkPoolInstrumentation.setAllocationRecorder(QueryPerformanceRecorder::recordPoolAllocation); + static void clearCallsite() { + QueryPerformanceRecorderState.clearCallsite(); } + //////////////////////////////////////////// + // Server-Level Performance Recording API // + //////////////////////////////////////////// + /** - * Install this {@link QueryPerformanceRecorder} as the lock action recorder for {@link UpdateGraphLock}. + * Construct a QueryPerformanceRecorder for a top-level query. 
+ * + * @param description the query description + * @param nuggetFactory the nugget factory + * @return a new QueryPerformanceRecorder */ - public static void installUpdateGraphLockInstrumentation() { - UpdateGraphLock.installInstrumentation(new UpdateGraphLock.Instrumentation() { - - @Override - public void recordAction(@NotNull String description, @NotNull Runnable action) { - QueryPerformanceRecorder.withNugget(description, action); - } - - @Override - public void recordActionInterruptibly(@NotNull String description, - @NotNull ThrowingRunnable action) - throws InterruptedException { - QueryPerformanceRecorder.withNuggetThrowing(description, action); - } - }); + static QueryPerformanceRecorder newQuery( + @NotNull final String description, + @NotNull final QueryPerformanceNugget.Factory nuggetFactory) { + return new QueryPerformanceRecorderImpl(description, null, nuggetFactory); } /** - * Record a single-threaded operation's allocations as "pool" allocated memory attributable to the current thread. + * Construct a QueryPerformanceRecorder for a sub-level query. * - * @param operation The operation to record allocation for - * @return The result of the operation. 
+ * @param description the query description + * @param nuggetFactory the nugget factory + * @return a new QueryPerformanceRecorder */ - public static RESULT_TYPE recordPoolAllocation(@NotNull final Supplier operation) { - final long startThreadAllocatedBytes = ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(); - try { - return operation.get(); - } finally { - final long endThreadAllocatedBytes = ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(); - final MutableLong poolAllocatedBytesForCurrentThread = poolAllocatedBytes.get(); - poolAllocatedBytesForCurrentThread.setValue(plus(poolAllocatedBytesForCurrentThread.longValue(), - minus(endThreadAllocatedBytes, startThreadAllocatedBytes))); - } + static QueryPerformanceRecorder newSubQuery( + @NotNull final String description, + @Nullable final QueryPerformanceRecorder parent, + @NotNull final QueryPerformanceNugget.Factory nuggetFactory) { + return new QueryPerformanceRecorderImpl(description, parent, nuggetFactory); } /** - * Get the total bytes of pool-allocated memory attributed to this thread via - * {@link #recordPoolAllocation(Supplier)}. + * Starts a query. + *

+ * It is an error to start a query more than once or while another query is running on this thread. + */ + SafeCloseable startQuery(); + + /** + * End a query. + *

+ * It is an error to end a query not currently running on this thread. * - * @return The total bytes of pool-allocated memory attributed to this thread. + * @return whether the query should be logged */ - public static long getPoolAllocatedBytesForCurrentThread() { - return poolAllocatedBytes.get().longValue(); - } + boolean endQuery(); - public static String getCallerLine() { - String callerLineCandidate = cachedCallsite.get(); + /** + * Suspends a query. + *

+ * It is an error to suspend a query not currently running on this thread. + */ + void suspendQuery(); - if (callerLineCandidate == null) { - final StackTraceElement[] stack = (new Exception()).getStackTrace(); - for (int i = stack.length - 1; i > 0; i--) { - final String className = stack[i].getClassName(); + /** + * Resumes a suspend query. + *

+ * It is an error to resume a query while another query is running on this thread. + */ + SafeCloseable resumeQuery(); - if (className.startsWith("io.deephaven.engine.util.GroovyDeephavenSession")) { - callerLineCandidate = "Groovy Script"; - } else if (Arrays.stream(packageFilters).noneMatch(className::startsWith)) { - callerLineCandidate = stack[i].getFileName() + ":" + stack[i].getLineNumber(); - } - } - } + /** + * Abort a query. + */ + @SuppressWarnings("unused") + void abortQuery(); - return callerLineCandidate == null ? "Internal" : callerLineCandidate; - } + /** + * @return the query level performance data + */ + QueryPerformanceNugget getQueryLevelPerformanceData(); + + /** + * This getter should be called by exclusive owners of the recorder, and never concurrently with mutators. + * + * @return A list of loggable operation performance data. + */ + List getOperationLevelPerformanceData(); + + /** + * Accumulate the values from another recorder into this one. The provided recorder will not be mutated. 
+ * + * @param subQuery the recorder to accumulate into this + */ + void accumulate(@NotNull QueryPerformanceRecorder subQuery); + + /** + * @return whether a sub-query was ever accumulated into this recorder + */ + @SuppressWarnings("unused") + boolean hasSubQueries(); + + /////////////////////////////////////////////////// + // Convenience Methods for Recording Performance // + /////////////////////////////////////////////////// /** * Surround the given code with a Performance Nugget @@ -221,7 +228,7 @@ public static String getCallerLine() { * @param name the nugget name * @param r the stuff to run */ - public static void withNugget(final String name, final Runnable r) { + static void withNugget(final String name, final Runnable r) { final boolean needClear = setCallsite(); QueryPerformanceNugget nugget = null; @@ -240,7 +247,7 @@ public static void withNugget(final String name, final Runnable r) { * @param r the stuff to run * @return the result of the stuff to run */ - public static T withNugget(final String name, final Supplier r) { + static T withNugget(final String name, final Supplier r) { final boolean needClear = setCallsite(); QueryPerformanceNugget nugget = null; @@ -258,7 +265,7 @@ public static T withNugget(final String name, final Supplier r) { * @param r the stuff to run * @throws T exception of type T */ - public static void withNuggetThrowing( + static void withNuggetThrowing( final String name, final ThrowingRunnable r) throws T { final boolean needClear = setCallsite(); @@ -279,7 +286,7 @@ public static void withNuggetThrowing( * @return the result of the stuff to run * @throws ExceptionType exception of type ExceptionType */ - public static R withNuggetThrowing( + static R withNuggetThrowing( final String name, final ThrowingSupplier r) throws ExceptionType { final boolean needClear = setCallsite(); @@ -298,7 +305,7 @@ public static R withNuggetThrowing( * @param name the nugget name * @param r the stuff to run */ - public static void 
withNugget(final String name, final long inputSize, final Runnable r) { + static void withNugget(final String name, final long inputSize, final Runnable r) { final boolean needClear = setCallsite(); QueryPerformanceNugget nugget = null; try { @@ -316,7 +323,7 @@ public static void withNugget(final String name, final long inputSize, final Run * @param r the stuff to run * @return the result of the stuff to run */ - public static T withNugget(final String name, final long inputSize, final Supplier r) { + static T withNugget(final String name, final long inputSize, final Supplier r) { final boolean needClear = setCallsite(); QueryPerformanceNugget nugget = null; try { @@ -334,7 +341,7 @@ public static T withNugget(final String name, final long inputSize, final Su * @throws T exception of type T */ @SuppressWarnings("unused") - public static void withNuggetThrowing( + static void withNuggetThrowing( final String name, final long inputSize, final ThrowingRunnable r) throws T { @@ -357,7 +364,7 @@ public static void withNuggetThrowing( * @throws ExceptionType exception of type ExceptionType */ @SuppressWarnings("unused") - public static R withNuggetThrowing( + static R withNuggetThrowing( final String name, final long inputSize, final ThrowingSupplier r) throws ExceptionType { @@ -371,74 +378,6 @@ public static R withNuggetThrowing( } } - /** - *

- * Attempt to set the thread local callsite so that invocations of {@link #getCallerLine()} will not spend time - * trying to recompute. - *

- * - *

- * This method returns a boolean if the value was successfully set. In the event this returns true, it's the - * responsibility of the caller to invoke {@link #clearCallsite()} when the operation is complete. - *

- * - *

- * It is good practice to do this with try{} finally{} block - *

- * - *
-     * final boolean shouldClear = QueryPerformanceRecorder.setCallsite("CALLSITE");
-     * try {
-     *     // Do work
-     * } finally {
-     *     if (shouldClear) {
-     *         QueryPerformanceRecorder.clearCallsite();
-     *     }
-     * }
-     * 
- * - * @param callsite The call site to use. - * - * @return true if successfully set, false otherwise/ - */ - public static boolean setCallsite(String callsite) { - if (cachedCallsite.get() == null) { - cachedCallsite.set(callsite); - return true; - } - - return false; - } - - /** - *

- * Attempt to compute and set the thread local callsite so that invocations of {@link #getCallerLine()} will not - * spend time trying to recompute. - *

- * - *

- * Users should follow the best practice as described by {@link #setCallsite(String)} - *

- * - * @return true if the callsite was computed and set. - */ - public static boolean setCallsite() { - // This is very similar to the other getCallsite, but we don't want to invoke getCallerLine() unless we - // really need to. - if (cachedCallsite.get() == null) { - cachedCallsite.set(getCallerLine()); - return true; - } - - return false; - } - - /** - * Clear any previously set callsite. See {@link #setCallsite(String)} - */ - public static void clearCallsite() { - cachedCallsite.remove(); - } /** * Finish the nugget and clear the callsite if needed. @@ -455,47 +394,4 @@ private static void finishAndClear(@Nullable final QueryPerformanceNugget nugget clearCallsite(); } } - - /** - * Dummy recorder for use when no recorder is installed. - */ - private static class DummyQueryPerformanceRecorder extends QueryPerformanceRecorder { - - @Override - public QueryPerformanceNugget getNugget(@NotNull final String name) { - return QueryPerformanceNugget.DUMMY_NUGGET; - } - - @Override - public QueryPerformanceNugget getNugget(@NotNull final String name, long inputSize) { - return QueryPerformanceNugget.DUMMY_NUGGET; - } - - @Override - public QueryPerformanceNugget getEnclosingNugget() { - return QueryPerformanceNugget.DUMMY_NUGGET; - } - - @Override - boolean releaseNugget(@NotNull final QueryPerformanceNugget nugget) { - Assert.eqTrue(nugget == QueryPerformanceNugget.DUMMY_NUGGET, - "nugget == QueryPerformanceNugget.DUMMY_NUGGET"); - return false; - } - - @Override - public QueryPerformanceNugget getQueryLevelPerformanceData() { - return QueryPerformanceNugget.DUMMY_NUGGET; - } - - @Override - public List getOperationLevelPerformanceData() { - return Collections.emptyList(); - } - - @Override - public void setQueryData(EntrySetter setter) { - setter.set(QueryConstants.NULL_LONG, QueryConstants.NULL_INT, false); - } - } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java 
b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java index 3c70db1d0e3..312c1ef2ce4 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java @@ -5,10 +5,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.engine.exceptions.CancellationException; -import io.deephaven.engine.table.Table; -import io.deephaven.engine.util.TableTools; -import io.deephaven.util.QueryConstants; +import io.deephaven.util.SafeCloseable; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import java.util.*; @@ -18,57 +17,39 @@ * Many methods are synchronized to 1) support external abortion of query and 2) for scenarios where the query is * suspended and resumed on another thread. */ -public class QueryPerformanceRecorderImpl extends QueryPerformanceRecorder { +public class QueryPerformanceRecorderImpl implements QueryPerformanceRecorder { private final QueryPerformanceNugget queryNugget; private final QueryPerformanceNugget.Factory nuggetFactory; private final ArrayList operationNuggets = new ArrayList<>(); private final Deque userNuggetStack = new ArrayDeque<>(); - private QueryState state; + private QueryState state = QueryState.NOT_STARTED; + private volatile boolean hasSubQueries; private QueryPerformanceNugget catchAllNugget; /** - * Creates a new QueryPerformanceRecorderImpl and starts the query. + * Constructs a QueryPerformanceRecorderImpl. 
* * @param description a description for the query * @param nuggetFactory the factory to use for creating new nuggets + * @param parent the parent query if it exists */ - public QueryPerformanceRecorderImpl( + QueryPerformanceRecorderImpl( @NotNull final String description, + @Nullable final QueryPerformanceRecorder parent, @NotNull final QueryPerformanceNugget.Factory nuggetFactory) { - this(nuggetFactory.createForQuery(queriesProcessed.getAndIncrement(), description), nuggetFactory); - } - - /** - * Constructor for a sub-query. - * - * @param description a description for the query - * @param parent the parent query - * @param nuggetFactory the factory to use for creating new nuggets - */ - public QueryPerformanceRecorderImpl( - @NotNull final String description, - @NotNull final QueryPerformanceRecorderImpl parent, - @NotNull final QueryPerformanceNugget.Factory nuggetFactory) { - this(nuggetFactory.createForSubQuery( - parent.queryNugget, queriesProcessed.getAndIncrement(), description), nuggetFactory); - } - - /** - * @param queryNugget The newly constructed query level queryNugget. - * @param nuggetFactory The factory to use for creating new nuggets. 
- */ - private QueryPerformanceRecorderImpl( - @NotNull final QueryPerformanceNugget queryNugget, - @NotNull final QueryPerformanceNugget.Factory nuggetFactory) { - this.queryNugget = queryNugget; + if (parent == null) { + queryNugget = nuggetFactory.createForQuery( + QueryPerformanceRecorderState.QUERIES_PROCESSED.getAndIncrement(), description, + this::releaseNugget); + } else { + queryNugget = nuggetFactory.createForSubQuery( + parent.getQueryLevelPerformanceData(), + QueryPerformanceRecorderState.QUERIES_PROCESSED.getAndIncrement(), description, + this::releaseNugget); + } this.nuggetFactory = nuggetFactory; - state = QueryState.RUNNING; - startCatchAll(); - Assert.eqTrue(QueryPerformanceRecorder.getInstance() == DUMMY_RECORDER, - "QueryPerformanceRecorder.getInstance() == DUMMY_RECORDER"); - QueryPerformanceRecorder.theLocal.set(this); } /** @@ -83,10 +64,10 @@ public synchronized void abortQuery() { stopCatchAll(true); } else { while (!userNuggetStack.isEmpty()) { - userNuggetStack.peekLast().abort(this); + userNuggetStack.peekLast().abort(); } } - queryNugget.abort(this); + queryNugget.abort(); } /** @@ -98,21 +79,25 @@ public synchronized QueryState getState() { return state; } - /** - * End a query. - */ + @Override + public synchronized SafeCloseable startQuery() { + if (state != QueryState.NOT_STARTED) { + throw new IllegalStateException("Can't resume a query that has already started"); + } + queryNugget.markStartTime(); + return resumeInternal(); + } + + @Override public synchronized boolean endQuery() { if (state != QueryState.RUNNING) { + // We only allow the query to be RUNNING or INTERRUPTED when we end it; else we are in an illegal state. 
+ Assert.eq(state, "state", QueryState.INTERRUPTED, "QueryState.INTERRUPTED"); return false; } - state = QueryState.FINISHED; - Assert.neqNull(catchAllNugget, "catchAllNugget"); - Assert.neqNull(queryNugget, "queryNugget"); - stopCatchAll(false); - - // note that we do not resetInstance in here as that should be done from a finally-block - return queryNugget.done(this); + suspendInternal(); + return queryNugget.done(); } /** @@ -124,19 +109,22 @@ public synchronized void suspendQuery() { if (state != QueryState.RUNNING) { throw new IllegalStateException("Can't suspend a query that isn't running"); } + state = QueryState.SUSPENDED; + suspendInternal(); + queryNugget.onBaseEntryEnd(); + } - final QueryPerformanceRecorder threadLocalInstance = getInstance(); + private void suspendInternal() { + final QueryPerformanceRecorder threadLocalInstance = QueryPerformanceRecorderState.getInstance(); if (threadLocalInstance != this) { throw new IllegalStateException("Can't suspend a query that doesn't belong to this thread"); } - state = QueryState.SUSPENDED; Assert.neqNull(catchAllNugget, "catchAllNugget"); stopCatchAll(false); - queryNugget.onBaseEntryEnd(); // uninstall this instance from the thread local - resetInstance(); + QueryPerformanceRecorderState.resetInstance(); } /** @@ -146,34 +134,39 @@ public synchronized void suspendQuery() { * * @return this */ - public synchronized QueryPerformanceRecorderImpl resumeQuery() { + public synchronized SafeCloseable resumeQuery() { if (state != QueryState.SUSPENDED) { throw new IllegalStateException("Can't resume a query that isn't suspended"); } - final QueryPerformanceRecorder threadLocalInstance = getInstance(); - if (threadLocalInstance != DUMMY_RECORDER) { + return resumeInternal(); + } + + private SafeCloseable resumeInternal() { + final QueryPerformanceRecorder threadLocalInstance = QueryPerformanceRecorderState.getInstance(); + if (threadLocalInstance != QueryPerformanceRecorderState.DUMMY_RECORDER) { throw new 
IllegalStateException("Can't resume a query while another query is in operation"); } - QueryPerformanceRecorder.theLocal.set(this); + QueryPerformanceRecorderState.THE_LOCAL.set(this); queryNugget.onBaseEntryStart(); state = QueryState.RUNNING; Assert.eqNull(catchAllNugget, "catchAllNugget"); startCatchAll(); - return this; + + return QueryPerformanceRecorderState::resetInstance; } private void startCatchAll() { - catchAllNugget = nuggetFactory.createForCatchAll(queryNugget, operationNuggets.size()); + catchAllNugget = nuggetFactory.createForCatchAll(queryNugget, operationNuggets.size(), this::releaseNugget); } private void stopCatchAll(final boolean abort) { final boolean shouldLog; if (abort) { - shouldLog = catchAllNugget.abort(this); + shouldLog = catchAllNugget.abort(); } else { - shouldLog = catchAllNugget.done(this); + shouldLog = catchAllNugget.done(); } if (shouldLog) { Assert.eq(operationNuggets.size(), "operationsNuggets.size()", @@ -183,14 +176,6 @@ private void stopCatchAll(final boolean abort) { catchAllNugget = null; } - /** - * @param name the nugget name - * @return A new QueryPerformanceNugget to encapsulate user query operations. done() must be called on the nugget. - */ - public QueryPerformanceNugget getNugget(@NotNull final String name) { - return getNugget(name, QueryConstants.NULL_LONG); - } - /** * @param name the nugget name * @param inputSize the nugget's input size @@ -204,22 +189,29 @@ public synchronized QueryPerformanceNugget getNugget(@NotNull final String name, if (catchAllNugget != null) { stopCatchAll(false); } - final QueryPerformanceNugget parent = userNuggetStack.isEmpty() ? 
queryNugget : userNuggetStack.getLast(); + + final QueryPerformanceNugget parent; + if (userNuggetStack.isEmpty()) { + parent = queryNugget; + } else { + parent = userNuggetStack.peekLast(); + parent.onBaseEntryEnd(); + } + final QueryPerformanceNugget nugget = nuggetFactory.createForOperation( - parent, operationNuggets.size(), name, inputSize); + parent, operationNuggets.size(), name, inputSize, this::releaseNugget); operationNuggets.add(nugget); userNuggetStack.addLast(nugget); return nugget; } /** - * Note: Do not call this directly - it's for nugget use only. Call {@link QueryPerformanceNugget#done()} or - * {@link QueryPerformanceNugget#close()} instead. + * This is our onCloseCallback from the nugget. * * @param nugget the nugget to be released * @return If the nugget passes criteria for logging. */ - synchronized boolean releaseNugget(@NotNull final QueryPerformanceNugget nugget) { + private synchronized boolean releaseNugget(@NotNull final QueryPerformanceNugget nugget) { boolean shouldLog = nugget.shouldLogNugget(nugget == catchAllNugget); if (!nugget.isUser()) { return shouldLog; @@ -234,17 +226,20 @@ synchronized boolean releaseNugget(@NotNull final QueryPerformanceNugget nugget) ") - did you follow the correct try/finally pattern?"); } - shouldLog |= removed.shouldLogThisAndStackParents(); + // accumulate into the parent and resume it + if (!userNuggetStack.isEmpty()) { + final QueryPerformanceNugget parent = userNuggetStack.getLast(); + parent.accumulate(nugget); - if (shouldLog) { - // It is entirely possible, with parallelization, that this nugget should be logged while the outer nugget - // has a wall clock time less than the threshold for logging. If we ever want to log this nugget, we must - // log - // all of its parents as well regardless of the shouldLogNugget call result. 
- if (!userNuggetStack.isEmpty()) { - userNuggetStack.getLast().setShouldLogThisAndStackParents(); + if (removed.shouldLogThisAndStackParents()) { + parent.setShouldLogThisAndStackParents(); } - } else { + + // resume the parent + parent.onBaseEntryStart(); + } + + if (!shouldLog) { // If we have filtered this nugget, by our filter design we will also have filtered any nuggets it encloses. // This means it *must* be the last entry in operationNuggets, so we can safely remove it in O(1). final QueryPerformanceNugget lastNugget = operationNuggets.remove(operationNuggets.size() - 1); @@ -295,40 +290,23 @@ public void setQueryData(final EntrySetter setter) { } @Override - public synchronized QueryPerformanceNugget getQueryLevelPerformanceData() { + public QueryPerformanceNugget getQueryLevelPerformanceData() { return queryNugget; } @Override - public synchronized List getOperationLevelPerformanceData() { + public List getOperationLevelPerformanceData() { return operationNuggets; } - public void accumulate(@NotNull final QueryPerformanceRecorderImpl subQuery) { - queryNugget.accumulate(subQuery.queryNugget); + @Override + public void accumulate(@NotNull final QueryPerformanceRecorder subQuery) { + hasSubQueries = true; + queryNugget.accumulate(subQuery.getQueryLevelPerformanceData()); } - @SuppressWarnings("unused") - public synchronized Table getTimingResultsAsTable() { - final int count = operationNuggets.size(); - final String[] names = new String[count]; - final Long[] timeNanos = new Long[count]; - final String[] callerLine = new String[count]; - final Boolean[] isTopLevel = new Boolean[count]; - final Boolean[] isCompileTime = new Boolean[count]; - - for (int i = 0; i < operationNuggets.size(); i++) { - timeNanos[i] = operationNuggets.get(i).getUsageNanos(); - names[i] = operationNuggets.get(i).getName(); - callerLine[i] = operationNuggets.get(i).getCallerLine(); - isTopLevel[i] = operationNuggets.get(i).isTopLevelOperation(); - isCompileTime[i] = 
operationNuggets.get(i).getName().startsWith("Compile:"); - } - return TableTools.newTable( - TableTools.col("names", names), - TableTools.col("line", callerLine), - TableTools.col("timeNanos", timeNanos), - TableTools.col("isTopLevel", isTopLevel), - TableTools.col("isCompileTime", isCompileTime)); + @Override + public boolean hasSubQueries() { + return hasSubQueries; } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java new file mode 100644 index 00000000000..9a79f04c7ee --- /dev/null +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java @@ -0,0 +1,263 @@ +/** + * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending + */ +package io.deephaven.engine.table.impl.perf; + +import io.deephaven.chunk.util.pools.ChunkPoolInstrumentation; +import io.deephaven.configuration.Configuration; +import io.deephaven.datastructures.util.CollectionUtil; +import io.deephaven.engine.updategraph.UpdateGraphLock; +import io.deephaven.util.QueryConstants; +import io.deephaven.util.SafeCloseable; +import io.deephaven.util.function.ThrowingRunnable; +import io.deephaven.util.profiling.ThreadProfiler; +import org.apache.commons.lang3.mutable.MutableLong; +import org.jetbrains.annotations.NotNull; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.UncheckedIOException; +import java.net.URL; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Supplier; + +import static io.deephaven.engine.table.impl.lang.QueryLanguageFunctionUtils.minus; +import static io.deephaven.engine.table.impl.lang.QueryLanguageFunctionUtils.plus; + +public abstract class 
QueryPerformanceRecorderState { + + static final QueryPerformanceRecorder DUMMY_RECORDER = new DummyQueryPerformanceRecorder(); + static final AtomicLong QUERIES_PROCESSED = new AtomicLong(0); + static final ThreadLocal THE_LOCAL = ThreadLocal.withInitial(() -> DUMMY_RECORDER); + + private static final String[] PACKAGE_FILTERS; + private static final ThreadLocal CACHED_CALLSITE = new ThreadLocal<>(); + private static final ThreadLocal POOL_ALLOCATED_BYTES = ThreadLocal.withInitial( + () -> new MutableLong(ThreadProfiler.DEFAULT.memoryProfilingAvailable() ? 0L + : io.deephaven.util.QueryConstants.NULL_LONG)); + + static { + // initialize the packages to skip when determining the callsite + + final Configuration config = Configuration.getInstance(); + final Set filters = new HashSet<>(); + + final String propVal = config.getProperty("QueryPerformanceRecorder.packageFilter.internal"); + final URL path = QueryPerformanceRecorder.class.getResource("/" + propVal); + if (path == null) { + throw new RuntimeException("Can not locate package filter file " + propVal + " in classpath"); + } + + try (final BufferedReader reader = new BufferedReader(new InputStreamReader(path.openStream()))) { + String line; + while ((line = reader.readLine()) != null) { + if (!line.isEmpty()) { + filters.add(line); + } + } + } catch (IOException e) { + throw new UncheckedIOException("Error reading file " + propVal, e); + } + + PACKAGE_FILTERS = filters.toArray(CollectionUtil.ZERO_LENGTH_STRING_ARRAY); + } + + private QueryPerformanceRecorderState() { + throw new UnsupportedOperationException("static use only"); + } + + public static QueryPerformanceRecorder getInstance() { + return THE_LOCAL.get(); + } + + static void resetInstance() { + // clear interrupted - because this is a good place to do it - no cancellation exception here though + // noinspection ResultOfMethodCallIgnored + Thread.interrupted(); + THE_LOCAL.remove(); + } + + + /** + * Install {@link 
QueryPerformanceRecorderState#recordPoolAllocation(java.util.function.Supplier)} as the allocation + * recorder for {@link io.deephaven.chunk.util.pools.ChunkPool chunk pools}. + */ + public static void installPoolAllocationRecorder() { + ChunkPoolInstrumentation.setAllocationRecorder(QueryPerformanceRecorderState::recordPoolAllocation); + } + + /** + * Install this {@link QueryPerformanceRecorder} as the lock action recorder for {@link UpdateGraphLock}. + */ + public static void installUpdateGraphLockInstrumentation() { + UpdateGraphLock.installInstrumentation(new UpdateGraphLock.Instrumentation() { + + @Override + public void recordAction(@NotNull final String description, @NotNull final Runnable action) { + QueryPerformanceRecorder.withNugget(description, action); + } + + @Override + public void recordActionInterruptibly( + @NotNull final String description, + @NotNull final ThrowingRunnable action) throws InterruptedException { + QueryPerformanceRecorder.withNuggetThrowing(description, action); + } + }); + } + + /** + * Record a single-threaded operation's allocations as "pool" allocated memory attributable to the current thread. + * + * @param operation The operation to record allocation for + * @return The result of the operation. + */ + private static RESULT_TYPE recordPoolAllocation(@NotNull final Supplier operation) { + final long startThreadAllocatedBytes = ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(); + try { + return operation.get(); + } finally { + final long endThreadAllocatedBytes = ThreadProfiler.DEFAULT.getCurrentThreadAllocatedBytes(); + final MutableLong poolAllocatedBytesForCurrentThread = POOL_ALLOCATED_BYTES.get(); + poolAllocatedBytesForCurrentThread.setValue(plus(poolAllocatedBytesForCurrentThread.longValue(), + minus(endThreadAllocatedBytes, startThreadAllocatedBytes))); + } + } + + /** + * Get the total bytes of pool-allocated memory attributed to this thread via + * {@link #recordPoolAllocation(Supplier)}. 
+ * + * @return The total bytes of pool-allocated memory attributed to this thread. + */ + static long getPoolAllocatedBytesForCurrentThread() { + return POOL_ALLOCATED_BYTES.get().longValue(); + } + + /** + * See {@link QueryPerformanceRecorder#getCallerLine()}. + */ + static String getCallerLine() { + String callerLineCandidate = CACHED_CALLSITE.get(); + + if (callerLineCandidate == null) { + final StackTraceElement[] stack = (new Exception()).getStackTrace(); + for (int i = stack.length - 1; i > 0; i--) { + final String className = stack[i].getClassName(); + + if (className.startsWith("io.deephaven.engine.util.GroovyDeephavenSession")) { + callerLineCandidate = "Groovy Script"; + } else if (Arrays.stream(PACKAGE_FILTERS).noneMatch(className::startsWith)) { + callerLineCandidate = stack[i].getFileName() + ":" + stack[i].getLineNumber(); + } + } + } + + return callerLineCandidate == null ? "Internal" : callerLineCandidate; + } + + /** + * See {@link QueryPerformanceRecorder#setCallsite(String)}. + */ + static boolean setCallsite(String callsite) { + if (CACHED_CALLSITE.get() == null) { + CACHED_CALLSITE.set(callsite); + return true; + } + + return false; + } + + /** + * See {@link QueryPerformanceRecorder#setCallsite()}. + */ + static boolean setCallsite() { + // This is very similar to the other getCallsite, but we don't want to invoke getCallerLine() unless we + // really need to. + if (CACHED_CALLSITE.get() == null) { + CACHED_CALLSITE.set(getCallerLine()); + return true; + } + + return false; + } + + /** + * Clear any previously set callsite. See {@link #setCallsite(String)} + */ + public static void clearCallsite() { + CACHED_CALLSITE.remove(); + } + + /** + * Dummy recorder for use when no recorder is installed. 
+ */ + private static class DummyQueryPerformanceRecorder implements QueryPerformanceRecorder { + + @Override + public QueryPerformanceNugget getNugget(@NotNull final String name, long inputSize) { + return QueryPerformanceNugget.DUMMY_NUGGET; + } + + @Override + public QueryPerformanceNugget getEnclosingNugget() { + return QueryPerformanceNugget.DUMMY_NUGGET; + } + + @Override + public void setQueryData(EntrySetter setter) { + setter.set(QueryConstants.NULL_LONG, QueryConstants.NULL_INT, false); + } + + @Override + public QueryPerformanceNugget getQueryLevelPerformanceData() { + return QueryPerformanceNugget.DUMMY_NUGGET; + } + + @Override + public List getOperationLevelPerformanceData() { + return Collections.emptyList(); + } + + @Override + public void accumulate(@NotNull QueryPerformanceRecorder subQuery) { + // no-op + } + + @Override + public boolean hasSubQueries() { + return false; + } + + @Override + public SafeCloseable startQuery() { + throw new UnsupportedOperationException("Dummy recorder does not support startQuery()"); + } + + @Override + public boolean endQuery() { + throw new UnsupportedOperationException("Dummy recorder does not support endQuery()"); + } + + @Override + public void suspendQuery() { + throw new UnsupportedOperationException("Dummy recorder does not support suspendQuery()"); + } + + @Override + public SafeCloseable resumeQuery() { + throw new UnsupportedOperationException("Dummy recorder does not support resumeQuery()"); + } + + @Override + public void abortQuery() { + throw new UnsupportedOperationException("Dummy recorder does not support abortQuery()"); + } + } +} diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryState.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryState.java index ebf3df1ab58..8585b970436 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryState.java +++ 
b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryState.java @@ -5,5 +5,5 @@ public enum QueryState { - RUNNING, FINISHED, SUSPENDED, INTERRUPTED + NOT_STARTED, RUNNING, FINISHED, SUSPENDED, INTERRUPTED } diff --git a/py/server/deephaven/perfmon.py b/py/server/deephaven/perfmon.py index ccf602a35b4..995c77994a6 100644 --- a/py/server/deephaven/perfmon.py +++ b/py/server/deephaven/perfmon.py @@ -95,6 +95,38 @@ def query_performance_log() -> Table: except Exception as e: raise DHError(e, "failed to obtain the query performance log table.") from e +def query_operation_performance_log_as_tree_table() -> TreeTable: + """ Returns a tree table with Deephaven performance data for individual subqueries. + + Returns: + a TreeTable + + Raises: + DHError + """ + try: + return TreeTable(j_tree_table=_JPerformanceQueries.queryOperationPerformanceAsTreeTable(), + id_col = 'EvaluationNumber', parent_col = 'ParentEvaluationNumber') + except Exception as e: + raise DHError(e, "failed to obtain the query operation performance log as tree table.") from e + + +def query_performance_log_as_tree_table() -> TreeTable: + """ Returns a tree table with Deephaven query performance data. Performance data for individual sub-operations as + a tree table is available from calling `query_operation_performance_log_as_tree_table`. + + Returns: + a TreeTable + + Raises: + DHError + """ + try: + return TreeTable(j_tree_table=_JPerformanceQueries.queryPerformanceAsTreeTable(), + id_col = 'EvaluationNumber', parent_col = 'ParentEvaluationNumber') + except Exception as e: + raise DHError(e, "failed to obtain the query performance log as tree table.") from e + def update_performance_log() -> Table: """ Returns a table with Deephaven update performance data. 
diff --git a/server/src/main/java/io/deephaven/server/runner/DeephavenApiServer.java b/server/src/main/java/io/deephaven/server/runner/DeephavenApiServer.java index 1288790f503..348663abea8 100644 --- a/server/src/main/java/io/deephaven/server/runner/DeephavenApiServer.java +++ b/server/src/main/java/io/deephaven/server/runner/DeephavenApiServer.java @@ -8,7 +8,7 @@ import io.deephaven.engine.context.ExecutionContext; import io.deephaven.engine.liveness.LivenessScopeStack; import io.deephaven.engine.table.impl.OperationInitializationThreadPool; -import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorderState; import io.deephaven.engine.table.impl.util.AsyncErrorLogger; import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.engine.table.impl.util.ServerStateTracker; @@ -153,8 +153,8 @@ public DeephavenApiServer run() throws IOException, ClassNotFoundException, Time EngineMetrics.maybeStartStatsCollection(); log.info().append("Starting Performance Trackers...").endl(); - QueryPerformanceRecorder.installPoolAllocationRecorder(); - QueryPerformanceRecorder.installUpdateGraphLockInstrumentation(); + QueryPerformanceRecorderState.installPoolAllocationRecorder(); + QueryPerformanceRecorderState.installUpdateGraphLockInstrumentation(); ServerStateTracker.start(); AsyncErrorLogger.init(); diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index 49454d099cb..d5dafd395ca 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -539,7 +539,7 @@ public final static class ExportObject extends LivenessArtifact { /** if true the queryPerformanceRecorder belongs to a batch; otherwise if it exists it belong to the export */ private boolean qprIsForBatch; /** used to keep track of 
performance details either for aggregation or for the async ticket resolution */ - private QueryPerformanceRecorderImpl queryPerformanceRecorder; + private QueryPerformanceRecorder queryPerformanceRecorder; /** final result of export */ private volatile T result; @@ -631,7 +631,7 @@ private boolean isNonExport() { } private synchronized void setQueryPerformanceRecorder( - final QueryPerformanceRecorderImpl queryPerformanceRecorder, + final QueryPerformanceRecorder queryPerformanceRecorder, final boolean qprIsForBatch) { if (this.queryPerformanceRecorder != null) { throw new IllegalStateException( @@ -683,6 +683,11 @@ private synchronized void setWork( throw new IllegalStateException("export object can only be defined once"); } hasHadWorkSet = true; + + if (queryPerformanceRecorder != null && !qprIsForBatch) { + // transfer ownership of the qpr to the export before it can be resumed by the scheduler + queryPerformanceRecorder.suspendQuery(); + } this.requiresSerialQueue = requiresSerialQueue; if (isExportStateTerminal(state)) { @@ -980,40 +985,40 @@ private void doExport() { T localResult = null; boolean shouldLog = false; - QueryPerformanceRecorderImpl exportRecorder = null; - QueryProcessingResults queryProcessingResults = null; + final QueryPerformanceRecorder exportRecorder; + final QueryProcessingResults queryProcessingResults; try (final SafeCloseable ignored1 = session.executionContext.open(); final SafeCloseable ignored2 = LivenessScopeStack.open()) { - if (queryPerformanceRecorder != null && !qprIsForBatch) { - exportRecorder = queryPerformanceRecorder.resumeQuery(); + final boolean isResume = queryPerformanceRecorder != null && !qprIsForBatch; + if (isResume) { + exportRecorder = queryPerformanceRecorder; } else if (queryPerformanceRecorder != null) { // this is a sub-query; no need to re-log the session id - exportRecorder = new QueryPerformanceRecorderImpl( + exportRecorder = QueryPerformanceRecorder.newSubQuery( "ExportObject#doWork(exportId=" + 
logIdentity + ")", queryPerformanceRecorder, QueryPerformanceNugget.DEFAULT_FACTORY); } else { - exportRecorder = new QueryPerformanceRecorderImpl( + exportRecorder = QueryPerformanceRecorder.newQuery( "ExportObject#doWork(session=" + session.sessionId + ",exportId=" + logIdentity + ")", QueryPerformanceNugget.DEFAULT_FACTORY); } queryProcessingResults = new QueryProcessingResults(exportRecorder); - try { - localResult = capturedExport.call(); - } catch (final Exception err) { - caughtException = err; - } finally { + try (final SafeCloseable ignored3 = isResume + ? exportRecorder.resumeQuery() + : exportRecorder.startQuery()) { try { - shouldLog = exportRecorder.endQuery(); + localResult = capturedExport.call(); } catch (final Exception err) { - // end query will throw if the export runner left the QPR in a bad state - if (caughtException == null) { - caughtException = err; - } - } finally { - QueryPerformanceRecorder.resetInstance(); + caughtException = err; + } + shouldLog = exportRecorder.endQuery(); + } catch (final Exception err) { + // end query will throw if the export runner left the QPR in a bad state + if (caughtException == null) { + caughtException = err; } } @@ -1340,7 +1345,7 @@ public class ExportBuilder { * @return this builder */ public ExportBuilder queryPerformanceRecorder( - @NotNull final QueryPerformanceRecorderImpl queryPerformanceRecorder, + @NotNull final QueryPerformanceRecorder queryPerformanceRecorder, final boolean qprIsForBatch) { export.setQueryPerformanceRecorder(queryPerformanceRecorder, qprIsForBatch); return this; @@ -1494,10 +1499,6 @@ public ExportBuilder onSuccess(final Runnable successHandler) { * @return the submitted export object */ public ExportObject submit(final Callable exportMain) { - if (export.queryPerformanceRecorder != null && !export.qprIsForBatch) { - // transfer ownership of the qpr to the export before it can be resumed by the scheduler - export.queryPerformanceRecorder.suspendQuery(); - } 
export.setWork(exportMain, errorHandler, successHandler, requiresSerialQueue); return export; } diff --git a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java index e7784812e9e..2e264b0a7a1 100644 --- a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java @@ -11,6 +11,7 @@ import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorderImpl; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorderState; import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.extensions.barrage.util.ExportUtil; @@ -513,11 +514,11 @@ public void batch( } final SessionState session = sessionService.getCurrentSession(); - final QueryPerformanceRecorderImpl queryPerformanceRecorder = new QueryPerformanceRecorderImpl( + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( "TableService#batch(session=" + session.getSessionId() + ")", QueryPerformanceNugget.DEFAULT_FACTORY); - try { + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { // step 1: initialize exports final List> exportBuilders = request.getOpsList().stream() .map(op -> createBatchExportBuilder(session, queryPerformanceRecorder, op)) @@ -540,8 +541,7 @@ public void batch( } Assert.geqZero(numRemaining, "numRemaining"); - try { - queryPerformanceRecorder.resumeQuery(); + try (final SafeCloseable ignored2 = queryPerformanceRecorder.resumeQuery()) { final QueryProcessingResults results = new QueryProcessingResults(queryPerformanceRecorder); final StatusRuntimeException failure = firstFailure.get(); if (failure != null) { 
@@ -553,8 +553,6 @@ public void batch( if (queryPerformanceRecorder.endQuery()) { EngineMetrics.getInstance().logQueryProcessingResults(results); } - } finally { - QueryPerformanceRecorder.resetInstance(); } }; @@ -596,8 +594,6 @@ public void batch( // now that we've submitted everything we'll suspend the query and release our refcount queryPerformanceRecorder.suspendQuery(); onOneResolved.run(); - } finally { - QueryPerformanceRecorder.resetInstance(); } } @@ -667,9 +663,10 @@ private void oneShotOperationWrapper( final String description = "TableService#" + op.name() + "(session=" + session.getSessionId() + ", resultId=" + ticketRouter.getLogNameFor(resultId, "TableServiceGrpcImpl") + ")"; - final QueryPerformanceRecorderImpl queryPerformanceRecorder = new QueryPerformanceRecorderImpl( + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try { + + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { operation.validateRequest(request); final List> dependencies = operation.getTableReferences(request).stream() @@ -688,8 +685,6 @@ private void oneShotOperationWrapper( safelyComplete(responseObserver, response); return result; }); - } finally { - QueryPerformanceRecorder.resetInstance(); } } @@ -732,7 +727,7 @@ private SessionState.ExportObject
resolveBatchReference( private BatchExportBuilder createBatchExportBuilder( @NotNull final SessionState session, - @NotNull final QueryPerformanceRecorderImpl queryPerformanceRecorder, + @NotNull final QueryPerformanceRecorder queryPerformanceRecorder, final BatchTableRequest.Operation op) { final GrpcTableOperation operation = getOp(op.getOpCase()); final T request = operation.getRequestFromOperation(op); From e33721044df33db4932d6b13583902ddee7e7445 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Mon, 13 Nov 2023 17:01:14 -0700 Subject: [PATCH 17/31] The Fixes --- .../impl/perf/QueryPerformanceNugget.java | 23 +++++++++++++++---- .../perf/QueryPerformanceRecorderImpl.java | 4 ++++ py/server/deephaven/perfmon.py | 2 +- 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java index 160fa90d1d2..3d6928a7adc 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java @@ -222,10 +222,7 @@ protected QueryPerformanceNugget( final RuntimeMemory runtimeMemory = RuntimeMemory.getInstance(); runtimeMemory.read(startMemorySample); - startClockEpochNanos = DateTimeUtils.millisToNanos(System.currentTimeMillis()); - onBaseEntryStart(); - - state = QueryState.RUNNING; + state = QueryState.NOT_STARTED; shouldLogThisAndStackParents = false; } @@ -260,6 +257,24 @@ public void markStartTime() { startClockEpochNanos = DateTimeUtils.millisToNanos(System.currentTimeMillis()); } + @Override + public synchronized void onBaseEntryStart() { + super.onBaseEntryStart(); + if (state == QueryState.RUNNING) { + throw new IllegalStateException("Nugget was already started"); + } + state = QueryState.RUNNING; + } + + @Override + public synchronized void 
onBaseEntryEnd() { + if (state != QueryState.RUNNING) { + throw new IllegalStateException("Nugget isn't running"); + } + state = QueryState.SUSPENDED; + super.onBaseEntryEnd(); + } + /** * Mark this nugget {@link QueryState#FINISHED} and notify the recorder. * diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java index 312c1ef2ce4..37a754e656f 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java @@ -159,6 +159,8 @@ private SafeCloseable resumeInternal() { private void startCatchAll() { catchAllNugget = nuggetFactory.createForCatchAll(queryNugget, operationNuggets.size(), this::releaseNugget); + catchAllNugget.markStartTime(); + catchAllNugget.onBaseEntryStart(); } private void stopCatchAll(final boolean abort) { @@ -200,6 +202,8 @@ public synchronized QueryPerformanceNugget getNugget(@NotNull final String name, final QueryPerformanceNugget nugget = nuggetFactory.createForOperation( parent, operationNuggets.size(), name, inputSize, this::releaseNugget); + nugget.markStartTime(); + nugget.onBaseEntryStart(); operationNuggets.add(nugget); userNuggetStack.addLast(nugget); return nugget; diff --git a/py/server/deephaven/perfmon.py b/py/server/deephaven/perfmon.py index 995c77994a6..213ee349c56 100644 --- a/py/server/deephaven/perfmon.py +++ b/py/server/deephaven/perfmon.py @@ -11,7 +11,7 @@ from deephaven import DHError from deephaven.jcompat import j_map_to_dict -from deephaven.table import Table +from deephaven.table import Table, TreeTable _JPerformanceQueries = jpy.get_type("io.deephaven.engine.table.impl.util.PerformanceQueries") _JMetricsManager = jpy.get_type("io.deephaven.util.metrics.MetricsManager") From 78b6b447535dac0c58d57917106f11bf49b4b7b3 Mon 
Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Mon, 13 Nov 2023 18:34:23 -0700 Subject: [PATCH 18/31] Personal Review --- .../io/deephaven/engine/table/impl/QueryTable.java | 13 +++---------- .../table/impl/perf/QueryPerformanceNugget.java | 2 +- .../impl/perf/QueryPerformanceRecorderImpl.java | 9 ++++++--- .../engine/table/impl/updateby/UpdateBy.java | 6 +----- py/server/deephaven/perfmon.py | 4 ++-- .../io/deephaven/server/session/SessionState.java | 9 ++++----- .../server/table/ops/TableServiceGrpcImpl.java | 2 -- 7 files changed, 17 insertions(+), 28 deletions(-) diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java index 8d78c5ef5d4..187442fff12 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java @@ -1268,11 +1268,8 @@ void handleUncaughtException(Exception throwable) { final BasePerformanceEntry basePerformanceEntry = initialFilterExecution.getBasePerformanceEntry(); if (basePerformanceEntry != null) { - final QueryPerformanceNugget outerNugget = - QueryPerformanceRecorder.getInstance().getEnclosingNugget(); - if (outerNugget != null) { - outerNugget.accumulate(basePerformanceEntry); - } + QueryPerformanceRecorder.getInstance().getEnclosingNugget() + .accumulate(basePerformanceEntry); } } currentMapping.initializePreviousValue(); @@ -1516,11 +1513,7 @@ this, mode, columns, rowSet, getModifiedColumnSetForUpdates(), publishTheseSourc } finally { final BasePerformanceEntry baseEntry = jobScheduler.getAccumulatedPerformance(); if (baseEntry != null) { - final QueryPerformanceNugget outerNugget = - QueryPerformanceRecorder.getInstance().getEnclosingNugget(); - if (outerNugget != null) { - outerNugget.accumulate(baseEntry); - } + QueryPerformanceRecorder.getInstance().getEnclosingNugget().accumulate(baseEntry); } } } diff --git 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java index 3d6928a7adc..e7151ce9eee 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java @@ -251,7 +251,7 @@ private QueryPerformanceNugget() { public void markStartTime() { if (startClockEpochNanos != NULL_LONG) { - throw new IllegalStateException("Nugget was already started"); + throw new IllegalStateException("Nugget start time already set"); } startClockEpochNanos = DateTimeUtils.millisToNanos(System.currentTimeMillis()); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java index 37a754e656f..fb203b83188 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java @@ -265,6 +265,10 @@ private synchronized boolean releaseNugget(@NotNull final QueryPerformanceNugget @Override public synchronized QueryPerformanceNugget getEnclosingNugget() { + if (userNuggetStack.isEmpty()) { + Assert.neqNull(catchAllNugget, "catchAllNugget"); + return catchAllNugget; + } return userNuggetStack.peekLast(); } @@ -284,9 +288,8 @@ public void setQueryData(final EntrySetter setter) { userNuggetStack.getLast().setShouldLogThisAndStackParents(); } else { uninstrumented = true; - if (catchAllNugget != null) { - catchAllNugget.setShouldLogThisAndStackParents(); - } + Assert.neqNull(catchAllNugget, "catchAllNugget"); + catchAllNugget.setShouldLogThisAndStackParents(); } } } diff --git 
a/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java index a9f1a4561ae..6e18e24dd30 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java @@ -907,11 +907,7 @@ private void cleanUpAndNotify(final Runnable onCleanupComplete) { final BasePerformanceEntry accumulated = jobScheduler.getAccumulatedPerformance(); if (accumulated != null) { if (initialStep) { - final QueryPerformanceNugget outerNugget = - QueryPerformanceRecorder.getInstance().getEnclosingNugget(); - if (outerNugget != null) { - outerNugget.accumulate(accumulated); - } + QueryPerformanceRecorder.getInstance().getEnclosingNugget().accumulate(accumulated); } else { source.getUpdateGraph().addNotification(new TerminalNotification() { @Override diff --git a/py/server/deephaven/perfmon.py b/py/server/deephaven/perfmon.py index 213ee349c56..7dc1a06ba49 100644 --- a/py/server/deephaven/perfmon.py +++ b/py/server/deephaven/perfmon.py @@ -106,7 +106,7 @@ def query_operation_performance_log_as_tree_table() -> TreeTable: """ try: return TreeTable(j_tree_table=_JPerformanceQueries.queryOperationPerformanceAsTreeTable(), - id_col = 'EvaluationNumber', parent_col = 'ParentEvaluationNumber') + id_col = "EvalKey", parent_col = "ParentEvalKey") except Exception as e: raise DHError(e, "failed to obtain the query operation performance log as tree table.") from e @@ -123,7 +123,7 @@ def query_performance_log_as_tree_table() -> TreeTable: """ try: return TreeTable(j_tree_table=_JPerformanceQueries.queryPerformanceAsTreeTable(), - id_col = 'EvaluationNumber', parent_col = 'ParentEvaluationNumber') + id_col = "EvaluationNumber", parent_col = "ParentEvaluationNumber") except Exception as e: raise DHError(e, "failed to obtain the query performance log as tree table.") from e diff --git 
a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index d5dafd395ca..da2057cd59e 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -15,7 +15,6 @@ import io.deephaven.engine.liveness.LivenessScopeStack; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; -import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorderImpl; import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.engine.updategraph.DynamicNode; @@ -1035,11 +1034,11 @@ private void doExport() { } } } + if (queryPerformanceRecorder != null && qprIsForBatch) { + Assert.neqNull(exportRecorder, "exportRecorder"); + queryPerformanceRecorder.accumulate(exportRecorder); + } if (shouldLog || caughtException != null) { - if (queryPerformanceRecorder != null && qprIsForBatch) { - Assert.neqNull(exportRecorder, "exportRecorder"); - queryPerformanceRecorder.accumulate(exportRecorder); - } EngineMetrics.getInstance().logQueryProcessingResults(queryProcessingResults); } if (caughtException == null) { diff --git a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java index 2e264b0a7a1..0b5c7eae212 100644 --- a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java @@ -10,8 +10,6 @@ import io.deephaven.engine.table.Table; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; -import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorderImpl; -import 
io.deephaven.engine.table.impl.perf.QueryPerformanceRecorderState; import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.extensions.barrage.util.ExportUtil; From e5fdfc24472884c09a4bb295ec9e1c61be90c158 Mon Sep 17 00:00:00 2001 From: Nate Bauernfeind Date: Mon, 13 Nov 2023 21:40:53 -0700 Subject: [PATCH 19/31] Chip's Suggestions from CR Co-authored-by: Chip Kent <5250374+chipkent@users.noreply.github.com> --- py/server/deephaven/perfmon.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/py/server/deephaven/perfmon.py b/py/server/deephaven/perfmon.py index 7dc1a06ba49..52eb9db9684 100644 --- a/py/server/deephaven/perfmon.py +++ b/py/server/deephaven/perfmon.py @@ -95,7 +95,7 @@ def query_performance_log() -> Table: except Exception as e: raise DHError(e, "failed to obtain the query performance log table.") from e -def query_operation_performance_log_as_tree_table() -> TreeTable: +def query_operation_performance_tree_table() -> TreeTable: """ Returns a tree table with Deephaven performance data for individual subqueries. Returns: @@ -111,9 +111,9 @@ def query_operation_performance_log_as_tree_table() -> TreeTable: raise DHError(e, "failed to obtain the query operation performance log as tree table.") from e -def query_performance_log_as_tree_table() -> TreeTable: +def query_performance_tree_table() -> TreeTable: """ Returns a tree table with Deephaven query performance data. Performance data for individual sub-operations as - a tree table is available from calling `query_operation_performance_log_as_tree_table`. + a tree table is available from calling `query_operation_performance_tree_table`. 
Returns: a TreeTable From 10e455f1342aafaae7bfe9a162e6f9e8ea0ee2a1 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Tue, 14 Nov 2023 13:39:27 -0700 Subject: [PATCH 20/31] Add python tests for the new QPL QOPL tree table methods --- py/server/deephaven/perfmon.py | 11 +++++++---- py/server/tests/test_perfmon.py | 6 +++++- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/py/server/deephaven/perfmon.py b/py/server/deephaven/perfmon.py index 52eb9db9684..4f9b76c1483 100644 --- a/py/server/deephaven/perfmon.py +++ b/py/server/deephaven/perfmon.py @@ -12,6 +12,7 @@ from deephaven import DHError from deephaven.jcompat import j_map_to_dict from deephaven.table import Table, TreeTable +from deephaven.update_graph import auto_locking_ctx _JPerformanceQueries = jpy.get_type("io.deephaven.engine.table.impl.util.PerformanceQueries") _JMetricsManager = jpy.get_type("io.deephaven.util.metrics.MetricsManager") @@ -105,8 +106,9 @@ def query_operation_performance_tree_table() -> TreeTable: DHError """ try: - return TreeTable(j_tree_table=_JPerformanceQueries.queryOperationPerformanceAsTreeTable(), - id_col = "EvalKey", parent_col = "ParentEvalKey") + with auto_locking_ctx(query_performance_log()): + return TreeTable(j_tree_table=_JPerformanceQueries.queryOperationPerformanceAsTreeTable(), + id_col = "EvalKey", parent_col = "ParentEvalKey") except Exception as e: raise DHError(e, "failed to obtain the query operation performance log as tree table.") from e @@ -122,8 +124,9 @@ def query_performance_tree_table() -> TreeTable: DHError """ try: - return TreeTable(j_tree_table=_JPerformanceQueries.queryPerformanceAsTreeTable(), - id_col = "EvaluationNumber", parent_col = "ParentEvaluationNumber") + with auto_locking_ctx(query_performance_log()): + return TreeTable(j_tree_table=_JPerformanceQueries.queryPerformanceAsTreeTable(), + id_col = "EvaluationNumber", parent_col = "ParentEvaluationNumber") except Exception as e: raise DHError(e, "failed to obtain the query 
performance log as tree table.") from e diff --git a/py/server/tests/test_perfmon.py b/py/server/tests/test_perfmon.py index 121b774aea4..60b864cd696 100644 --- a/py/server/tests/test_perfmon.py +++ b/py/server/tests/test_perfmon.py @@ -7,7 +7,7 @@ from deephaven import empty_table from deephaven.perfmon import process_info_log, process_metrics_log, server_state_log, \ query_operation_performance_log, query_performance_log, update_performance_log, metrics_get_counters, \ - metrics_reset_counters + metrics_reset_counters, query_performance_tree_table, query_operation_performance_tree_table from deephaven.perfmon import query_update_performance, query_performance, query_operation_performance, server_state from tests.testbase import BaseTestCase @@ -62,6 +62,8 @@ def test_query_logs(self): self.assertTrue(log_table.to_string()) log_table = update_performance_log() self.assertTrue(log_table.to_string()) + log_table = query_performance_tree_table() + self.assertIsNotNone(log_table) def test_performance_queries(self): q = query_performance(1) @@ -72,6 +74,8 @@ def test_performance_queries(self): self.assertTrue(q.to_string()) q = query_update_performance(1) self.assertTrue(q.to_string()) + q = query_operation_performance_tree_table() + self.assertIsNotNone(q) if __name__ == '__main__': From 42e04f2205ba54671aa374c2361fb9012bae8ab2 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Tue, 14 Nov 2023 19:54:04 -0700 Subject: [PATCH 21/31] Audited ExportObject Creation --- .../server/arrow/ArrowFlightUtil.java | 236 +++++---- .../server/arrow/FlightServiceGrpcImpl.java | 131 +++-- .../console/ConsoleServiceGrpcImpl.java | 126 +++-- .../HierarchicalTableServiceGrpcImpl.java | 481 ++++++++++-------- .../server/object/ObjectServiceGrpcImpl.java | 107 ++-- .../PartitionedTableServiceGrpcImpl.java | 220 ++++---- .../session/SessionServiceGrpcImpl.java | 74 ++- .../InputTableServiceGrpcImpl.java | 204 +++++--- .../table/ops/TableServiceGrpcImpl.java | 108 ++-- 9 files 
changed, 1028 insertions(+), 659 deletions(-) diff --git a/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java b/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java index c2351dfeae2..b67bf676978 100644 --- a/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java +++ b/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java @@ -19,6 +19,8 @@ import io.deephaven.engine.table.Table; import io.deephaven.engine.table.impl.BaseTable; import io.deephaven.engine.table.impl.QueryTable; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.util.BarrageMessage; import io.deephaven.engine.updategraph.UpdateGraph; import io.deephaven.extensions.barrage.BarragePerformanceLog; @@ -71,37 +73,49 @@ public static void DoGetCustom( final Flight.Ticket request, final StreamObserver observer) { - final SessionState.ExportObject> export = - ticketRouter.resolve(session, request, "request"); + final String description = "FlightService#DoGet(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); - final BarragePerformanceLog.SnapshotMetricsHelper metrics = - new BarragePerformanceLog.SnapshotMetricsHelper(); + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String ticketName = ticketRouter.getLogNameFor(request, "ArrowFlightUtil"); - final long queueStartTm = System.nanoTime(); - session.nonExport() - .require(export) - .onError(observer) - .submit(() -> { - metrics.queueNanos = System.nanoTime() - queueStartTm; - final BaseTable table = export.get(); - metrics.tableId = Integer.toHexString(System.identityHashCode(table)); - metrics.tableKey = BarragePerformanceLog.getKeyFor(table); - - // create an adapter for the response observer - final 
StreamObserver listener = - ArrowModule.provideListenerAdapter().adapt(observer); - - // push the schema to the listener - listener.onNext(streamGeneratorFactory.getSchemaView( - fbb -> BarrageUtil.makeTableSchemaPayload(fbb, DEFAULT_SNAPSHOT_DESER_OPTIONS, - table.getDefinition(), table.getAttributes()))); - - // shared code between `DoGet` and `BarrageSnapshotRequest` - BarrageUtil.createAndSendSnapshot(streamGeneratorFactory, table, null, null, false, - DEFAULT_SNAPSHOT_DESER_OPTIONS, listener, metrics); + final SessionState.ExportObject> export; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTicket:" + ticketName)) { + export = ticketRouter.resolve(session, request, "request"); + } - listener.onCompleted(); - }); + final BarragePerformanceLog.SnapshotMetricsHelper metrics = + new BarragePerformanceLog.SnapshotMetricsHelper(); + + final long queueStartTm = System.nanoTime(); + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(export) + .onError(observer) + .submit(() -> { + metrics.queueNanos = System.nanoTime() - queueStartTm; + final BaseTable table = export.get(); + metrics.tableId = Integer.toHexString(System.identityHashCode(table)); + metrics.tableKey = BarragePerformanceLog.getKeyFor(table); + + // create an adapter for the response observer + final StreamObserver listener = + ArrowModule.provideListenerAdapter().adapt(observer); + + // push the schema to the listener + listener.onNext(streamGeneratorFactory.getSchemaView( + fbb -> BarrageUtil.makeTableSchemaPayload(fbb, DEFAULT_SNAPSHOT_DESER_OPTIONS, + table.getDefinition(), table.getAttributes()))); + + // shared code between `DoGet` and `BarrageSnapshotRequest` + BarrageUtil.createAndSendSnapshot(streamGeneratorFactory, table, null, null, false, + DEFAULT_SNAPSHOT_DESER_OPTIONS, listener, metrics); + + listener.onCompleted(); + }); + } } /** @@ -478,69 +492,84 @@ public void handleMessage(@NotNull final 
BarrageProtoUtil.MessageInfo message) { final BarrageSnapshotRequest snapshotRequest = BarrageSnapshotRequest .getRootAsBarrageSnapshotRequest(message.app_metadata.msgPayloadAsByteBuffer()); - final SessionState.ExportObject> parent = - ticketRouter.resolve(session, snapshotRequest.ticketAsByteBuffer(), "ticket"); - - final BarragePerformanceLog.SnapshotMetricsHelper metrics = - new BarragePerformanceLog.SnapshotMetricsHelper(); - - final long queueStartTm = System.nanoTime(); - session.nonExport() - .require(parent) - .onError(listener) - .submit(() -> { - metrics.queueNanos = System.nanoTime() - queueStartTm; - final BaseTable table = parent.get(); - metrics.tableId = Integer.toHexString(System.identityHashCode(table)); - metrics.tableKey = BarragePerformanceLog.getKeyFor(table); - - if (table.isFailed()) { - throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, - "Table is already failed"); - } - - // push the schema to the listener - listener.onNext(streamGeneratorFactory.getSchemaView( - fbb -> BarrageUtil.makeTableSchemaPayload(fbb, - snapshotOptAdapter.adapt(snapshotRequest), - table.getDefinition(), table.getAttributes()))); - - // collect the viewport and columnsets (if provided) - final boolean hasColumns = snapshotRequest.columnsVector() != null; - final BitSet columns = - hasColumns ? BitSet.valueOf(snapshotRequest.columnsAsByteBuffer()) : null; - - final boolean hasViewport = snapshotRequest.viewportVector() != null; - RowSet viewport = - hasViewport - ? 
BarrageProtoUtil.toRowSet(snapshotRequest.viewportAsByteBuffer()) - : null; - - final boolean reverseViewport = snapshotRequest.reverseViewport(); - - // leverage common code for `DoGet` and `BarrageSnapshotOptions` - BarrageUtil.createAndSendSnapshot(streamGeneratorFactory, table, columns, viewport, - reverseViewport, snapshotOptAdapter.adapt(snapshotRequest), listener, metrics); - HalfClosedState newState = halfClosedState.updateAndGet(current -> { - switch (current) { - case DONT_CLOSE: - // record that we have finished sending - return HalfClosedState.FINISHED_SENDING; - case CLIENT_HALF_CLOSED: - // since streaming has now finished, and client already half-closed, time to - // half close from server - return HalfClosedState.CLOSED; - case FINISHED_SENDING: - case CLOSED: - throw new IllegalStateException("Can't finish streaming twice"); - default: - throw new IllegalStateException("Unknown state " + current); + final String description = + "FlightService#DoExchange(snapshot, session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String ticketName = ticketRouter.getLogNameFor( + snapshotRequest.ticketAsByteBuffer(), "ArrowFlightUtil"); + + final SessionState.ExportObject> parent; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTicket:" + ticketName)) { + parent = ticketRouter.resolve(session, snapshotRequest.ticketAsByteBuffer(), "parent"); + } + + final BarragePerformanceLog.SnapshotMetricsHelper metrics = + new BarragePerformanceLog.SnapshotMetricsHelper(); + + final long queueStartTm = System.nanoTime(); + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(parent) + .onError(listener) + .submit(() -> { + metrics.queueNanos = System.nanoTime() - 
queueStartTm; + final BaseTable table = parent.get(); + metrics.tableId = Integer.toHexString(System.identityHashCode(table)); + metrics.tableKey = BarragePerformanceLog.getKeyFor(table); + + if (table.isFailed()) { + throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, + "Table is already failed"); + } + + // push the schema to the listener + listener.onNext(streamGeneratorFactory.getSchemaView( + fbb -> BarrageUtil.makeTableSchemaPayload(fbb, + snapshotOptAdapter.adapt(snapshotRequest), + table.getDefinition(), table.getAttributes()))); + + // collect the viewport and columnsets (if provided) + final boolean hasColumns = snapshotRequest.columnsVector() != null; + final BitSet columns = + hasColumns ? BitSet.valueOf(snapshotRequest.columnsAsByteBuffer()) : null; + + final boolean hasViewport = snapshotRequest.viewportVector() != null; + RowSet viewport = + hasViewport + ? BarrageProtoUtil.toRowSet(snapshotRequest.viewportAsByteBuffer()) + : null; + + final boolean reverseViewport = snapshotRequest.reverseViewport(); + + // leverage common code for `DoGet` and `BarrageSnapshotOptions` + BarrageUtil.createAndSendSnapshot(streamGeneratorFactory, table, columns, viewport, + reverseViewport, snapshotOptAdapter.adapt(snapshotRequest), listener, + metrics); + HalfClosedState newState = halfClosedState.updateAndGet(current -> { + switch (current) { + case DONT_CLOSE: + // record that we have finished sending + return HalfClosedState.FINISHED_SENDING; + case CLIENT_HALF_CLOSED: + // since streaming has now finished, and client already half-closed, + // time to half close from server + return HalfClosedState.CLOSED; + case FINISHED_SENDING: + case CLOSED: + throw new IllegalStateException("Can't finish streaming twice"); + default: + throw new IllegalStateException("Unknown state " + current); + } + }); + if (newState == HalfClosedState.CLOSED) { + listener.onCompleted(); } }); - if (newState == HalfClosedState.CLOSED) { - listener.onCompleted(); - } - }); + } 
} } @@ -623,14 +652,29 @@ public void handleMessage(@NotNull final MessageInfo message) { preExportSubscriptions = new ArrayDeque<>(); preExportSubscriptions.add(subscriptionRequest); - final SessionState.ExportObject parent = - ticketRouter.resolve(session, subscriptionRequest.ticketAsByteBuffer(), "ticket"); - synchronized (this) { - onExportResolvedContinuation = session.nonExport() - .require(parent) - .onErrorHandler(DoExchangeMarshaller.this::onError) - .submit(() -> onExportResolved(parent)); + final String description = + "FlightService#DoExchange(subscription, session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String ticketName = ticketRouter.getLogNameFor( + subscriptionRequest.ticketAsByteBuffer(), "ArrowFlightUtil"); + + final SessionState.ExportObject parent; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTicket:" + ticketName)) { + parent = ticketRouter.resolve(session, subscriptionRequest.ticketAsByteBuffer(), "parent"); + } + + synchronized (this) { + onExportResolvedContinuation = session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(parent) + .onErrorHandler(DoExchangeMarshaller.this::onError) + .submit(() -> onExportResolved(parent)); + } } } } diff --git a/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java index 66a7de0d37a..557ab7f91da 100644 --- a/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java @@ -10,6 +10,11 @@ import io.deephaven.auth.AuthenticationException; import io.deephaven.auth.AuthenticationRequestHandler; import 
io.deephaven.auth.BasicAuthMarshaller; +import io.deephaven.engine.table.impl.BaseTable; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; +import io.deephaven.engine.table.impl.perf.QueryProcessingResults; +import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.extensions.barrage.BarrageStreamGenerator; import io.deephaven.extensions.barrage.util.GrpcUtil; import io.deephaven.internal.log.LoggerFactory; @@ -22,6 +27,8 @@ import io.deephaven.server.session.SessionState; import io.deephaven.server.session.TicketRouter; import io.deephaven.auth.AuthContext; +import io.deephaven.util.SafeCloseable; +import io.grpc.StatusRuntimeException; import io.grpc.stub.StreamObserver; import org.apache.arrow.flight.impl.Flight; import org.apache.arrow.flight.impl.FlightServiceGrpc; @@ -170,30 +177,53 @@ public void getFlightInfo( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getOptionalSession(); - final SessionState.ExportObject export = - ticketRouter.flightInfoFor(session, request, "request"); - - if (session != null) { - session.nonExport() - .require(export) - .onError(responseObserver) - .submit(() -> { - responseObserver.onNext(export.get()); - responseObserver.onCompleted(); - }); - } else { + final String description = + "FlightService#getFlightInfo(session=" + (session == null ? 
"Anonymous" : session.getSessionId()) + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + + final SessionState.ExportObject export; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget("flightInfoFor")) { + export = ticketRouter.flightInfoFor(session, request, "request"); + } + + if (session != null) { + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(export) + .onError(responseObserver) + .submit(() -> { + responseObserver.onNext(export.get()); + responseObserver.onCompleted(); + }); + return; + } + + String exception = null; if (export.tryRetainReference()) { try { if (export.getState() == ExportNotification.State.EXPORTED) { - responseObserver.onNext(export.get()); - responseObserver.onCompleted(); + GrpcUtil.safelyOnNext(responseObserver, export.get()); + GrpcUtil.safelyComplete(responseObserver); } } finally { export.dropReference(); } } else { - responseObserver.onError( - Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "Could not find flight info")); + final StatusRuntimeException err = + Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "Could not find flight info"); + exception = err.getMessage(); + GrpcUtil.safelyError(responseObserver, err); + } + + if (queryPerformanceRecorder.endQuery() || exception != null) { + QueryProcessingResults results = new QueryProcessingResults(queryPerformanceRecorder); + if (exception != null) { + results.setException(exception); + } + EngineMetrics.getInstance().logQueryProcessingResults(results); } } } @@ -204,33 +234,58 @@ public void getSchema( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getOptionalSession(); - final SessionState.ExportObject export = - 
ticketRouter.flightInfoFor(session, request, "request"); + final String description = + "FlightService#getSchema(session=" + (session == null ? "Anonymous" : session.getSessionId()) + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); - if (session != null) { - session.nonExport() - .require(export) - .onError(responseObserver) - .submit(() -> { - responseObserver.onNext(Flight.SchemaResult.newBuilder() + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + + final SessionState.ExportObject export; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget("flightInfoFor")) { + export = ticketRouter.flightInfoFor(session, request, "request"); + } + + if (session != null) { + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(export) + .onError(responseObserver) + .submit(() -> { + responseObserver.onNext(Flight.SchemaResult.newBuilder() + .setSchema(export.get().getSchema()) + .build()); + responseObserver.onCompleted(); + }); + return; + } + + String exception = null; + if (export.tryRetainReference()) { + try { + if (export.getState() == ExportNotification.State.EXPORTED) { + GrpcUtil.safelyOnNext(responseObserver, Flight.SchemaResult.newBuilder() .setSchema(export.get().getSchema()) .build()); - responseObserver.onCompleted(); - }); - } else if (export.tryRetainReference()) { - try { - if (export.getState() == ExportNotification.State.EXPORTED) { - responseObserver.onNext(Flight.SchemaResult.newBuilder() - .setSchema(export.get().getSchema()) - .build()); - responseObserver.onCompleted(); + GrpcUtil.safelyComplete(responseObserver); + } + } finally { + export.dropReference(); } - } finally { - export.dropReference(); + } else { + final StatusRuntimeException err = + Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "Could not find flight info"); + 
exception = err.getMessage(); + responseObserver.onError(err); + } + + if (queryPerformanceRecorder.endQuery() || exception != null) { + QueryProcessingResults results = new QueryProcessingResults(queryPerformanceRecorder); + if (exception != null) { + results.setException(exception); + } + EngineMetrics.getInstance().logQueryProcessingResults(results); } - } else { - responseObserver.onError( - Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "Could not find flight info")); } } diff --git a/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java index e99e14f1efb..31382986fae 100644 --- a/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java @@ -8,6 +8,8 @@ import io.deephaven.base.LockFreeArrayQueue; import io.deephaven.configuration.Configuration; import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.util.RuntimeMemory; import io.deephaven.engine.table.impl.util.RuntimeMemory.Sample; import io.deephaven.engine.updategraph.DynamicNode; @@ -35,6 +37,7 @@ import io.deephaven.server.session.SessionState.ExportBuilder; import io.deephaven.server.session.TicketRouter; import io.deephaven.server.util.Scheduler; +import io.deephaven.util.SafeCloseable; import io.grpc.stub.ServerCallStreamObserver; import io.grpc.stub.StreamObserver; import org.jetbrains.annotations.NotNull; @@ -164,29 +167,42 @@ public void executeCommand( throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No consoleId supplied"); } - SessionState.ExportObject exportedConsole = - ticketRouter.resolve(session, consoleId, "consoleId"); - session.nonExport() - .requiresSerialQueue() - .require(exportedConsole) - 
.onError(responseObserver) - .submit(() -> { - ScriptSession scriptSession = exportedConsole.get(); - ScriptSession.Changes changes = scriptSession.evaluateScript(request.getCode()); - ExecuteCommandResponse.Builder diff = ExecuteCommandResponse.newBuilder(); - FieldsChangeUpdate.Builder fieldChanges = FieldsChangeUpdate.newBuilder(); - changes.created.entrySet() - .forEach(entry -> fieldChanges.addCreated(makeVariableDefinition(entry))); - changes.updated.entrySet() - .forEach(entry -> fieldChanges.addUpdated(makeVariableDefinition(entry))); - changes.removed.entrySet() - .forEach(entry -> fieldChanges.addRemoved(makeVariableDefinition(entry))); - if (changes.error != null) { - diff.setErrorMessage(Throwables.getStackTraceAsString(changes.error)); - log.error().append("Error running script: ").append(changes.error).endl(); - } - safelyComplete(responseObserver, diff.setChanges(fieldChanges).build()); - }); + final String description = "ConsoleServiceGrpcImpl#executeCommand(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String ticketName = ticketRouter.getLogNameFor(consoleId, "consoleId"); + + final SessionState.ExportObject exportedConsole; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTicket:" + ticketName)) { + exportedConsole = ticketRouter.resolve(session, consoleId, "consoleId"); + } + + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .requiresSerialQueue() + .require(exportedConsole) + .onError(responseObserver) + .submit(() -> { + ScriptSession scriptSession = exportedConsole.get(); + ScriptSession.Changes changes = scriptSession.evaluateScript(request.getCode()); + ExecuteCommandResponse.Builder diff = ExecuteCommandResponse.newBuilder(); 
+ FieldsChangeUpdate.Builder fieldChanges = FieldsChangeUpdate.newBuilder(); + changes.created.entrySet() + .forEach(entry -> fieldChanges.addCreated(makeVariableDefinition(entry))); + changes.updated.entrySet() + .forEach(entry -> fieldChanges.addUpdated(makeVariableDefinition(entry))); + changes.removed.entrySet() + .forEach(entry -> fieldChanges.addRemoved(makeVariableDefinition(entry))); + if (changes.error != null) { + diff.setErrorMessage(Throwables.getStackTraceAsString(changes.error)); + log.error().append("Error running script: ").append(changes.error).endl(); + } + safelyComplete(responseObserver, diff.setChanges(fieldChanges).build()); + }); + } } @Override @@ -240,32 +256,52 @@ public void bindTableToVariable( if (tableId.getTicket().isEmpty()) { throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No source tableId supplied"); } - final SessionState.ExportObject
exportedTable = ticketRouter.resolve(session, tableId, "tableId"); - final SessionState.ExportObject exportedConsole; - ExportBuilder exportBuilder = session.nonExport() - .requiresSerialQueue() - .onError(responseObserver); + final String description = "ConsoleServiceGrpcImpl#bindTableToVariable(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); - if (request.hasConsoleId()) { - exportedConsole = ticketRouter.resolve(session, request.getConsoleId(), "consoleId"); - exportBuilder.require(exportedTable, exportedConsole); - } else { - exportedConsole = null; - exportBuilder.require(exportedTable); - } + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String tableTicketName = ticketRouter.getLogNameFor(tableId, "tableId"); + + final SessionState.ExportObject
exportedTable; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTableTicket:" + tableTicketName)) { + exportedTable = ticketRouter.resolve(session, tableId, "tableId"); + } + + final SessionState.ExportObject exportedConsole; + + ExportBuilder exportBuilder = session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .requiresSerialQueue() + .onError(responseObserver); - exportBuilder.submit(() -> { - ScriptSession scriptSession = - exportedConsole != null ? exportedConsole.get() : scriptSessionProvider.get(); - Table table = exportedTable.get(); - scriptSession.setVariable(request.getVariableName(), table); - if (DynamicNode.notDynamicOrIsRefreshing(table)) { - scriptSession.manage(table); + if (request.hasConsoleId()) { + final String consoleTicketName = ticketRouter.getLogNameFor(request.getConsoleId(), "consoleId"); + + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolveConsoleTicket:" + consoleTicketName)) { + exportedConsole = ticketRouter.resolve(session, request.getConsoleId(), "consoleId"); + } + exportBuilder.require(exportedTable, exportedConsole); + } else { + exportedConsole = null; + exportBuilder.require(exportedTable); } - responseObserver.onNext(BindTableToVariableResponse.getDefaultInstance()); - responseObserver.onCompleted(); - }); + + exportBuilder.submit(() -> { + ScriptSession scriptSession = + exportedConsole != null ? 
exportedConsole.get() : scriptSessionProvider.get(); + Table table = exportedTable.get(); + scriptSession.setVariable(request.getVariableName(), table); + if (DynamicNode.notDynamicOrIsRefreshing(table)) { + scriptSession.manage(table); + } + responseObserver.onNext(BindTableToVariableResponse.getDefaultInstance()); + responseObserver.onCompleted(); + }); + } } @Override diff --git a/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java index 8824b4f358e..c20ae5176e9 100644 --- a/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java @@ -18,6 +18,8 @@ import io.deephaven.engine.table.impl.AbsoluteSortColumnConventions; import io.deephaven.engine.table.impl.BaseGridAttributes; import io.deephaven.engine.table.impl.hierarchical.RollupTableImpl; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.select.WhereFilter; import io.deephaven.extensions.barrage.util.ExportUtil; import io.deephaven.internal.log.LoggerFactory; @@ -31,6 +33,7 @@ import io.deephaven.server.table.ops.AggregationAdapter; import io.deephaven.server.table.ops.FilterTableGrpcImpl; import io.deephaven.server.table.ops.filter.FilterFactory; +import io.deephaven.util.SafeCloseable; import io.grpc.stub.StreamObserver; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; @@ -73,31 +76,44 @@ public void rollup( final SessionState session = sessionService.getCurrentSession(); - final SessionState.ExportObject
sourceTableExport = ticketRouter.resolve( - session, request.getSourceTableId(), "rollup.sourceTableId"); - - session.newExport(request.getResultRollupTableId(), "rollup.resultRollupTableId") - .require(sourceTableExport) - .onError(responseObserver) - .submit(() -> { - final Table sourceTable = sourceTableExport.get(); - - authWiring.checkPermissionRollup(session.getAuthContext(), request, List.of(sourceTable)); - - final Collection aggregations = request.getAggregationsList().stream() - .map(AggregationAdapter::adapt) - .collect(Collectors.toList()); - final boolean includeConstituents = request.getIncludeConstituents(); - final Collection groupByColumns = request.getGroupByColumnsList().stream() - .map(ColumnName::of) - .collect(Collectors.toList()); - final RollupTable result = sourceTable.rollup( - aggregations, includeConstituents, groupByColumns); - - final RollupTable transformedResult = authTransformation.transform(result); - safelyComplete(responseObserver, RollupResponse.getDefaultInstance()); - return transformedResult; - }); + final String description = "HierarchicalTableServiceGrpcImpl#rollup(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String ticketName = ticketRouter.getLogNameFor(request.getSourceTableId(), + "HierarchicalTableServiceGrpcImpl"); + + final SessionState.ExportObject
sourceTableExport; + try (final SafeCloseable ignored2 = + QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" + ticketName)) { + sourceTableExport = ticketRouter.resolve(session, request.getSourceTableId(), "rollup.sourceTableId"); + } + + session.newExport(request.getResultRollupTableId(), "rollup.resultRollupTableId") + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(sourceTableExport) + .onError(responseObserver) + .submit(() -> { + final Table sourceTable = sourceTableExport.get(); + + authWiring.checkPermissionRollup(session.getAuthContext(), request, List.of(sourceTable)); + + final Collection aggregations = request.getAggregationsList().stream() + .map(AggregationAdapter::adapt) + .collect(Collectors.toList()); + final boolean includeConstituents = request.getIncludeConstituents(); + final Collection groupByColumns = request.getGroupByColumnsList().stream() + .map(ColumnName::of) + .collect(Collectors.toList()); + final RollupTable result = sourceTable.rollup( + aggregations, includeConstituents, groupByColumns); + + final RollupTable transformedResult = authTransformation.transform(result); + safelyComplete(responseObserver, RollupResponse.getDefaultInstance()); + return transformedResult; + }); + } } private static void validate(@NotNull final RollupRequest request) { @@ -117,35 +133,48 @@ public void tree( final SessionState session = sessionService.getCurrentSession(); - final SessionState.ExportObject
sourceTableExport = ticketRouter.resolve( - session, request.getSourceTableId(), "tree.sourceTableId"); + final String description = "HierarchicalTableServiceGrpcImpl#tree(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String ticketName = ticketRouter.getLogNameFor(request.getSourceTableId(), + "HierarchicalTableServiceGrpcImpl"); - session.newExport(request.getResultTreeTableId(), "tree.resultTreeTableId") - .require(sourceTableExport) - .onError(responseObserver) - .submit(() -> { - final Table sourceTable = sourceTableExport.get(); + final SessionState.ExportObject
sourceTableExport; + try (final SafeCloseable ignored2 = + QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" + ticketName)) { + sourceTableExport = ticketRouter.resolve(session, request.getSourceTableId(), "tree.sourceTableId"); + } + + session.newExport(request.getResultTreeTableId(), "tree.resultTreeTableId") + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(sourceTableExport) + .onError(responseObserver) + .submit(() -> { + final Table sourceTable = sourceTableExport.get(); - authWiring.checkPermissionTree(session.getAuthContext(), request, List.of(sourceTable)); + authWiring.checkPermissionTree(session.getAuthContext(), request, List.of(sourceTable)); - final ColumnName identifierColumn = ColumnName.of(request.getIdentifierColumn()); - final ColumnName parentIdentifierColumn = ColumnName.of(request.getParentIdentifierColumn()); + final ColumnName identifierColumn = ColumnName.of(request.getIdentifierColumn()); + final ColumnName parentIdentifierColumn = ColumnName.of(request.getParentIdentifierColumn()); - final Table sourceTableToUse; - if (request.getPromoteOrphans()) { - sourceTableToUse = TreeTable.promoteOrphans( - sourceTable, identifierColumn.name(), parentIdentifierColumn.name()); - } else { - sourceTableToUse = sourceTable; - } + final Table sourceTableToUse; + if (request.getPromoteOrphans()) { + sourceTableToUse = TreeTable.promoteOrphans( + sourceTable, identifierColumn.name(), parentIdentifierColumn.name()); + } else { + sourceTableToUse = sourceTable; + } - final TreeTable result = sourceTableToUse.tree( - identifierColumn.name(), parentIdentifierColumn.name()); + final TreeTable result = sourceTableToUse.tree( + identifierColumn.name(), parentIdentifierColumn.name()); - final TreeTable transformedResult = authTransformation.transform(result); - safelyComplete(responseObserver, TreeResponse.getDefaultInstance()); - return transformedResult; - }); + final TreeTable transformedResult = 
authTransformation.transform(result); + safelyComplete(responseObserver, TreeResponse.getDefaultInstance()); + return transformedResult; + }); + } } private static void validate(@NotNull final TreeRequest request) { @@ -166,79 +195,93 @@ public void apply( final SessionState session = sessionService.getCurrentSession(); - final SessionState.ExportObject> inputHierarchicalTableExport = ticketRouter.resolve( - session, request.getInputHierarchicalTableId(), "apply.inputHierarchicalTableId"); - - session.newExport(request.getResultHierarchicalTableId(), "apply.resultHierarchicalTableId") - .require(inputHierarchicalTableExport) - .onError(responseObserver) - .submit(() -> { - final HierarchicalTable inputHierarchicalTable = inputHierarchicalTableExport.get(); - - authWiring.checkPermissionApply(session.getAuthContext(), request, - List.of(inputHierarchicalTable.getSource())); - - if (request.getFiltersCount() == 0 && request.getSortsCount() == 0) { - throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "No operations specified"); - } - final Collection finishedConditions = request.getFiltersCount() == 0 - ? null - : FilterTableGrpcImpl.finishConditions(request.getFiltersList()); - final Collection translatedSorts = - translateAndValidateSorts(request, (BaseGridAttributes) inputHierarchicalTable); - - final HierarchicalTable result; - if (inputHierarchicalTable instanceof RollupTable) { - RollupTable rollupTable = (RollupTable) inputHierarchicalTable; - // Rollups only support filtering on the group-by columns, so we can safely use the - // aggregated node definition here. 
- final TableDefinition nodeDefinition = - rollupTable.getNodeDefinition(RollupTable.NodeType.Aggregated); - if (finishedConditions != null) { - final Collection filters = - makeWhereFilters(finishedConditions, nodeDefinition); - RollupTableImpl.initializeAndValidateFilters( - rollupTable.getSource(), - rollupTable.getGroupByColumns(), - filters, - message -> Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, message)); - rollupTable = rollupTable.withFilter(Filter.and(filters)); + final String description = "HierarchicalTableServiceGrpcImpl#apply(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String ticketName = ticketRouter.getLogNameFor(request.getInputHierarchicalTableId(), + "HierarchicalTableServiceGrpcImpl"); + + final SessionState.ExportObject> inputHierarchicalTableExport; + try (final SafeCloseable ignored2 = + QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" + ticketName)) { + inputHierarchicalTableExport = ticketRouter.resolve( + session, request.getInputHierarchicalTableId(), "apply.inputHierarchicalTableId"); + } + + session.newExport(request.getResultHierarchicalTableId(), "apply.resultHierarchicalTableId") + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(inputHierarchicalTableExport) + .onError(responseObserver) + .submit(() -> { + final HierarchicalTable inputHierarchicalTable = inputHierarchicalTableExport.get(); + + authWiring.checkPermissionApply(session.getAuthContext(), request, + List.of(inputHierarchicalTable.getSource())); + + if (request.getFiltersCount() == 0 && request.getSortsCount() == 0) { + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "No operations specified"); } - if (translatedSorts != null) { - RollupTable.NodeOperationsRecorder 
aggregatedSorts = - rollupTable.makeNodeOperationsRecorder(RollupTable.NodeType.Aggregated); - aggregatedSorts = aggregatedSorts.sort(translatedSorts); - if (rollupTable.includesConstituents()) { - final RollupTable.NodeOperationsRecorder constituentSorts = rollupTable - .translateAggregatedNodeOperationsForConstituentNodes(aggregatedSorts); - rollupTable = rollupTable.withNodeOperations(aggregatedSorts, constituentSorts); - } else { - rollupTable = rollupTable.withNodeOperations(aggregatedSorts); + final Collection finishedConditions = request.getFiltersCount() == 0 + ? null + : FilterTableGrpcImpl.finishConditions(request.getFiltersList()); + final Collection translatedSorts = + translateAndValidateSorts(request, (BaseGridAttributes) inputHierarchicalTable); + + final HierarchicalTable result; + if (inputHierarchicalTable instanceof RollupTable) { + RollupTable rollupTable = (RollupTable) inputHierarchicalTable; + // Rollups only support filtering on the group-by columns, so we can safely use the + // aggregated node definition here. 
+ final TableDefinition nodeDefinition = + rollupTable.getNodeDefinition(RollupTable.NodeType.Aggregated); + if (finishedConditions != null) { + final Collection filters = + makeWhereFilters(finishedConditions, nodeDefinition); + RollupTableImpl.initializeAndValidateFilters( + rollupTable.getSource(), + rollupTable.getGroupByColumns(), + filters, + message -> Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, message)); + rollupTable = rollupTable.withFilter(Filter.and(filters)); } + if (translatedSorts != null) { + RollupTable.NodeOperationsRecorder aggregatedSorts = + rollupTable.makeNodeOperationsRecorder(RollupTable.NodeType.Aggregated); + aggregatedSorts = aggregatedSorts.sort(translatedSorts); + if (rollupTable.includesConstituents()) { + final RollupTable.NodeOperationsRecorder constituentSorts = rollupTable + .translateAggregatedNodeOperationsForConstituentNodes(aggregatedSorts); + rollupTable = rollupTable.withNodeOperations(aggregatedSorts, constituentSorts); + } else { + rollupTable = rollupTable.withNodeOperations(aggregatedSorts); + } + } + result = rollupTable; + } else if (inputHierarchicalTable instanceof TreeTable) { + TreeTable treeTable = (TreeTable) inputHierarchicalTable; + final TableDefinition nodeDefinition = treeTable.getNodeDefinition(); + if (finishedConditions != null) { + treeTable = treeTable + .withFilter(Filter.and(makeWhereFilters(finishedConditions, nodeDefinition))); + } + if (translatedSorts != null) { + TreeTable.NodeOperationsRecorder treeSorts = treeTable.makeNodeOperationsRecorder(); + treeSorts = treeSorts.sort(translatedSorts); + treeTable = treeTable.withNodeOperations(treeSorts); + } + result = treeTable; + } else { + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, + "Input is not a supported HierarchicalTable type"); } - result = rollupTable; - } else if (inputHierarchicalTable instanceof TreeTable) { - TreeTable treeTable = (TreeTable) inputHierarchicalTable; - final TableDefinition nodeDefinition = 
treeTable.getNodeDefinition(); - if (finishedConditions != null) { - treeTable = treeTable - .withFilter(Filter.and(makeWhereFilters(finishedConditions, nodeDefinition))); - } - if (translatedSorts != null) { - TreeTable.NodeOperationsRecorder treeSorts = treeTable.makeNodeOperationsRecorder(); - treeSorts = treeSorts.sort(translatedSorts); - treeTable = treeTable.withNodeOperations(treeSorts); - } - result = treeTable; - } else { - throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, - "Input is not a supported HierarchicalTable type"); - } - - final HierarchicalTable transformedResult = authTransformation.transform(result); - safelyComplete(responseObserver, HierarchicalTableApplyResponse.getDefaultInstance()); - return transformedResult; - }); + + final HierarchicalTable transformedResult = authTransformation.transform(result); + safelyComplete(responseObserver, HierarchicalTableApplyResponse.getDefaultInstance()); + return transformedResult; + }); + } } private static void validate(@NotNull final HierarchicalTableApplyRequest request) { @@ -315,81 +358,98 @@ public void view( final SessionState session = sessionService.getCurrentSession(); - final SessionState.ExportBuilder resultExportBuilder = - session.newExport(request.getResultViewId(), "view.resultViewId"); + final String description = "HierarchicalTableServiceGrpcImpl#view(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + + final SessionState.ExportBuilder resultExportBuilder = + session.newExport(request.getResultViewId(), "view.resultViewId"); + + final boolean usedExisting; + final Ticket targetTicket; + switch (request.getTargetCase()) { + case HIERARCHICAL_TABLE_ID: + usedExisting = false; + targetTicket = request.getHierarchicalTableId(); + break; + case 
EXISTING_VIEW_ID: + usedExisting = true; + targetTicket = request.getExistingViewId(); + break; + case TARGET_NOT_SET: + default: + throw new IllegalStateException(); + } + final String ticketName = ticketRouter.getLogNameFor(request.getResultViewId(), + "HierarchicalTableServiceGrpcImpl"); + final SessionState.ExportObject targetExport; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance() + .getNugget("resolveTargetTicket:" + ticketName)) { + targetExport = ticketRouter.resolve(session, targetTicket, "view.target"); + } - final boolean usedExisting; - final Ticket targetTicket; - switch (request.getTargetCase()) { - case HIERARCHICAL_TABLE_ID: - usedExisting = false; - targetTicket = request.getHierarchicalTableId(); - break; - case EXISTING_VIEW_ID: - usedExisting = true; - targetTicket = request.getExistingViewId(); - break; - case TARGET_NOT_SET: - default: - throw new IllegalStateException(); - } - final SessionState.ExportObject targetExport = ticketRouter.resolve( - session, targetTicket, "view.target"); - - final SessionState.ExportObject
keyTableExport; - if (request.hasExpansions()) { - keyTableExport = ticketRouter.resolve( - session, request.getExpansions().getKeyTableId(), "view.expansions.keyTableId"); - resultExportBuilder.require(targetExport, keyTableExport); - } else { - keyTableExport = null; - resultExportBuilder.require(targetExport); - } + final SessionState.ExportObject
keyTableExport; + if (request.hasExpansions()) { + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance() + .getNugget("resolveExpansionsTicket:" + ticketName)) { + keyTableExport = ticketRouter.resolve( + session, request.getExpansions().getKeyTableId(), "view.expansions.keyTableId"); + } + resultExportBuilder.require(targetExport, keyTableExport); + } else { + keyTableExport = null; + resultExportBuilder.require(targetExport); + } - resultExportBuilder.onError(responseObserver) - .submit(() -> { - final Table keyTable = keyTableExport == null ? null : keyTableExport.get(); - final Object target = targetExport.get(); - final HierarchicalTableView targetExistingView = usedExisting - ? (HierarchicalTableView) target - : null; - final HierarchicalTable targetHierarchicalTable = usedExisting - ? targetExistingView.getHierarchicalTable() - : (HierarchicalTable) target; - - authWiring.checkPermissionView(session.getAuthContext(), request, keyTable == null - ? List.of(targetHierarchicalTable.getSource()) - : List.of(keyTable, targetHierarchicalTable.getSource())); - - final HierarchicalTableView result; - if (usedExisting) { - if (keyTable != null) { - result = HierarchicalTableView.makeFromExistingView( - targetExistingView, - keyTable, - request.getExpansions().hasKeyTableActionColumn() - ? ColumnName.of(request.getExpansions().getKeyTableActionColumn()) - : null); - } else { - result = HierarchicalTableView.makeFromExistingView(targetExistingView); - } - } else { - if (keyTable != null) { - result = HierarchicalTableView.makeFromHierarchicalTable( - targetHierarchicalTable, - keyTable, - request.getExpansions().hasKeyTableActionColumn() - ? ColumnName.of(request.getExpansions().getKeyTableActionColumn()) - : null); + resultExportBuilder + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .onError(responseObserver) + .submit(() -> { + final Table keyTable = keyTableExport == null ? 
null : keyTableExport.get(); + final Object target = targetExport.get(); + final HierarchicalTableView targetExistingView = usedExisting + ? (HierarchicalTableView) target + : null; + final HierarchicalTable targetHierarchicalTable = usedExisting + ? targetExistingView.getHierarchicalTable() + : (HierarchicalTable) target; + + authWiring.checkPermissionView(session.getAuthContext(), request, keyTable == null + ? List.of(targetHierarchicalTable.getSource()) + : List.of(keyTable, targetHierarchicalTable.getSource())); + + final HierarchicalTableView result; + if (usedExisting) { + if (keyTable != null) { + result = HierarchicalTableView.makeFromExistingView( + targetExistingView, + keyTable, + request.getExpansions().hasKeyTableActionColumn() + ? ColumnName.of(request.getExpansions().getKeyTableActionColumn()) + : null); + } else { + result = HierarchicalTableView.makeFromExistingView(targetExistingView); + } } else { - result = HierarchicalTableView.makeFromHierarchicalTable(targetHierarchicalTable); + if (keyTable != null) { + result = HierarchicalTableView.makeFromHierarchicalTable( + targetHierarchicalTable, + keyTable, + request.getExpansions().hasKeyTableActionColumn() + ? 
ColumnName.of(request.getExpansions().getKeyTableActionColumn()) + : null); + } else { + result = HierarchicalTableView.makeFromHierarchicalTable(targetHierarchicalTable); + } } - } - final HierarchicalTableView transformedResult = authTransformation.transform(result); - safelyComplete(responseObserver, HierarchicalTableViewResponse.getDefaultInstance()); - return transformedResult; - }); + final HierarchicalTableView transformedResult = authTransformation.transform(result); + safelyComplete(responseObserver, HierarchicalTableViewResponse.getDefaultInstance()); + return transformedResult; + }); + } } private static void validate(@NotNull final HierarchicalTableViewRequest request) { @@ -421,24 +481,39 @@ public void exportSource( final SessionState session = sessionService.getCurrentSession(); - final SessionState.ExportObject> hierarchicalTableExport = ticketRouter.resolve( - session, request.getHierarchicalTableId(), "exportSource.hierarchicalTableId"); + final String description = + "HierarchicalTableServiceGrpcImpl#exportSource(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); - session.newExport(request.getResultTableId(), "exportSource.resultTableId") - .require(hierarchicalTableExport) - .onError(responseObserver) - .submit(() -> { - final HierarchicalTable hierarchicalTable = hierarchicalTableExport.get(); + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String ticketName = ticketRouter.getLogNameFor(request.getHierarchicalTableId(), + "HierarchicalTableServiceGrpcImpl"); - final Table result = hierarchicalTable.getSource(); - authWiring.checkPermissionExportSource(session.getAuthContext(), request, List.of(result)); + final SessionState.ExportObject> hierarchicalTableExport; + try (final SafeCloseable ignored2 = + QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" 
+ ticketName)) { + hierarchicalTableExport = ticketRouter.resolve( + session, request.getHierarchicalTableId(), "exportSource.hierarchicalTableId"); + } - final Table transformedResult = authTransformation.transform(result); - final ExportedTableCreationResponse response = - ExportUtil.buildTableCreationResponse(request.getResultTableId(), transformedResult); - safelyComplete(responseObserver, response); - return transformedResult; - }); + session.newExport(request.getResultTableId(), "exportSource.resultTableId") + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(hierarchicalTableExport) + .onError(responseObserver) + .submit(() -> { + final HierarchicalTable hierarchicalTable = hierarchicalTableExport.get(); + + final Table result = hierarchicalTable.getSource(); + authWiring.checkPermissionExportSource(session.getAuthContext(), request, List.of(result)); + + final Table transformedResult = authTransformation.transform(result); + final ExportedTableCreationResponse response = + ExportUtil.buildTableCreationResponse(request.getResultTableId(), transformedResult); + safelyComplete(responseObserver, response); + return transformedResult; + }); + } } private static void validate(@NotNull final HierarchicalTableSourceExportRequest request) { diff --git a/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java index 56e56388eb3..951b79730c4 100644 --- a/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java @@ -8,6 +8,9 @@ import io.deephaven.base.verify.Assert; import io.deephaven.engine.liveness.LivenessScope; import io.deephaven.engine.liveness.LivenessScopeStack; +import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import 
io.deephaven.extensions.barrage.util.GrpcUtil; import io.deephaven.plugin.type.ObjectCommunicationException; import io.deephaven.plugin.type.ObjectType; @@ -257,55 +260,69 @@ public void fetchObject( if (request.getSourceId().getTicket().getTicket().isEmpty()) { throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "No ticket supplied"); } - final SessionState.ExportObject object = ticketRouter.resolve( - session, request.getSourceId().getTicket(), "sourceId"); - session.nonExport() - .require(object) - .onError(responseObserver) - .submit(() -> { - final Object o = object.get(); - ObjectType objectTypeInstance = getObjectTypeInstance(type, o); - - AtomicReference singleResponse = new AtomicReference<>(); - AtomicBoolean isClosed = new AtomicBoolean(false); - StreamObserver wrappedResponseObserver = new StreamObserver<>() { - @Override - public void onNext(StreamResponse value) { - singleResponse.set(FetchObjectResponse.newBuilder() - .setType(type) - .setData(value.getData().getPayload()) - .addAllTypedExportIds(value.getData().getExportedReferencesList()) - .build()); - } - @Override - public void onError(Throwable t) { - responseObserver.onError(t); - } + final String description = "ObjectServiceGrpcImpl#fetchObject(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String ticketName = ticketRouter.getLogNameFor(request.getSourceId().getTicket(), "sourceId"); + + final SessionState.ExportObject object; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTicket:" + ticketName)) { + object = ticketRouter.resolve(session, request.getSourceId().getTicket(), "sourceId"); + } + + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(object) + 
.onError(responseObserver) + .submit(() -> { + final Object o = object.get(); + ObjectType objectTypeInstance = getObjectTypeInstance(type, o); + + AtomicReference singleResponse = new AtomicReference<>(); + AtomicBoolean isClosed = new AtomicBoolean(false); + StreamObserver wrappedResponseObserver = new StreamObserver<>() { + @Override + public void onNext(StreamResponse value) { + singleResponse.set(FetchObjectResponse.newBuilder() + .setType(type) + .setData(value.getData().getPayload()) + .addAllTypedExportIds(value.getData().getExportedReferencesList()) + .build()); + } - @Override - public void onCompleted() { - isClosed.set(true); + @Override + public void onError(Throwable t) { + responseObserver.onError(t); + } + + @Override + public void onCompleted() { + isClosed.set(true); + } + }; + PluginMessageSender connection = new PluginMessageSender(wrappedResponseObserver, session); + objectTypeInstance.clientConnection(o, connection); + + FetchObjectResponse message = singleResponse.get(); + if (message == null) { + connection.onClose(); + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, + "Plugin didn't send a response before returning from clientConnection()"); } - }; - PluginMessageSender connection = new PluginMessageSender(wrappedResponseObserver, session); - objectTypeInstance.clientConnection(o, connection); - - FetchObjectResponse message = singleResponse.get(); - if (message == null) { - connection.onClose(); - throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, - "Plugin didn't send a response before returning from clientConnection()"); - } - if (!isClosed.get()) { - connection.onClose(); - throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, - "Plugin didn't close response, use MessageStream instead for this object"); - } - GrpcUtil.safelyComplete(responseObserver, message); + if (!isClosed.get()) { + connection.onClose(); + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, + "Plugin didn't close 
response, use MessageStream instead for this object"); + } + GrpcUtil.safelyComplete(responseObserver, message); - return null; - }); + return null; + }); + } } @Override diff --git a/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java index 91effc96849..2ab00348fe6 100644 --- a/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java @@ -7,6 +7,8 @@ import io.deephaven.auth.codegen.impl.PartitionedTableServiceContextualAuthWiring; import io.deephaven.engine.table.PartitionedTable; import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.internal.log.LoggerFactory; import io.deephaven.io.logger.Logger; import io.deephaven.proto.backplane.grpc.ExportedTableCreationResponse; @@ -18,6 +20,7 @@ import io.deephaven.proto.util.Exceptions; import io.deephaven.server.auth.AuthorizationProvider; import io.deephaven.server.session.*; +import io.deephaven.util.SafeCloseable; import io.grpc.stub.StreamObserver; import org.jetbrains.annotations.NotNull; @@ -55,20 +58,34 @@ public void partitionBy( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - SessionState.ExportObject
targetTable = - ticketRouter.resolve(session, request.getTableId(), "tableId"); - - session.newExport(request.getResultId(), "resultId") - .require(targetTable) - .onError(responseObserver) - .submit(() -> { - authWiring.checkPermissionPartitionBy(session.getAuthContext(), request, - Collections.singletonList(targetTable.get())); - PartitionedTable partitionedTable = targetTable.get().partitionBy(request.getDropKeys(), - request.getKeyColumnNamesList().toArray(String[]::new)); - safelyComplete(responseObserver, PartitionByResponse.getDefaultInstance()); - return partitionedTable; - }); + final String description = + "PartitionedTableServiceGrpcImpl#partitionBy(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String ticketName = ticketRouter.getLogNameFor( + request.getTableId(), "PartitionedTableServiceGrpcImpl"); + + final SessionState.ExportObject
targetTable; + try (final SafeCloseable ignored2 = + QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" + ticketName)) { + targetTable = ticketRouter.resolve(session, request.getTableId(), "partition.tableId"); + } + + session.newExport(request.getResultId(), "partition.resultId") + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(targetTable) + .onError(responseObserver) + .submit(() -> { + authWiring.checkPermissionPartitionBy(session.getAuthContext(), request, + Collections.singletonList(targetTable.get())); + PartitionedTable partitionedTable = targetTable.get().partitionBy(request.getDropKeys(), + request.getKeyColumnNamesList().toArray(String[]::new)); + safelyComplete(responseObserver, PartitionByResponse.getDefaultInstance()); + return partitionedTable; + }); + } } @Override @@ -77,28 +94,41 @@ public void merge( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - SessionState.ExportObject partitionedTable = - ticketRouter.resolve(session, request.getPartitionedTable(), "partitionedTable"); - - session.newExport(request.getResultId(), "resultId") - .require(partitionedTable) - .onError(responseObserver) - .submit(() -> { - final Table table = partitionedTable.get().table(); - authWiring.checkPermissionMerge(session.getAuthContext(), request, - Collections.singletonList(table)); - Table merged; - if (table.isRefreshing()) { - merged = table.getUpdateGraph().sharedLock().computeLocked(partitionedTable.get()::merge); - } else { - merged = partitionedTable.get().merge(); - } - merged = authorizationTransformation.transform(merged); - final ExportedTableCreationResponse response = - buildTableCreationResponse(request.getResultId(), merged); - safelyComplete(responseObserver, response); - return merged; - }); + final String description = "PartitionedTableServiceGrpcImpl#merge(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder 
queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String ticketName = ticketRouter.getLogNameFor( + request.getPartitionedTable(), "PartitionedTableServiceGrpcImpl"); + + final SessionState.ExportObject partitionedTable; + try (final SafeCloseable ignored2 = + QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" + ticketName)) { + partitionedTable = ticketRouter.resolve(session, request.getPartitionedTable(), "partitionedTable"); + } + + session.newExport(request.getResultId(), "resultId") + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(partitionedTable) + .onError(responseObserver) + .submit(() -> { + final Table table = partitionedTable.get().table(); + authWiring.checkPermissionMerge(session.getAuthContext(), request, + Collections.singletonList(table)); + Table merged; + if (table.isRefreshing()) { + merged = table.getUpdateGraph().sharedLock().computeLocked(partitionedTable.get()::merge); + } else { + merged = partitionedTable.get().merge(); + } + merged = authorizationTransformation.transform(merged); + final ExportedTableCreationResponse response = + buildTableCreationResponse(request.getResultId(), merged); + safelyComplete(responseObserver, response); + return merged; + }); + } } @Override @@ -107,61 +137,79 @@ public void getTable( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - SessionState.ExportObject partitionedTable = - ticketRouter.resolve(session, request.getPartitionedTable(), "partitionedTable"); - SessionState.ExportObject
keys = - ticketRouter.resolve(session, request.getKeyTableTicket(), "keyTableTicket"); - - session.newExport(request.getResultId(), "resultId") - .require(partitionedTable, keys) - .onError(responseObserver) - .submit(() -> { - Table table; - Table keyTable = keys.get(); - authWiring.checkPermissionGetTable(session.getAuthContext(), request, - List.of(partitionedTable.get().table(), keyTable)); - if (!keyTable.isRefreshing()) { - long keyTableSize = keyTable.size(); - if (keyTableSize != 1) { - throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, - "Provided key table does not have one row, instead has " + keyTableSize); - } - long row = keyTable.getRowSet().firstRowKey(); - Object[] values = - partitionedTable.get().keyColumnNames().stream() - .map(keyTable::getColumnSource) - .map(cs -> cs.get(row)) - .toArray(); - table = partitionedTable.get().constituentFor(values); - } else { - table = keyTable.getUpdateGraph().sharedLock().computeLocked(() -> { + final String description = "PartitionedTableServiceGrpcImpl#getTable(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String partitionTableLogId = ticketRouter.getLogNameFor( + request.getPartitionedTable(), "PartitionedTableServiceGrpcImpl"); + final String keyTableLogId = ticketRouter.getLogNameFor( + request.getKeyTableTicket(), "PartitionedTableServiceGrpcImpl"); + + final SessionState.ExportObject partitionedTable; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolvePartitionTableTicket:" + partitionTableLogId)) { + partitionedTable = ticketRouter.resolve(session, request.getPartitionedTable(), "partitionedTable"); + } + final SessionState.ExportObject
keys; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolvePartitionTableTicket:" + keyTableLogId)) { + keys = ticketRouter.resolve(session, request.getKeyTableTicket(), "keyTableTicket"); + } + + session.newExport(request.getResultId(), "resultId") + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(partitionedTable, keys) + .onError(responseObserver) + .submit(() -> { + Table table; + Table keyTable = keys.get(); + authWiring.checkPermissionGetTable(session.getAuthContext(), request, + List.of(partitionedTable.get().table(), keyTable)); + if (!keyTable.isRefreshing()) { long keyTableSize = keyTable.size(); if (keyTableSize != 1) { throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "Provided key table does not have one row, instead has " + keyTableSize); } - Table requestedRow = partitionedTable.get().table().whereIn(keyTable, - partitionedTable.get().keyColumnNames().toArray(String[]::new)); - if (requestedRow.size() != 1) { - if (requestedRow.isEmpty()) { - throw Exceptions.statusRuntimeException(Code.NOT_FOUND, - "Key matches zero rows in the partitioned table"); - } else { - throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, - "Key matches more than one entry in the partitioned table: " - + requestedRow.size()); + long row = keyTable.getRowSet().firstRowKey(); + Object[] values = + partitionedTable.get().keyColumnNames().stream() + .map(keyTable::getColumnSource) + .map(cs -> cs.get(row)) + .toArray(); + table = partitionedTable.get().constituentFor(values); + } else { + table = keyTable.getUpdateGraph().sharedLock().computeLocked(() -> { + long keyTableSize = keyTable.size(); + if (keyTableSize != 1) { + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, + "Provided key table does not have one row, instead has " + keyTableSize); } - } - return (Table) requestedRow - .getColumnSource(partitionedTable.get().constituentColumnName()) - 
.get(requestedRow.getRowSet().firstRowKey()); - }); - } - table = authorizationTransformation.transform(table); - final ExportedTableCreationResponse response = - buildTableCreationResponse(request.getResultId(), table); - safelyComplete(responseObserver, response); - return table; - }); + Table requestedRow = partitionedTable.get().table().whereIn(keyTable, + partitionedTable.get().keyColumnNames().toArray(String[]::new)); + if (requestedRow.size() != 1) { + if (requestedRow.isEmpty()) { + throw Exceptions.statusRuntimeException(Code.NOT_FOUND, + "Key matches zero rows in the partitioned table"); + } else { + throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, + "Key matches more than one entry in the partitioned table: " + + requestedRow.size()); + } + } + return (Table) requestedRow + .getColumnSource(partitionedTable.get().constituentColumnName()) + .get(requestedRow.getRowSet().firstRowKey()); + }); + } + table = authorizationTransformation.transform(table); + final ExportedTableCreationResponse response = + buildTableCreationResponse(request.getResultId(), table); + safelyComplete(responseObserver, response); + return table; + }); + } } } diff --git a/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java index cd4ce627a59..0d3771a0f27 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java @@ -9,6 +9,9 @@ import io.deephaven.auth.AuthenticationException; import io.deephaven.csv.util.MutableObject; import io.deephaven.engine.liveness.LivenessScopeStack; +import io.deephaven.engine.table.PartitionedTable; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.extensions.barrage.util.GrpcUtil; import 
io.deephaven.internal.log.LoggerFactory; import io.deephaven.io.logger.Logger; @@ -165,16 +168,30 @@ public void exportFromTicket( return; } - final SessionState.ExportObject source = ticketRouter.resolve( - session, request.getSourceId(), "sourceId"); - session.newExport(request.getResultId(), "resultId") - .require(source) - .onError(responseObserver) - .submit(() -> { - final Object o = source.get(); - GrpcUtil.safelyComplete(responseObserver, ExportResponse.getDefaultInstance()); - return o; - }); + final String description = "SessionServiceGrpcImpl#exportFromTicket(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String ticketName = ticketRouter.getLogNameFor( + request.getSourceId(), "SessionServiceServiceGrpcImpl"); + + final SessionState.ExportObject source; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTicket:" + ticketName)) { + source = ticketRouter.resolve(session, request.getSourceId(), "sourceId"); + } + + session.newExport(request.getResultId(), "resultId") + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(source) + .onError(responseObserver) + .submit(() -> { + final Object o = source.get(); + GrpcUtil.safelyComplete(responseObserver, ExportResponse.getDefaultInstance()); + return o; + }); + } } @Override @@ -194,18 +211,31 @@ public void publishFromTicket( return; } - final SessionState.ExportObject source = ticketRouter.resolve( - session, request.getSourceId(), "sourceId"); - Ticket resultId = request.getResultId(); - - final SessionState.ExportBuilder publisher = ticketRouter.publish( - session, resultId, "resultId", () -> { - // when publish is complete, complete the gRPC request - GrpcUtil.safelyComplete(responseObserver, 
PublishResponse.getDefaultInstance()); - }); - publisher.require(source) - .onError(responseObserver) - .submit(source::get); + final String description = "SessionServiceGrpcImpl#publishFromTicket(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String ticketName = ticketRouter.getLogNameFor( + request.getSourceId(), "SessionServiceServiceGrpcImpl"); + + final SessionState.ExportObject source; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTicket:" + ticketName)) { + source = ticketRouter.resolve(session, request.getSourceId(), "sourceId"); + } + + Ticket resultId = request.getResultId(); + + ticketRouter.publish(session, resultId, "resultId", () -> { + // when publish is complete, complete the gRPC request + GrpcUtil.safelyComplete(responseObserver, PublishResponse.getDefaultInstance()); + }) + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(source) + .onError(responseObserver) + .submit(source::get); + } } @Override diff --git a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java index 4a52597c444..68fc9132bc7 100644 --- a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java @@ -8,6 +8,8 @@ import io.deephaven.engine.context.ExecutionContext; import io.deephaven.engine.table.Table; import io.deephaven.engine.table.TableDefinition; +import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import 
io.deephaven.engine.util.config.MutableInputTable; import io.deephaven.extensions.barrage.util.GrpcUtil; import io.deephaven.internal.log.LoggerFactory; @@ -21,6 +23,7 @@ import io.deephaven.server.session.SessionService; import io.deephaven.server.session.SessionState; import io.deephaven.server.session.TicketRouter; +import io.deephaven.util.SafeCloseable; import io.grpc.stub.StreamObserver; import org.jetbrains.annotations.NotNull; @@ -52,46 +55,64 @@ public void addTableToInputTable( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - SessionState.ExportObject
targetTable = - ticketRouter.resolve(session, request.getInputTable(), "inputTable"); - SessionState.ExportObject
tableToAddExport = - ticketRouter.resolve(session, request.getTableToAdd(), "tableToAdd"); - - session.nonExport() - .requiresSerialQueue() - .onError(responseObserver) - .require(targetTable, tableToAddExport) - .submit(() -> { - Object inputTable = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - if (!(inputTable instanceof MutableInputTable)) { - throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, - "Table can't be used as an input table"); - } - - MutableInputTable mutableInputTable = (MutableInputTable) inputTable; - Table tableToAdd = tableToAddExport.get(); - - authWiring.checkPermissionAddTableToInputTable( - ExecutionContext.getContext().getAuthContext(), request, - List.of(targetTable.get(), tableToAdd)); - - // validate that the columns are compatible - try { - mutableInputTable.validateAddOrModify(tableToAdd); - } catch (TableDefinition.IncompatibleTableDefinitionException exception) { - throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, - "Provided tables's columns are not compatible: " + exception.getMessage()); - } - - // actually add the tables contents - try { - mutableInputTable.add(tableToAdd); - GrpcUtil.safelyComplete(responseObserver, AddTableResponse.getDefaultInstance()); - } catch (IOException ioException) { - throw Exceptions.statusRuntimeException(Code.DATA_LOSS, - "Error adding table to input table"); - } - }); + final String description = + "InputTableServiceGrpcImpl#addTableToInputTable(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String targetName = ticketRouter.getLogNameFor(request.getInputTable(), "inputTable"); + final SessionState.ExportObject
targetTable; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTargetTableTicket:" + targetName)) { + targetTable = ticketRouter.resolve(session, request.getInputTable(), "inputTable"); + } + + final String tableToAddName = ticketRouter.getLogNameFor(request.getTableToAdd(), "tableToAdd"); + final SessionState.ExportObject
tableToAddExport; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTableToAddTicket:" + tableToAddName)) { + tableToAddExport = ticketRouter.resolve(session, request.getTableToAdd(), "tableToAdd"); + } + + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .requiresSerialQueue() + .onError(responseObserver) + .require(targetTable, tableToAddExport) + .submit(() -> { + Object inputTable = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE); + if (!(inputTable instanceof MutableInputTable)) { + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, + "Table can't be used as an input table"); + } + + MutableInputTable mutableInputTable = (MutableInputTable) inputTable; + Table tableToAdd = tableToAddExport.get(); + + authWiring.checkPermissionAddTableToInputTable( + ExecutionContext.getContext().getAuthContext(), request, + List.of(targetTable.get(), tableToAdd)); + + // validate that the columns are compatible + try { + mutableInputTable.validateAddOrModify(tableToAdd); + } catch (TableDefinition.IncompatibleTableDefinitionException exception) { + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, + "Provided tables's columns are not compatible: " + exception.getMessage()); + } + + // actually add the tables contents + try { + mutableInputTable.add(tableToAdd); + GrpcUtil.safelyComplete(responseObserver, AddTableResponse.getDefaultInstance()); + } catch (IOException ioException) { + throw Exceptions.statusRuntimeException(Code.DATA_LOSS, + "Error adding table to input table"); + } + }); + } } @Override @@ -100,48 +121,65 @@ public void deleteTableFromInputTable( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - SessionState.ExportObject
targetTable = - ticketRouter.resolve(session, request.getInputTable(), "inputTable"); - SessionState.ExportObject
tableToDeleteExport = - ticketRouter.resolve(session, request.getTableToRemove(), "tableToDelete"); - - session.nonExport() - .requiresSerialQueue() - .onError(responseObserver) - .require(targetTable, tableToDeleteExport) - .submit(() -> { - Object inputTable = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE); - if (!(inputTable instanceof MutableInputTable)) { - throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, - "Table can't be used as an input table"); - } - - MutableInputTable mutableInputTable = (MutableInputTable) inputTable; - Table tableToDelete = tableToDeleteExport.get(); - - authWiring.checkPermissionDeleteTableFromInputTable( - ExecutionContext.getContext().getAuthContext(), request, - List.of(targetTable.get(), tableToDelete)); - - // validate that the columns are compatible - try { - mutableInputTable.validateDelete(tableToDelete); - } catch (TableDefinition.IncompatibleTableDefinitionException exception) { - throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, - "Provided tables's columns are not compatible: " + exception.getMessage()); - } catch (UnsupportedOperationException exception) { - throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, - "Provided input table does not support delete."); - } - - // actually delete the table's contents - try { - mutableInputTable.delete(tableToDelete); - GrpcUtil.safelyComplete(responseObserver, DeleteTableResponse.getDefaultInstance()); - } catch (IOException ioException) { - throw Exceptions.statusRuntimeException(Code.DATA_LOSS, - "Error deleting table from inputtable"); - } - }); + final String description = + "InputTableServiceGrpcImpl#deleteTableFromInputTable(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String targetName = 
ticketRouter.getLogNameFor(request.getInputTable(), "inputTable"); + final SessionState.ExportObject
targetTable; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTargetTableTicket:" + targetName)) { + targetTable = ticketRouter.resolve(session, request.getInputTable(), "inputTable"); + } + + final String tableToRemove = ticketRouter.getLogNameFor(request.getTableToRemove(), "tableToRemove"); + final SessionState.ExportObject
tableToRemoveExport; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTableToRemoveTicket:" + tableToRemove)) { + tableToRemoveExport = ticketRouter.resolve(session, request.getTableToRemove(), "tableToRemove"); + } + + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .requiresSerialQueue() + .onError(responseObserver) + .require(targetTable, tableToRemoveExport) + .submit(() -> { + Object inputTable = targetTable.get().getAttribute(Table.INPUT_TABLE_ATTRIBUTE); + if (!(inputTable instanceof MutableInputTable)) { + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, + "Table can't be used as an input table"); + } + + MutableInputTable mutableInputTable = (MutableInputTable) inputTable; + Table tableToDelete = tableToRemoveExport.get(); + + authWiring.checkPermissionDeleteTableFromInputTable( + ExecutionContext.getContext().getAuthContext(), request, + List.of(targetTable.get(), tableToDelete)); + + // validate that the columns are compatible + try { + mutableInputTable.validateDelete(tableToDelete); + } catch (TableDefinition.IncompatibleTableDefinitionException exception) { + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, + "Provided tables's columns are not compatible: " + exception.getMessage()); + } catch (UnsupportedOperationException exception) { + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, + "Provided input table does not support delete."); + } + + // actually delete the table's contents + try { + mutableInputTable.delete(tableToDelete); + GrpcUtil.safelyComplete(responseObserver, DeleteTableResponse.getDefaultInstance()); + } catch (IOException ioException) { + throw Exceptions.statusRuntimeException(Code.DATA_LOSS, + "Error deleting table from inputtable"); + } + }); + } } } diff --git a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java 
b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java index 0b5c7eae212..7232fd99971 100644 --- a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java @@ -469,28 +469,41 @@ public void seekRow( if (sourceId.getTicket().isEmpty()) { throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No consoleId supplied"); } - SessionState.ExportObject
exportedTable = - ticketRouter.resolve(session, sourceId, "sourceId"); - session.nonExport() - .require(exportedTable) - .onError(responseObserver) - .submit(() -> { - final Table table = exportedTable.get(); - authWiring.checkPermissionSeekRow(session.getAuthContext(), request, - Collections.singletonList(table)); - final String columnName = request.getColumnName(); - final Class dataType = table.getDefinition().getColumn(columnName).getDataType(); - final Object seekValue = getSeekValue(request.getSeekValue(), dataType); - final Long result = table.apply(new SeekRow( - request.getStartingRow(), - columnName, - seekValue, - request.getInsensitive(), - request.getContains(), - request.getIsBackward())); - SeekRowResponse.Builder rowResponse = SeekRowResponse.newBuilder(); - safelyComplete(responseObserver, rowResponse.setResultRow(result).build()); - }); + final String description = + "TableServiceGrpcImpl#seekRow(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String ticketName = ticketRouter.getLogNameFor(sourceId, "sourceId"); + final SessionState.ExportObject
exportedTable; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTicket:" + ticketName)) { + exportedTable = ticketRouter.resolve(session, sourceId, "sourceId"); + } + + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(exportedTable) + .onError(responseObserver) + .submit(() -> { + final Table table = exportedTable.get(); + authWiring.checkPermissionSeekRow(session.getAuthContext(), request, + Collections.singletonList(table)); + final String columnName = request.getColumnName(); + final Class dataType = table.getDefinition().getColumn(columnName).getDataType(); + final Object seekValue = getSeekValue(request.getSeekValue(), dataType); + final Long result = table.apply(new SeekRow( + request.getStartingRow(), + columnName, + seekValue, + request.getInsensitive(), + request.getContains(), + request.getIsBackward())); + SeekRowResponse.Builder rowResponse = SeekRowResponse.newBuilder(); + safelyComplete(responseObserver, rowResponse.setResultRow(result).build()); + }); + } } @Override @@ -617,25 +630,38 @@ public void getExportedTableCreationResponse( throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No request ticket supplied"); } - final SessionState.ExportObject export = ticketRouter.resolve(session, request, "request"); - - session.nonExport() - .require(export) - .onError(responseObserver) - .submit(() -> { - final Object obj = export.get(); - if (!(obj instanceof Table)) { - responseObserver.onError( - Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, - "Ticket is not a table")); - return; - } - authWiring.checkPermissionGetExportedTableCreationResponse( - session.getAuthContext(), request, Collections.singletonList((Table) obj)); - final ExportedTableCreationResponse response = - ExportUtil.buildTableCreationResponse(request, (Table) obj); - safelyComplete(responseObserver, response); - }); + final String description = + 
"TableServiceGrpcImpl#getExportedTableCreationResponse(session=" + session.getSessionId() + ")"; + final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( + description, QueryPerformanceNugget.DEFAULT_FACTORY); + + try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + final String ticketName = ticketRouter.getLogNameFor(request, "request"); + final SessionState.ExportObject export; + try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTicket:" + ticketName)) { + export = ticketRouter.resolve(session, request, "request"); + } + + session.nonExport() + .queryPerformanceRecorder(queryPerformanceRecorder, false) + .require(export) + .onError(responseObserver) + .submit(() -> { + final Object obj = export.get(); + if (!(obj instanceof Table)) { + responseObserver.onError( + Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, + "Ticket is not a table")); + return; + } + authWiring.checkPermissionGetExportedTableCreationResponse( + session.getAuthContext(), request, Collections.singletonList((Table) obj)); + final ExportedTableCreationResponse response = + ExportUtil.buildTableCreationResponse(request, (Table) obj); + safelyComplete(responseObserver, response); + }); + } } /** From 68513286052f6fcf56664fcce10ce5ec4a9e5805 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Tue, 14 Nov 2023 22:39:48 -0700 Subject: [PATCH 22/31] Publishing State Change Bug ?? 
--- .../java/io/deephaven/server/session/SessionState.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index da2057cd59e..8299bd2007a 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -707,7 +707,11 @@ private synchronized void setWork( this.errorHandler = errorHandler; this.successHandler = successHandler; - setState(ExportNotification.State.PENDING); + if (state != ExportNotification.State.PUBLISHING) { + setState(ExportNotification.State.PENDING); + } else if (dependentCount > 0) { + throw new IllegalStateException("published exports cannot have dependencies"); + } if (dependentCount <= 0) { dependentCount = 0; scheduleExport(); @@ -945,7 +949,7 @@ private void onResolveOne(@Nullable final ExportObject parent) { */ private void scheduleExport() { synchronized (this) { - if (state != ExportNotification.State.PENDING) { + if (state != ExportNotification.State.PENDING && state != ExportNotification.State.PUBLISHING) { return; } setState(ExportNotification.State.QUEUED); From 947f85421f1d47c1b6da8778dc8c233e3af22a60 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Wed, 15 Nov 2023 09:14:26 -0700 Subject: [PATCH 23/31] ExportObject Builder API - Explicit Methods for Sub-Query vs Resume --- .../server/arrow/ArrowFlightUtil.java | 6 ++-- .../server/arrow/FlightServiceGrpcImpl.java | 4 +-- .../console/ConsoleServiceGrpcImpl.java | 4 +-- .../HierarchicalTableServiceGrpcImpl.java | 10 +++---- .../server/object/ObjectServiceGrpcImpl.java | 2 +- .../PartitionedTableServiceGrpcImpl.java | 6 ++-- .../session/SessionServiceGrpcImpl.java | 4 +-- .../server/session/SessionState.java | 30 +++++++++++-------- .../InputTableServiceGrpcImpl.java | 4 +-- .../table/ops/TableServiceGrpcImpl.java | 8 ++--- 10 
files changed, 42 insertions(+), 36 deletions(-) diff --git a/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java b/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java index b67bf676978..dde14602afc 100644 --- a/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java +++ b/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java @@ -91,7 +91,7 @@ public static void DoGetCustom( final long queueStartTm = System.nanoTime(); session.nonExport() - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(export) .onError(observer) .submit(() -> { @@ -512,7 +512,7 @@ public void handleMessage(@NotNull final BarrageProtoUtil.MessageInfo message) { final long queueStartTm = System.nanoTime(); session.nonExport() - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(parent) .onError(listener) .submit(() -> { @@ -670,7 +670,7 @@ public void handleMessage(@NotNull final MessageInfo message) { synchronized (this) { onExportResolvedContinuation = session.nonExport() - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(parent) .onErrorHandler(DoExchangeMarshaller.this::onError) .submit(() -> onExportResolved(parent)); diff --git a/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java index 557ab7f91da..23aec4f61b8 100644 --- a/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java @@ -191,7 +191,7 @@ public void getFlightInfo( if (session != null) { session.nonExport() - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(export) .onError(responseObserver) 
.submit(() -> { @@ -248,7 +248,7 @@ public void getSchema( if (session != null) { session.nonExport() - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(export) .onError(responseObserver) .submit(() -> { diff --git a/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java index 31382986fae..72554f7cb31 100644 --- a/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java @@ -181,7 +181,7 @@ public void executeCommand( } session.nonExport() - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .requiresSerialQueue() .require(exportedConsole) .onError(responseObserver) @@ -273,7 +273,7 @@ public void bindTableToVariable( final SessionState.ExportObject exportedConsole; ExportBuilder exportBuilder = session.nonExport() - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .requiresSerialQueue() .onError(responseObserver); diff --git a/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java index c20ae5176e9..00d88e1cd07 100644 --- a/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java @@ -91,7 +91,7 @@ public void rollup( } session.newExport(request.getResultRollupTableId(), "rollup.resultRollupTableId") - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(sourceTableExport) .onError(responseObserver) .submit(() -> { @@ -148,7 
+148,7 @@ public void tree( } session.newExport(request.getResultTreeTableId(), "tree.resultTreeTableId") - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(sourceTableExport) .onError(responseObserver) .submit(() -> { @@ -211,7 +211,7 @@ public void apply( } session.newExport(request.getResultHierarchicalTableId(), "apply.resultHierarchicalTableId") - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(inputHierarchicalTableExport) .onError(responseObserver) .submit(() -> { @@ -404,7 +404,7 @@ public void view( } resultExportBuilder - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .onError(responseObserver) .submit(() -> { final Table keyTable = keyTableExport == null ? null : keyTableExport.get(); @@ -498,7 +498,7 @@ public void exportSource( } session.newExport(request.getResultTableId(), "exportSource.resultTableId") - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(hierarchicalTableExport) .onError(responseObserver) .submit(() -> { diff --git a/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java index 951b79730c4..7df54bd6a9c 100644 --- a/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java @@ -275,7 +275,7 @@ public void fetchObject( } session.nonExport() - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(object) .onError(responseObserver) .submit(() -> { diff --git a/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java 
b/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java index 2ab00348fe6..1f55fc94072 100644 --- a/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java @@ -74,7 +74,7 @@ public void partitionBy( } session.newExport(request.getResultId(), "partition.resultId") - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(targetTable) .onError(responseObserver) .submit(() -> { @@ -109,7 +109,7 @@ public void merge( } session.newExport(request.getResultId(), "resultId") - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(partitionedTable) .onError(responseObserver) .submit(() -> { @@ -159,7 +159,7 @@ public void getTable( } session.newExport(request.getResultId(), "resultId") - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(partitionedTable, keys) .onError(responseObserver) .submit(() -> { diff --git a/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java index 0d3771a0f27..35e17020d3b 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java @@ -183,7 +183,7 @@ public void exportFromTicket( } session.newExport(request.getResultId(), "resultId") - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(source) .onError(responseObserver) .submit(() -> { @@ -231,7 +231,7 @@ public void publishFromTicket( // when publish is complete, complete the gRPC request GrpcUtil.safelyComplete(responseObserver, 
PublishResponse.getDefaultInstance()); }) - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(source) .onError(responseObserver) .submit(source::get); diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index 8299bd2007a..a5ac5870972 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -1333,24 +1333,30 @@ public class ExportBuilder { } } + /** + * Set the performance recorder to resume when running this export. + * + * @param queryPerformanceRecorder the performance recorder + * @return this builder + */ + public ExportBuilder queryPerformanceRecorder( + @NotNull final QueryPerformanceRecorder queryPerformanceRecorder) { + export.setQueryPerformanceRecorder(queryPerformanceRecorder, false); + return this; + } + /** * Set the performance recorder to aggregate performance data across exports. *

- * When {@code qprIsForBatch}: - *

    - *
  • is {@code false}: The provided queryPerformanceRecorder is suspended and assumed by the export object - *
  • is {@code true}: Instrumentation logging is the responsibility of the caller and should not be performed - * until all sub-queries have completed. - *
+ * Instrumentation logging is the responsibility of the caller and should not be performed until all sub-queries + * have completed. * - * @param queryPerformanceRecorder the performance recorder to aggregate into - * @param qprIsForBatch true if a sub-query should be created for the export and aggregated into the qpr + * @param parentQueryPerformanceRecorder the performance recorder to aggregate into * @return this builder */ - public ExportBuilder queryPerformanceRecorder( - @NotNull final QueryPerformanceRecorder queryPerformanceRecorder, - final boolean qprIsForBatch) { - export.setQueryPerformanceRecorder(queryPerformanceRecorder, qprIsForBatch); + public ExportBuilder parentQueryPerformanceRecorder( + @NotNull final QueryPerformanceRecorder parentQueryPerformanceRecorder) { + export.setQueryPerformanceRecorder(parentQueryPerformanceRecorder, true); return this; } diff --git a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java index 68fc9132bc7..f2f64ef9aa5 100644 --- a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java @@ -77,7 +77,7 @@ public void addTableToInputTable( } session.nonExport() - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .requiresSerialQueue() .onError(responseObserver) .require(targetTable, tableToAddExport) @@ -142,7 +142,7 @@ public void deleteTableFromInputTable( } session.nonExport() - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .requiresSerialQueue() .onError(responseObserver) .require(targetTable, tableToRemoveExport) diff --git a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java 
b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java index 7232fd99971..592f8fbaed4 100644 --- a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java @@ -483,7 +483,7 @@ public void seekRow( } session.nonExport() - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(exportedTable) .onError(responseObserver) .submit(() -> { @@ -644,7 +644,7 @@ public void getExportedTableCreationResponse( } session.nonExport() - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .require(export) .onError(responseObserver) .submit(() -> { @@ -700,7 +700,7 @@ private void oneShotOperationWrapper( session.newExport(resultId, "resultId") .require(dependencies) .onError(responseObserver) - .queryPerformanceRecorder(queryPerformanceRecorder, false) + .queryPerformanceRecorder(queryPerformanceRecorder) .submit(() -> { operation.checkPermission(request, dependencies); final Table result = operation.create(request, dependencies); @@ -760,7 +760,7 @@ private BatchExportBuilder createBatchExportBuilder( final Ticket resultId = operation.getResultTicket(request); final ExportBuilder
exportBuilder = resultId.getTicket().isEmpty() ? session.nonExport() : session.newExport(resultId, "resultId"); - exportBuilder.queryPerformanceRecorder(queryPerformanceRecorder, true); + exportBuilder.parentQueryPerformanceRecorder(queryPerformanceRecorder); return new BatchExportBuilder<>(operation, request, exportBuilder); } From 771ec3673252fba711c26ad315c3da12e9a1bab4 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Wed, 15 Nov 2023 14:04:32 -0700 Subject: [PATCH 24/31] put nugget inside of resolve call.. (duh) --- .../server/arrow/ArrowFlightUtil.java | 35 +++------- .../server/arrow/FlightServiceGrpcImpl.java | 18 ++--- .../console/ConsoleServiceGrpcImpl.java | 29 ++------ .../HierarchicalTableServiceGrpcImpl.java | 70 +++++-------------- .../server/object/ObjectServiceGrpcImpl.java | 11 +-- .../PartitionedTableServiceGrpcImpl.java | 45 +++--------- .../session/SessionServiceGrpcImpl.java | 24 ++----- .../server/session/TicketRouter.java | 37 +++++++--- .../InputTableServiceGrpcImpl.java | 39 +++-------- .../table/ops/TableServiceGrpcImpl.java | 31 ++------ 10 files changed, 107 insertions(+), 232 deletions(-) diff --git a/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java b/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java index dde14602afc..01d498bdb74 100644 --- a/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java +++ b/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java @@ -77,14 +77,9 @@ public static void DoGetCustom( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String ticketName = ticketRouter.getLogNameFor(request, "ArrowFlightUtil"); - - final SessionState.ExportObject> export; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolveTicket:" + 
ticketName)) { - export = ticketRouter.resolve(session, request, "request"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject> export = + ticketRouter.resolve(session, request, "request"); final BarragePerformanceLog.SnapshotMetricsHelper metrics = new BarragePerformanceLog.SnapshotMetricsHelper(); @@ -497,15 +492,9 @@ public void handleMessage(@NotNull final BarrageProtoUtil.MessageInfo message) { final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String ticketName = ticketRouter.getLogNameFor( - snapshotRequest.ticketAsByteBuffer(), "ArrowFlightUtil"); - - final SessionState.ExportObject> parent; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolveTicket:" + ticketName)) { - parent = ticketRouter.resolve(session, snapshotRequest.ticketAsByteBuffer(), "parent"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject> parent = + ticketRouter.resolve(session, snapshotRequest.ticketAsByteBuffer(), "parent"); final BarragePerformanceLog.SnapshotMetricsHelper metrics = new BarragePerformanceLog.SnapshotMetricsHelper(); @@ -658,15 +647,9 @@ public void handleMessage(@NotNull final MessageInfo message) { final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String ticketName = ticketRouter.getLogNameFor( - subscriptionRequest.ticketAsByteBuffer(), "ArrowFlightUtil"); - - final SessionState.ExportObject parent; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolveTicket:" + ticketName)) { - parent = 
ticketRouter.resolve(session, subscriptionRequest.ticketAsByteBuffer(), "parent"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject parent = + ticketRouter.resolve(session, subscriptionRequest.ticketAsByteBuffer(), "parent"); synchronized (this) { onExportResolvedContinuation = session.nonExport() diff --git a/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java index 23aec4f61b8..ed1c6ccbbc0 100644 --- a/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java @@ -182,12 +182,9 @@ public void getFlightInfo( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - - final SessionState.ExportObject export; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget("flightInfoFor")) { - export = ticketRouter.flightInfoFor(session, request, "request"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject export = + ticketRouter.flightInfoFor(session, request, "request"); if (session != null) { session.nonExport() @@ -239,12 +236,9 @@ public void getSchema( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - - final SessionState.ExportObject export; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget("flightInfoFor")) { - export = ticketRouter.flightInfoFor(session, request, "request"); - } + try (final SafeCloseable ignored = 
queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject export = + ticketRouter.flightInfoFor(session, request, "request"); if (session != null) { session.nonExport() diff --git a/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java index 72554f7cb31..b4a0d7b2804 100644 --- a/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java @@ -171,14 +171,9 @@ public void executeCommand( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String ticketName = ticketRouter.getLogNameFor(consoleId, "consoleId"); - - final SessionState.ExportObject exportedConsole; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolveTicket:" + ticketName)) { - exportedConsole = ticketRouter.resolve(session, consoleId, "consoleId"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject exportedConsole = + ticketRouter.resolve(session, consoleId, "consoleId"); session.nonExport() .queryPerformanceRecorder(queryPerformanceRecorder) @@ -261,14 +256,9 @@ public void bindTableToVariable( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String tableTicketName = ticketRouter.getLogNameFor(tableId, "tableId"); - - final SessionState.ExportObject
exportedTable; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolveTableTicket:" + tableTicketName)) { - exportedTable = ticketRouter.resolve(session, tableId, "tableId"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject
exportedTable = + ticketRouter.resolve(session, tableId, "tableId"); final SessionState.ExportObject exportedConsole; @@ -278,12 +268,7 @@ public void bindTableToVariable( .onError(responseObserver); if (request.hasConsoleId()) { - final String consoleTicketName = ticketRouter.getLogNameFor(request.getConsoleId(), "consoleId"); - - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolveConsoleTicket:" + consoleTicketName)) { - exportedConsole = ticketRouter.resolve(session, request.getConsoleId(), "consoleId"); - } + exportedConsole = ticketRouter.resolve(session, request.getConsoleId(), "consoleId"); exportBuilder.require(exportedTable, exportedConsole); } else { exportedConsole = null; diff --git a/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java index 00d88e1cd07..22b71097e6d 100644 --- a/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java @@ -80,15 +80,9 @@ public void rollup( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String ticketName = ticketRouter.getLogNameFor(request.getSourceTableId(), - "HierarchicalTableServiceGrpcImpl"); - - final SessionState.ExportObject
sourceTableExport; - try (final SafeCloseable ignored2 = - QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" + ticketName)) { - sourceTableExport = ticketRouter.resolve(session, request.getSourceTableId(), "rollup.sourceTableId"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject
sourceTableExport = + ticketRouter.resolve(session, request.getSourceTableId(), "rollup.sourceTableId"); session.newExport(request.getResultRollupTableId(), "rollup.resultRollupTableId") .queryPerformanceRecorder(queryPerformanceRecorder) @@ -137,15 +131,9 @@ public void tree( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String ticketName = ticketRouter.getLogNameFor(request.getSourceTableId(), - "HierarchicalTableServiceGrpcImpl"); - - final SessionState.ExportObject
sourceTableExport; - try (final SafeCloseable ignored2 = - QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" + ticketName)) { - sourceTableExport = ticketRouter.resolve(session, request.getSourceTableId(), "tree.sourceTableId"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject
sourceTableExport = + ticketRouter.resolve(session, request.getSourceTableId(), "tree.sourceTableId"); session.newExport(request.getResultTreeTableId(), "tree.resultTreeTableId") .queryPerformanceRecorder(queryPerformanceRecorder) @@ -199,16 +187,10 @@ public void apply( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String ticketName = ticketRouter.getLogNameFor(request.getInputHierarchicalTableId(), - "HierarchicalTableServiceGrpcImpl"); - - final SessionState.ExportObject> inputHierarchicalTableExport; - try (final SafeCloseable ignored2 = - QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" + ticketName)) { - inputHierarchicalTableExport = ticketRouter.resolve( - session, request.getInputHierarchicalTableId(), "apply.inputHierarchicalTableId"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject> inputHierarchicalTableExport = + ticketRouter.resolve(session, request.getInputHierarchicalTableId(), + "apply.inputHierarchicalTableId"); session.newExport(request.getResultHierarchicalTableId(), "apply.resultHierarchicalTableId") .queryPerformanceRecorder(queryPerformanceRecorder) @@ -362,7 +344,7 @@ public void view( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportBuilder resultExportBuilder = session.newExport(request.getResultViewId(), "view.resultViewId"); @@ -382,21 +364,13 @@ public void view( default: throw new IllegalStateException(); } - final String ticketName = ticketRouter.getLogNameFor(request.getResultViewId(), - 
"HierarchicalTableServiceGrpcImpl"); - final SessionState.ExportObject targetExport; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance() - .getNugget("resolveTargetTicket:" + ticketName)) { - targetExport = ticketRouter.resolve(session, targetTicket, "view.target"); - } + final SessionState.ExportObject targetExport = + ticketRouter.resolve(session, targetTicket, "view.target"); final SessionState.ExportObject
keyTableExport; if (request.hasExpansions()) { - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance() - .getNugget("resolveExpansionsTicket:" + ticketName)) { - keyTableExport = ticketRouter.resolve( - session, request.getExpansions().getKeyTableId(), "view.expansions.keyTableId"); - } + keyTableExport = ticketRouter.resolve( + session, request.getExpansions().getKeyTableId(), "view.expansions.keyTableId"); resultExportBuilder.require(targetExport, keyTableExport); } else { keyTableExport = null; @@ -486,16 +460,10 @@ public void exportSource( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String ticketName = ticketRouter.getLogNameFor(request.getHierarchicalTableId(), - "HierarchicalTableServiceGrpcImpl"); - - final SessionState.ExportObject> hierarchicalTableExport; - try (final SafeCloseable ignored2 = - QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" + ticketName)) { - hierarchicalTableExport = ticketRouter.resolve( - session, request.getHierarchicalTableId(), "exportSource.hierarchicalTableId"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject> hierarchicalTableExport = + ticketRouter.resolve(session, request.getHierarchicalTableId(), + "exportSource.hierarchicalTableId"); session.newExport(request.getResultTableId(), "exportSource.resultTableId") .queryPerformanceRecorder(queryPerformanceRecorder) diff --git a/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java index 7df54bd6a9c..5b17def4b76 100644 --- a/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java @@ -265,14 
+265,9 @@ public void fetchObject( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String ticketName = ticketRouter.getLogNameFor(request.getSourceId().getTicket(), "sourceId"); - - final SessionState.ExportObject object; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolveTicket:" + ticketName)) { - object = ticketRouter.resolve(session, request.getSourceId().getTicket(), "sourceId"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject object = + ticketRouter.resolve(session, request.getSourceId().getTicket(), "sourceId"); session.nonExport() .queryPerformanceRecorder(queryPerformanceRecorder) diff --git a/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java index 1f55fc94072..eb466ee7a57 100644 --- a/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java @@ -63,15 +63,9 @@ public void partitionBy( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String ticketName = ticketRouter.getLogNameFor( - request.getTableId(), "PartitionedTableServiceGrpcImpl"); - - final SessionState.ExportObject
targetTable; - try (final SafeCloseable ignored2 = - QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" + ticketName)) { - targetTable = ticketRouter.resolve(session, request.getTableId(), "partition.tableId"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject
targetTable = + ticketRouter.resolve(session, request.getTableId(), "partition.tableId"); session.newExport(request.getResultId(), "partition.resultId") .queryPerformanceRecorder(queryPerformanceRecorder) @@ -98,15 +92,9 @@ public void merge( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String ticketName = ticketRouter.getLogNameFor( - request.getPartitionedTable(), "PartitionedTableServiceGrpcImpl"); - - final SessionState.ExportObject partitionedTable; - try (final SafeCloseable ignored2 = - QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" + ticketName)) { - partitionedTable = ticketRouter.resolve(session, request.getPartitionedTable(), "partitionedTable"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject partitionedTable = + ticketRouter.resolve(session, request.getPartitionedTable(), "partitionedTable"); session.newExport(request.getResultId(), "resultId") .queryPerformanceRecorder(queryPerformanceRecorder) @@ -141,22 +129,11 @@ public void getTable( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String partitionTableLogId = ticketRouter.getLogNameFor( - request.getPartitionedTable(), "PartitionedTableServiceGrpcImpl"); - final String keyTableLogId = ticketRouter.getLogNameFor( - request.getKeyTableTicket(), "PartitionedTableServiceGrpcImpl"); - - final SessionState.ExportObject partitionedTable; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolvePartitionTableTicket:" + partitionTableLogId)) { - partitionedTable = ticketRouter.resolve(session, 
request.getPartitionedTable(), "partitionedTable"); - } - final SessionState.ExportObject
keys; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolvePartitionTableTicket:" + keyTableLogId)) { - keys = ticketRouter.resolve(session, request.getKeyTableTicket(), "keyTableTicket"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject partitionedTable = + ticketRouter.resolve(session, request.getPartitionedTable(), "partitionedTable"); + final SessionState.ExportObject
keys = + ticketRouter.resolve(session, request.getKeyTableTicket(), "keyTableTicket"); session.newExport(request.getResultId(), "resultId") .queryPerformanceRecorder(queryPerformanceRecorder) diff --git a/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java index 35e17020d3b..f57b4c2c3a6 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java @@ -172,15 +172,9 @@ public void exportFromTicket( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String ticketName = ticketRouter.getLogNameFor( - request.getSourceId(), "SessionServiceServiceGrpcImpl"); - - final SessionState.ExportObject source; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolveTicket:" + ticketName)) { - source = ticketRouter.resolve(session, request.getSourceId(), "sourceId"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject source = + ticketRouter.resolve(session, request.getSourceId(), "sourceId"); session.newExport(request.getResultId(), "resultId") .queryPerformanceRecorder(queryPerformanceRecorder) @@ -215,15 +209,9 @@ public void publishFromTicket( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String ticketName = ticketRouter.getLogNameFor( - request.getSourceId(), "SessionServiceServiceGrpcImpl"); - - final SessionState.ExportObject source; - try (final SafeCloseable ignored2 = 
QueryPerformanceRecorder.getInstance().getNugget( - "resolveTicket:" + ticketName)) { - source = ticketRouter.resolve(session, request.getSourceId(), "sourceId"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject source = + ticketRouter.resolve(session, request.getSourceId(), "sourceId"); Ticket resultId = request.getResultId(); diff --git a/server/src/main/java/io/deephaven/server/session/TicketRouter.java b/server/src/main/java/io/deephaven/server/session/TicketRouter.java index fc8375e0411..792cad9a0c7 100644 --- a/server/src/main/java/io/deephaven/server/session/TicketRouter.java +++ b/server/src/main/java/io/deephaven/server/session/TicketRouter.java @@ -5,6 +5,7 @@ import com.google.rpc.Code; import io.deephaven.engine.table.Table; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.extensions.barrage.util.BarrageUtil; import io.deephaven.hash.KeyedIntObjectHashMap; import io.deephaven.hash.KeyedIntObjectKey; @@ -13,6 +14,7 @@ import io.deephaven.proto.backplane.grpc.Ticket; import io.deephaven.proto.util.Exceptions; import io.deephaven.server.auth.AuthorizationProvider; +import io.deephaven.util.SafeCloseable; import org.apache.arrow.flight.impl.Flight; import org.jetbrains.annotations.Nullable; @@ -65,7 +67,11 @@ public SessionState.ExportObject resolve( throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "could not resolve '" + logId + "' it's an empty ticket"); } - return getResolver(ticket.get(ticket.position()), logId).resolve(session, ticket, logId); + final String ticketName = getLogNameFor(ticket, logId); + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget( + "resolveTicket:" + ticketName)) { + return getResolver(ticket.get(ticket.position()), logId).resolve(session, ticket, logId); + } } /** @@ -113,7 +119,10 @@ public SessionState.ExportObject resolve( @Nullable final SessionState session, final 
Flight.FlightDescriptor descriptor, final String logId) { - return getResolver(descriptor, logId).resolve(session, descriptor, logId); + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget( + "resolveDescriptor:" + descriptor)) { + return getResolver(descriptor, logId).resolve(session, descriptor, logId); + } } /** @@ -134,9 +143,13 @@ public SessionState.ExportBuilder publish( final ByteBuffer ticket, final String logId, @Nullable final Runnable onPublish) { - final TicketResolver resolver = getResolver(ticket.get(ticket.position()), logId); - authorization.authorizePublishRequest(resolver, ticket); - return resolver.publish(session, ticket, logId, onPublish); + final String ticketName = getLogNameFor(ticket, logId); + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget( + "publishTicket:" + ticketName)) { + final TicketResolver resolver = getResolver(ticket.get(ticket.position()), logId); + authorization.authorizePublishRequest(resolver, ticket); + return resolver.publish(session, ticket, logId, onPublish); + } } /** @@ -201,9 +214,12 @@ public SessionState.ExportBuilder publish( final Flight.FlightDescriptor descriptor, final String logId, @Nullable final Runnable onPublish) { - final TicketResolver resolver = getResolver(descriptor, logId); - authorization.authorizePublishRequest(resolver, descriptor); - return resolver.publish(session, descriptor, logId, onPublish); + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget( + "publishDescriptor:" + descriptor)) { + final TicketResolver resolver = getResolver(descriptor, logId); + authorization.authorizePublishRequest(resolver, descriptor); + return resolver.publish(session, descriptor, logId, onPublish); + } } /** @@ -220,7 +236,10 @@ public SessionState.ExportObject flightInfoFor( @Nullable final SessionState session, final Flight.FlightDescriptor descriptor, final String logId) { - return getResolver(descriptor, 
logId).flightInfoFor(session, descriptor, logId); + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget( + "flightInfoForDescriptor:" + descriptor)) { + return getResolver(descriptor, logId).flightInfoFor(session, descriptor, logId); + } } /** diff --git a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java index f2f64ef9aa5..c1f41fe38d5 100644 --- a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java @@ -60,21 +60,12 @@ public void addTableToInputTable( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject
targetTable = + ticketRouter.resolve(session, request.getInputTable(), "inputTable"); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String targetName = ticketRouter.getLogNameFor(request.getInputTable(), "inputTable"); - final SessionState.ExportObject
targetTable; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolveTargetTableTicket:" + targetName)) { - targetTable = ticketRouter.resolve(session, request.getInputTable(), "inputTable"); - } - - final String tableToAddName = ticketRouter.getLogNameFor(request.getTableToAdd(), "tableToAdd"); - final SessionState.ExportObject
tableToAddExport; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolveTableToAddTicket:" + tableToAddName)) { - tableToAddExport = ticketRouter.resolve(session, request.getTableToAdd(), "tableToAdd"); - } + final SessionState.ExportObject
tableToAddExport = + ticketRouter.resolve(session, request.getTableToAdd(), "tableToAdd"); session.nonExport() .queryPerformanceRecorder(queryPerformanceRecorder) @@ -126,20 +117,12 @@ public void deleteTableFromInputTable( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String targetName = ticketRouter.getLogNameFor(request.getInputTable(), "inputTable"); - final SessionState.ExportObject
targetTable; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolveTargetTableTicket:" + targetName)) { - targetTable = ticketRouter.resolve(session, request.getInputTable(), "inputTable"); - } - - final String tableToRemove = ticketRouter.getLogNameFor(request.getTableToRemove(), "tableToRemove"); - final SessionState.ExportObject
tableToRemoveExport; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolveTableToRemoveTicket:" + tableToRemove)) { - tableToRemoveExport = ticketRouter.resolve(session, request.getTableToRemove(), "tableToRemove"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject
targetTable = + ticketRouter.resolve(session, request.getInputTable(), "inputTable"); + + final SessionState.ExportObject
tableToRemoveExport = + ticketRouter.resolve(session, request.getTableToRemove(), "tableToRemove"); session.nonExport() .queryPerformanceRecorder(queryPerformanceRecorder) diff --git a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java index 592f8fbaed4..966c3b364ca 100644 --- a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java @@ -474,13 +474,9 @@ public void seekRow( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String ticketName = ticketRouter.getLogNameFor(sourceId, "sourceId"); - final SessionState.ExportObject
exportedTable; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolveTicket:" + ticketName)) { - exportedTable = ticketRouter.resolve(session, sourceId, "sourceId"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject
exportedTable = + ticketRouter.resolve(session, sourceId, "sourceId"); session.nonExport() .queryPerformanceRecorder(queryPerformanceRecorder) @@ -635,13 +631,8 @@ public void getExportedTableCreationResponse( final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, QueryPerformanceNugget.DEFAULT_FACTORY); - try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { - final String ticketName = ticketRouter.getLogNameFor(request, "request"); - final SessionState.ExportObject export; - try (final SafeCloseable ignored2 = QueryPerformanceRecorder.getInstance().getNugget( - "resolveTicket:" + ticketName)) { - export = ticketRouter.resolve(session, request, "request"); - } + try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { + final SessionState.ExportObject export = ticketRouter.resolve(session, request, "request"); session.nonExport() .queryPerformanceRecorder(queryPerformanceRecorder) @@ -720,11 +711,7 @@ private SessionState.ExportObject
resolveOneShotReference( "One-shot operations must use ticket references"); } - final String ticketName = ticketRouter.getLogNameFor(ref.getTicket(), "TableServiceGrpcImpl"); - try (final SafeCloseable ignored = - QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" + ticketName)) { - return ticketRouter.resolve(session, ref.getTicket(), "sourceId"); - } + return ticketRouter.resolve(session, ref.getTicket(), "sourceId"); } private SessionState.ExportObject
resolveBatchReference( @@ -733,11 +720,7 @@ private SessionState.ExportObject
resolveBatchReference( @NotNull final TableReference ref) { switch (ref.getRefCase()) { case TICKET: - final String ticketName = ticketRouter.getLogNameFor(ref.getTicket(), "TableServiceGrpcImpl"); - try (final SafeCloseable ignored = - QueryPerformanceRecorder.getInstance().getNugget("resolveTicket:" + ticketName)) { - return ticketRouter.resolve(session, ref.getTicket(), "sourceId"); - } + return ticketRouter.resolve(session, ref.getTicket(), "sourceId"); case BATCH_OFFSET: final int offset = ref.getBatchOffset(); if (offset < 0 || offset >= exportBuilders.size()) { From 58d366a73f8a7fe650034871ac85faaa3039ea5b Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Wed, 15 Nov 2023 14:08:16 -0700 Subject: [PATCH 25/31] Make non-export descriptions clearer --- .../io/deephaven/server/session/SessionState.java | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index a5ac5870972..28af084204d 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -993,18 +993,25 @@ private void doExport() { try (final SafeCloseable ignored1 = session.executionContext.open(); final SafeCloseable ignored2 = LivenessScopeStack.open()) { + final String queryId; + if (isNonExport()) { + queryId = "nonExport=" + logIdentity; + } else { + queryId = "exportId=" + logIdentity; + } + final boolean isResume = queryPerformanceRecorder != null && !qprIsForBatch; if (isResume) { exportRecorder = queryPerformanceRecorder; } else if (queryPerformanceRecorder != null) { // this is a sub-query; no need to re-log the session id exportRecorder = QueryPerformanceRecorder.newSubQuery( - "ExportObject#doWork(exportId=" + logIdentity + ")", + "ExportObject#doWork(" + queryId + ")", queryPerformanceRecorder, 
QueryPerformanceNugget.DEFAULT_FACTORY); } else { exportRecorder = QueryPerformanceRecorder.newQuery( - "ExportObject#doWork(session=" + session.sessionId + ",exportId=" + logIdentity + ")", + "ExportObject#doWork(session=" + session.sessionId + "," + queryId + ")", QueryPerformanceNugget.DEFAULT_FACTORY); } queryProcessingResults = new QueryProcessingResults(exportRecorder); From fc006e7ae15113f5c2cfc3c46ee3aa01492b0534 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Wed, 15 Nov 2023 14:12:37 -0700 Subject: [PATCH 26/31] rm unused import --- .../java/io/deephaven/server/session/SessionServiceGrpcImpl.java | 1 - 1 file changed, 1 deletion(-) diff --git a/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java index f57b4c2c3a6..a3b65db87ff 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java @@ -9,7 +9,6 @@ import io.deephaven.auth.AuthenticationException; import io.deephaven.csv.util.MutableObject; import io.deephaven.engine.liveness.LivenessScopeStack; -import io.deephaven.engine.table.PartitionedTable; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.extensions.barrage.util.GrpcUtil; From 9460a59e2a64ccb06de63e04ace22c4fff0b831a Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Thu, 16 Nov 2023 18:38:12 -0700 Subject: [PATCH 27/31] Synchronous Review Changes 11/16 --- .../impl/perf/QueryPerformanceRecorderState.java | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java index 9a79f04c7ee..f382f6cfa1a 
100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java @@ -39,7 +39,8 @@ public abstract class QueryPerformanceRecorderState { private static final String[] PACKAGE_FILTERS; private static final ThreadLocal CACHED_CALLSITE = new ThreadLocal<>(); private static final ThreadLocal POOL_ALLOCATED_BYTES = ThreadLocal.withInitial( - () -> new MutableLong(ThreadProfiler.DEFAULT.memoryProfilingAvailable() ? 0L + () -> new MutableLong(ThreadProfiler.DEFAULT.memoryProfilingAvailable() + ? 0L : io.deephaven.util.QueryConstants.NULL_LONG)); static { @@ -83,7 +84,6 @@ static void resetInstance() { THE_LOCAL.remove(); } - /** * Install {@link QueryPerformanceRecorderState#recordPoolAllocation(java.util.function.Supplier)} as the allocation * recorder for {@link io.deephaven.chunk.util.pools.ChunkPool chunk pools}. @@ -93,7 +93,8 @@ public static void installPoolAllocationRecorder() { } /** - * Install this {@link QueryPerformanceRecorder} as the lock action recorder for {@link UpdateGraphLock}. + * Use nuggets from the current {@link QueryPerformanceRecorder} as the lock action recorder for + * {@link UpdateGraphLock}. */ public static void installUpdateGraphLockInstrumentation() { UpdateGraphLock.installInstrumentation(new UpdateGraphLock.Instrumentation() { @@ -178,7 +179,7 @@ static boolean setCallsite(String callsite) { * See {@link QueryPerformanceRecorder#setCallsite()}. */ static boolean setCallsite() { - // This is very similar to the other getCallsite, but we don't want to invoke getCallerLine() unless we + // This is very similar to the other setCallsite overload, but we don't want to invoke getCallerLine() unless we // really need to. if (CACHED_CALLSITE.get() == null) { CACHED_CALLSITE.set(getCallerLine()); @@ -191,7 +192,7 @@ static boolean setCallsite() { /** * Clear any previously set callsite. 
See {@link #setCallsite(String)} */ - public static void clearCallsite() { + static void clearCallsite() { CACHED_CALLSITE.remove(); } From 6f742b6368c8ec647df6fb9a0dc12f21d3aa59eb Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Thu, 16 Nov 2023 22:40:36 -0700 Subject: [PATCH 28/31] Add SessionId to QPL and QOPL --- .../impl/perf/QueryPerformanceNugget.java | 22 +++++-- .../impl/perf/QueryPerformanceRecorder.java | 5 +- .../perf/QueryPerformanceRecorderImpl.java | 3 +- ...ryOperationPerformanceStreamPublisher.java | 40 ++++++------ .../util/QueryPerformanceStreamPublisher.java | 36 ++++++----- .../server/arrow/ArrowFlightUtil.java | 25 ++++---- .../server/arrow/FlightServiceGrpcImpl.java | 10 ++- .../console/ConsoleServiceGrpcImpl.java | 13 ++-- .../HierarchicalTableServiceGrpcImpl.java | 62 +++++++++++-------- .../server/object/ObjectServiceGrpcImpl.java | 5 +- .../PartitionedTableServiceGrpcImpl.java | 17 ++--- .../session/SessionServiceGrpcImpl.java | 10 +-- .../server/session/SessionState.java | 4 +- .../InputTableServiceGrpcImpl.java | 14 +++-- .../table/ops/TableServiceGrpcImpl.java | 19 +++--- 15 files changed, 163 insertions(+), 122 deletions(-) diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java index e7151ce9eee..de3a5537703 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java @@ -12,6 +12,7 @@ import io.deephaven.util.QueryConstants; import io.deephaven.util.SafeCloseable; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import java.util.function.Predicate; @@ -54,6 +55,7 @@ public interface Factory { * * @param evaluationNumber A unique identifier for the query evaluation that triggered this nugget creation * @param 
description The operation description + * @param sessionId The gRPC client session-id if applicable * @param onCloseCallback A callback that is invoked when the nugget is closed. It returns whether the nugget * should be logged. * @return A new nugget @@ -61,9 +63,10 @@ public interface Factory { default QueryPerformanceNugget createForQuery( final long evaluationNumber, @NotNull final String description, + @Nullable final String sessionId, @NotNull final Predicate onCloseCallback) { - return new QueryPerformanceNugget(evaluationNumber, NULL_LONG, NULL_INT, NULL_INT, NULL_INT, - description, false, NULL_LONG, onCloseCallback); + return new QueryPerformanceNugget(evaluationNumber, NULL_LONG, NULL_INT, NULL_INT, NULL_INT, description, + sessionId, false, NULL_LONG, onCloseCallback); } /** @@ -82,8 +85,8 @@ default QueryPerformanceNugget createForSubQuery( @NotNull final String description, @NotNull final Predicate onCloseCallback) { Assert.eqTrue(parentQuery.isQueryLevel(), "parentQuery.isQueryLevel()"); - return new QueryPerformanceNugget(evaluationNumber, parentQuery.getEvaluationNumber(), - NULL_INT, NULL_INT, NULL_INT, description, false, NULL_LONG, onCloseCallback); + return new QueryPerformanceNugget(evaluationNumber, parentQuery.getEvaluationNumber(), NULL_INT, NULL_INT, + NULL_INT, description, parentQuery.getSessionId(), false, NULL_LONG, onCloseCallback); } /** @@ -116,6 +119,7 @@ default QueryPerformanceNugget createForOperation( parentQueryOrOperation.getOperationNumber(), depth, description, + parentQueryOrOperation.getSessionId(), true, // operations are always user inputSize, onCloseCallback); @@ -142,6 +146,7 @@ default QueryPerformanceNugget createForCatchAll( NULL_INT, // catch all has no parent operation 0, // catch all is a root operation QueryPerformanceRecorder.UNINSTRUMENTED_CODE_DESCRIPTION, + parentQuery.getSessionId(), false, // catch all is not user NULL_LONG, onCloseCallback); // catch all has no input size @@ -156,6 +161,7 @@ default 
QueryPerformanceNugget createForCatchAll( private final int parentOperationNumber; private final int depth; private final String description; + private final String sessionId; private final boolean isUser; private final long inputSize; private final Predicate onCloseCallback; @@ -196,6 +202,7 @@ protected QueryPerformanceNugget( final int parentOperationNumber, final int depth, @NotNull final String description, + @Nullable final String sessionId, final boolean isUser, final long inputSize, @NotNull final Predicate onCloseCallback) { @@ -212,6 +219,7 @@ protected QueryPerformanceNugget( } else { this.description = description; } + this.sessionId = sessionId; this.isUser = isUser; this.inputSize = inputSize; this.onCloseCallback = onCloseCallback; @@ -238,6 +246,7 @@ private QueryPerformanceNugget() { parentOperationNumber = NULL_INT; depth = 0; description = null; + sessionId = null; isUser = false; inputSize = NULL_LONG; onCloseCallback = null; @@ -369,6 +378,11 @@ public String getName() { return description; } + @Nullable + public String getSessionId() { + return sessionId; + } + public boolean isUser() { return isUser; } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java index de8a00f6022..f57ed278f64 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java @@ -139,8 +139,9 @@ static void clearCallsite() { */ static QueryPerformanceRecorder newQuery( @NotNull final String description, + @Nullable final String sessionId, @NotNull final QueryPerformanceNugget.Factory nuggetFactory) { - return new QueryPerformanceRecorderImpl(description, null, nuggetFactory); + return new QueryPerformanceRecorderImpl(description, sessionId, null, nuggetFactory); } /** @@ -154,7 +155,7 @@ 
static QueryPerformanceRecorder newSubQuery( @NotNull final String description, @Nullable final QueryPerformanceRecorder parent, @NotNull final QueryPerformanceNugget.Factory nuggetFactory) { - return new QueryPerformanceRecorderImpl(description, parent, nuggetFactory); + return new QueryPerformanceRecorderImpl(description, null, parent, nuggetFactory); } /** diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java index fb203b83188..253410a82d6 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java @@ -37,11 +37,12 @@ public class QueryPerformanceRecorderImpl implements QueryPerformanceRecorder { */ QueryPerformanceRecorderImpl( @NotNull final String description, + @Nullable final String sessionId, @Nullable final QueryPerformanceRecorder parent, @NotNull final QueryPerformanceNugget.Factory nuggetFactory) { if (parent == null) { queryNugget = nuggetFactory.createForQuery( - QueryPerformanceRecorderState.QUERIES_PROCESSED.getAndIncrement(), description, + QueryPerformanceRecorderState.QUERIES_PROCESSED.getAndIncrement(), description, sessionId, this::releaseNugget); } else { queryNugget = nuggetFactory.createForSubQuery( diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java index 5f0316bbfd3..5ac60db7cc5 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java @@ -26,6 +26,7 @@ class 
QueryOperationPerformanceStreamPublisher implements StreamPublisher { ColumnDefinition.ofInt("ParentOperationNumber"), ColumnDefinition.ofInt("Depth"), ColumnDefinition.ofString("Description"), + ColumnDefinition.ofString("SessionId"), ColumnDefinition.ofString("CallerLine"), ColumnDefinition.ofBoolean("IsCompilation"), ColumnDefinition.ofTime("StartTime"), @@ -85,59 +86,62 @@ public synchronized void add(final QueryPerformanceNugget nugget) { // ColumnDefinition.ofString("Description"), chunks[5].asWritableObjectChunk().add(nugget.getName()); + // ColumnDefinition.ofString("SessionId"), + chunks[6].asWritableObjectChunk().add(nugget.getSessionId()); + // ColumnDefinition.ofString("CallerLine"), - chunks[6].asWritableObjectChunk().add(nugget.getCallerLine()); + chunks[7].asWritableObjectChunk().add(nugget.getCallerLine()); // ColumnDefinition.ofBoolean("IsCompilation"), - chunks[7].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.getName().startsWith("Compile:"))); + chunks[8].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.getName().startsWith("Compile:"))); // ColumnDefinition.ofTime("StartTime"), - chunks[8].asWritableLongChunk().add(nugget.getStartClockEpochNanos()); + chunks[9].asWritableLongChunk().add(nugget.getStartClockEpochNanos()); // ColumnDefinition.ofTime("EndTime"), - chunks[9].asWritableLongChunk().add(nugget.getEndClockEpochNanos()); + chunks[10].asWritableLongChunk().add(nugget.getEndClockEpochNanos()); // ColumnDefinition.ofLong("DurationNanos"), - chunks[10].asWritableLongChunk().add(nugget.getUsageNanos()); + chunks[11].asWritableLongChunk().add(nugget.getUsageNanos()); // ColumnDefinition.ofLong("CpuNanos"), - chunks[11].asWritableLongChunk().add(nugget.getCpuNanos()); + chunks[12].asWritableLongChunk().add(nugget.getCpuNanos()); // ColumnDefinition.ofLong("UserCpuNanos"), - chunks[12].asWritableLongChunk().add(nugget.getUserCpuNanos()); + chunks[13].asWritableLongChunk().add(nugget.getUserCpuNanos()); // 
ColumnDefinition.ofLong("FreeMemory"), - chunks[13].asWritableLongChunk().add(nugget.getEndFreeMemory()); + chunks[14].asWritableLongChunk().add(nugget.getEndFreeMemory()); // ColumnDefinition.ofLong("TotalMemory"), - chunks[14].asWritableLongChunk().add(nugget.getEndTotalMemory()); + chunks[15].asWritableLongChunk().add(nugget.getEndTotalMemory()); // ColumnDefinition.ofLong("FreeMemoryChange"), - chunks[15].asWritableLongChunk().add(nugget.getDiffFreeMemory()); + chunks[16].asWritableLongChunk().add(nugget.getDiffFreeMemory()); // ColumnDefinition.ofLong("TotalMemoryChange"), - chunks[16].asWritableLongChunk().add(nugget.getDiffTotalMemory()); + chunks[17].asWritableLongChunk().add(nugget.getDiffTotalMemory()); // ColumnDefinition.ofLong("Collections") - chunks[17].asWritableLongChunk().add(nugget.getDiffCollections()); + chunks[18].asWritableLongChunk().add(nugget.getDiffCollections()); // ColumnDefinition.ofLong("CollectionTimeNanos"), - chunks[18].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); + chunks[19].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); // ColumnDefinition.ofLong("AllocatedBytes"), - chunks[19].asWritableLongChunk().add(nugget.getAllocatedBytes()); + chunks[20].asWritableLongChunk().add(nugget.getAllocatedBytes()); // ColumnDefinition.ofLong("PoolAllocatedBytes"), - chunks[20].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); + chunks[21].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); // ColumnDefinition.ofLong("InputSizeLong"), - chunks[21].asWritableLongChunk().add(nugget.getInputSize()); + chunks[22].asWritableLongChunk().add(nugget.getInputSize()); // ColumnDefinition.ofBoolean("WasInterrupted") - chunks[22].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); + chunks[23].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); // ColumnDefinition.ofString("AuthContext") - 
chunks[23].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); + chunks[24].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); if (chunks[0].size() == CHUNK_SIZE) { flushInternal(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java index 9ef45b5b06f..ef7b97fc7ff 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java @@ -24,6 +24,7 @@ class QueryPerformanceStreamPublisher implements StreamPublisher { ColumnDefinition.ofLong("EvaluationNumber"), ColumnDefinition.ofLong("ParentEvaluationNumber"), ColumnDefinition.ofString("Description"), + ColumnDefinition.ofString("SessionId"), ColumnDefinition.ofTime("StartTime"), ColumnDefinition.ofTime("EndTime"), ColumnDefinition.ofLong("DurationNanos"), @@ -74,53 +75,56 @@ public synchronized void add( // ColumnDefinition.ofString("Description") chunks[2].asWritableObjectChunk().add(nugget.getName()); + // ColumnDefinition.ofString("SessionId") + chunks[3].asWritableObjectChunk().add(nugget.getSessionId()); + // ColumnDefinition.ofTime("StartTime"); - chunks[3].asWritableLongChunk().add(nugget.getStartClockEpochNanos()); + chunks[4].asWritableLongChunk().add(nugget.getStartClockEpochNanos()); // ColumnDefinition.ofTime("EndTime") - chunks[4].asWritableLongChunk().add(nugget.getEndClockEpochNanos()); + chunks[5].asWritableLongChunk().add(nugget.getEndClockEpochNanos()); // ColumnDefinition.ofLong("DurationNanos") - chunks[5].asWritableLongChunk().add(nugget.getUsageNanos()); + chunks[6].asWritableLongChunk().add(nugget.getUsageNanos()); // ColumnDefinition.ofLong("CpuNanos") - chunks[6].asWritableLongChunk().add(nugget.getCpuNanos()); + 
chunks[7].asWritableLongChunk().add(nugget.getCpuNanos()); // ColumnDefinition.ofLong("UserCpuNanos") - chunks[7].asWritableLongChunk().add(nugget.getUserCpuNanos()); + chunks[8].asWritableLongChunk().add(nugget.getUserCpuNanos()); // ColumnDefinition.ofLong("FreeMemory") - chunks[8].asWritableLongChunk().add(nugget.getEndFreeMemory()); + chunks[9].asWritableLongChunk().add(nugget.getEndFreeMemory()); // ColumnDefinition.ofLong("TotalMemory") - chunks[9].asWritableLongChunk().add(nugget.getEndTotalMemory()); + chunks[10].asWritableLongChunk().add(nugget.getEndTotalMemory()); // ColumnDefinition.ofLong("FreeMemoryChange") - chunks[10].asWritableLongChunk().add(nugget.getDiffFreeMemory()); + chunks[11].asWritableLongChunk().add(nugget.getDiffFreeMemory()); // ColumnDefinition.ofLong("TotalMemoryChange") - chunks[11].asWritableLongChunk().add(nugget.getDiffTotalMemory()); + chunks[12].asWritableLongChunk().add(nugget.getDiffTotalMemory()); // ColumnDefinition.ofLong("Collections") - chunks[12].asWritableLongChunk().add(nugget.getDiffCollections()); + chunks[13].asWritableLongChunk().add(nugget.getDiffCollections()); // ColumnDefinition.ofLong("CollectionTimeNanos") - chunks[13].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); + chunks[14].asWritableLongChunk().add(nugget.getDiffCollectionTimeNanos()); // ColumnDefinition.ofLong("AllocatedBytes") - chunks[14].asWritableLongChunk().add(nugget.getAllocatedBytes()); + chunks[15].asWritableLongChunk().add(nugget.getAllocatedBytes()); // ColumnDefinition.ofLong("PoolAllocatedBytes") - chunks[15].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); + chunks[16].asWritableLongChunk().add(nugget.getPoolAllocatedBytes()); // ColumnDefinition.ofBoolean("WasInterrupted") - chunks[16].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); + chunks[17].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); // ColumnDefinition.ofString("Exception") - 
chunks[17].asWritableObjectChunk().add(queryProcessingResults.getException()); + chunks[18].asWritableObjectChunk().add(queryProcessingResults.getException()); // ColumnDefinition.ofString("AuthContext") - chunks[18].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); + chunks[19].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); if (chunks[0].size() == CHUNK_SIZE) { flushInternal(); diff --git a/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java b/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java index 01d498bdb74..1fae3c97eb3 100644 --- a/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java +++ b/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java @@ -73,9 +73,10 @@ public static void DoGetCustom( final Flight.Ticket request, final StreamObserver observer) { - final String description = "FlightService#DoGet(session=" + session.getSessionId() + ")"; + final String description = + "FlightService#DoGet(request=" + ticketRouter.getLogNameFor(request, "request") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject> export = @@ -487,10 +488,10 @@ public void handleMessage(@NotNull final BarrageProtoUtil.MessageInfo message) { final BarrageSnapshotRequest snapshotRequest = BarrageSnapshotRequest .getRootAsBarrageSnapshotRequest(message.app_metadata.msgPayloadAsByteBuffer()); - final String description = - "FlightService#DoExchange(snapshot, session=" + session.getSessionId() + ")"; + final String description = "FlightService#DoExchange(snapshot, request=" + + ticketRouter.getLogNameFor(snapshotRequest.ticketAsByteBuffer(), "request") + ")"; final QueryPerformanceRecorder 
queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject> parent = @@ -642,21 +643,21 @@ public void handleMessage(@NotNull final MessageInfo message) { preExportSubscriptions = new ArrayDeque<>(); preExportSubscriptions.add(subscriptionRequest); - final String description = - "FlightService#DoExchange(subscription, session=" + session.getSessionId() + ")"; + final String description = "FlightService#DoExchange(subscription, request=" + + ticketRouter.getLogNameFor(subscriptionRequest.ticketAsByteBuffer(), "request") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { - final SessionState.ExportObject parent = - ticketRouter.resolve(session, subscriptionRequest.ticketAsByteBuffer(), "parent"); + final SessionState.ExportObject request = + ticketRouter.resolve(session, subscriptionRequest.ticketAsByteBuffer(), "request"); synchronized (this) { onExportResolvedContinuation = session.nonExport() .queryPerformanceRecorder(queryPerformanceRecorder) - .require(parent) + .require(request) .onErrorHandler(DoExchangeMarshaller.this::onError) - .submit(() -> onExportResolved(parent)); + .submit(() -> onExportResolved(request)); } } } diff --git a/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java index ed1c6ccbbc0..d7108286015 100644 --- a/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java +++ 
b/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java @@ -177,10 +177,9 @@ public void getFlightInfo( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getOptionalSession(); - final String description = - "FlightService#getFlightInfo(session=" + (session == null ? "Anonymous" : session.getSessionId()) + ")"; + final String description = "FlightService#getFlightInfo(request=" + request + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session == null ? null : session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject export = @@ -231,10 +230,9 @@ public void getSchema( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getOptionalSession(); - final String description = - "FlightService#getSchema(session=" + (session == null ? "Anonymous" : session.getSessionId()) + ")"; + final String description = "FlightService#getSchema(request=" + request + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session == null ? 
null : session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject export = diff --git a/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java index b4a0d7b2804..5677fa59ae0 100644 --- a/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java @@ -167,9 +167,10 @@ public void executeCommand( throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No consoleId supplied"); } - final String description = "ConsoleServiceGrpcImpl#executeCommand(session=" + session.getSessionId() + ")"; + final String description = "ConsoleServiceGrpcImpl#executeCommand(console=" + + ticketRouter.getLogNameFor(consoleId, "consoleId") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject exportedConsole = @@ -247,14 +248,16 @@ public void bindTableToVariable( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - Ticket tableId = request.getTableId(); + final Ticket tableId = request.getTableId(); if (tableId.getTicket().isEmpty()) { throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No source tableId supplied"); } - final String description = "ConsoleServiceGrpcImpl#bindTableToVariable(session=" + session.getSessionId() + ")"; + final String description = "ConsoleServiceGrpcImpl#bindTableToVariable(tableId=" + + ticketRouter.getLogNameFor(tableId, "tableId") + ", variableName=" + 
request.getVariableName() + + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject
exportedTable = diff --git a/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java index 22b71097e6d..9ea8a3a18d6 100644 --- a/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java @@ -34,6 +34,8 @@ import io.deephaven.server.table.ops.FilterTableGrpcImpl; import io.deephaven.server.table.ops.filter.FilterFactory; import io.deephaven.util.SafeCloseable; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; import io.grpc.stub.StreamObserver; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; @@ -76,9 +78,10 @@ public void rollup( final SessionState session = sessionService.getCurrentSession(); - final String description = "HierarchicalTableServiceGrpcImpl#rollup(session=" + session.getSessionId() + ")"; + final String description = "HierarchicalTableServiceGrpcImpl#rollup(source=" + + ticketRouter.getLogNameFor(request.getSourceTableId(), "sourceId") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject
sourceTableExport = @@ -127,9 +130,10 @@ public void tree( final SessionState session = sessionService.getCurrentSession(); - final String description = "HierarchicalTableServiceGrpcImpl#tree(session=" + session.getSessionId() + ")"; + final String description = "HierarchicalTableServiceGrpcImpl#tree(source=" + + ticketRouter.getLogNameFor(request.getSourceTableId(), "source") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject
sourceTableExport = @@ -183,9 +187,10 @@ public void apply( final SessionState session = sessionService.getCurrentSession(); - final String description = "HierarchicalTableServiceGrpcImpl#apply(session=" + session.getSessionId() + ")"; + final String description = "HierarchicalTableServiceGrpcImpl#apply(source=" + + ticketRouter.getLogNameFor(request.getInputHierarchicalTableId(), "source") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject> inputHierarchicalTableExport = @@ -340,30 +345,33 @@ public void view( final SessionState session = sessionService.getCurrentSession(); - final String description = "HierarchicalTableServiceGrpcImpl#view(session=" + session.getSessionId() + ")"; + final boolean usedExisting; + final Ticket targetTicket; + switch (request.getTargetCase()) { + case HIERARCHICAL_TABLE_ID: + usedExisting = false; + targetTicket = request.getHierarchicalTableId(); + break; + case EXISTING_VIEW_ID: + usedExisting = true; + targetTicket = request.getExistingViewId(); + break; + case TARGET_NOT_SET: + default: + throw Status.INVALID_ARGUMENT + .augmentDescription("No target specified") + .asRuntimeException(); + } + + final String description = "HierarchicalTableServiceGrpcImpl#view(target=" + + ticketRouter.getLogNameFor(targetTicket, "target") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { - final SessionState.ExportBuilder resultExportBuilder = session.newExport(request.getResultViewId(), 
"view.resultViewId"); - final boolean usedExisting; - final Ticket targetTicket; - switch (request.getTargetCase()) { - case HIERARCHICAL_TABLE_ID: - usedExisting = false; - targetTicket = request.getHierarchicalTableId(); - break; - case EXISTING_VIEW_ID: - usedExisting = true; - targetTicket = request.getExistingViewId(); - break; - case TARGET_NOT_SET: - default: - throw new IllegalStateException(); - } final SessionState.ExportObject targetExport = ticketRouter.resolve(session, targetTicket, "view.target"); @@ -455,10 +463,10 @@ public void exportSource( final SessionState session = sessionService.getCurrentSession(); - final String description = - "HierarchicalTableServiceGrpcImpl#exportSource(session=" + session.getSessionId() + ")"; + final String description = "HierarchicalTableServiceGrpcImpl#exportSource(source=" + + ticketRouter.getLogNameFor(request.getHierarchicalTableId(), "source") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject> hierarchicalTableExport = diff --git a/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java index 5b17def4b76..89793af3d3b 100644 --- a/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java @@ -261,9 +261,10 @@ public void fetchObject( throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "No ticket supplied"); } - final String description = "ObjectServiceGrpcImpl#fetchObject(session=" + session.getSessionId() + ")"; + final String description = "ObjectServiceGrpcImpl#fetchObject(source=" + + 
ticketRouter.getLogNameFor(request.getSourceId().getTicket(), "source") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject object = diff --git a/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java index eb466ee7a57..57eb9ee9f43 100644 --- a/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java @@ -58,10 +58,10 @@ public void partitionBy( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - final String description = - "PartitionedTableServiceGrpcImpl#partitionBy(session=" + session.getSessionId() + ")"; + final String description = "PartitionedTableServiceGrpcImpl#partitionBy(source=" + + ticketRouter.getLogNameFor(request.getTableId(), "source") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject
targetTable = @@ -88,9 +88,10 @@ public void merge( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - final String description = "PartitionedTableServiceGrpcImpl#merge(session=" + session.getSessionId() + ")"; + final String description = "PartitionedTableServiceGrpcImpl#merge(source=" + + ticketRouter.getLogNameFor(request.getPartitionedTable(), "source") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject partitionedTable = @@ -125,9 +126,11 @@ public void getTable( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - final String description = "PartitionedTableServiceGrpcImpl#getTable(session=" + session.getSessionId() + ")"; + final String description = "PartitionedTableServiceGrpcImpl#getTable(source=" + + ticketRouter.getLogNameFor(request.getPartitionedTable(), "source") + ", keyTable=" + + ticketRouter.getLogNameFor(request.getKeyTableTicket(), "keyTable") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject partitionedTable = diff --git a/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java index a3b65db87ff..7170b64e26b 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java +++ 
b/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java @@ -167,9 +167,10 @@ public void exportFromTicket( return; } - final String description = "SessionServiceGrpcImpl#exportFromTicket(session=" + session.getSessionId() + ")"; + final String description = "SessionServiceGrpcImpl#exportFromTicket(source=" + + ticketRouter.getLogNameFor(request.getSourceId(), "source") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject source = @@ -204,9 +205,10 @@ public void publishFromTicket( return; } - final String description = "SessionServiceGrpcImpl#publishFromTicket(session=" + session.getSessionId() + ")"; + final String description = "SessionServiceGrpcImpl#publishFromTicket(source=" + + ticketRouter.getLogNameFor(request.getSourceId(), "source") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject source = diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index 28af084204d..f54939cef74 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -1011,8 +1011,8 @@ private void doExport() { QueryPerformanceNugget.DEFAULT_FACTORY); } else { exportRecorder = QueryPerformanceRecorder.newQuery( - "ExportObject#doWork(session=" + session.sessionId + "," + queryId + ")", - 
QueryPerformanceNugget.DEFAULT_FACTORY); + "ExportObject#doWork(" + queryId + ")", + session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); } queryProcessingResults = new QueryProcessingResults(exportRecorder); diff --git a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java index c1f41fe38d5..2f08572102c 100644 --- a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java @@ -55,10 +55,11 @@ public void addTableToInputTable( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - final String description = - "InputTableServiceGrpcImpl#addTableToInputTable(session=" + session.getSessionId() + ")"; + final String description = "InputTableServiceGrpcImpl#addTableToInputTable(inputTable=" + + ticketRouter.getLogNameFor(request.getInputTable(), "inputTable") + ", tableToAdd=" + + ticketRouter.getLogNameFor(request.getTableToAdd(), "tableToAdd") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject
targetTable = @@ -112,10 +113,11 @@ public void deleteTableFromInputTable( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - final String description = - "InputTableServiceGrpcImpl#deleteTableFromInputTable(session=" + session.getSessionId() + ")"; + final String description = "InputTableServiceGrpcImpl#deleteTableFromInputTable(inputTable=" + + ticketRouter.getLogNameFor(request.getInputTable(), "inputTable") + ", tableToRemove=" + + ticketRouter.getLogNameFor(request.getTableToRemove(), "tableToRemove") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject
targetTable = diff --git a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java index 966c3b364ca..e6be286e5ee 100644 --- a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java @@ -469,10 +469,10 @@ public void seekRow( if (sourceId.getTicket().isEmpty()) { throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No consoleId supplied"); } - final String description = - "TableServiceGrpcImpl#seekRow(session=" + session.getSessionId() + ")"; + final String description = "TableServiceGrpcImpl#seekRow(source=" + + ticketRouter.getLogNameFor(sourceId, "source") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject
exportedTable = @@ -522,8 +522,7 @@ public void batch( final SessionState session = sessionService.getCurrentSession(); final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - "TableService#batch(session=" + session.getSessionId() + ")", - QueryPerformanceNugget.DEFAULT_FACTORY); + "TableService#batch()", session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { // step 1: initialize exports @@ -626,10 +625,10 @@ public void getExportedTableCreationResponse( throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No request ticket supplied"); } - final String description = - "TableServiceGrpcImpl#getExportedTableCreationResponse(session=" + session.getSessionId() + ")"; + final String description = "TableServiceGrpcImpl#getExportedTableCreationResponse(request=" + + ticketRouter.getLogNameFor(request, "request") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject export = ticketRouter.resolve(session, request, "request"); @@ -675,11 +674,11 @@ private void oneShotOperationWrapper( throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No result ticket supplied"); } - final String description = "TableService#" + op.name() + "(session=" + session.getSessionId() + ", resultId=" + final String description = "TableService#" + op.name() + "(resultId=" + ticketRouter.getLogNameFor(resultId, "TableServiceGrpcImpl") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( - description, QueryPerformanceNugget.DEFAULT_FACTORY); + description, session.getSessionId(), 
QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { operation.validateRequest(request); From e08a97b8d23063b5ce9b6ea98371f4ffa38ea173 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Thu, 16 Nov 2023 23:43:06 -0700 Subject: [PATCH 29/31] Create QPR Sub-Query During Batch Delegation --- .../impl/perf/QueryPerformanceRecorder.java | 7 +++ .../perf/QueryPerformanceRecorderImpl.java | 11 +++- .../perf/QueryPerformanceRecorderState.java | 5 ++ .../HierarchicalTableServiceGrpcImpl.java | 6 +-- .../server/session/SessionState.java | 52 ++++--------------- .../table/ops/TableServiceGrpcImpl.java | 21 ++++++-- 6 files changed, 50 insertions(+), 52 deletions(-) diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java index f57ed278f64..f8648622214 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java @@ -158,6 +158,13 @@ static QueryPerformanceRecorder newSubQuery( return new QueryPerformanceRecorderImpl(description, null, parent, nuggetFactory); } + /** + * Return the query's current state + * + * @return the query's state + */ + QueryState getState(); + /** * Starts a query. *

diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java index 253410a82d6..6fd050a5af6 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java @@ -19,6 +19,8 @@ */ public class QueryPerformanceRecorderImpl implements QueryPerformanceRecorder { + @Nullable + private final QueryPerformanceRecorder parent; private final QueryPerformanceNugget queryNugget; private final QueryPerformanceNugget.Factory nuggetFactory; private final ArrayList operationNuggets = new ArrayList<>(); @@ -50,6 +52,7 @@ public class QueryPerformanceRecorderImpl implements QueryPerformanceRecorder { QueryPerformanceRecorderState.QUERIES_PROCESSED.getAndIncrement(), description, this::releaseNugget); } + this.parent = parent; this.nuggetFactory = nuggetFactory; } @@ -76,6 +79,7 @@ public synchronized void abortQuery() { * * @return the query's state or null if it isn't initialized yet */ + @Override public synchronized QueryState getState() { return state; } @@ -98,7 +102,12 @@ public synchronized boolean endQuery() { } state = QueryState.FINISHED; suspendInternal(); - return queryNugget.done(); + + boolean shouldLog = queryNugget.done(); + if (parent != null) { + parent.accumulate(this); + } + return shouldLog; } /** diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java index f382f6cfa1a..300be84e35a 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java @@ -236,6 +236,11 @@ public 
boolean hasSubQueries() { return false; } + @Override + public QueryState getState() { + throw new UnsupportedOperationException("Dummy recorder does not support getState()"); + } + @Override public SafeCloseable startQuery() { throw new UnsupportedOperationException("Dummy recorder does not support startQuery()"); diff --git a/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java index 9ea8a3a18d6..f25fd35adf5 100644 --- a/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java @@ -34,8 +34,6 @@ import io.deephaven.server.table.ops.FilterTableGrpcImpl; import io.deephaven.server.table.ops.filter.FilterFactory; import io.deephaven.util.SafeCloseable; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; import io.grpc.stub.StreamObserver; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; @@ -358,9 +356,7 @@ public void view( break; case TARGET_NOT_SET: default: - throw Status.INVALID_ARGUMENT - .augmentDescription("No target specified") - .asRuntimeException(); + throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "No target specified"); } final String description = "HierarchicalTableServiceGrpcImpl#view(target=" diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index f54939cef74..3e80b7c4346 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -16,6 +16,7 @@ import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import 
io.deephaven.engine.table.impl.perf.QueryProcessingResults; +import io.deephaven.engine.table.impl.perf.QueryState; import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.engine.updategraph.DynamicNode; import io.deephaven.hash.KeyedIntObjectHash; @@ -535,8 +536,6 @@ public final static class ExportObject extends LivenessArtifact { private final SessionService.ErrorTransformer errorTransformer; private final SessionState session; - /** if true the queryPerformanceRecorder belongs to a batch; otherwise if it exists it belong to the export */ - private boolean qprIsForBatch; /** used to keep track of performance details either for aggregation or for the async ticket resolution */ private QueryPerformanceRecorder queryPerformanceRecorder; @@ -630,13 +629,11 @@ private boolean isNonExport() { } private synchronized void setQueryPerformanceRecorder( - final QueryPerformanceRecorder queryPerformanceRecorder, - final boolean qprIsForBatch) { + final QueryPerformanceRecorder queryPerformanceRecorder) { if (this.queryPerformanceRecorder != null) { throw new IllegalStateException( "performance query recorder can only be set once on an exportable object"); } - this.qprIsForBatch = qprIsForBatch; this.queryPerformanceRecorder = queryPerformanceRecorder; } @@ -683,7 +680,7 @@ private synchronized void setWork( } hasHadWorkSet = true; - if (queryPerformanceRecorder != null && !qprIsForBatch) { + if (queryPerformanceRecorder != null && queryPerformanceRecorder.getState() == QueryState.RUNNING) { // transfer ownership of the qpr to the export before it can be resumed by the scheduler queryPerformanceRecorder.suspendQuery(); } @@ -1000,20 +997,11 @@ private void doExport() { queryId = "exportId=" + logIdentity; } - final boolean isResume = queryPerformanceRecorder != null && !qprIsForBatch; - if (isResume) { - exportRecorder = queryPerformanceRecorder; - } else if (queryPerformanceRecorder != null) { - // this is a sub-query; no need to re-log the session id 
- exportRecorder = QueryPerformanceRecorder.newSubQuery( - "ExportObject#doWork(" + queryId + ")", - queryPerformanceRecorder, - QueryPerformanceNugget.DEFAULT_FACTORY); - } else { - exportRecorder = QueryPerformanceRecorder.newQuery( - "ExportObject#doWork(" + queryId + ")", - session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); - } + final boolean isResume = queryPerformanceRecorder != null + && queryPerformanceRecorder.getState() == QueryState.SUSPENDED; + exportRecorder = Objects.requireNonNullElseGet(queryPerformanceRecorder, + () -> QueryPerformanceRecorder.newQuery("ExportObject#doWork(" + queryId + ")", + session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY)); queryProcessingResults = new QueryProcessingResults(exportRecorder); try (final SafeCloseable ignored3 = isResume @@ -1045,16 +1033,11 @@ private void doExport() { } } } - if (queryPerformanceRecorder != null && qprIsForBatch) { - Assert.neqNull(exportRecorder, "exportRecorder"); - queryPerformanceRecorder.accumulate(exportRecorder); - } if (shouldLog || caughtException != null) { EngineMetrics.getInstance().logQueryProcessingResults(queryProcessingResults); } if (caughtException == null) { - // must set result after ending the query and accumulating into the parent so that onSuccess - // may resume and/or finalize a parent query + // must set result after ending the query so that onSuccess may resume / finalize a parent query setResult(localResult); } } @@ -1348,22 +1331,7 @@ public class ExportBuilder { */ public ExportBuilder queryPerformanceRecorder( @NotNull final QueryPerformanceRecorder queryPerformanceRecorder) { - export.setQueryPerformanceRecorder(queryPerformanceRecorder, false); - return this; - } - - /** - * Set the performance recorder to aggregate performance data across exports. - *

- * Instrumentation logging is the responsibility of the caller and should not be performed until all sub-queries - * have completed. - * - * @param parentQueryPerformanceRecorder the performance recorder to aggregate into - * @return this builder - */ - public ExportBuilder parentQueryPerformanceRecorder( - @NotNull final QueryPerformanceRecorder parentQueryPerformanceRecorder) { - export.setQueryPerformanceRecorder(parentQueryPerformanceRecorder, true); + export.setQueryPerformanceRecorder(queryPerformanceRecorder); return this; } diff --git a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java index e6be286e5ee..6d9756ca5db 100644 --- a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java @@ -73,6 +73,7 @@ import io.grpc.StatusRuntimeException; import io.grpc.stub.ServerCallStreamObserver; import io.grpc.stub.StreamObserver; +import org.apache.commons.lang3.mutable.MutableInt; import org.jetbrains.annotations.NotNull; import javax.inject.Inject; @@ -526,8 +527,10 @@ public void batch( try (final SafeCloseable ignored1 = queryPerformanceRecorder.startQuery()) { // step 1: initialize exports + final MutableInt offset = new MutableInt(0); final List> exportBuilders = request.getOpsList().stream() - .map(op -> createBatchExportBuilder(session, queryPerformanceRecorder, op)) + .map(op -> createBatchExportBuilder( + offset.getAndIncrement(), session, queryPerformanceRecorder, op)) .collect(Collectors.toList()); // step 2: resolve dependencies @@ -732,17 +735,27 @@ private SessionState.ExportObject

resolveBatchReference( } private BatchExportBuilder createBatchExportBuilder( + final int offset, @NotNull final SessionState session, - @NotNull final QueryPerformanceRecorder queryPerformanceRecorder, + @NotNull final QueryPerformanceRecorder batchQueryPerformanceRecorder, final BatchTableRequest.Operation op) { final GrpcTableOperation operation = getOp(op.getOpCase()); final T request = operation.getRequestFromOperation(op); operation.validateRequest(request); final Ticket resultId = operation.getResultTicket(request); + boolean hasResultId = !resultId.getTicket().isEmpty(); final ExportBuilder
exportBuilder = - resultId.getTicket().isEmpty() ? session.nonExport() : session.newExport(resultId, "resultId"); - exportBuilder.parentQueryPerformanceRecorder(queryPerformanceRecorder); + hasResultId ? session.newExport(resultId, "resultId") : session.nonExport(); + final String resultDescription = hasResultId + ? "resultId=" + ticketRouter.getLogNameFor(resultId, "resultId") + ", " + : ""; + + final String description = "TableService#" + op.getOpCase().name() + "(" + resultDescription + "batchOffset=" + + offset + ")"; + exportBuilder.queryPerformanceRecorder(QueryPerformanceRecorder.newSubQuery( + description, batchQueryPerformanceRecorder, QueryPerformanceNugget.DEFAULT_FACTORY)); + return new BatchExportBuilder<>(operation, request, exportBuilder); } From 6bf84eb83b1a2c8531497a02cbc678e17381d6b7 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Fri, 17 Nov 2023 13:54:55 -0700 Subject: [PATCH 30/31] almost final round? --- .../engine/table/impl/QueryTable.java | 7 +- .../impl/perf/QueryPerformanceNugget.java | 160 +++++++----------- .../impl/perf/QueryPerformanceRecorder.java | 108 ++++++------ .../perf/QueryPerformanceRecorderImpl.java | 65 ++++--- .../perf/QueryPerformanceRecorderState.java | 4 +- .../impl/perf/QueryProcessingResults.java | 28 --- .../impl/perf/UpdatePerformanceTracker.java | 2 +- .../table/impl/select/ConditionFilter.java | 6 +- .../table/impl/select/DhFormulaColumn.java | 5 +- .../select/codegen/JavaKernelBuilder.java | 5 +- .../engine/table/impl/updateby/UpdateBy.java | 1 - .../engine/table/impl/util/EngineMetrics.java | 13 +- ...ryOperationPerformanceStreamPublisher.java | 4 +- .../table/impl/util/QueryPerformanceImpl.java | 10 +- .../util/QueryPerformanceStreamPublisher.java | 10 +- .../QueryPerformanceLogLogger.java | 22 +-- .../deephaven/engine/util/TableShowTools.java | 7 +- .../select/TestConstantFormulaEvaluation.java | 24 +-- .../main/java/io/deephaven/csv/CsvTools.java | 9 +- .../impl/util/PerformanceQueriesGeneral.java 
| 30 +++- .../server/arrow/ArrowFlightUtil.java | 35 ++-- .../server/arrow/FlightServiceGrpcImpl.java | 30 +--- .../console/ConsoleServiceGrpcImpl.java | 6 +- .../HierarchicalTableServiceGrpcImpl.java | 44 +++-- .../server/object/ObjectServiceGrpcImpl.java | 5 +- .../PartitionedTableServiceGrpcImpl.java | 18 +- .../session/SessionServiceGrpcImpl.java | 8 +- .../server/session/SessionState.java | 6 +- .../InputTableServiceGrpcImpl.java | 12 +- .../table/ops/TableServiceGrpcImpl.java | 17 +- 30 files changed, 300 insertions(+), 401 deletions(-) delete mode 100644 engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryProcessingResults.java diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java index 187442fff12..05d2c4c9c75 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/QueryTable.java @@ -3573,12 +3573,9 @@ public static void checkInitiateBinaryOperation(@NotNull final Table first, @Not } private R applyInternal(@NotNull final Function function) { - final QueryPerformanceNugget nugget = - QueryPerformanceRecorder.getInstance().getNugget("apply(" + function + ")"); - try { + try (final SafeCloseable ignored = + QueryPerformanceRecorder.getInstance().getNugget("apply(" + function + ")")) { return function.apply(this); - } finally { - nugget.done(); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java index de3a5537703..950e279af2a 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceNugget.java @@ -4,9 +4,11 @@ package io.deephaven.engine.table.impl.perf; import 
io.deephaven.auth.AuthContext; +import io.deephaven.base.clock.SystemClock; import io.deephaven.base.log.LogOutput; import io.deephaven.base.verify.Assert; import io.deephaven.engine.context.ExecutionContext; +import io.deephaven.io.log.impl.LogOutputStringImpl; import io.deephaven.time.DateTimeUtils; import io.deephaven.engine.table.impl.util.RuntimeMemory; import io.deephaven.util.QueryConstants; @@ -14,7 +16,7 @@ import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import java.util.function.Predicate; +import java.util.function.Consumer; import static io.deephaven.util.QueryConstants.*; @@ -24,9 +26,6 @@ * of encapsulation into account. */ public class QueryPerformanceNugget extends BasePerformanceEntry implements SafeCloseable { - private static final QueryPerformanceLogThreshold LOG_THRESHOLD = new QueryPerformanceLogThreshold("", 1_000_000); - private static final QueryPerformanceLogThreshold UNINSTRUMENTED_LOG_THRESHOLD = - new QueryPerformanceLogThreshold("Uninstrumented", 1_000_000_000); private static final int MAX_DESCRIPTION_LENGTH = 16 << 10; /** @@ -39,12 +38,7 @@ public void accumulate(@NotNull BasePerformanceEntry entry) { } @Override - public boolean shouldLogThisAndStackParents() { - return false; - } - - @Override - boolean shouldLogNugget(boolean isUninstrumented) { + public boolean shouldLog() { return false; } }; @@ -64,7 +58,7 @@ default QueryPerformanceNugget createForQuery( final long evaluationNumber, @NotNull final String description, @Nullable final String sessionId, - @NotNull final Predicate onCloseCallback) { + @NotNull final Consumer onCloseCallback) { return new QueryPerformanceNugget(evaluationNumber, NULL_LONG, NULL_INT, NULL_INT, NULL_INT, description, sessionId, false, NULL_LONG, onCloseCallback); } @@ -83,7 +77,7 @@ default QueryPerformanceNugget createForSubQuery( @NotNull final QueryPerformanceNugget parentQuery, final long evaluationNumber, @NotNull final String description, - @NotNull final 
Predicate onCloseCallback) { + @NotNull final Consumer onCloseCallback) { Assert.eqTrue(parentQuery.isQueryLevel(), "parentQuery.isQueryLevel()"); return new QueryPerformanceNugget(evaluationNumber, parentQuery.getEvaluationNumber(), NULL_INT, NULL_INT, NULL_INT, description, parentQuery.getSessionId(), false, NULL_LONG, onCloseCallback); @@ -104,7 +98,7 @@ default QueryPerformanceNugget createForOperation( final int operationNumber, final String description, final long inputSize, - @NotNull final Predicate onCloseCallback) { + @NotNull final Consumer onCloseCallback) { int depth = parentQueryOrOperation.getDepth(); if (depth == NULL_INT) { depth = 0; @@ -137,7 +131,7 @@ default QueryPerformanceNugget createForOperation( default QueryPerformanceNugget createForCatchAll( @NotNull final QueryPerformanceNugget parentQuery, final int operationNumber, - @NotNull final Predicate onCloseCallback) { + @NotNull final Consumer onCloseCallback) { Assert.eqTrue(parentQuery.isQueryLevel(), "parentQuery.isQueryLevel()"); return new QueryPerformanceNugget( parentQuery.getEvaluationNumber(), @@ -164,20 +158,20 @@ default QueryPerformanceNugget createForCatchAll( private final String sessionId; private final boolean isUser; private final long inputSize; - private final Predicate onCloseCallback; + private final Consumer onCloseCallback; private final AuthContext authContext; private final String callerLine; - private long startClockEpochNanos = NULL_LONG; - private long endClockEpochNanos = NULL_LONG; + private long startClockEpochNanos; + private long endClockEpochNanos; private volatile QueryState state; + private RuntimeMemory.Sample startMemorySample; + private RuntimeMemory.Sample endMemorySample; - private final RuntimeMemory.Sample startMemorySample; - private final RuntimeMemory.Sample endMemorySample; - - private boolean shouldLogThisAndStackParents; + /** whether this nugget triggers the logging of itself and every other nugget in its stack of nesting operations */ + 
private boolean shouldLog; /** * Full constructor for nuggets. @@ -205,9 +199,7 @@ protected QueryPerformanceNugget( @Nullable final String sessionId, final boolean isUser, final long inputSize, - @NotNull final Predicate onCloseCallback) { - startMemorySample = new RuntimeMemory.Sample(); - endMemorySample = new RuntimeMemory.Sample(); + @NotNull final Consumer onCloseCallback) { this.evaluationNumber = evaluationNumber; this.parentEvaluationNumber = parentEvaluationNumber; this.operationNumber = operationNumber; @@ -227,19 +219,16 @@ protected QueryPerformanceNugget( authContext = ExecutionContext.getContext().getAuthContext(); callerLine = QueryPerformanceRecorder.getCallerLine(); - final RuntimeMemory runtimeMemory = RuntimeMemory.getInstance(); - runtimeMemory.read(startMemorySample); + startClockEpochNanos = NULL_LONG; + endClockEpochNanos = NULL_LONG; state = QueryState.NOT_STARTED; - shouldLogThisAndStackParents = false; } /** * Construct a "dummy" nugget, which will never gather any information or be recorded. */ private QueryPerformanceNugget() { - startMemorySample = null; - endMemorySample = null; evaluationNumber = NULL_LONG; parentEvaluationNumber = NULL_LONG; operationNumber = NULL_INT; @@ -250,28 +239,36 @@ private QueryPerformanceNugget() { isUser = false; inputSize = NULL_LONG; onCloseCallback = null; - authContext = null; callerLine = null; - state = null; // This turns close into a no-op. - shouldLogThisAndStackParents = false; - } + startClockEpochNanos = NULL_LONG; + endClockEpochNanos = NULL_LONG; - public void markStartTime() { - if (startClockEpochNanos != NULL_LONG) { - throw new IllegalStateException("Nugget start time already set"); - } - - startClockEpochNanos = DateTimeUtils.millisToNanos(System.currentTimeMillis()); + state = QueryState.NOT_STARTED; } + /** + * Start clock epoch nanos is set if this is the first time this nugget has been started. 
+ */ @Override public synchronized void onBaseEntryStart() { - super.onBaseEntryStart(); + // note that we explicitly do not call super.onBaseEntryStart() on query level nuggets as all top level nuggets + // accumulate into it to account for parallelized execution + if (operationNumber != NULL_INT) { + super.onBaseEntryStart(); + } if (state == QueryState.RUNNING) { throw new IllegalStateException("Nugget was already started"); } + if (startClockEpochNanos == NULL_LONG) { + startClockEpochNanos = SystemClock.systemUTC().currentTimeNanos(); + } + startMemorySample = new RuntimeMemory.Sample(); + endMemorySample = new RuntimeMemory.Sample(); + final RuntimeMemory runtimeMemory = RuntimeMemory.getInstance(); + runtimeMemory.read(startMemorySample); + state = QueryState.RUNNING; } @@ -281,20 +278,15 @@ public synchronized void onBaseEntryEnd() { throw new IllegalStateException("Nugget isn't running"); } state = QueryState.SUSPENDED; - super.onBaseEntryEnd(); + // note that we explicitly do not call super.onBaseEntryEnd() on query level nuggets as all top level nuggets + // accumulate into it to account for parallelized execution + if (operationNumber != NULL_INT) { + super.onBaseEntryEnd(); + } } /** * Mark this nugget {@link QueryState#FINISHED} and notify the recorder. - * - * @return if the nugget passes logging thresholds. - */ - public boolean done() { - return close(QueryState.FINISHED); - } - - /** - * Mark this nugget {@link QueryState#FINISHED} and notify the recorder. Is an alias for {@link #done()}. *

* {@link SafeCloseable} implementation for try-with-resources. */ @@ -303,8 +295,8 @@ public void close() { close(QueryState.FINISHED); } - public boolean abort() { - return close(QueryState.INTERRUPTED); + public void abort() { + close(QueryState.INTERRUPTED); } /** @@ -312,46 +304,40 @@ public boolean abort() { * * @param closingState The current query state. If it is anything other than {@link QueryState#RUNNING} nothing will * happen and it will return false; - * - * @return If the nugget passes criteria for logging. */ - private boolean close(final QueryState closingState) { + private void close(final QueryState closingState) { if (state != QueryState.RUNNING) { - return false; + return; } synchronized (this) { if (state != QueryState.RUNNING) { - return false; - } - - if (startClockEpochNanos == NULL_LONG) { - throw new IllegalStateException("Nugget was never started"); + return; } onBaseEntryEnd(); - endClockEpochNanos = DateTimeUtils.millisToNanos(System.currentTimeMillis()); + endClockEpochNanos = SystemClock.systemUTC().currentTimeNanos(); final RuntimeMemory runtimeMemory = RuntimeMemory.getInstance(); runtimeMemory.read(endMemorySample); state = closingState; - return onCloseCallback.test(this); + onCloseCallback.accept(this); } } @Override public String toString() { - return evaluationNumber - + ":" + (isQueryLevel() ? "query_level" : operationNumber) - + ":" + description - + ":" + callerLine; + return new LogOutputStringImpl().append(this).toString(); } @Override public LogOutput append(@NotNull final LogOutput logOutput) { - // override BasePerformanceEntry's impl and match toString() - return logOutput.append(toString()); + // override BasePerformanceEntry's impl + return logOutput.append(evaluationNumber) + .append(":").append(isQueryLevel() ? 
"query_level" : Integer.toString(operationNumber)) + .append(":").append(description) + .append(":").append(callerLine); } public long getEvaluationNumber() { @@ -374,7 +360,7 @@ public int getDepth() { return depth; } - public String getName() { + public String getDescription() { return description; } @@ -484,41 +470,15 @@ public boolean wasInterrupted() { /** * Ensure this nugget gets logged, alongside its stack of nesting operations. */ - void setShouldLogThisAndStackParents() { - shouldLogThisAndStackParents = true; + void setShouldLog() { + shouldLog = true; } /** * @return true if this nugget triggers the logging of itself and every other nugget in its stack of nesting * operations. */ - boolean shouldLogThisAndStackParents() { - return shouldLogThisAndStackParents; - } - - /** - * Suppress de minimus performance nuggets using the properties defined above. - * - * @param isUninstrumented this nugget for uninstrumented code? If so the thresholds for inclusion in the logs are - * configured distinctly. - * - * @return if this nugget is significant enough to be logged. 
- */ - boolean shouldLogNugget(final boolean isUninstrumented) { - if (shouldLogThisAndStackParents) { - return true; - } - - // Nuggets will have a null value for end time if they weren't closed for a RUNNING query; this is an abnormal - // condition and the nugget should be logged - if (endClockEpochNanos == NULL_LONG) { - return true; - } - - if (isUninstrumented) { - return UNINSTRUMENTED_LOG_THRESHOLD.shouldLog(getUsageNanos()); - } else { - return LOG_THRESHOLD.shouldLog(getUsageNanos()); - } + boolean shouldLog() { + return shouldLog; } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java index f8648622214..5282d6b9c73 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java @@ -34,8 +34,8 @@ static QueryPerformanceRecorder getInstance() { * recorder is installed. * * @param name the nugget name - * @return A new QueryPerformanceNugget to encapsulate user query operations. {@link QueryPerformanceNugget#done()} - * or {@link QueryPerformanceNugget#close()} must be called on the nugget. + * @return A new QueryPerformanceNugget to encapsulate user query operations. {@link QueryPerformanceNugget#close()} + * must be called on the nugget. */ @FinalDefault default QueryPerformanceNugget getNugget(@NotNull String name) { @@ -48,8 +48,8 @@ default QueryPerformanceNugget getNugget(@NotNull String name) { * * @param name the nugget name * @param inputSize the nugget's input size - * @return A new QueryPerformanceNugget to encapsulate user query operations. {@link QueryPerformanceNugget#done()} - * or {@link QueryPerformanceNugget#close()} must be called on the nugget. + * @return A new QueryPerformanceNugget to encapsulate user query operations. 
{@link QueryPerformanceNugget#close()} + * must be called on the nugget. */ QueryPerformanceNugget getNugget(@NotNull String name, long inputSize); @@ -61,16 +61,16 @@ default QueryPerformanceNugget getNugget(@NotNull String name) { QueryPerformanceNugget getEnclosingNugget(); - interface EntrySetter { - void set(long evaluationNumber, int operationNumber, boolean uninstrumented); + interface QueryDataConsumer { + void accept(long evaluationNumber, int operationNumber, boolean uninstrumented); } /** - * Provide current query data via the setter. + * Provide current query data via the consumer. * - * @param setter a callback to receive query data + * @param consumer a callback to receive query data */ - void setQueryData(final EntrySetter setter); + void supplyQueryData(@NotNull QueryDataConsumer consumer); /** * @return The current callsite. This is the last set callsite or the line number of the user's detected callsite. @@ -168,35 +168,54 @@ static QueryPerformanceRecorder newSubQuery( /** * Starts a query. *

- * It is an error to start a query more than once or while another query is running on this thread. + * A query is {@link QueryState#RUNNING} if it has been started or {@link #resumeQuery() resumed} without a + * subsequent {@link #endQuery() end}, {@link #suspendQuery() suspend}, or {@link #abortQuery() abort}. + * + * @throws IllegalStateException if the query state isn't {@link QueryState#NOT_STARTED} or another query is running + * on this thread */ SafeCloseable startQuery(); /** * End a query. *

- * It is an error to end a query not currently running on this thread. + * A query is {@link QueryState#RUNNING} if it has been {@link #startQuery() started} or {@link #resumeQuery() + * resumed} without a subsequent end, {@link #suspendQuery() suspend}, or {@link #abortQuery() abort}. * * @return whether the query should be logged + * @throws IllegalStateException if the query state isn't {@link QueryState#RUNNING}, + * {@link QueryState#INTERRUPTED}, or was not running on this thread */ boolean endQuery(); /** * Suspends a query. *

- * It is an error to suspend a query not currently running on this thread. + * A query is {@link QueryState#RUNNING} if it has been {@link #startQuery() started} or {@link #resumeQuery() + * resumed} without a subsequent {@link #endQuery() end}, suspend, or {@link #abortQuery() abort}. + * + * @throws IllegalStateException if the query wasn't running or was running on a different thread */ void suspendQuery(); /** * Resumes a suspend query. *

- * It is an error to resume a query while another query is running on this thread. + * A query is {@link QueryState#RUNNING} if it has been {@link #startQuery() started} or resumed without a + * subsequent {@link #endQuery() end}, {@link #suspendQuery() suspend}, or {@link #abortQuery() abort}. + * + * @throws IllegalStateException if the query state was not {@link QueryState#SUSPENDED} or another query is running + * on this thread */ SafeCloseable resumeQuery(); /** * Abort a query. + *

+ * A query is {@link QueryState#RUNNING} if it has been {@link #startQuery() started} or {@link #resumeQuery() + * resumed} without a subsequent {@link #endQuery() end}, {@link #suspendQuery() suspend}, or abort. + *

+ * Note that this method is invoked out-of-band and does not throw if the query has been completed. */ @SuppressWarnings("unused") void abortQuery(); @@ -214,7 +233,8 @@ static QueryPerformanceRecorder newSubQuery( List getOperationLevelPerformanceData(); /** - * Accumulate the values from another recorder into this one. The provided recorder will not be mutated. + * Accumulate performance information from another recorder into this one. The provided recorder will not be + * mutated. * * @param subQuery the recorder to accumulate into this */ @@ -238,13 +258,10 @@ static QueryPerformanceRecorder newSubQuery( */ static void withNugget(final String name, final Runnable r) { final boolean needClear = setCallsite(); - QueryPerformanceNugget nugget = null; - - try { - nugget = getInstance().getNugget(name); + try (final QueryPerformanceNugget ignored = getInstance().getNugget(name)) { r.run(); } finally { - finishAndClear(nugget, needClear); + maybeClearCallsite(needClear); } } @@ -257,13 +274,10 @@ static void withNugget(final String name, final Runnable r) { */ static T withNugget(final String name, final Supplier r) { final boolean needClear = setCallsite(); - QueryPerformanceNugget nugget = null; - - try { - nugget = getInstance().getNugget(name); + try (final QueryPerformanceNugget ignored = getInstance().getNugget(name)) { return r.get(); } finally { - finishAndClear(nugget, needClear); + maybeClearCallsite(needClear); } } @@ -277,12 +291,10 @@ static void withNuggetThrowing( final String name, final ThrowingRunnable r) throws T { final boolean needClear = setCallsite(); - QueryPerformanceNugget nugget = null; - try { - nugget = getInstance().getNugget(name); + try (final QueryPerformanceNugget ignored = getInstance().getNugget(name)) { r.run(); } finally { - finishAndClear(nugget, needClear); + maybeClearCallsite(needClear); } } @@ -298,12 +310,10 @@ static R withNuggetThrowing( final String name, final ThrowingSupplier r) throws ExceptionType { final boolean 
needClear = setCallsite(); - QueryPerformanceNugget nugget = null; - try { - nugget = getInstance().getNugget(name); + try (final QueryPerformanceNugget ignored = getInstance().getNugget(name)) { return r.get(); } finally { - finishAndClear(nugget, needClear); + maybeClearCallsite(needClear); } } @@ -315,12 +325,10 @@ static R withNuggetThrowing( */ static void withNugget(final String name, final long inputSize, final Runnable r) { final boolean needClear = setCallsite(); - QueryPerformanceNugget nugget = null; - try { - nugget = getInstance().getNugget(name, inputSize); + try (final QueryPerformanceNugget ignored = getInstance().getNugget(name, inputSize)) { r.run(); } finally { - finishAndClear(nugget, needClear); + maybeClearCallsite(needClear); } } @@ -333,12 +341,10 @@ static void withNugget(final String name, final long inputSize, final Runnable r */ static T withNugget(final String name, final long inputSize, final Supplier r) { final boolean needClear = setCallsite(); - QueryPerformanceNugget nugget = null; - try { - nugget = getInstance().getNugget(name, inputSize); + try (final QueryPerformanceNugget ignored = getInstance().getNugget(name, inputSize)) { return r.get(); } finally { - finishAndClear(nugget, needClear); + maybeClearCallsite(needClear); } } @@ -354,12 +360,10 @@ static void withNuggetThrowing( final long inputSize, final ThrowingRunnable r) throws T { final boolean needClear = setCallsite(); - QueryPerformanceNugget nugget = null; - try { - nugget = getInstance().getNugget(name, inputSize); + try (final QueryPerformanceNugget ignored = getInstance().getNugget(name, inputSize)) { r.run(); } finally { - finishAndClear(nugget, needClear); + maybeClearCallsite(needClear); } } @@ -377,27 +381,19 @@ static R withNuggetThrowing( final long inputSize, final ThrowingSupplier r) throws ExceptionType { final boolean needClear = setCallsite(); - QueryPerformanceNugget nugget = null; - try { - nugget = getInstance().getNugget(name, inputSize); + try 
(final QueryPerformanceNugget ignored = getInstance().getNugget(name, inputSize)) { return r.get(); } finally { - finishAndClear(nugget, needClear); + maybeClearCallsite(needClear); } } - /** - * Finish the nugget and clear the callsite if needed. + * Clear the callsite if needed. * - * @param nugget an optional nugget * @param needClear true if the callsite needs to be cleared */ - private static void finishAndClear(@Nullable final QueryPerformanceNugget nugget, final boolean needClear) { - if (nugget != null) { - nugget.done(); - } - + private static void maybeClearCallsite(final boolean needClear) { if (needClear) { clearCallsite(); } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java index 6fd050a5af6..dc228c7d228 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java @@ -11,6 +11,8 @@ import java.util.*; +import static io.deephaven.util.QueryConstants.NULL_LONG; + /** * Query performance instrumentation implementation. Manages a hierarchy of {@link QueryPerformanceNugget} instances. *

@@ -18,6 +20,9 @@ * suspended and resumed on another thread. */ public class QueryPerformanceRecorderImpl implements QueryPerformanceRecorder { + private static final QueryPerformanceLogThreshold LOG_THRESHOLD = new QueryPerformanceLogThreshold("", 1_000_000); + private static final QueryPerformanceLogThreshold UNINSTRUMENTED_LOG_THRESHOLD = + new QueryPerformanceLogThreshold("Uninstrumented", 1_000_000_000); @Nullable private final QueryPerformanceRecorder parent; @@ -56,10 +61,8 @@ public class QueryPerformanceRecorderImpl implements QueryPerformanceRecorder { this.nuggetFactory = nuggetFactory; } - /** - * Abort a query. - */ public synchronized void abortQuery() { + // TODO (https://github.com/deephaven/deephaven-core/issues/53): support out-of-order abortion if (state != QueryState.RUNNING) { return; } @@ -89,25 +92,26 @@ public synchronized SafeCloseable startQuery() { if (state != QueryState.NOT_STARTED) { throw new IllegalStateException("Can't resume a query that has already started"); } - queryNugget.markStartTime(); return resumeInternal(); } @Override public synchronized boolean endQuery() { if (state != QueryState.RUNNING) { - // We only allow the query to be RUNNING or INTERRUPTED when we end it; else we are in an illegal state. - Assert.eq(state, "state", QueryState.INTERRUPTED, "QueryState.INTERRUPTED"); + if (state != QueryState.INTERRUPTED) { + // We only allow the query to be RUNNING or INTERRUPTED when we end it; else we are in an illegal state. 
+ throw new IllegalStateException("Can't end a query that isn't running or interrupted"); + } return false; } state = QueryState.FINISHED; suspendInternal(); - boolean shouldLog = queryNugget.done(); + queryNugget.close(); if (parent != null) { parent.accumulate(this); } - return shouldLog; + return shouldLogNugget(queryNugget); } /** @@ -169,18 +173,16 @@ private SafeCloseable resumeInternal() { private void startCatchAll() { catchAllNugget = nuggetFactory.createForCatchAll(queryNugget, operationNuggets.size(), this::releaseNugget); - catchAllNugget.markStartTime(); catchAllNugget.onBaseEntryStart(); } private void stopCatchAll(final boolean abort) { - final boolean shouldLog; if (abort) { - shouldLog = catchAllNugget.abort(); + catchAllNugget.abort(); } else { - shouldLog = catchAllNugget.done(); + catchAllNugget.close(); } - if (shouldLog) { + if (catchAllNugget.shouldLog()) { Assert.eq(operationNuggets.size(), "operationsNuggets.size()", catchAllNugget.getOperationNumber(), "catchAllNugget.getOperationNumber()"); operationNuggets.add(catchAllNugget); @@ -212,7 +214,6 @@ public synchronized QueryPerformanceNugget getNugget(@NotNull final String name, final QueryPerformanceNugget nugget = nuggetFactory.createForOperation( parent, operationNuggets.size(), name, inputSize, this::releaseNugget); - nugget.markStartTime(); nugget.onBaseEntryStart(); operationNuggets.add(nugget); userNuggetStack.addLast(nugget); @@ -223,12 +224,11 @@ public synchronized QueryPerformanceNugget getNugget(@NotNull final String name, * This is our onCloseCallback from the nugget. * * @param nugget the nugget to be released - * @return If the nugget passes criteria for logging. 
*/ - private synchronized boolean releaseNugget(@NotNull final QueryPerformanceNugget nugget) { - boolean shouldLog = nugget.shouldLogNugget(nugget == catchAllNugget); + private synchronized void releaseNugget(@NotNull final QueryPerformanceNugget nugget) { + final boolean shouldLog = shouldLogNugget(nugget); if (!nugget.isUser()) { - return shouldLog; + return; } final QueryPerformanceNugget removed = userNuggetStack.removeLast(); @@ -245,12 +245,14 @@ private synchronized boolean releaseNugget(@NotNull final QueryPerformanceNugget final QueryPerformanceNugget parent = userNuggetStack.getLast(); parent.accumulate(nugget); - if (removed.shouldLogThisAndStackParents()) { - parent.setShouldLogThisAndStackParents(); + if (shouldLog) { + parent.setShouldLog(); } // resume the parent parent.onBaseEntryStart(); + } else { + queryNugget.accumulate(nugget); } if (!shouldLog) { @@ -269,8 +271,21 @@ private synchronized boolean releaseNugget(@NotNull final QueryPerformanceNugget if (userNuggetStack.isEmpty() && queryNugget != null && state == QueryState.RUNNING) { startCatchAll(); } + } - return shouldLog; + private boolean shouldLogNugget(@NotNull QueryPerformanceNugget nugget) { + if (nugget.shouldLog()) { + return true; + } else if (nugget.getEndClockEpochNanos() == NULL_LONG) { + // Nuggets will have a null value for end time if they weren't closed for a RUNNING query; this is an + // abnormal + // condition and the nugget should be logged + return true; + } else if (nugget == catchAllNugget) { + return UNINSTRUMENTED_LOG_THRESHOLD.shouldLog(nugget.getUsageNanos()); + } else { + return LOG_THRESHOLD.shouldLog(nugget.getUsageNanos()); + } } @Override @@ -283,7 +298,7 @@ public synchronized QueryPerformanceNugget getEnclosingNugget() { } @Override - public void setQueryData(final EntrySetter setter) { + public void supplyQueryData(final @NotNull QueryDataConsumer consumer) { final long evaluationNumber; final int operationNumber; boolean uninstrumented = false; @@ -295,15 
+310,15 @@ public void setQueryData(final EntrySetter setter) { if (operationNumber > 0) { // ensure UPL and QOPL are consistent/joinable. if (!userNuggetStack.isEmpty()) { - userNuggetStack.getLast().setShouldLogThisAndStackParents(); + userNuggetStack.getLast().setShouldLog(); } else { uninstrumented = true; Assert.neqNull(catchAllNugget, "catchAllNugget"); - catchAllNugget.setShouldLogThisAndStackParents(); + catchAllNugget.setShouldLog(); } } } - setter.set(evaluationNumber, operationNumber, uninstrumented); + consumer.accept(evaluationNumber, operationNumber, uninstrumented); } @Override diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java index 300be84e35a..958cbf4ac0f 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderState.java @@ -212,8 +212,8 @@ public QueryPerformanceNugget getEnclosingNugget() { } @Override - public void setQueryData(EntrySetter setter) { - setter.set(QueryConstants.NULL_LONG, QueryConstants.NULL_INT, false); + public void supplyQueryData(final @NotNull QueryDataConsumer consumer) { + consumer.accept(QueryConstants.NULL_LONG, QueryConstants.NULL_INT, false); } @Override diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryProcessingResults.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryProcessingResults.java deleted file mode 100644 index 41b04ff7a8f..00000000000 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryProcessingResults.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending - */ -package io.deephaven.engine.table.impl.perf; - -public class QueryProcessingResults { - - private final 
QueryPerformanceRecorder recorder; - - private volatile String exception = null; - - - public QueryProcessingResults(final QueryPerformanceRecorder recorder) { - this.recorder = recorder; - } - - public String getException() { - return exception; - } - - public void setException(String exception) { - this.exception = exception; - } - - public QueryPerformanceRecorder getRecorder() { - return recorder; - } -} diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java index 01be9b8b686..332104884ef 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/UpdatePerformanceTracker.java @@ -192,7 +192,7 @@ public final PerformanceEntry getEntry(final String description) { final QueryPerformanceRecorder qpr = QueryPerformanceRecorder.getInstance(); final MutableObject entryMu = new MutableObject<>(); - qpr.setQueryData((evaluationNumber, operationNumber, uninstrumented) -> { + qpr.supplyQueryData((evaluationNumber, operationNumber, uninstrumented) -> { final String effectiveDescription; if (StringUtils.isNullOrEmpty(description) && uninstrumented) { effectiveDescription = QueryPerformanceRecorder.UNINSTRUMENTED_CODE_DESCRIPTION; diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/select/ConditionFilter.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/select/ConditionFilter.java index d77db0aeb98..c66db402565 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/select/ConditionFilter.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/select/ConditionFilter.java @@ -18,7 +18,6 @@ import io.deephaven.engine.table.impl.util.codegen.CodeGenerator; import io.deephaven.engine.context.QueryScopeParam; import 
io.deephaven.time.TimeLiteralReplacedExpression; -import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.ColumnSource; import io.deephaven.chunk.*; @@ -382,8 +381,7 @@ protected void generateFilterCode( final StringBuilder classBody = getClassBody(tableDefinition, timeConversionResult, result); if (classBody == null) return; - final QueryPerformanceNugget nugget = QueryPerformanceRecorder.getInstance().getNugget("Compile:" + formula); - try { + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget("Compile:" + formula)) { final List> paramClasses = new ArrayList<>(); final Consumer> addParamClass = (cls) -> { if (cls != null) { @@ -409,8 +407,6 @@ protected void generateFilterCode( filterKernelClass = ExecutionContext.getContext().getQueryCompiler() .compile("GeneratedFilterKernel", this.classBody = classBody.toString(), QueryCompiler.FORMULA_PREFIX, QueryScopeParamTypeUtil.expandParameterClasses(paramClasses)); - } finally { - nugget.done(); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/select/DhFormulaColumn.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/select/DhFormulaColumn.java index f9b97f739fe..c1caeede3c2 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/select/DhFormulaColumn.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/select/DhFormulaColumn.java @@ -14,7 +14,6 @@ import io.deephaven.engine.table.impl.MatchPair; import io.deephaven.engine.table.Table; import io.deephaven.engine.table.impl.lang.QueryLanguageParser; -import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.select.codegen.FormulaAnalyzer; import io.deephaven.engine.table.impl.select.codegen.JavaKernelBuilder; @@ -32,6 +31,7 @@ import 
io.deephaven.internal.log.LoggerFactory; import io.deephaven.io.logger.Logger; import io.deephaven.time.TimeLiteralReplacedExpression; +import io.deephaven.util.SafeCloseable; import io.deephaven.util.type.TypeUtils; import io.deephaven.vector.ObjectVector; import io.deephaven.vector.Vector; @@ -772,8 +772,7 @@ private FormulaFactory createFormulaFactory() { @SuppressWarnings("SameParameterValue") private Class compileFormula(final String what, final String classBody, final String className) { // System.out.printf("compileFormula: what is %s. Code is...%n%s%n", what, classBody); - try (final QueryPerformanceNugget ignored = - QueryPerformanceRecorder.getInstance().getNugget("Compile:" + what)) { + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget("Compile:" + what)) { // Compilation needs to take place with elevated privileges, but the created object should not have them. final List> paramClasses = new ArrayList<>(); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/select/codegen/JavaKernelBuilder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/select/codegen/JavaKernelBuilder.java index e34be36fc18..94702af0e11 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/select/codegen/JavaKernelBuilder.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/select/codegen/JavaKernelBuilder.java @@ -5,9 +5,9 @@ import io.deephaven.engine.context.QueryCompiler; import io.deephaven.engine.context.ExecutionContext; +import io.deephaven.util.SafeCloseable; import io.deephaven.vector.Vector; import io.deephaven.engine.context.QueryScopeParam; -import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.select.Formula; import io.deephaven.engine.table.impl.select.DhFormulaColumn; @@ -260,8 +260,7 @@ private List visitFormulaParameters( 
@SuppressWarnings("SameParameterValue") private static Class compileFormula(final String what, final String classBody, final String className) { // System.out.printf("compileFormula: formulaString is %s. Code is...%n%s%n", what, classBody); - try (final QueryPerformanceNugget nugget = - QueryPerformanceRecorder.getInstance().getNugget("Compile:" + what)) { + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget("Compile:" + what)) { // Compilation needs to take place with elevated privileges, but the created object should not have them. final QueryCompiler compiler = ExecutionContext.getContext().getQueryCompiler(); return compiler.compile(className, classBody, QueryCompiler.FORMULA_PREFIX); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java index 6e18e24dd30..415bbd9df83 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/updateby/UpdateBy.java @@ -22,7 +22,6 @@ import io.deephaven.engine.table.impl.*; import io.deephaven.engine.table.impl.perf.BasePerformanceEntry; import io.deephaven.engine.table.impl.perf.PerformanceEntry; -import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.sources.*; import io.deephaven.engine.table.impl.sources.sparse.SparseConstants; diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java index 0bf5c36fe22..e55dbcc3c8e 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/EngineMetrics.java @@ -9,7 +9,7 @@ import 
io.deephaven.engine.table.impl.BlinkTableTools; import io.deephaven.engine.table.impl.QueryTable; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; -import io.deephaven.engine.table.impl.perf.QueryProcessingResults; +import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.tablelogger.EngineTableLoggers; import io.deephaven.engine.tablelogger.QueryOperationPerformanceLogLogger; import io.deephaven.engine.tablelogger.QueryPerformanceLogLogger; @@ -20,6 +20,7 @@ import io.deephaven.stats.Driver; import io.deephaven.stats.StatsIntradayLogger; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import java.io.IOException; import java.util.List; @@ -111,19 +112,21 @@ private StatsIntradayLogger getStatsLogger() { return statsImpl; } - public void logQueryProcessingResults(@NotNull final QueryProcessingResults results) { + public void logQueryProcessingResults( + @NotNull final QueryPerformanceRecorder recorder, + @Nullable final Exception exception) { final QueryPerformanceLogLogger qplLogger = getQplLogger(); final QueryOperationPerformanceLogLogger qoplLogger = getQoplLogger(); try { final QueryPerformanceNugget queryNugget = Require.neqNull( - results.getRecorder().getQueryLevelPerformanceData(), + recorder.getQueryLevelPerformanceData(), "queryProcessingResults.getRecorder().getQueryLevelPerformanceData()"); synchronized (qplLogger) { - qplLogger.log(results, queryNugget); + qplLogger.log(queryNugget, exception); } final List nuggets = - results.getRecorder().getOperationLevelPerformanceData(); + recorder.getOperationLevelPerformanceData(); synchronized (qoplLogger) { for (QueryPerformanceNugget nugget : nuggets) { qoplLogger.log(nugget); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java index 
5ac60db7cc5..c0529be8681 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryOperationPerformanceStreamPublisher.java @@ -84,7 +84,7 @@ public synchronized void add(final QueryPerformanceNugget nugget) { chunks[4].asWritableIntChunk().add(nugget.getDepth()); // ColumnDefinition.ofString("Description"), - chunks[5].asWritableObjectChunk().add(nugget.getName()); + chunks[5].asWritableObjectChunk().add(nugget.getDescription()); // ColumnDefinition.ofString("SessionId"), chunks[6].asWritableObjectChunk().add(nugget.getSessionId()); @@ -93,7 +93,7 @@ public synchronized void add(final QueryPerformanceNugget nugget) { chunks[7].asWritableObjectChunk().add(nugget.getCallerLine()); // ColumnDefinition.ofBoolean("IsCompilation"), - chunks[8].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.getName().startsWith("Compile:"))); + chunks[8].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.getDescription().startsWith("Compile:"))); // ColumnDefinition.ofTime("StartTime"), chunks[9].asWritableLongChunk().add(nugget.getStartClockEpochNanos()); diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java index db3b045080e..355dabe72ae 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceImpl.java @@ -6,11 +6,11 @@ import io.deephaven.engine.context.ExecutionContext; import io.deephaven.engine.table.Table; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; -import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.engine.tablelogger.QueryPerformanceLogLogger; import io.deephaven.stream.StreamToBlinkTableAdapter; 
import io.deephaven.tablelogger.Row.Flags; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import java.io.IOException; import java.util.Objects; @@ -40,9 +40,9 @@ public Table blinkTable() { @Override public void log( @NotNull final Flags flags, - @NotNull final QueryProcessingResults queryProcessingResults, - @NotNull final QueryPerformanceNugget nugget) throws IOException { - publisher.add(queryProcessingResults, nugget); - qplLogger.log(flags, queryProcessingResults, nugget); + @NotNull final QueryPerformanceNugget nugget, + @Nullable final Exception exception) throws IOException { + publisher.add(nugget, exception); + qplLogger.log(flags, nugget, exception); } } diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java index ef7b97fc7ff..6923360f032 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/util/QueryPerformanceStreamPublisher.java @@ -8,13 +8,13 @@ import io.deephaven.engine.table.ColumnDefinition; import io.deephaven.engine.table.TableDefinition; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; -import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.engine.table.impl.sources.ArrayBackedColumnSource; import io.deephaven.stream.StreamChunkUtils; import io.deephaven.stream.StreamConsumer; import io.deephaven.stream.StreamPublisher; import io.deephaven.util.BooleanUtils; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import java.util.Objects; @@ -63,8 +63,8 @@ public void register(@NotNull StreamConsumer consumer) { } public synchronized void add( - final QueryProcessingResults queryProcessingResults, - final QueryPerformanceNugget nugget) { + @NotNull 
final QueryPerformanceNugget nugget, + @Nullable final Exception exception) { // ColumnDefinition.ofLong("EvaluationNumber") chunks[0].asWritableLongChunk().add(nugget.getEvaluationNumber()); @@ -73,7 +73,7 @@ public synchronized void add( chunks[1].asWritableLongChunk().add(nugget.getParentEvaluationNumber()); // ColumnDefinition.ofString("Description") - chunks[2].asWritableObjectChunk().add(nugget.getName()); + chunks[2].asWritableObjectChunk().add(nugget.getDescription()); // ColumnDefinition.ofString("SessionId") chunks[3].asWritableObjectChunk().add(nugget.getSessionId()); @@ -121,7 +121,7 @@ public synchronized void add( chunks[17].asWritableByteChunk().add(BooleanUtils.booleanAsByte(nugget.wasInterrupted())); // ColumnDefinition.ofString("Exception") - chunks[18].asWritableObjectChunk().add(queryProcessingResults.getException()); + chunks[18].asWritableObjectChunk().add(exception == null ? null : exception.getMessage()); // ColumnDefinition.ofString("AuthContext") chunks[19].asWritableObjectChunk().add(Objects.toString(nugget.getAuthContext())); diff --git a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java index 9d6959930dd..97a7aa0c834 100644 --- a/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java +++ b/engine/table/src/main/java/io/deephaven/engine/tablelogger/QueryPerformanceLogLogger.java @@ -1,10 +1,9 @@ package io.deephaven.engine.tablelogger; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; -import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.tablelogger.Row; -import io.deephaven.tablelogger.Row.Flags; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import java.io.IOException; @@ -16,26 +15,23 @@ */ public interface QueryPerformanceLogLogger { default void log( - @NotNull final 
QueryProcessingResults queryProcessingResults, - @NotNull final QueryPerformanceNugget nugget) throws IOException { - log(DEFAULT_INTRADAY_LOGGER_FLAGS, queryProcessingResults, nugget); + @NotNull final QueryPerformanceNugget nugget, + @Nullable final Exception exception) throws IOException { + log(DEFAULT_INTRADAY_LOGGER_FLAGS, nugget, exception); } void log( @NotNull final Row.Flags flags, - @NotNull final QueryProcessingResults queryProcessingResults, - @NotNull final QueryPerformanceNugget nugget) throws IOException; + @NotNull final QueryPerformanceNugget nugget, + @Nullable final Exception exception) throws IOException; enum Noop implements QueryPerformanceLogLogger { INSTANCE; @Override public void log( - @NotNull final Flags flags, - @NotNull final QueryProcessingResults queryProcessingResults, - @NotNull final QueryPerformanceNugget nugget) - throws IOException { - - } + @NotNull final Row.Flags flags, + @NotNull final QueryPerformanceNugget nugget, + @Nullable final Exception exception) {} } } diff --git a/engine/table/src/main/java/io/deephaven/engine/util/TableShowTools.java b/engine/table/src/main/java/io/deephaven/engine/util/TableShowTools.java index ac65b727082..4867ab01de6 100644 --- a/engine/table/src/main/java/io/deephaven/engine/util/TableShowTools.java +++ b/engine/table/src/main/java/io/deephaven/engine/util/TableShowTools.java @@ -5,11 +5,11 @@ import io.deephaven.datastructures.util.CollectionUtil; import io.deephaven.engine.table.Table; -import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.ColumnSource; import io.deephaven.engine.rowset.RowSet; import io.deephaven.time.DateTimeUtils; +import io.deephaven.util.SafeCloseable; import io.deephaven.util.type.ArrayTypeUtils; import java.io.PrintStream; @@ -28,8 +28,7 @@ class TableShowTools { static void showInternal(Table source, long firstRow, long lastRowExclusive, ZoneId 
timeZone, String delimiter, PrintStream out, boolean showRowSet, String[] columns) { - final QueryPerformanceNugget nugget = QueryPerformanceRecorder.getInstance().getNugget("TableTools.show()"); - try { + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget("TableTools.show()")) { if (columns.length == 0) { final List columnNames = source.getDefinition().getColumnNames(); columns = columnNames.toArray(CollectionUtil.ZERO_LENGTH_STRING_ARRAY); @@ -107,8 +106,6 @@ static void showInternal(Table source, long firstRow, long lastRowExclusive, Zon } out.println(); out.flush(); - } finally { - nugget.done(); } } diff --git a/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestConstantFormulaEvaluation.java b/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestConstantFormulaEvaluation.java index c3db84fd0bd..da3be26e696 100644 --- a/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestConstantFormulaEvaluation.java +++ b/engine/table/src/test/java/io/deephaven/engine/table/impl/select/TestConstantFormulaEvaluation.java @@ -7,12 +7,12 @@ import io.deephaven.engine.table.Table; import io.deephaven.engine.table.impl.QueryTable; import io.deephaven.engine.table.impl.lang.JavaExpressionParser; -import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.sources.SingleValueColumnSource; import io.deephaven.engine.testutil.TstUtils; import io.deephaven.engine.testutil.junit4.EngineCleanup; import io.deephaven.engine.util.TableTools; +import io.deephaven.util.SafeCloseable; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; @@ -190,8 +190,7 @@ public void constantLongValueTest() { private void singleColumnConstantValueFormulaTest(final String formula, final Class columnType, final T columnRowValue, final int tableLength, final String description) { - final 
QueryPerformanceNugget nugget = QueryPerformanceRecorder.getInstance().getNugget(description); - try { + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget(description)) { final Table source = TableTools.emptyTable(tableLength).update(formula); String[] columns = source.getDefinition().getColumnNamesArray(); Assert.assertEquals("length of columns = 1", 1, columns.length); @@ -202,8 +201,6 @@ private void singleColumnConstantValueFormulaTest(final String formula, fina Assert.assertEquals(columnType, source.getColumnSource(columns[0]).getType()); Assert.assertEquals(columnRowValue, source.getColumnSource(columns[0]).get(key)); }); - } finally { - nugget.done(); } } @@ -230,8 +227,7 @@ public void threeColumnConstantValueFormulaTest() { private void threeColumnConstantValueFormulaTest(final String[] formulas, final Class calculatedColType, final T expectedConstValue, final ColumnFormula columnFormula, final int tableLength, final String description) { - final QueryPerformanceNugget nugget = QueryPerformanceRecorder.getInstance().getNugget(description); - try { + try (final SafeCloseable nugget = QueryPerformanceRecorder.getInstance().getNugget(description)) { final Table source = TableTools.emptyTable(tableLength).update(formulas); String[] columns = source.getDefinition().getColumnNamesArray(); boolean constantValueColFound = false; @@ -262,16 +258,12 @@ private void threeColumnConstantValueFormulaTest(final String[] formulas, fi (T) source.getColumnSource(columns[1]).get(key)); Assert.assertEquals(expected, source.getColumnSource(columns[2]).get(key)); }); - } finally { - nugget.done(); } } @Test public void queryScopeForAtomicIntPlusConstantFormulaTest() { - final QueryPerformanceNugget nugget = QueryPerformanceRecorder.getInstance() - .getNugget("queryScopeForAtomicInt"); - try { + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget("queryScopeForAtomicInt")) { final AtomicInteger atomicValue = new 
AtomicInteger(1); QueryScope.addParam("atomicValue", atomicValue); String[] formulas = new String[] { @@ -309,8 +301,6 @@ public void queryScopeForAtomicIntPlusConstantFormulaTest() { Assert.assertEquals("Calculate Col verification", expectedCalculatedColValue, source.getColumnSource(columns[2]).get(key)); }); - } finally { - nugget.done(); } } @@ -379,9 +369,7 @@ public void testRefreshingTableForConstantFormulaColumnSource() { @SuppressWarnings("SameParameterValue") private void checkConstantFormula(final Table source, final Set expectedConstValueColumns, final T[] expectedConstValues, final Class calculatedColType) { - final QueryPerformanceNugget nugget = - QueryPerformanceRecorder.getInstance().getNugget("queryScopeForAtomicInt"); - try { + try (final SafeCloseable ignored = QueryPerformanceRecorder.getInstance().getNugget("queryScopeForAtomicInt")) { int count = 0; int[] constantColIndex = new int[expectedConstValues.length]; String[] columns = source.getDefinition().getColumnNamesArray(); @@ -412,8 +400,6 @@ private void checkConstantFormula(final Table source, final Set expe source.getColumnSource(columns[constantColIndex[i]]).get(key)); } }); - } finally { - nugget.done(); } } } diff --git a/extensions/csv/src/main/java/io/deephaven/csv/CsvTools.java b/extensions/csv/src/main/java/io/deephaven/csv/CsvTools.java index 9e9a431923b..305bdc9a8f9 100644 --- a/extensions/csv/src/main/java/io/deephaven/csv/CsvTools.java +++ b/extensions/csv/src/main/java/io/deephaven/csv/CsvTools.java @@ -41,7 +41,6 @@ import io.deephaven.engine.table.impl.DataAccessHelpers; import io.deephaven.engine.table.WritableColumnSource; import io.deephaven.engine.table.impl.InMemoryTable; -import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.engine.table.impl.sources.BooleanArraySource; import io.deephaven.engine.table.impl.sources.ByteArraySource; @@ -59,6 +58,7 @@ import 
io.deephaven.time.DateTimeUtils; import io.deephaven.util.BooleanUtils; import io.deephaven.util.QueryConstants; +import io.deephaven.util.SafeCloseable; import io.deephaven.util.annotations.ScriptApi; import org.jetbrains.annotations.Nullable; @@ -917,9 +917,8 @@ private static void writeCsvContentsSeq( final boolean nullsAsEmpty, final char separator, @Nullable final BiConsumer progress) throws IOException { - final QueryPerformanceNugget nugget = - QueryPerformanceRecorder.getInstance().getNugget("CsvTools.writeCsvContentsSeq()"); - try { + try (final SafeCloseable ignored = + QueryPerformanceRecorder.getInstance().getNugget("CsvTools.writeCsvContentsSeq()")) { String separatorStr = String.valueOf(separator); for (long i = 0; i < size; i++) { for (int j = 0; j < cols.length; j++) { @@ -945,8 +944,6 @@ private static void writeCsvContentsSeq( progress.accept(i, size); } } - } finally { - nugget.done(); } } diff --git a/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java b/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java index e3d659c9d00..813b29cd1da 100644 --- a/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java +++ b/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java @@ -3,6 +3,7 @@ */ package io.deephaven.engine.table.impl.util; +import com.google.common.collect.Sets; import io.deephaven.engine.table.Table; import io.deephaven.engine.table.hierarchical.TreeTable; import io.deephaven.engine.util.TableTools; @@ -14,6 +15,7 @@ import java.util.Arrays; import java.util.HashMap; import java.util.Map; +import java.util.Set; import java.util.stream.Stream; import static io.deephaven.api.agg.Aggregation.AggFirst; @@ -25,7 +27,12 @@ * Generalizes {@link PerformanceQueries} to accept table parameters and make evaluation number parameter optional. 
*/ public class PerformanceQueriesGeneral { - private static boolean formatPctColumns = true; + private static boolean FORMAT_PCT_COLUMNS = true; + private static final Set ALLOWED_MISSING_COLUMN_NAMES = Sets.newHashSet( + "ProcessUniqueId", // does not exist in DHC + "ParentEvaluationNumber", // may not exist in DHE + "ParentOperationNumber" // may not exist in DHE + ); public static Table queryPerformance(Table queryPerformanceLog, final long evaluationNumber) { @@ -52,7 +59,7 @@ public static Table queryPerformance(Table queryPerformanceLog, final long evalu "QueryMemUsed", "QueryMemFree", "QueryMemUsedPct", "EndTime", "TimeSecs", "NetMemoryChange"); - if (formatPctColumns) { + if (FORMAT_PCT_COLUMNS) { queryPerformanceLog = formatColumnsAsPct(queryPerformanceLog, "QueryMemUsedPct"); } return queryPerformanceLog; @@ -121,7 +128,7 @@ public static Table queryUpdatePerformance(Table queryUpdatePerformance, final l "Ratio", "QueryMemUsed", "QueryMemUsedPct", "IntervalEndTime", "RowsPerSec", "RowsPerCPUSec", "EntryDescription"); - if (formatPctColumnsLocal && formatPctColumns) { + if (formatPctColumnsLocal && FORMAT_PCT_COLUMNS) { queryUpdatePerformance = formatColumnsAsPctUpdatePerformance(queryUpdatePerformance); } return queryUpdatePerformance; @@ -177,7 +184,7 @@ public static Map queryUpdatePerformanceMap(final Table queryUpda AggPct(0.50, "Ratio_50_Percentile = Ratio", "QueryMemUsedPct_50_Percentile = QueryMemUsedPct"), AggMax("Ratio_Max = Ratio", "QueryMemUsedPct_Max = QueryMemUsedPct"))); - if (formatPctColumns) { + if (FORMAT_PCT_COLUMNS) { qup = formatColumnsAsPctUpdatePerformance(qup); worstInterval = formatColumnsAsPct(worstInterval, "Ratio"); updateWorst = formatColumnsAsPctUpdatePerformance(updateWorst); @@ -319,13 +326,15 @@ public static TreeTable queryPerformanceAsTreeTable(@NotNull final Table qpl) { public static TreeTable queryOperationPerformanceAsTreeTable( @NotNull final Table qpl, @NotNull final Table qopl) { + // TODO 
(https://github.com/deephaven/deephaven-core/issues/4814): use NULL_INT for ParentOperationNumber and + // Depth once we can prevent any compilation or at least reduce multiple usages to a single formula Table mergeWithAggKeys = TableTools.merge( qpl.updateView( - "EvalKey = `` + EvaluationNumber", - "ParentEvalKey = ParentEvaluationNumber == null ? null : (`` + ParentEvaluationNumber)", + "EvalKey = Long.toString(EvaluationNumber)", + "ParentEvalKey = ParentEvaluationNumber == null ? null : (Long.toString(ParentEvaluationNumber))", "OperationNumber = NULL_INT", - "ParentOperationNumber = NULL_INT", - "Depth = NULL_INT", + "ParentOperationNumber = OperationNumber", + "Depth = OperationNumber", "CallerLine = (String) null", "IsCompilation = NULL_BOOLEAN", "InputSizeLong = NULL_LONG"), @@ -361,6 +370,9 @@ private static String whereConditionForEvaluationNumber(final long evaluationNum } private static Table maybeMoveColumnsUp(final Table source, final String... cols) { - return source.moveColumnsUp(Stream.of(cols).filter(source::hasColumns).toArray(String[]::new)); + return source.moveColumnsUp(Stream.of(cols) + .filter(columnName -> !ALLOWED_MISSING_COLUMN_NAMES.contains(columnName) + || source.hasColumns(columnName)) + .toArray(String[]::new)); } } diff --git a/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java b/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java index 1fae3c97eb3..73df0eead12 100644 --- a/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java +++ b/server/src/main/java/io/deephaven/server/arrow/ArrowFlightUtil.java @@ -73,14 +73,13 @@ public static void DoGetCustom( final Flight.Ticket request, final StreamObserver observer) { - final String description = - "FlightService#DoGet(request=" + ticketRouter.getLogNameFor(request, "request") + ")"; + final String description = "FlightService#DoGet(table=" + ticketRouter.getLogNameFor(request, "table") + ")"; final QueryPerformanceRecorder 
queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { - final SessionState.ExportObject> export = - ticketRouter.resolve(session, request, "request"); + final SessionState.ExportObject> tableExport = + ticketRouter.resolve(session, request, "table"); final BarragePerformanceLog.SnapshotMetricsHelper metrics = new BarragePerformanceLog.SnapshotMetricsHelper(); @@ -88,11 +87,11 @@ public static void DoGetCustom( final long queueStartTm = System.nanoTime(); session.nonExport() .queryPerformanceRecorder(queryPerformanceRecorder) - .require(export) + .require(tableExport) .onError(observer) .submit(() -> { metrics.queueNanos = System.nanoTime() - queueStartTm; - final BaseTable table = export.get(); + final BaseTable table = tableExport.get(); metrics.tableId = Integer.toHexString(System.identityHashCode(table)); metrics.tableKey = BarragePerformanceLog.getKeyFor(table); @@ -488,14 +487,14 @@ public void handleMessage(@NotNull final BarrageProtoUtil.MessageInfo message) { final BarrageSnapshotRequest snapshotRequest = BarrageSnapshotRequest .getRootAsBarrageSnapshotRequest(message.app_metadata.msgPayloadAsByteBuffer()); - final String description = "FlightService#DoExchange(snapshot, request=" - + ticketRouter.getLogNameFor(snapshotRequest.ticketAsByteBuffer(), "request") + ")"; + final String description = "FlightService#DoExchange(snapshot, table=" + + ticketRouter.getLogNameFor(snapshotRequest.ticketAsByteBuffer(), "table") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { - final SessionState.ExportObject> parent = - ticketRouter.resolve(session, snapshotRequest.ticketAsByteBuffer(), "parent"); + final 
SessionState.ExportObject> tableExport = + ticketRouter.resolve(session, snapshotRequest.ticketAsByteBuffer(), "table"); final BarragePerformanceLog.SnapshotMetricsHelper metrics = new BarragePerformanceLog.SnapshotMetricsHelper(); @@ -503,11 +502,11 @@ public void handleMessage(@NotNull final BarrageProtoUtil.MessageInfo message) { final long queueStartTm = System.nanoTime(); session.nonExport() .queryPerformanceRecorder(queryPerformanceRecorder) - .require(parent) + .require(tableExport) .onError(listener) .submit(() -> { metrics.queueNanos = System.nanoTime() - queueStartTm; - final BaseTable table = parent.get(); + final BaseTable table = tableExport.get(); metrics.tableId = Integer.toHexString(System.identityHashCode(table)); metrics.tableKey = BarragePerformanceLog.getKeyFor(table); @@ -643,21 +642,21 @@ public void handleMessage(@NotNull final MessageInfo message) { preExportSubscriptions = new ArrayDeque<>(); preExportSubscriptions.add(subscriptionRequest); - final String description = "FlightService#DoExchange(subscription, request=" - + ticketRouter.getLogNameFor(subscriptionRequest.ticketAsByteBuffer(), "request") + ")"; + final String description = "FlightService#DoExchange(subscription, table=" + + ticketRouter.getLogNameFor(subscriptionRequest.ticketAsByteBuffer(), "table") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { - final SessionState.ExportObject request = - ticketRouter.resolve(session, subscriptionRequest.ticketAsByteBuffer(), "request"); + final SessionState.ExportObject table = + ticketRouter.resolve(session, subscriptionRequest.ticketAsByteBuffer(), "table"); synchronized (this) { onExportResolvedContinuation = session.nonExport() .queryPerformanceRecorder(queryPerformanceRecorder) - .require(request) + .require(table) 
.onErrorHandler(DoExchangeMarshaller.this::onError) - .submit(() -> onExportResolved(request)); + .submit(() -> onExportResolved(table)); } } } diff --git a/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java index d7108286015..ca0b55a69b5 100644 --- a/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/arrow/FlightServiceGrpcImpl.java @@ -10,10 +10,8 @@ import io.deephaven.auth.AuthenticationException; import io.deephaven.auth.AuthenticationRequestHandler; import io.deephaven.auth.BasicAuthMarshaller; -import io.deephaven.engine.table.impl.BaseTable; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; -import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.extensions.barrage.BarrageStreamGenerator; import io.deephaven.extensions.barrage.util.GrpcUtil; @@ -197,7 +195,7 @@ public void getFlightInfo( return; } - String exception = null; + StatusRuntimeException exception = null; if (export.tryRetainReference()) { try { if (export.getState() == ExportNotification.State.EXPORTED) { @@ -208,18 +206,12 @@ public void getFlightInfo( export.dropReference(); } } else { - final StatusRuntimeException err = - Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "Could not find flight info"); - exception = err.getMessage(); - GrpcUtil.safelyError(responseObserver, err); + exception = Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "Could not find flight info"); + GrpcUtil.safelyError(responseObserver, exception); } if (queryPerformanceRecorder.endQuery() || exception != null) { - QueryProcessingResults results = new QueryProcessingResults(queryPerformanceRecorder); - if (exception != null) { - 
results.setException(exception); - } - EngineMetrics.getInstance().logQueryProcessingResults(results); + EngineMetrics.getInstance().logQueryProcessingResults(queryPerformanceRecorder, exception); } } } @@ -252,7 +244,7 @@ public void getSchema( return; } - String exception = null; + StatusRuntimeException exception = null; if (export.tryRetainReference()) { try { if (export.getState() == ExportNotification.State.EXPORTED) { @@ -265,18 +257,12 @@ public void getSchema( export.dropReference(); } } else { - final StatusRuntimeException err = - Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "Could not find flight info"); - exception = err.getMessage(); - responseObserver.onError(err); + exception = Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "Could not find flight info"); + responseObserver.onError(exception); } if (queryPerformanceRecorder.endQuery() || exception != null) { - QueryProcessingResults results = new QueryProcessingResults(queryPerformanceRecorder); - if (exception != null) { - results.setException(exception); - } - EngineMetrics.getInstance().logQueryProcessingResults(results); + EngineMetrics.getInstance().logQueryProcessingResults(queryPerformanceRecorder, exception); } } } diff --git a/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java index 5677fa59ae0..10d77657092 100644 --- a/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java @@ -167,7 +167,7 @@ public void executeCommand( throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No consoleId supplied"); } - final String description = "ConsoleServiceGrpcImpl#executeCommand(console=" + final String description = "ConsoleService#executeCommand(console=" + ticketRouter.getLogNameFor(consoleId, "consoleId") + ")"; final QueryPerformanceRecorder 
queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); @@ -253,8 +253,8 @@ public void bindTableToVariable( throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No source tableId supplied"); } - final String description = "ConsoleServiceGrpcImpl#bindTableToVariable(tableId=" - + ticketRouter.getLogNameFor(tableId, "tableId") + ", variableName=" + request.getVariableName() + final String description = "ConsoleService#bindTableToVariable(table=" + + ticketRouter.getLogNameFor(tableId, "table") + ", variableName=" + request.getVariableName() + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); diff --git a/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java index f25fd35adf5..96d5d018b09 100644 --- a/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/hierarchicaltable/HierarchicalTableServiceGrpcImpl.java @@ -76,16 +76,16 @@ public void rollup( final SessionState session = sessionService.getCurrentSession(); - final String description = "HierarchicalTableServiceGrpcImpl#rollup(source=" - + ticketRouter.getLogNameFor(request.getSourceTableId(), "sourceId") + ")"; + final String description = "HierarchicalTableService#rollup(table=" + + ticketRouter.getLogNameFor(request.getSourceTableId(), "sourceTableId") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject

sourceTableExport = - ticketRouter.resolve(session, request.getSourceTableId(), "rollup.sourceTableId"); + ticketRouter.resolve(session, request.getSourceTableId(), "sourceTableId"); - session.newExport(request.getResultRollupTableId(), "rollup.resultRollupTableId") + session.newExport(request.getResultRollupTableId(), "resultRollupTableId") .queryPerformanceRecorder(queryPerformanceRecorder) .require(sourceTableExport) .onError(responseObserver) @@ -128,16 +128,16 @@ public void tree( final SessionState session = sessionService.getCurrentSession(); - final String description = "HierarchicalTableServiceGrpcImpl#tree(source=" - + ticketRouter.getLogNameFor(request.getSourceTableId(), "source") + ")"; + final String description = "HierarchicalTableService#tree(table=" + + ticketRouter.getLogNameFor(request.getSourceTableId(), "sourceTableId") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject
sourceTableExport = - ticketRouter.resolve(session, request.getSourceTableId(), "tree.sourceTableId"); + ticketRouter.resolve(session, request.getSourceTableId(), "sourceTableId"); - session.newExport(request.getResultTreeTableId(), "tree.resultTreeTableId") + session.newExport(request.getResultTreeTableId(), "resultTreeTableId") .queryPerformanceRecorder(queryPerformanceRecorder) .require(sourceTableExport) .onError(responseObserver) @@ -185,17 +185,16 @@ public void apply( final SessionState session = sessionService.getCurrentSession(); - final String description = "HierarchicalTableServiceGrpcImpl#apply(source=" - + ticketRouter.getLogNameFor(request.getInputHierarchicalTableId(), "source") + ")"; + final String description = "HierarchicalTableService#apply(table=" + + ticketRouter.getLogNameFor(request.getInputHierarchicalTableId(), "inputHierarchicalTableId") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject> inputHierarchicalTableExport = - ticketRouter.resolve(session, request.getInputHierarchicalTableId(), - "apply.inputHierarchicalTableId"); + ticketRouter.resolve(session, request.getInputHierarchicalTableId(), "inputHierarchicalTableId"); - session.newExport(request.getResultHierarchicalTableId(), "apply.resultHierarchicalTableId") + session.newExport(request.getResultHierarchicalTableId(), "resultHierarchicalTableId") .queryPerformanceRecorder(queryPerformanceRecorder) .require(inputHierarchicalTableExport) .onError(responseObserver) @@ -359,22 +358,22 @@ public void view( throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "No target specified"); } - final String description = "HierarchicalTableServiceGrpcImpl#view(target=" - + ticketRouter.getLogNameFor(targetTicket, "target") + ")"; + final String description 
= "HierarchicalTableService#view(table=" + + ticketRouter.getLogNameFor(targetTicket, "targetTableId") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportBuilder resultExportBuilder = - session.newExport(request.getResultViewId(), "view.resultViewId"); + session.newExport(request.getResultViewId(), "resultViewId"); final SessionState.ExportObject targetExport = - ticketRouter.resolve(session, targetTicket, "view.target"); + ticketRouter.resolve(session, targetTicket, "targetTableId"); final SessionState.ExportObject
keyTableExport; if (request.hasExpansions()) { keyTableExport = ticketRouter.resolve( - session, request.getExpansions().getKeyTableId(), "view.expansions.keyTableId"); + session, request.getExpansions().getKeyTableId(), "expansions.keyTableId"); resultExportBuilder.require(targetExport, keyTableExport); } else { keyTableExport = null; @@ -459,17 +458,16 @@ public void exportSource( final SessionState session = sessionService.getCurrentSession(); - final String description = "HierarchicalTableServiceGrpcImpl#exportSource(source=" - + ticketRouter.getLogNameFor(request.getHierarchicalTableId(), "source") + ")"; + final String description = "HierarchicalTableService#exportSource(table=" + + ticketRouter.getLogNameFor(request.getHierarchicalTableId(), "hierarchicalTableId") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject> hierarchicalTableExport = - ticketRouter.resolve(session, request.getHierarchicalTableId(), - "exportSource.hierarchicalTableId"); + ticketRouter.resolve(session, request.getHierarchicalTableId(), "hierarchicalTableId"); - session.newExport(request.getResultTableId(), "exportSource.resultTableId") + session.newExport(request.getResultTableId(), "resultTableId") .queryPerformanceRecorder(queryPerformanceRecorder) .require(hierarchicalTableExport) .onError(responseObserver) diff --git a/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java index 89793af3d3b..9b0fa5560d8 100644 --- a/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/object/ObjectServiceGrpcImpl.java @@ -8,7 +8,6 @@ import io.deephaven.base.verify.Assert; import 
io.deephaven.engine.liveness.LivenessScope; import io.deephaven.engine.liveness.LivenessScopeStack; -import io.deephaven.engine.table.Table; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; import io.deephaven.extensions.barrage.util.GrpcUtil; @@ -261,8 +260,8 @@ public void fetchObject( throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "No ticket supplied"); } - final String description = "ObjectServiceGrpcImpl#fetchObject(source=" - + ticketRouter.getLogNameFor(request.getSourceId().getTicket(), "source") + ")"; + final String description = "ObjectService#fetchObject(object=" + + ticketRouter.getLogNameFor(request.getSourceId().getTicket(), "sourceId") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); diff --git a/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java index 57eb9ee9f43..87590cb6de6 100644 --- a/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/partitionedtable/PartitionedTableServiceGrpcImpl.java @@ -58,16 +58,16 @@ public void partitionBy( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - final String description = "PartitionedTableServiceGrpcImpl#partitionBy(source=" - + ticketRouter.getLogNameFor(request.getTableId(), "source") + ")"; + final String description = "PartitionedTableService#partitionBy(table=" + + ticketRouter.getLogNameFor(request.getTableId(), "tableId") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), 
QueryPerformanceNugget.DEFAULT_FACTORY); try (final SafeCloseable ignored = queryPerformanceRecorder.startQuery()) { final SessionState.ExportObject
targetTable = - ticketRouter.resolve(session, request.getTableId(), "partition.tableId"); + ticketRouter.resolve(session, request.getTableId(), "tableId"); - session.newExport(request.getResultId(), "partition.resultId") + session.newExport(request.getResultId(), "resultId") .queryPerformanceRecorder(queryPerformanceRecorder) .require(targetTable) .onError(responseObserver) @@ -88,8 +88,8 @@ public void merge( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - final String description = "PartitionedTableServiceGrpcImpl#merge(source=" - + ticketRouter.getLogNameFor(request.getPartitionedTable(), "source") + ")"; + final String description = "PartitionedTableService#merge(table=" + + ticketRouter.getLogNameFor(request.getPartitionedTable(), "partitionedTable") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); @@ -126,8 +126,8 @@ public void getTable( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - final String description = "PartitionedTableServiceGrpcImpl#getTable(source=" - + ticketRouter.getLogNameFor(request.getPartitionedTable(), "source") + ", keyTable=" + final String description = "PartitionedTableService#getTable(table=" + + ticketRouter.getLogNameFor(request.getPartitionedTable(), "partitionedTable") + ", keyTable=" + ticketRouter.getLogNameFor(request.getKeyTableTicket(), "keyTable") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); @@ -136,7 +136,7 @@ public void getTable( final SessionState.ExportObject partitionedTable = ticketRouter.resolve(session, request.getPartitionedTable(), "partitionedTable"); final SessionState.ExportObject
keys = - ticketRouter.resolve(session, request.getKeyTableTicket(), "keyTableTicket"); + ticketRouter.resolve(session, request.getKeyTableTicket(), "keyTable"); session.newExport(request.getResultId(), "resultId") .queryPerformanceRecorder(queryPerformanceRecorder) diff --git a/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java index 7170b64e26b..b355ba776d8 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/session/SessionServiceGrpcImpl.java @@ -167,8 +167,8 @@ public void exportFromTicket( return; } - final String description = "SessionServiceGrpcImpl#exportFromTicket(source=" - + ticketRouter.getLogNameFor(request.getSourceId(), "source") + ")"; + final String description = "SessionService#exportFromTicket(object=" + + ticketRouter.getLogNameFor(request.getSourceId(), "sourceId") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); @@ -205,8 +205,8 @@ public void publishFromTicket( return; } - final String description = "SessionServiceGrpcImpl#publishFromTicket(source=" - + ticketRouter.getLogNameFor(request.getSourceId(), "source") + ")"; + final String description = "SessionService#publishFromTicket(object=" + + ticketRouter.getLogNameFor(request.getSourceId(), "sourceId") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); diff --git a/server/src/main/java/io/deephaven/server/session/SessionState.java b/server/src/main/java/io/deephaven/server/session/SessionState.java index 3e80b7c4346..862cb131855 100644 --- a/server/src/main/java/io/deephaven/server/session/SessionState.java +++ 
b/server/src/main/java/io/deephaven/server/session/SessionState.java @@ -15,7 +15,6 @@ import io.deephaven.engine.liveness.LivenessScopeStack; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; -import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.engine.table.impl.perf.QueryState; import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.engine.updategraph.DynamicNode; @@ -986,7 +985,6 @@ private void doExport() { T localResult = null; boolean shouldLog = false; final QueryPerformanceRecorder exportRecorder; - final QueryProcessingResults queryProcessingResults; try (final SafeCloseable ignored1 = session.executionContext.open(); final SafeCloseable ignored2 = LivenessScopeStack.open()) { @@ -1002,7 +1000,6 @@ private void doExport() { exportRecorder = Objects.requireNonNullElseGet(queryPerformanceRecorder, () -> QueryPerformanceRecorder.newQuery("ExportObject#doWork(" + queryId + ")", session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY)); - queryProcessingResults = new QueryProcessingResults(exportRecorder); try (final SafeCloseable ignored3 = isResume ? 
exportRecorder.resumeQuery() @@ -1021,7 +1018,6 @@ private void doExport() { } if (caughtException != null) { - queryProcessingResults.setException(caughtException.toString()); synchronized (this) { if (!isExportStateTerminal(state)) { maybeAssignErrorId(); @@ -1034,7 +1030,7 @@ private void doExport() { } } if (shouldLog || caughtException != null) { - EngineMetrics.getInstance().logQueryProcessingResults(queryProcessingResults); + EngineMetrics.getInstance().logQueryProcessingResults(exportRecorder, caughtException); } if (caughtException == null) { // must set result after ending the query so that onSuccess may resume / finalize a parent query diff --git a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java index 2f08572102c..673e39e35b1 100644 --- a/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/inputtables/InputTableServiceGrpcImpl.java @@ -55,7 +55,7 @@ public void addTableToInputTable( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - final String description = "InputTableServiceGrpcImpl#addTableToInputTable(inputTable=" + final String description = "InputTableService#addTableToInputTable(inputTable=" + ticketRouter.getLogNameFor(request.getInputTable(), "inputTable") + ", tableToAdd=" + ticketRouter.getLogNameFor(request.getTableToAdd(), "tableToAdd") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( @@ -113,7 +113,7 @@ public void deleteTableFromInputTable( @NotNull final StreamObserver responseObserver) { final SessionState session = sessionService.getCurrentSession(); - final String description = "InputTableServiceGrpcImpl#deleteTableFromInputTable(inputTable=" + final String description = 
"InputTableService#deleteTableFromInputTable(inputTable=" + ticketRouter.getLogNameFor(request.getInputTable(), "inputTable") + ", tableToRemove=" + ticketRouter.getLogNameFor(request.getTableToRemove(), "tableToRemove") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( @@ -139,15 +139,15 @@ public void deleteTableFromInputTable( } MutableInputTable mutableInputTable = (MutableInputTable) inputTable; - Table tableToDelete = tableToRemoveExport.get(); + Table tableToRemove = tableToRemoveExport.get(); authWiring.checkPermissionDeleteTableFromInputTable( ExecutionContext.getContext().getAuthContext(), request, - List.of(targetTable.get(), tableToDelete)); + List.of(targetTable.get(), tableToRemove)); // validate that the columns are compatible try { - mutableInputTable.validateDelete(tableToDelete); + mutableInputTable.validateDelete(tableToRemove); } catch (TableDefinition.IncompatibleTableDefinitionException exception) { throw Exceptions.statusRuntimeException(Code.INVALID_ARGUMENT, "Provided tables's columns are not compatible: " + exception.getMessage()); @@ -158,7 +158,7 @@ public void deleteTableFromInputTable( // actually delete the table's contents try { - mutableInputTable.delete(tableToDelete); + mutableInputTable.delete(tableToRemove); GrpcUtil.safelyComplete(responseObserver, DeleteTableResponse.getDefaultInstance()); } catch (IOException ioException) { throw Exceptions.statusRuntimeException(Code.DATA_LOSS, diff --git a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java index 6d9756ca5db..7574253f2c7 100644 --- a/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/table/ops/TableServiceGrpcImpl.java @@ -10,7 +10,6 @@ import io.deephaven.engine.table.Table; import io.deephaven.engine.table.impl.perf.QueryPerformanceNugget; import 
io.deephaven.engine.table.impl.perf.QueryPerformanceRecorder; -import io.deephaven.engine.table.impl.perf.QueryProcessingResults; import io.deephaven.engine.table.impl.util.EngineMetrics; import io.deephaven.extensions.barrage.util.ExportUtil; import io.deephaven.internal.log.LoggerFactory; @@ -470,8 +469,8 @@ public void seekRow( if (sourceId.getTicket().isEmpty()) { throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No consoleId supplied"); } - final String description = "TableServiceGrpcImpl#seekRow(source=" - + ticketRouter.getLogNameFor(sourceId, "source") + ")"; + final String description = "TableService#seekRow(table=" + + ticketRouter.getLogNameFor(sourceId, "sourceId") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); @@ -545,22 +544,20 @@ public void batch( final Runnable onOneResolved = () -> { int numRemaining = remaining.decrementAndGet(); + Assert.geqZero(numRemaining, "numRemaining"); if (numRemaining > 0) { return; } - Assert.geqZero(numRemaining, "numRemaining"); try (final SafeCloseable ignored2 = queryPerformanceRecorder.resumeQuery()) { - final QueryProcessingResults results = new QueryProcessingResults(queryPerformanceRecorder); final StatusRuntimeException failure = firstFailure.get(); if (failure != null) { - results.setException(failure.getMessage()); safelyError(responseObserver, failure); } else { safelyComplete(responseObserver); } if (queryPerformanceRecorder.endQuery()) { - EngineMetrics.getInstance().logQueryProcessingResults(results); + EngineMetrics.getInstance().logQueryProcessingResults(queryPerformanceRecorder, failure); } } }; @@ -628,7 +625,7 @@ public void getExportedTableCreationResponse( throw Exceptions.statusRuntimeException(Code.FAILED_PRECONDITION, "No request ticket supplied"); } - final String description = "TableServiceGrpcImpl#getExportedTableCreationResponse(request=" + final 
String description = "TableService#getExportedTableCreationResponse(table=" + ticketRouter.getLogNameFor(request, "request") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); @@ -678,7 +675,7 @@ private void oneShotOperationWrapper( } final String description = "TableService#" + op.name() + "(resultId=" - + ticketRouter.getLogNameFor(resultId, "TableServiceGrpcImpl") + ")"; + + ticketRouter.getLogNameFor(resultId, "TableService") + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY); @@ -744,7 +741,7 @@ private BatchExportBuilder createBatchExportBuilder( operation.validateRequest(request); final Ticket resultId = operation.getResultTicket(request); - boolean hasResultId = !resultId.getTicket().isEmpty(); + final boolean hasResultId = !resultId.getTicket().isEmpty(); final ExportBuilder
exportBuilder = hasResultId ? session.newExport(resultId, "resultId") : session.nonExport(); final String resultDescription = hasResultId From 962f55c1fa62283bc8debce32229b26470d40207 Mon Sep 17 00:00:00 2001 From: Nathaniel Bauernfeind Date: Fri, 17 Nov 2023 17:05:45 -0700 Subject: [PATCH 31/31] most recent review comments --- .../impl/perf/QueryPerformanceRecorder.java | 34 +++++++++++-------- .../perf/QueryPerformanceRecorderImpl.java | 12 +++---- .../impl/util/PerformanceQueriesGeneral.java | 11 +++--- .../console/ConsoleServiceGrpcImpl.java | 2 +- 4 files changed, 31 insertions(+), 28 deletions(-) diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java index 5282d6b9c73..6a91ae9e201 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorder.java @@ -168,52 +168,56 @@ static QueryPerformanceRecorder newSubQuery( /** * Starts a query. *

- * A query is {@link QueryState#RUNNING} if it has been started or {@link #resumeQuery() resumed} without a + * A query is {@link QueryState#RUNNING RUNNING} if it has been started or {@link #resumeQuery() resumed} without a * subsequent {@link #endQuery() end}, {@link #suspendQuery() suspend}, or {@link #abortQuery() abort}. * - * @throws IllegalStateException if the query state isn't {@link QueryState#NOT_STARTED} or another query is running - * on this thread + * @throws IllegalStateException if the query state isn't {@link QueryState#NOT_STARTED NOT_STARTED} or another + * query is running on this thread */ SafeCloseable startQuery(); /** * End a query. *

- * A query is {@link QueryState#RUNNING} if it has been {@link #startQuery() started} or {@link #resumeQuery() - * resumed} without a subsequent end, {@link #suspendQuery() suspend}, or {@link #abortQuery() abort}. + * A query is {@link QueryState#RUNNING RUNNING} if it has been {@link #startQuery() started} or + * {@link #resumeQuery() resumed} without a subsequent end, {@link #suspendQuery() suspend}, or {@link #abortQuery() + * abort}. * * @return whether the query should be logged - * @throws IllegalStateException if the query staet isn't {@link QueryState#RUNNING}, - * {@link QueryState#INTERRUPTED}, or was not running on this thread + * @throws IllegalStateException if the query state isn't {@link QueryState#RUNNING RUNNING}, + * {@link QueryState#INTERRUPTED INTERRUPTED}, or was not running on this thread */ boolean endQuery(); /** * Suspends a query. *

- * A query is {@link QueryState#RUNNING} if it has been {@link #startQuery() started} or {@link #resumeQuery() - * resumed} without a subsequent {@link #endQuery() end}, suspend, or {@link #abortQuery() abort}. + * A query is {@link QueryState#RUNNING RUNNING} if it has been {@link #startQuery() started} or + * {@link #resumeQuery() resumed} without a subsequent {@link #endQuery() end}, suspend, or {@link #abortQuery() + * abort}. * - * @throws IllegalStateException if the query wasn't running or was not running on this thread + * @throws IllegalStateException if the query state isn't {@link QueryState#RUNNING RUNNING} or was not running on + * this thread */ void suspendQuery(); /** * Resumes a suspend query. *

- * A query is {@link QueryState#RUNNING} if it has been {@link #startQuery() started} or resumed without a + * A query is {@link QueryState#RUNNING RUNNING} if it has been {@link #startQuery() started} or resumed without a * subsequent {@link #endQuery() end}, {@link #suspendQuery() suspend}, or {@link #abortQuery() abort}. * - * @throws IllegalStateException if the query state was not {@link QueryState#SUSPENDED} or another query is running - * on this thread + * @throws IllegalStateException if the query state isn't {@link QueryState#SUSPENDED SUSPENDED} or another query is + * running on this thread */ SafeCloseable resumeQuery(); /** * Abort a query. *

- * A query is {@link QueryState#RUNNING} if it has been {@link #startQuery() started} or {@link #resumeQuery() - * resumed} without a subsequent {@link #endQuery() end}, {@link #suspendQuery() suspend}, or abort. + * A query is {@link QueryState#RUNNING RUNNING} if it has been {@link #startQuery() started} or + * {@link #resumeQuery() resumed} without a subsequent {@link #endQuery() end}, {@link #suspendQuery() suspend}, or + * abort. *

* Note that this method is invoked out-of-band and does not throw if the query has been completed. */ diff --git a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java index dc228c7d228..1900a70bcdf 100644 --- a/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java +++ b/engine/table/src/main/java/io/deephaven/engine/table/impl/perf/QueryPerformanceRecorderImpl.java @@ -16,8 +16,8 @@ /** * Query performance instrumentation implementation. Manages a hierarchy of {@link QueryPerformanceNugget} instances. *

- * Many methods are synchronized to 1) support external abortion of query and 2) for scenarios where the query is - * suspended and resumed on another thread. + * Many methods are synchronized to 1) support external abort of query and 2) for scenarios where the query is suspended + * and resumed on another thread. */ public class QueryPerformanceRecorderImpl implements QueryPerformanceRecorder { private static final QueryPerformanceLogThreshold LOG_THRESHOLD = new QueryPerformanceLogThreshold("", 1_000_000); @@ -61,8 +61,9 @@ public class QueryPerformanceRecorderImpl implements QueryPerformanceRecorder { this.nuggetFactory = nuggetFactory; } + @Override public synchronized void abortQuery() { - // TODO (https://github.com/deephaven/deephaven-core/issues/53): support out-of-order abortion + // TODO (https://github.com/deephaven/deephaven-core/issues/53): support out-of-order abort if (state != QueryState.RUNNING) { return; } @@ -111,7 +112,7 @@ public synchronized boolean endQuery() { if (parent != null) { parent.accumulate(this); } - return shouldLogNugget(queryNugget); + return shouldLogNugget(queryNugget) || !operationNuggets.isEmpty() || hasSubQueries; } /** @@ -278,8 +279,7 @@ private boolean shouldLogNugget(@NotNull QueryPerformanceNugget nugget) { return true; } else if (nugget.getEndClockEpochNanos() == NULL_LONG) { // Nuggets will have a null value for end time if they weren't closed for a RUNNING query; this is an - // abnormal - // condition and the nugget should be logged + // abnormal condition and the nugget should be logged return true; } else if (nugget == catchAllNugget) { return UNINSTRUMENTED_LOG_THRESHOLD.shouldLog(nugget.getUsageNanos()); diff --git a/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java b/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java index 813b29cd1da..b06e5946845 100644 --- 
a/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java +++ b/extensions/performance/src/main/java/io/deephaven/engine/table/impl/util/PerformanceQueriesGeneral.java @@ -27,12 +27,11 @@ * Generalizes {@link PerformanceQueries} to accept table parameters and make evaluation number parameter optional. */ public class PerformanceQueriesGeneral { - private static boolean FORMAT_PCT_COLUMNS = true; + private static final boolean FORMAT_PCT_COLUMNS = true; private static final Set ALLOWED_MISSING_COLUMN_NAMES = Sets.newHashSet( - "ProcessUniqueId", // does not exist in DHC - "ParentEvaluationNumber", // may not exist in DHE - "ParentOperationNumber" // may not exist in DHE - ); + "ProcessUniqueId", + "ParentEvaluationNumber", + "ParentOperationNumber"); public static Table queryPerformance(Table queryPerformanceLog, final long evaluationNumber) { @@ -331,7 +330,7 @@ public static TreeTable queryOperationPerformanceAsTreeTable( Table mergeWithAggKeys = TableTools.merge( qpl.updateView( "EvalKey = Long.toString(EvaluationNumber)", - "ParentEvalKey = ParentEvaluationNumber == null ? null : (long.toString(ParentEvaluationNumber))", + "ParentEvalKey = ParentEvaluationNumber == null ? 
null : (Long.toString(ParentEvaluationNumber))", "OperationNumber = NULL_INT", "ParentOperationNumber = OperationNumber", "Depth = OperationNumber", diff --git a/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java b/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java index 10d77657092..3fec26cab7b 100644 --- a/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java +++ b/server/src/main/java/io/deephaven/server/console/ConsoleServiceGrpcImpl.java @@ -254,7 +254,7 @@ public void bindTableToVariable( } final String description = "ConsoleService#bindTableToVariable(table=" - + ticketRouter.getLogNameFor(tableId, "table") + ", variableName=" + request.getVariableName() + + ticketRouter.getLogNameFor(tableId, "tableId") + ", variableName=" + request.getVariableName() + ")"; final QueryPerformanceRecorder queryPerformanceRecorder = QueryPerformanceRecorder.newQuery( description, session.getSessionId(), QueryPerformanceNugget.DEFAULT_FACTORY);