diff --git a/.gitignore b/.gitignore
index d0a8940254..c288e0a6cb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -109,3 +109,4 @@ internal/plugins/externals/ebpf/demo/
 internal/plugins/externals/ebpf/internal/testuitls/mysqlins/mysqlins
 internal/export/doc/zh/inputs/imgs/tracing.png
 /git
+/build
diff --git a/go.mod b/go.mod
index e156f07500..f21a9dfa4d 100644
--- a/go.mod
+++ b/go.mod
@@ -355,6 +355,7 @@ require (
 	github.com/cilium/ebpf v0.11.0
 	github.com/gin-contrib/size v0.0.0-20231230013409-e0f46cc9c1db
 	github.com/google/gopacket v0.0.0-00010101000000-000000000000
+	github.com/grafana/jfr-parser v0.0.1
 	github.com/grafana/pyroscope/ebpf v0.2.1
 	github.com/hashicorp/golang-lru/v2 v2.0.7
 	github.com/ibmdb/go_ibm_db v0.4.4
@@ -372,6 +373,7 @@ require (
 )

 require (
+	github.com/GuanceCloud/zipstream v0.1.0 // indirect
 	github.com/VictoriaMetrics/easyproto v0.1.4 // indirect
 	github.com/avast/retry-go/v4 v4.1.0 // indirect
 	github.com/avvmoto/buf-readerat v0.0.0-20171115124131-a17c8cb89270 // indirect
@@ -389,6 +391,7 @@ replace (
 	github.com/c-bata/go-prompt => github.com/coanor/go-prompt v0.2.6
 	github.com/google/gopacket => github.com/GuanceCloud/gopacket v0.0.1
+	github.com/grafana/jfr-parser => github.com/GuanceCloud/jfr-parser v0.8.6
 	github.com/influxdata/influxdb1-client => github.com/GuanceCloud/influxdb1-client v0.1.8
 	github.com/iovisor/gobpf => github.com/DataDog/gobpf v0.0.0-20210322155958-9866ef4cd22c
 	github.com/kardianos/service => github.com/GuanceCloud/service v1.2.4
diff --git a/go.sum b/go.sum
index feb924a013..e96e9daa26 100644
--- a/go.sum
+++ b/go.sum
@@ -143,8 +143,6 @@ github.com/DataDog/sketches-go v1.4.1 h1:j5G6as+9FASM2qC36lvpvQAj9qsv/jUs3FtO8Cw
 github.com/DataDog/sketches-go v1.4.1/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk=
 github.com/GuanceCloud/client_model v0.0.0-20230418154757-93bd4e878a5e h1:i34dA4kiRTfG+KdvkIXCLPDduarVeFlQhGDD3TefgS4=
 github.com/GuanceCloud/client_model v0.0.0-20230418154757-93bd4e878a5e/go.mod h1:PMnE48aPzuRu83FmWZugC0O3d54ZupJd/MmiaYxz8sM=
-github.com/GuanceCloud/cliutils v1.1.21-0.20240904042137-2a87297900d6 h1:hXvV/9i3aWkVVnpnHLngbpjZwTU+ut36YxOZOS2J4MM=
-github.com/GuanceCloud/cliutils v1.1.21-0.20240904042137-2a87297900d6/go.mod h1:Qbeedf/Ji3immd8Ka01NDQG6SP6j8JBnbZwsHTtxyqs=
 github.com/GuanceCloud/cliutils v1.1.21 h1:UkENug9Kg4GVTq1ITWIz2KmIPIvpNrZxKKUmRxWWFfA=
 github.com/GuanceCloud/cliutils v1.1.21/go.mod h1:5bIAZ9yA6l7W8MMUKw0+SIZJRpmEwxM6ZYLy4vweTgU=
 github.com/GuanceCloud/confd v0.1.101 h1:yjHgfl6YzAlTbFOFMTE4ERpFJzIyovOW7ZFc2/ZssL0=
@@ -159,6 +157,8 @@ github.com/GuanceCloud/grok v1.1.4 h1:+w/U5a54cgY0O+dvfcKc2qD3JuhmaS8Hi29BM4QMYt
 github.com/GuanceCloud/grok v1.1.4/go.mod h1:AHkJZYf7Qbo1FTZT6htdyScpICpgnkQ5+Hc0EmA88vM=
 github.com/GuanceCloud/influxdb1-client v0.1.8 h1:7XNICWcW+NxAHFkzQ8mkOCKA/8U2WNH5m+Hm9g0vz4k=
 github.com/GuanceCloud/influxdb1-client v0.1.8/go.mod h1:4HC4b/O653/ezBiHMPBnHYnHCCfsNT2LvCr7wNLngw4=
+github.com/GuanceCloud/jfr-parser v0.8.6 h1:kyiVxH5LcxNc1Xc3R9uSJz8f8RmBDhy9ytJrXCL6pn8=
+github.com/GuanceCloud/jfr-parser v0.8.6/go.mod h1:mngmZuDZbFhqGn2F+fK7tyxq+EmwvNZqWnQQ+heWmE4=
 github.com/GuanceCloud/kubernetes v0.0.0-20230801080916-ca299820872b h1:9pkl38Cro+7xCCruRvPh9z1L6DwX8xo2N4RDgHGUmtg=
 github.com/GuanceCloud/kubernetes v0.0.0-20230801080916-ca299820872b/go.mod h1:Acv+3eRHxCb4Qvs1YQcZ17X/D0H7DArQrew+WJtsLiE=
 github.com/GuanceCloud/mdcheck v0.0.0-20230718065937-44c6728c995f h1:0+A0eeT48LSlnDpVOQ/sqoW/lbYmerKKF7NVNBlgnww=
@@ -177,6 +177,8 @@ github.com/GuanceCloud/toml v1.2.5 h1:jBWfqFSVortEY0C4RYqFPvhDKcGxIosKzcQqTPtZMf
 github.com/GuanceCloud/toml v1.2.5/go.mod h1:D7S1XowYqOvMQdtsp2+lg2rKmO6RVuyekXJL+MzkD5Y=
 github.com/GuanceCloud/tracing-protos v0.0.0-20230619071516-54c8cff1b6b3 h1:+b+MkQrj/eJcODklzCSObp19TBycmfuooqCBD+89qmU=
 github.com/GuanceCloud/tracing-protos v0.0.0-20230619071516-54c8cff1b6b3/go.mod h1:5nclDehqFMaV8YMZzt1FuXz9/JRVKq0LYhmV2Djc1GU=
+github.com/GuanceCloud/zipstream v0.1.0 h1:RToNErercYk7y/nmyvshjN0Zt12lFNg2BpLh3YXXSNY=
+github.com/GuanceCloud/zipstream v0.1.0/go.mod h1:d5rjEl0N0ucmRRvrfX1+9JtsZZMYt5sWg9AR6pyTkCM=
 github.com/HdrHistogram/hdrhistogram-go v1.1.0 h1:6dpdDPTRoo78HxAJ6T1HfMiKSnqhgRRqzCuPshRkQ7I=
 github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
 github.com/IBM/sarama v1.41.2 h1:ZDBZfGPHAD4uuAtSv4U22fRZBgst0eEwGFzLj0fb85c=
@@ -2079,8 +2081,6 @@ github.com/rosedblabs/wal v1.3.6/go.mod h1:wdq54KJUyVTOv1uddMc6Cdh2d/YCIo8yjcwJA
 github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
-github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
 github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
 github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
 github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
@@ -2959,8 +2959,6 @@ golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxb
 golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
 golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -3256,8 +3254,6 @@ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
 google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
-google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
 google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
 google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
 gopkg.in/CodapeWild/dd-trace-go.v1 v1.35.18 h1:RvveUgTvm5M1oOJGIySvMJnKcXbCQ+dTjcTBuQTKbWY=
diff --git a/internal/export/doc/zh/datakit-operator.md b/internal/export/doc/zh/datakit-operator.md
index 24bc52ca49..c5f906a95b 100644
--- a/internal/export/doc/zh/datakit-operator.md
+++ b/internal/export/doc/zh/datakit-operator.md
@@ -469,12 +469,15 @@ spec:
       labels:
         app: movies-java
       annotations:
-        admission.datakit/java-profiler.version: "latest"
+        admission.datakit/java-profiler.version: "0.4.4"
     spec:
       containers:
       - name: movies-java
         image: zhangyicloud/movies-java:latest
         imagePullPolicy: IfNotPresent
+        securityContext:
+          seccompProfile:
+            type: Unconfined
         env:
         - name: JAVA_OPTS
           value: ""
diff --git a/internal/export/doc/zh/inputs/profile-go.md b/internal/export/doc/zh/inputs/profile-go.md
index fffedaa74e..e0db039fb4 100644
--- a/internal/export/doc/zh/inputs/profile-go.md
+++ b/internal/export/doc/zh/inputs/profile-go.md
@@ -96,6 +96,44 @@ func demo() {
 
 After the program is running, DDTrace pushes data to DataKit periodically (once per minute by default).
 
+### Generating Performance Metrics {#metrics}
+
+Since [1.39.0](changelog.md#cl-1.39.0), Datakit can extract a set of Go runtime metrics from the output of `dd-trace-go`. Some of them are listed below:
+
+| Metric | Description | Unit |
+|------|------|------|
+| prof_go_cpu_cores | CPU cores consumed | core |
+| prof_go_cpu_cores_gc_overhead | CPU cores consumed by GC | core |
+| prof_go_alloc_bytes_per_sec | Bytes of memory allocated per second | byte |
+| prof_go_frees_per_sec | Objects freed by GC per second | count |
+| prof_go_heap_growth_bytes_per_sec | Heap growth in bytes per second | byte |
+| prof_go_allocs_per_sec | Memory allocations per second | count |
+| prof_go_alloc_bytes_total | Total memory allocated during one profiling session (dd-trace collects in 60-second cycles by default, same below) | byte |
+| prof_go_blocked_time | Total time goroutines spent blocked during one profiling session | nanosecond |
+| prof_go_mutex_delay_time | Total time spent waiting on locks during one profiling session | nanosecond |
+| prof_go_gcs_per_sec | GC runs per second | count |
+| prof_go_max_gc_pause_time | Longest single GC pause during one profiling session | nanosecond |
+| prof_go_gc_pause_time | Total GC pause time during one profiling session | nanosecond |
+| prof_go_num_goroutine | Current number of goroutines | count |
+| prof_go_lifetime_heap_bytes | Memory occupied by live heap objects | byte |
+| prof_go_lifetime_heap_objects | Number of live heap objects | count |
+
+
+???+ tips
+
+    This feature is enabled by default. To turn it off, set the `generate_metrics` option to false in the collector configuration file `/datakit/conf.d/profile/profile.conf` and restart Datakit.
+
+    ```toml
+    [[inputs.profile]]
+
+      ...
+
+      ## set false to stop generating apm metrics from ddtrace output.
+      generate_metrics = false
+    ```
+
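+The blocked/mutex/goroutine metrics above (`prof_go_blocked_time`, `prof_go_mutex_delay_time`, `prof_go_num_goroutine`) are derived from the corresponding profile types, which `dd-trace-go` only uploads when they are enabled. A minimal sketch, assuming the standard `dd-trace-go` profiler API (the service name is illustrative):
+
+```go
+package main
+
+import (
+	"log"
+
+	"gopkg.in/DataDog/dd-trace-go.v1/profiler"
+)
+
+func main() {
+	err := profiler.Start(
+		profiler.WithService("my-service"),
+		profiler.WithProfileTypes(
+			profiler.CPUProfile,
+			profiler.HeapProfile,
+			// These three feed the blocked/mutex/goroutine metrics above.
+			profiler.BlockProfile,
+			profiler.MutexProfile,
+			profiler.GoroutineProfile,
+		),
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer profiler.Stop()
+	// ... application code ...
+}
+```
+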
 
 ## Pull Mode {#pull-mode}
 
 ### Enable Profiling in the Go Application {#app-config}
diff --git a/internal/export/doc/zh/inputs/profile-java.md b/internal/export/doc/zh/inputs/profile-java.md
index 8126afae88..1f448c7828 100644
--- a/internal/export/doc/zh/inputs/profile-java.md
+++ b/internal/export/doc/zh/inputs/profile-java.md
@@ -60,24 +60,85 @@ java -javaagent://dd-java-agent.jar \
   -Ddd.profiling.ddprof.wall.enabled=true \
   -Ddd.profiling.ddprof.alloc.enabled=true \
   -Ddd.profiling.ddprof.liveheap.enabled=true \
+  -Ddd.profiling.ddprof.memleak.enabled=true \
   -jar your-app.jar
 ```
 
 Some of the parameters explained:
 
-| Parameter | Environment variable | Description |
-|------|------|------|
-| `-Ddd.profiling.enabled` | DD_PROFILING_ENABLED | Whether to enable profiling |
-| `-Ddd.profiling.allocation.enabled` | DD_PROFILING_ALLOCATION_ENABLED | Whether to enable the `JFR` engine's allocation profiling; it has some performance impact, so watch performance after enabling it |
-| `-Ddd.profiling.ddprof.enabled` | DD_PROFILING_DDPROF_ENABLED | Whether to enable the `Datadog Profiler` engine |
-| `-Ddd.profiling.ddprof.cpu.enabled` | DD_PROFILING_DDPROF_CPU_ENABLED | Whether to enable `Datadog Profiler` CPU profiling |
-| `-Ddd.profiling.ddprof.wall.enabled` | DD_PROFILING_DDPROF_WALL_ENABLED | Whether to enable `Datadog Profiler` wall time collection; needed to correlate traces with profiles, recommended |
-| `-Ddd.profiling.ddprof.alloc.enabled` | DD_PROFILING_DDPROF_ALLOC_ENABLED | Whether to enable the `Datadog Profiler` engine's allocation profiling |
-| `-Ddd.profiling.ddprof.liveheap.enabled` | DD_PROFILING_DDPROF_LIVEHEAP_ENABLED | Whether to enable the `Datadog Profiler` engine's heap profiling |
+| Parameter | Environment variable | Description |
+|------|------|------|
+| `-Ddd.profiling.enabled` | DD_PROFILING_ENABLED | Whether to enable profiling |
+| `-Ddd.profiling.allocation.enabled` | DD_PROFILING_ALLOCATION_ENABLED | Whether to enable the `JFR` engine's allocation sampling; it may affect performance, so the `Datadog Profiler` is recommended on newer JDKs |
+| `-Ddd.profiling.heap.enabled` | DD_PROFILING_HEAP_ENABLED | Whether to enable `JFR` heap object sampling |
+| `-Ddd.profiling.directallocation.enabled` | DD_PROFILING_DIRECTALLOCATION_ENABLED | Whether to enable `JFR` JVM direct memory allocation sampling |
+| `-Ddd.profiling.ddprof.enabled` | DD_PROFILING_DDPROF_ENABLED | Whether to enable the `Datadog Profiler` engine |
+| `-Ddd.profiling.ddprof.cpu.enabled` | DD_PROFILING_DDPROF_CPU_ENABLED | Whether to enable `Datadog Profiler` CPU profiling |
+| `-Ddd.profiling.ddprof.wall.enabled` | DD_PROFILING_DDPROF_WALL_ENABLED | Whether to enable `Datadog Profiler` wall time collection; needed to correlate traces with profiles, recommended |
+| `-Ddd.profiling.ddprof.alloc.enabled` | DD_PROFILING_DDPROF_ALLOC_ENABLED | Whether to enable the `Datadog Profiler` engine's allocation profiling |
+| `-Ddd.profiling.ddprof.liveheap.enabled` | DD_PROFILING_DDPROF_LIVEHEAP_ENABLED | Whether to enable the `Datadog Profiler` engine's heap profiling |
+| `-Ddd.profiling.ddprof.memleak.enabled` | DD_PROFILING_DDPROF_MEMLEAK_ENABLED | Whether to enable the `Datadog Profiler` engine's memory-leak sampling |
 
 About one minute after the program starts, the data becomes visible on the Guance console.
 
+### Generating Performance Metrics {#metrics}
+
+Since [1.39.0](changelog.md#cl-1.39.0), Datakit can extract a set of JVM runtime metrics from the output of `dd-trace-java`. Some of them are listed below:
+
+| Metric | Description | Unit |
+|------|------|------|
+| prof_jvm_cpu_cores | Total CPU cores consumed by the application | core |
+| prof_jvm_alloc_bytes_per_sec | Bytes of memory allocated per second | byte |
+| prof_jvm_allocs_per_sec | Memory allocations per second | count |
+| prof_jvm_alloc_bytes_total | Total memory allocated during one profiling session | byte |
+| prof_jvm_class_loads_per_sec | Class loads per second | count |
+| prof_jvm_compilation_time | Total JIT compilation time during one profiling session (dd-trace collects in 60-second cycles by default, same below) | nanosecond |
+| prof_jvm_context_switches_per_sec | Thread context switches per second | count |
+| prof_jvm_direct_alloc_bytes_per_sec | Direct memory allocated per second | byte |
+| prof_jvm_throws_per_sec | Exceptions thrown per second | count |
+| prof_jvm_throws_total | Total exceptions thrown during one profiling session | count |
+| prof_jvm_file_io_max_read_bytes | Largest number of bytes read by a single file read during one profiling session | byte |
+| prof_jvm_file_io_max_read_time | Longest single file read during one profiling session | nanosecond |
+| prof_jvm_file_io_max_write_bytes | Largest number of bytes written by a single file write during one profiling session | byte |
+| prof_jvm_file_io_max_write_time | Longest single file write during one profiling session | nanosecond |
+| prof_jvm_file_io_read_bytes | Total bytes read from files during one profiling session | byte |
+| prof_jvm_file_io_time | Total time spent on file I/O during one profiling session | nanosecond |
+| prof_jvm_file_io_read_time | Total time spent reading files during one profiling session | nanosecond |
+| prof_jvm_file_io_write_time | Total time spent writing files during one profiling session | nanosecond |
+| prof_jvm_avg_gc_pause_time | Average program pause caused by a GC run | nanosecond |
+| prof_jvm_max_gc_pause_time | Longest GC pause during one profiling session | nanosecond |
+| prof_jvm_gc_pauses_per_sec | GC pauses per second | count |
+| prof_jvm_gc_pause_time | Total GC pause time during one profiling session | nanosecond |
+| prof_jvm_lifetime_heap_bytes | Memory occupied by live heap objects | byte |
+| prof_jvm_lifetime_heap_objects | Number of live heap objects | count |
+| prof_jvm_locks_max_wait_time | Longest wait caused by lock contention during one profiling session | nanosecond |
+| prof_jvm_locks_per_sec | Lock contention events per second | count |
+| prof_jvm_socket_io_max_read_time | Longest single socket read during one profiling session | nanosecond |
+| prof_jvm_socket_io_max_write_bytes | Largest number of bytes sent by a single socket write during one profiling session | byte |
+| prof_jvm_socket_io_max_write_time | Longest single socket write during one profiling session | nanosecond |
+| prof_jvm_socket_io_read_bytes | Total bytes received over sockets during one profiling session | byte |
+| prof_jvm_socket_io_read_time | Total time spent reading from sockets during one profiling session | nanosecond |
+| prof_jvm_socket_io_write_time | Total time spent writing to sockets during one profiling session | nanosecond |
+| prof_jvm_socket_io_write_bytes | Total bytes sent over sockets during one profiling session | byte |
+| prof_jvm_threads_created_per_sec | Threads created per second | count |
+| prof_jvm_threads_deadlocked | Number of deadlocked threads | count |
+| prof_jvm_uptime_nanoseconds | Time since the program started | nanosecond |
+
+
+???+ tips
+
+    This feature is enabled by default. To turn it off, set the `generate_metrics` option to false in the collector configuration file `/datakit/conf.d/profile/profile.conf` and restart Datakit.
+
+    ```toml
+    [[inputs.profile]]
+
+      ## set false to stop generating apm metrics from ddtrace output.
+      generate_metrics = false
+    ```
+
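+These fields are computed from the JFR events in the uploaded profile. As a reference, a sketch of how something like `prof_jvm_max_gc_pause_time` can be derived with the `jfr-parser` fork this PR pins (the file path is a placeholder):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/grafana/jfr-parser/common/attributes"
+	"github.com/grafana/jfr-parser/common/filters"
+	"github.com/grafana/jfr-parser/common/units"
+	"github.com/grafana/jfr-parser/parser"
+)
+
+func main() {
+	chunks, err := parser.ParseFile("main.jfr") // placeholder path
+	if err != nil {
+		panic(err)
+	}
+	var maxPauseNS int64
+	for _, chunk := range chunks {
+		// filters.GcPause selects the GC pause events recorded in the chunk.
+		for _, event := range chunk.Apply(filters.GcPause) {
+			duration, err := attributes.Duration.GetValue(event)
+			if err != nil {
+				continue
+			}
+			if duration.Unit() != units.Nanosecond {
+				if duration, err = duration.In(units.Nanosecond); err != nil {
+					continue
+				}
+			}
+			if ns := duration.IntValue(); ns > maxPauseNS {
+				maxPauseNS = ns
+			}
+		}
+	}
+	fmt.Println("max GC pause (ns):", maxPauseNS)
+}
+```
+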
 
 ## Async Profiler {#async-profiler}
 
 async-profiler is an open-source Java profiling tool; built on HotSpot APIs, it can collect stack traces, memory allocations and other information from a running program.
diff --git a/internal/export/doc/zh/inputs/profile-python.md b/internal/export/doc/zh/inputs/profile-python.md
index ec6b72d564..1cdc48e2b0 100644
--- a/internal/export/doc/zh/inputs/profile-python.md
+++ b/internal/export/doc/zh/inputs/profile-python.md
@@ -61,7 +61,7 @@ prof.start(True, True)
 # time.sleep(1)
 ```
 
-With this, the project no longer needs the `ddtrace-run` command added when starting:
+With this, the project no longer needs to be started via the `ddtrace-run` command:
 
 ```shell
 DD_ENV=testing DD_SERVICE=python-profiling-manual DD_VERSION=1.2.3 python3 app.py
@@ -71,6 +71,41 @@ DD_ENV=testing DD_SERVICE=python-profiling-manual DD_VERSION=1.2.3 python3 app.p
 
 After the program starts, DDTrace collects and reports data periodically (once per minute by default) to Datakit; after a few minutes the data can be viewed in the Guance workspace under [APM -> Profile](https://console.guance.com/tracing/profile){:target="_blank"}.
 
+### Generating Performance Metrics {#metrics}
+
+Since [1.39.0](changelog.md#cl-1.39.0), Datakit can extract a set of Python runtime metrics from the output of `dd-trace-py`. Some of them are listed below:
+
+| Metric | Description | Unit |
+|------|------|------|
+| prof_python_cpu_cores | CPU cores consumed | core |
+| prof_python_alloc_bytes_per_sec | Bytes of memory allocated per second | byte |
+| prof_python_allocs_per_sec | Memory allocations per second | count |
+| prof_python_alloc_bytes_total | Total memory allocated during one profiling session (dd-trace collects in 60-second cycles by default, same below) | byte |
+| prof_python_lock_acquisition_time | Total time spent waiting for locks during one profiling session | nanosecond |
+| prof_python_lock_acquisitions_per_sec | Lock contention events per second | count |
+| prof_python_lock_hold_time | Total time locks were held during one profiling session | nanosecond |
+| prof_python_exceptions_per_sec | Exceptions raised per second | count |
+| prof_python_exceptions_total | Total exceptions raised during one profiling session | count |
+| prof_python_lifetime_heap_bytes | Memory occupied by live heap objects | byte |
+| prof_python_wall_time | Wall clock time | nanosecond |
+
+
+???+ tips
+
+    This feature is enabled by default. To turn it off, set the `generate_metrics` option to false in the collector configuration file `/datakit/conf.d/profile/profile.conf` and restart Datakit.
+
+    ```toml
+    [[inputs.profile]]
+
+      ...
+
+      ## set false to stop generating apm metrics from ddtrace output.
+      generate_metrics = false
+    ```
+
+
 ## Integrating `py-spy` {#py-spy}
 
 ### Using on a Host {#py-spy-on-host}
diff --git a/internal/plugins/inputs/profile/input.go b/internal/plugins/inputs/profile/input.go
index 6624d7f701..2d34879ffa 100644
--- a/internal/plugins/inputs/profile/input.go
+++ b/internal/plugins/inputs/profile/input.go
@@ -37,8 +37,9 @@ import (
 	"gitlab.jiagouyun.com/cloudcare-tools/datakit/internal/httpapi"
 	dkio "gitlab.jiagouyun.com/cloudcare-tools/datakit/internal/io"
 	"gitlab.jiagouyun.com/cloudcare-tools/datakit/internal/io/dataway"
-	"gitlab.jiagouyun.com/cloudcare-tools/datakit/internal/metrics"
+	dkMetrics "gitlab.jiagouyun.com/cloudcare-tools/datakit/internal/metrics"
 	"gitlab.jiagouyun.com/cloudcare-tools/datakit/internal/plugins/inputs"
+	"gitlab.jiagouyun.com/cloudcare-tools/datakit/internal/plugins/inputs/profile/metrics"
 	"gitlab.jiagouyun.com/cloudcare-tools/datakit/internal/plugins/inputs/rum"
 	"gitlab.jiagouyun.com/cloudcare-tools/datakit/internal/trace"
 )
@@ -67,6 +68,9 @@ const (
 	## the max allowed size of http request body (of MB), 32MB by default.
 	body_size_limit_mb = 32 # MB
 
+	## set false to stop generating apm metrics from ddtrace output.
+	generate_metrics = true
+
 	## io_config is used to control profiling uploading behavior.
 	## cache_path set the disk directory where temporarily cache profiling data.
 	## cache_capacity_mb specify the max storage space (in MiB) that profiling cache can use.
@@ -313,7 +317,7 @@ func defaultDiskCachePath() string {
 	return filepath.Join(datakit.CacheDir, defaultDiskCacheFileName)
 }
 
-func defaultInput() *Input {
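+// DefaultInput returns an Input populated with the default settings,
+// including metric generation turned on; it is exported so that callers
+// outside this package (and the tests) can construct the input.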
+func DefaultInput() *Input {
 	return &Input{
 		BodySizeLimitMB: defaultProfileMaxSize,
 		IOConfig: ioConfig{
@@ -324,18 +328,19 @@ func defaultInput() *Input {
 			SendTimeout:    defaultHTTPClientTimeout,
 			SendRetryCount: defaultHTTPRetryCount,
 		},
-		pauseCh:    make(chan bool, inputs.ElectionPauseChannelLength),
-		Election:   true,
-		semStop:    cliutils.NewSem(),
-		feeder:     dkio.DefaultFeeder(),
-		Tagger:     datakit.DefaultGlobalTagger(),
-		httpClient: http.DefaultClient,
+		GenerateMetrics: true,
+		pauseCh:         make(chan bool, inputs.ElectionPauseChannelLength),
+		Election:        true,
+		semStop:         cliutils.NewSem(),
+		feeder:          dkio.DefaultFeeder(),
+		Tagger:          datakit.DefaultGlobalTagger(),
+		httpClient:      http.DefaultClient,
 	}
 }
 
 func init() { //nolint:gochecknoinits
 	inputs.Add(inputName, func() inputs.Input {
-		return defaultInput()
+		return DefaultInput()
 	})
 }
@@ -347,6 +352,7 @@ type Input struct {
 	Go              []*GoProfiler    `toml:"go"`
 	PyroscopeLists  []*pyroscopeOpts `toml:"pyroscope"`
 	Election        bool             `toml:"election"`
+	GenerateMetrics bool             `toml:"generate_metrics"`
 
 	pause   bool
 	pauseCh chan bool
@@ -359,7 +365,7 @@ type Input struct {
 	Tagger     datakit.GlobalTagger
 }
 
-func (ipt *Input) getBodySizeLimit() int64 {
+func (ipt *Input) GetBodySizeLimit() int64 {
 	return int64(ipt.BodySizeLimitMB) * MiB
 }
@@ -456,7 +462,7 @@ func cacheRequest(w http.ResponseWriter, r *http.Request, bodySizeLimit int64) *
 }
 
 func (ipt *Input) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	if err := cacheRequest(w, req, ipt.getBodySizeLimit()); err != nil {
+	if err := cacheRequest(w, req, ipt.GetBodySizeLimit()); err != nil {
 		log.Errorf("unable to cache profiling request: %v", err)
 		w.WriteHeader(err.HttpCode)
 		_, _ = io.WriteString(w, err.Error())
 	}
 }
 
-func insertEventFormFile(form *multipart.Form, mw *multipart.Writer, metadata *resolvedMetadata) error {
-	f, err := mw.CreateFormFile(eventJSONFile, eventJSONFileWithSuffix)
+func insertEventFormFile(form *multipart.Form, mw *multipart.Writer, metadata *metrics.ResolvedMetadata) error {
+	f, err := mw.CreateFormFile(metrics.EventFile, metrics.EventJSONFile)
 	if err != nil {
 		return fmt.Errorf("unable to create form file: %w", err)
 	}
 
-	md := Metadata{}
+	md := metrics.Metadata{}
 
 	for name, fileHeaders := range form.File {
 		extName := filepath.Ext(name)
@@ -488,33 +494,32 @@
 		md.Attachments = append(md.Attachments, name)
 		switch strings.ToLower(extName) {
 		case ".pprof":
-			md.Format = PPROF
+			md.Format = metrics.PPROF
 		case ".jfr":
-			md.Format = JFR
+			md.Format = metrics.JFR
 		}
 	}
 	if md.Format == "" {
 		md.Format = "unknown"
 	}
 
-	startTime, err := resolveStartTime(metadata.formValue)
+	startTime, err := metrics.ResolveStartTime(metadata.FormValue)
 	if err != nil {
 		log.Warnf("unable to resolve profile start time: %v", err)
 	} else {
-		md.Start = newRFC3339Time(startTime)
+		md.Start = metrics.NewRFC3339Time(startTime)
 	}
 
-	endTime, err := resolveEndTime(metadata.formValue)
+	endTime, err := metrics.ResolveEndTime(metadata.FormValue)
 	if err != nil {
 		log.Warnf("unable to resolve profile end time: %v", err)
 	} else {
-		md.End = newRFC3339Time(endTime)
+		md.End = metrics.NewRFC3339Time(endTime)
 	}
 
-	tags := metadata.tags
-	lang := resolveLang(metadata.formValue, tags)
+	lang := metrics.ResolveLang(metadata)
 	md.Language = lang
-	md.TagsProfiler = joinTags(metadata.tags)
+	md.TagsProfiler = metrics.JoinTags(metadata.Tags)
 
 	mdBytes, err := json.Marshal(md)
 	if err != nil {
@@ -547,18 +552,43 @@ func (ipt *Input) sendRequestToDW(ctx context.Context, pbBytes []byte) error {
 		}
 	}
 
-	if err := req.ParseMultipartForm(ipt.getBodySizeLimit()); err != nil {
+	if err := req.ParseMultipartForm(ipt.GetBodySizeLimit()); err != nil {
 		return fmt.Errorf("unable to parse multipart/formdata: %w", err)
 	}
 
-	metadata, _, err := parseMetadata(req)
+	metadata, _, err := metrics.ParseMetadata(req)
 	if err != nil {
 		return fmt.Errorf("unable to resolve profiling tags: %w", err)
 	}
 
 	var subCustomTags map[string]string
-	if len(metadata.formValue[subCustomTagsKey]) > 0 && metadata.formValue[subCustomTagsKey][0] != "" {
-		subCustomTags = newTags(strings.Split(metadata.formValue[subCustomTagsKey][0], ","))
+	if len(metadata.FormValue[metrics.SubCustomTagsKey]) > 0 && metadata.FormValue[metrics.SubCustomTagsKey][0] != "" {
+		subCustomTags = metrics.NewTags(strings.Split(metadata.FormValue[metrics.SubCustomTagsKey][0], ","))
+	}
+
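+	// Generate apm metrics from the uploaded profile before forwarding it;
+	// failures here are only logged and never block the upload.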
+	language := metrics.ResolveLang(metadata)
+	if ipt.GenerateMetrics {
+		allCustomTags := make(map[string]string, len(ipt.Tags)+len(subCustomTags))
+		for k, v := range ipt.Tags {
+			allCustomTags[k] = v
+		}
+		for k, v := range subCustomTags {
+			allCustomTags[k] = v
+		}
+		switch language { // nolint:exhaustive
+		case metrics.Java:
+			if err = metrics.ExportJVMMetrics(req.MultipartForm.File, metadata, allCustomTags); err != nil {
+				log.Errorf("unable to export java ddtrace profiling metrics: %v", err)
+			}
+		case metrics.Golang:
+			if err = metrics.ExportGoMetrics(req.MultipartForm.File, metadata, allCustomTags); err != nil {
+				log.Errorf("unable to export golang ddtrace profiling metrics: %v", err)
+			}
+		case metrics.Python:
+			if err = metrics.ExportPythonMetrics(req.MultipartForm.File, metadata, allCustomTags); err != nil {
+				log.Errorf("unable to export python ddtrace profiling metrics: %v", err)
+			}
+		}
+	}
 
 	customTagsDefined := false
 	for tk, tv := range ipt.Tags {
 		if _, ok := subCustomTags[tk]; ok {
 			// has set tags in sub settings, ignore
 			continue
 		}
-		if old, ok := metadata.tags[tk]; !ok || old != tv {
+		if old, ok := metadata.Tags[tk]; !ok || old != tv {
 			customTagsDefined = true
-			metadata.tags[tk] = tv
+			metadata.Tags[tk] = tv
 		}
 	}
 
 	// Add event form file to multipartForm if it doesn't exist
-	_, ok1 := req.MultipartForm.File[eventJSONFile]
-	_, ok2 := req.MultipartForm.File[eventJSONFileWithSuffix]
+	_, ok1 := req.MultipartForm.File[metrics.EventFile]
+	_, ok2 := req.MultipartForm.File[metrics.EventJSONFile]
 
 	if (!ok1 && !ok2) || customTagsDefined {
 		if newBody, err := modifyMultipartForm(req, req.MultipartForm, metadata); err != nil {
@@ -587,7 +617,7 @@ func (ipt *Input) sendRequestToDW(ctx context.Context, pbBytes []byte) error {
 
 	req.Header.Set(XDataKitVersionHeader, datakit.Version)
 
-	xGlobalTag := dataway.SinkHeaderValueFromTags(metadata.tags,
+	xGlobalTag := dataway.SinkHeaderValueFromTags(metadata.Tags,
 		config.Cfg.Dataway.GlobalTags(),
 		config.Cfg.Dataway.CustomTagKeys())
 	if xGlobalTag == "" {
@@ -672,7 +702,7 @@ func (ipt *Input) sendRequestToDW(ctx context.Context, pbBytes []byte) error {
 	}
 
 	reqCost = time.Since(reqStart)
 
-	metricName := inputName + "/" + resolveLang(metadata.formValue, metadata.tags).String()
+	metricName := inputName + "/" + language.String()
 	if sendErr == nil && resp.StatusCode/100 == 2 {
 		dkio.InputsFeedVec().WithLabelValues(metricName, point.Profiling.String()).Inc()
 		dkio.InputsFeedPtsVec().WithLabelValues(metricName, point.Profiling.String()).Observe(float64(1))
@@ -684,8 +714,8 @@
 			feedErr = fmt.Errorf("error status code %d", resp.StatusCode)
 		}
 		ipt.feeder.FeedLastError(feedErr.Error(),
-			metrics.WithLastErrorInput(metricName),
-			metrics.WithLastErrorCategory(point.Profiling),
+			dkMetrics.WithLastErrorInput(metricName),
+			dkMetrics.WithLastErrorCategory(point.Profiling),
 		)
 	}
@@ -782,6 +812,8 @@ func (ipt *Input) Run() {
 	log = logger.SLogger(inputName)
 	log.Infof("the input %s is running...", inputName)
 
+	metrics.InitLog()
+
 	if err := ipt.InitDiskQueueIO(); err != nil {
 		log.Errorf("unable to start IO process for profiling: %s", err)
 	}
@@ -873,7 +905,7 @@ type pushProfileDataOpt struct {
 	Input *Input
 }
 
-func pushProfileData(opt *pushProfileDataOpt, event *Metadata, bodySizeLimit int64) error {
+func pushProfileData(opt *pushProfileDataOpt, event *metrics.Metadata, bodySizeLimit int64) error {
 	b := new(bytes.Buffer)
 	mw := multipart.NewWriter(b)
 
@@ -888,7 +920,7 @@ func pushProfileData(opt *pushProfileDataOpt, event *metrics.Metadata, bodySizeLimit int
 		}
 	}
 
-	f, err := mw.CreateFormFile(eventJSONFile, eventJSONFileWithSuffix)
+	f, err := mw.CreateFormFile(metrics.EventFile, metrics.EventJSONFile)
 	if err != nil {
 		return err
 	}
diff --git a/internal/plugins/inputs/profile/input_test.go b/internal/plugins/inputs/profile/input_test.go
index 51df1d55e9..74ad468d6a 100644
--- a/internal/plugins/inputs/profile/input_test.go
+++ b/internal/plugins/inputs/profile/input_test.go
@@ -69,7 +69,7 @@ func TestIOConfig(t *testing.T) {
 	send_retry_count = 5
 	`
 
-	ipt := defaultInput()
+	ipt := DefaultInput()
 	assert.Equal(t, defaultDiskCachePath(), ipt.IOConfig.CachePath)
 	assert.Equal(t, defaultDiskCacheSize, ipt.IOConfig.CacheCapacityMB)
 	assert.Equal(t, false, ipt.IOConfig.ClearCacheOnStart)
@@ -95,7 +95,7 @@ func TestIOConfig(t *testing.T) {
 	assert.NoError(t, err)
 
 	assert.Equal(t, 64, ipt.BodySizeLimitMB)
-	assert.Equal(t, int64(64<<20), ipt.getBodySizeLimit())
+	assert.Equal(t, int64(64<<20), ipt.GetBodySizeLimit())
 	assert.Equal(t, "/usr/local/datakit/cache/profiling_inputs", ipt.IOConfig.CachePath)
 	assert.Equal(t, 20480, ipt.IOConfig.CacheCapacityMB)
 	assert.Equal(t, int64(20480<<20), ipt.getDiskCacheCapacity())
@@ -262,7 +262,21 @@ func Test_getPyroscopeTagFromLabels(t *testing.T) {
 }
 
 func TestInput_sendRequestToDW(t *testing.T) {
-	ipt := defaultInput()
+	eventJSON := `{
+    "attachments": [
+        "main.jfr",
+        "metrics.json"
+    ],
+    "tags_profiler": "process_id:31145,service:zy-profiling-test,profiler_version:0.102.0~b67f6e3380,host:zydeMacBook-Air.local,runtime-id:06dddda1-957b-4619-97cb-1a78fc7e3f07,language:jvm,env:test,version:v1.2",
+    "start": "2022-06-17T09:20:07.002305Z",
+    "end": "2022-06-17T09:21:08.261768Z",
+    "family": "java",
+    "version": "4",
+    "numbers": [1, 3, 5],
+    "stable": false
+}`
+
+	ipt := DefaultInput()
 
 	buf := &bytes.Buffer{}
 	mw := multipart.NewWriter(buf)
diff --git a/internal/plugins/inputs/profile/metrics/jfr.go b/internal/plugins/inputs/profile/metrics/jfr.go
new file mode 100644
index 0000000000..65b5366f67
--- /dev/null
+++ b/internal/plugins/inputs/profile/metrics/jfr.go
@@ -0,0 +1,452 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the MIT License.
+// This product includes software developed at Guance Cloud (https://www.guance.com/).
+// Copyright 2021-present Guance, Inc.
+
+package metrics
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/grafana/jfr-parser/common/attributes"
+	"github.com/grafana/jfr-parser/common/filters"
+	"github.com/grafana/jfr-parser/common/units"
+	"github.com/grafana/jfr-parser/parser"
+)
+
+const (
+	defaultCPUSampleInterval  = 10_000_000 // 10ms
+	defaultWallSampleInterval = 10_000_000 // 10ms
+)
+
+type jfrChunks []*parser.Chunk
+
+var minValidTime = time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)
+
+func (j jfrChunks) jvmStartTime() (startTime time.Time, err error) {
+	var quantity units.IQuantity
+	for _, chunk := range j {
+		for _, event := range chunk.Apply(filters.VmInfo) {
+			if event != nil {
+				if quantity, err = attributes.JVMStartTime.GetValue(event); err == nil {
+					if startTime, err = units.ToTime(quantity); err == nil && minValidTime.Before(startTime) {
+						return
+					}
+				}
+			}
+		}
+	}
+	return startTime, fmt.Errorf("unable to get jvm start time: %w", err)
+}
+
+type ddProfilerSetting struct {
+	cpuIntervalNanos  int64
+	wallIntervalNanos int64
+}
+
+func (j jfrChunks) resolveDDProfilerSetting() *ddProfilerSetting {
+	cfg := &ddProfilerSetting{
+		cpuIntervalNanos:  -1,
+		wallIntervalNanos: -1,
+	}
+
+	for _, chunk := range j {
+		for _, event := range chunk.Apply(filters.DatadogProfilerConfig) {
+			if cfg.cpuIntervalNanos <= 0 {
+				if cpuInterval, err := attributes.CpuSamplingInterval.GetValue(event); err == nil {
+					if nanoInterval, err := cpuInterval.In(units.Nanosecond); err == nil && nanoInterval.IntValue() > 0 {
+						cfg.cpuIntervalNanos = nanoInterval.IntValue()
+					}
+				}
+			}
+			if cfg.wallIntervalNanos <= 0 {
+				if wallInterval, err := attributes.WallSampleInterval.GetValue(event); err == nil {
+					if nanoInterval, err := wallInterval.In(units.Nanosecond); err == nil && nanoInterval.IntValue() > 0 {
+						cfg.wallIntervalNanos = nanoInterval.IntValue()
+					}
+				}
+			}
+
+			if cfg.cpuIntervalNanos > 0 && cfg.wallIntervalNanos > 0 {
+				return cfg
+			}
+		}
+	}
+	if cfg.cpuIntervalNanos <= 0 {
+		cfg.cpuIntervalNanos = defaultCPUSampleInterval
+	}
+	if cfg.wallIntervalNanos <= 0 {
+		cfg.wallIntervalNanos = defaultWallSampleInterval
+	}
+	return cfg
+}
+
+func (j jfrChunks) cpuTimeDurationNS() int64 {
+	cfg := j.resolveDDProfilerSetting()
+
+	totalSamples := int64(0)
+	for _, chunk := range j {
+		for _, event := range chunk.Apply(filters.DatadogExecutionSample) {
+			weight, err := attributes.SampleWeight.GetValue(event)
+			if err != nil {
+				log.Warnf("unable to get datadog execution sample weight: %v", err)
+				continue
+			}
+			totalSamples += weight
+		}
+	}
+	return totalSamples * cfg.cpuIntervalNanos
+}
+
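+// allocations estimates the total allocated bytes and allocation count from
+// Datadog allocation samples: each sample's size is scaled by its statistical
+// weight, and the weights themselves are summed as the allocation count.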
log.Warnf("unable to resolve datadog direct allocation allocated bytes: %v", err) + continue + } + if value.Unit() != units.Byte { + value, err = value.In(units.Byte) + if err != nil { + log.Warnf("unable to convert direct allocation allocated to unit byte: %v", err) + continue + } + } + totalBytes += value.IntValue() + } + } + return +} + +func (j jfrChunks) classLoaderCount() (count int64) { + for _, chunk := range j { + count += int64(len(chunk.Apply(filters.ClassLoaderStatistics))) + } + return +} + +func (j jfrChunks) exceptionCount() (count int64) { + for _, chunk := range j { + count += int64(len(chunk.Apply(filters.DatadogExceptionSample))) + } + return +} + +func (j jfrChunks) ioRead(filter parser.EventFilter) (maxReadTimeNS int64, maxBytesRead int64, totalReadTimeNS int64, totalBytesRead int64) { + for _, chunk := range j { + for _, event := range chunk.Apply(filter) { + duration, err := attributes.Duration.GetValue(event) + if err != nil { + log.Warnf("unable to resolve file read duration: %v", err) + continue + } + if duration.Unit() != units.Nanosecond { + duration, err = duration.In(units.Nanosecond) + if err != nil { + log.Warnf("unable to convert to file read duration to nanoseconds: %v", err) + continue + } + } + durationNS := duration.IntValue() + if maxReadTimeNS < durationNS { + maxReadTimeNS = durationNS + } + totalReadTimeNS += durationNS + + bytesRead, err := attributes.BytesRead.GetValue(event) + if err != nil { + log.Warnf("unable to resolve file read bytes: %v", err) + continue + } + if bytesRead.Unit() != units.Byte { + bytesRead, err = bytesRead.In(units.Byte) + if err != nil { + log.Warnf("unable to convert to file bytesread to bytes: %v", err) + continue + } + } + + bytesNum := bytesRead.IntValue() + if maxBytesRead < bytesNum { + maxBytesRead = bytesNum + } + totalBytesRead += bytesNum + } + } + return // nolint:nakedret +} + +func (j jfrChunks) ioWrite(filter parser.EventFilter) (maxWriteTimeNS int64, maxBytesWritten int64, totalWriteTimeNS int64, totalBytesWritten int64) { + for _, chunk := range j { + for _, event := range chunk.Apply(filter) { + duration, err := attributes.Duration.GetValue(event) + if err != nil { + log.Warnf("unable to resolve file read duration: %v", err) + continue + } + if duration.Unit() != units.Nanosecond { + duration, err = duration.In(units.Nanosecond) + if err != nil { + log.Warnf("unable to convert to file read duration to nanoseconds: %v", err) + continue + } + } + durationNS := duration.IntValue() + if maxWriteTimeNS < durationNS { + maxWriteTimeNS = durationNS + } + totalWriteTimeNS += durationNS + + bytesWritten, err := attributes.BytesWritten.GetValue(event) + if err != nil { + log.Warnf("unable to resolve file read bytes: %v", err) + continue + } + if bytesWritten.Unit() != units.Byte { + bytesWritten, err = bytesWritten.In(units.Byte) + if err != nil { + log.Warnf("unable to convert to file bytesread to bytes: %v", err) + continue + } + } + + bytesNum := bytesWritten.IntValue() + if maxBytesWritten < bytesNum { + maxBytesWritten = bytesNum + } + totalBytesWritten += bytesNum + } + } + return // nolint:nakedret +} + +func (j jfrChunks) fileRead() (maxReadTimeNS int64, maxBytesRead int64, totalReadTimeNS int64, totalBytesRead int64) { + return j.ioRead(filters.FileRead) +} + +func (j jfrChunks) fileWrite() (maxWriteTimeNS int64, maxBytesWritten int64, totalWriteTimeNS int64, totalBytesWritten int64) { + return j.ioWrite(filters.FileWrite) +} + +func (j jfrChunks) gcDuration() (durationNS, count int64) { + for _, chunk := 
+func (j jfrChunks) gcDuration() (durationNS, count int64) {
+	for _, chunk := range j {
+		for _, event := range chunk.Apply(filters.GarbageCollection) {
+			duration, err := attributes.Duration.GetValue(event)
+			if err != nil {
+				log.Warnf("unable to resolve jfr GC duration: %v", err)
+				continue
+			}
+			if duration.Unit() != units.Nanosecond {
+				duration, err = duration.In(units.Nanosecond)
+				if err != nil {
+					log.Warnf("unable to convert GC duration to nanoseconds: %v", err)
+					continue
+				}
+			}
+			durationNS += duration.IntValue()
+			count++
+		}
+	}
+	return
+}
+
+func (j jfrChunks) gcPauseDuration() (maxPauseNS, totalPauseNS, pauseCount int64) {
+	for _, chunk := range j {
+		for _, event := range chunk.Apply(filters.GcPause) {
+			duration, err := attributes.Duration.GetValue(event)
+			if err != nil {
+				log.Warnf("unable to resolve GC pause duration: %v", err)
+				continue
+			}
+			if duration.Unit() != units.Nanosecond {
+				duration, err = duration.In(units.Nanosecond)
+				if err != nil {
+					log.Warnf("unable to convert GC pause duration to nanoseconds: %v", err)
+					continue
+				}
+			}
+
+			durationNS := duration.IntValue()
+			if maxPauseNS < durationNS {
+				maxPauseNS = durationNS
+			}
+			totalPauseNS += durationNS
+			pauseCount++
+		}
+	}
+	return
+}
+
+func (j jfrChunks) liveHeapSamples() (count int64) {
+	for _, chunk := range j {
+		count += int64(len(chunk.Apply(filters.DatadogHeapLiveObject)))
+	}
+	return
+}
+
+func (j jfrChunks) jvmHeapUsage() (usageBytes int64) {
+	for _, chunk := range j {
+		for _, event := range chunk.Apply(filters.DatadogHeapUsage) {
+			size, err := attributes.Size.GetValue(event)
+			if err != nil {
+				log.Warnf("unable to resolve jvm heap usage: %v", err)
+				continue
+			}
+			if size.Unit() != units.Byte {
+				size, err = size.In(units.Byte)
+				if err != nil {
+					log.Warnf("unable to convert jvm heap usage to unit byte: %v", err)
+					continue
+				}
+			}
+			usageBytes = size.IntValue()
+			return
+		}
+	}
+	return
+}
+
+func (j jfrChunks) threadContextSwitchRate() float64 {
+	var (
+		sum   float64
+		count int64
+	)
+	for _, chunk := range j {
+		for _, event := range chunk.Apply(filters.ContextSwitchRate) {
+			rate, err := attributes.SwitchRate.GetValue(event)
+			if err != nil {
+				log.Warnf("unable to resolve thread context switch rate: %v", err)
+				continue
+			}
+			sum += rate
+			count++
+		}
+	}
+	if count > 0 {
+		return sum / float64(count)
+	}
+	return 0
+}
+
+func (j jfrChunks) liveHeap() (totalBytes, objectCount float64) { //nolint: unused
+	for _, chunk := range j {
+		for _, event := range chunk.Apply(filters.DatadogHeapLiveObject) {
+			size, err := attributes.Size.GetValue(event)
+			if err != nil {
+				log.Warnf("unable to resolve live heap size: %v", err)
+				continue
+			}
+			if size.Unit() != units.Byte {
+				size, err = size.In(units.Byte)
+				if err != nil {
+					log.Warnf("unable to convert jfr live heap size to unit byte: %v", err)
+					continue
+				}
+			}
+
+			weight, err := attributes.HeapWeight.GetValue(event)
+			if err != nil {
+				log.Warnf("unable to resolve live heap weight: %v", err)
+				continue
+			}
+
+			totalBytes += weight * size.FloatValue()
+			objectCount += weight
+		}
+	}
+	return
+}
+
+func (j jfrChunks) threadStart() (count int64) {
+	for _, chunk := range j {
+		count += int64(len(chunk.Apply(filters.ThreadStart)))
+	}
+	return
+}
+
+func (j jfrChunks) deadlockedThread() (count int64) {
+	for _, chunk := range j {
+		count += int64(len(chunk.Apply(filters.DatadogDeadlockedThread)))
+	}
+	return
+}
+
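+// monitorEnter aggregates Java monitor contention events: the longest single
+// wait, the total time spent waiting, and the number of contended acquisitions.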
+func (j jfrChunks) monitorEnter() (maxDurationNS, totalDurationNS float64, count int64) {
+	for _, chunk := range j {
+		for _, event := range chunk.Apply(filters.MonitorEnter) {
+			duration, err := attributes.Duration.GetValue(event)
+			if err != nil {
+				log.Warnf("unable to resolve monitor blocked duration: %v", err)
+				continue
+			}
+			if duration.Unit() != units.Nanosecond {
+				if duration, err = duration.In(units.Nanosecond); err != nil {
+					log.Warnf("unable to convert to nanoseconds: %v", err)
+					continue
+				}
+			}
+			durationNS := duration.FloatValue()
+			if maxDurationNS < durationNS {
+				maxDurationNS = durationNS
+			}
+			totalDurationNS += durationNS
+			count++
+		}
+	}
+	return
+}
+
+func (j jfrChunks) compilationDuration() (totalDuration int64) {
+	for _, chunk := range j {
+		for _, event := range chunk.Apply(filters.Compilation) {
+			duration, err := attributes.Duration.GetValue(event)
+			if err != nil {
+				log.Warnf("unable to resolve jvm compilation duration: %v", err)
+				continue
+			}
+			if duration.Unit() != units.Nanosecond {
+				if duration, err = duration.In(units.Nanosecond); err != nil {
+					log.Warnf("unable to convert compilation duration to nanoseconds: %v", err)
+					continue
+				}
+			}
+			totalDuration += duration.IntValue()
+		}
+	}
+	return
+}
+
+func (j jfrChunks) socketIORead() (maxReadTimeNS int64, maxBytesRead int64, totalReadTimeNS int64, totalBytesRead int64) {
+	return j.ioRead(filters.SocketRead)
+}
+
+func (j jfrChunks) socketIOWrite() (maxWriteTimeNS int64, maxBytesWritten int64, totalWriteTimeNS int64, totalBytesWritten int64) {
+	return j.ioWrite(filters.SocketWrite)
+}
diff --git a/internal/plugins/inputs/profile/metrics/jfr_test.go b/internal/plugins/inputs/profile/metrics/jfr_test.go
new file mode 100644
index 0000000000..62930fa136
--- /dev/null
+++ b/internal/plugins/inputs/profile/metrics/jfr_test.go
@@ -0,0 +1,63 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the MIT License.
+// This product includes software developed at Guance Cloud (https://www.guance.com/).
+// Copyright 2021-present Guance, Inc.
+
+package metrics
+
+import (
+	"testing"
+
+	"github.com/grafana/jfr-parser/common/attributes"
+	"github.com/grafana/jfr-parser/common/filters"
+	"github.com/grafana/jfr-parser/common/types"
+	"github.com/grafana/jfr-parser/parser"
+)
+
+var chunks = func() jfrChunks {
+	chunks, err := parser.ParseFile("testdata/main.jfr")
+	if err != nil {
+		panic(err)
+	}
+	return chunks
+}()
+
+func TestResolveDDProfilerSetting(t *testing.T) {
+	c := chunks.resolveDDProfilerSetting()
+
+	t.Logf("%+#v", *c)
+}
+
+func TestSocketIORead(t *testing.T) {
+	maxReadTimeNS, maxBytesRead, totalReadTimeNS, totalBytesRead := chunks.socketIORead()
+	t.Log(maxReadTimeNS, maxBytesRead, totalReadTimeNS, totalBytesRead)
+}
+
+func TestAllocWeight(t *testing.T) {
+	for _, chunk := range chunks {
+		for _, event := range chunk.Apply(filters.DatadogAllocationSample) {
+			value, err := attributes.AllocWeight.GetValue(event)
+			if err != nil {
+				t.Fatal(err)
+			}
+			t.Log(value)
+		}
+	}
+}
+
+func TestCompilationDuration(t *testing.T) {
+	durationNS := chunks.compilationDuration()
+	t.Log(durationNS)
+}
+
+func TestGcPauseDuration(t *testing.T) {
+	maxNS, totalPauseNS, count := chunks.gcPauseDuration()
+
+	t.Log(maxNS, totalPauseNS, count)
+}
+
+func TestShowClassMeta(t *testing.T) {
+	for _, chunk := range chunks {
+		chunk.ShowClassMeta(types.SocketRead)
+	}
+}
diff --git a/internal/plugins/inputs/profile/metrics/metrics.go b/internal/plugins/inputs/profile/metrics/metrics.go
new file mode 100644
index 0000000000..ed91a13849
--- /dev/null
+++ b/internal/plugins/inputs/profile/metrics/metrics.go
@@ -0,0 +1,679 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the MIT License.
+// This product includes software developed at Guance Cloud (https://www.guance.com/).
+// Copyright 2021-present Guance, Inc.
+
+// Package metrics generates apm metrics from profiling data.
+package metrics
+
+import (
+	"errors"
+	"fmt"
+	"mime/multipart"
+	"os"
+	"path"
+	"strings"
+
+	"github.com/GuanceCloud/cliutils/logger"
+	"github.com/GuanceCloud/cliutils/point"
+	"github.com/GuanceCloud/cliutils/pprofparser/domain/quantity"
+	"github.com/grafana/jfr-parser/parser"
+	dkio "gitlab.jiagouyun.com/cloudcare-tools/datakit/internal/io"
+)
+
+const (
+	metricsName = "profiling_metrics"
+)
+
+const (
+	profJVMCPUCores               = "prof_jvm_cpu_cores"
+	profJVMUptimeNS               = "prof_jvm_uptime_nanoseconds"
+	profJVMAllocBytesTotal        = "prof_jvm_alloc_bytes_total"
+	profJVMAllocBytesPerSec       = "prof_jvm_alloc_bytes_per_sec"
+	profJVMAllocsPerSec           = "prof_jvm_allocs_per_sec"
+	profJVMDirectAllocBytesPerSec = "prof_jvm_direct_alloc_bytes_per_sec"
+	profJVMClassLoadsPerSec       = "prof_jvm_class_loads_per_sec"
+	profJVMCompilationTime        = "prof_jvm_compilation_time"
+	profJVMContextSwitchesPerSec  = "prof_jvm_context_switches_per_sec"
+	profJVMThrowsPerSec           = "prof_jvm_throws_per_sec"
+	profJVMThrowsTotal            = "prof_jvm_throws_total"
+	profJVMFileIOMaxReadBytes     = "prof_jvm_file_io_max_read_bytes"
+	profJVMFileIOReadBytes        = "prof_jvm_file_io_read_bytes"
+	profJVMFileIOMaxReadTime      = "prof_jvm_file_io_max_read_time"
+	profJVMFileIOReadTime         = "prof_jvm_file_io_read_time"
+	profJVMFileIOMaxWriteBytes    = "prof_jvm_file_io_max_write_bytes"
+	profJVMFileIOWriteBytes       = "prof_jvm_file_io_write_bytes"
+	profJVMFileIOMaxWriteTime     = "prof_jvm_file_io_max_write_time"
+	profJVMFileIOWriteTime        = "prof_jvm_file_io_write_time"
+	profJVMFileIOTime             = "prof_jvm_file_io_time"
+	profJVMAvgGcPauseTime         = "prof_jvm_avg_gc_pause_time"
+	profJVMMaxGcPauseTime         = "prof_jvm_max_gc_pause_time"
+	profJVMGcPauseTime            = "prof_jvm_gc_pause_time"
+	profJVMGcPausesPerSec         = "prof_jvm_gc_pauses_per_sec"
+	profJVMLifetimeHeapBytes      = "prof_jvm_lifetime_heap_bytes"
+	profJVMLifetimeHeapObjects    = "prof_jvm_lifetime_heap_objects"
+	profJVMLocksMaxWaitTime       = "prof_jvm_locks_max_wait_time"
+	profJVMLocksPerSec            = "prof_jvm_locks_per_sec"
+	profJVMThreadsCreatedPerSec   = "prof_jvm_threads_created_per_sec"
+	profJVMThreadsDeadlocked      = "prof_jvm_threads_deadlocked"
+	profJVMSocketIOMaxReadTime    = "prof_jvm_socket_io_max_read_time"
+	profJVMSocketIOMaxReadBytes   = "prof_jvm_socket_io_max_read_bytes"
+	profJVMSocketIOReadTime       = "prof_jvm_socket_io_read_time"
+	profJVMSocketIOReadBytes      = "prof_jvm_socket_io_read_bytes"
+	profJVMSocketIOMaxWriteTime   = "prof_jvm_socket_io_max_write_time"
+	profJVMSocketIOMaxWriteBytes  = "prof_jvm_socket_io_max_write_bytes"
+	profJVMSocketIOWriteTime      = "prof_jvm_socket_io_write_time"
+	profJVMSocketIOWriteBytes     = "prof_jvm_socket_io_write_bytes"
+)
+
+const (
+	profGoHeapGrowthBytesPerSec = "prof_go_heap_growth_bytes_per_sec"
+	profGoGCsPerSec             = "prof_go_gcs_per_sec"
+	profGoGCPauseTime           = "prof_go_gc_pause_time"
+	profGoMaxGCPauseTime        = "prof_go_max_gc_pause_time"
+	profGoNumGoroutine          = "prof_go_num_goroutine"
+	profGoAllocBytesPerSec      = "prof_go_alloc_bytes_per_sec"
+	profGoAllocsPerSec          = "prof_go_allocs_per_sec"
+	profGoFreesPerSec           = "prof_go_frees_per_sec"
+	profGoAllocBytesTotal       = "prof_go_alloc_bytes_total"      // profGoAllocsPerSec * total_seconds
+	profGoCPUCoresGcOverhead    = "prof_go_cpu_cores_gc_overhead"  // profGoGCPauseTime / total_seconds
+
+	profGoCPUCores            = "prof_go_cpu_cores"
+	profGoBlockedTime         = "prof_go_blocked_time"
+	profGoMutexDelayTime      = "prof_go_mutex_delay_time"
+	profGoLifetimeHeapBytes   = "prof_go_lifetime_heap_bytes"
+	profGoLifetimeHeapObjects = "prof_go_lifetime_heap_objects"
+)
+
+const (
"prof_python_cpu_cores" + profPythonAllocBytesPerSec = "prof_python_alloc_bytes_per_sec" + profPythonAllocsPerSec = "prof_python_allocs_per_sec" + profPythonAllocBytesTotal = "prof_python_alloc_bytes_total" + profPythonLockAcquisitionTime = "prof_python_lock_acquisition_time" + profPythonLockAcquisitionsPerSec = "prof_python_lock_acquisitions_per_sec" + profPythonLockHoldTime = "prof_python_lock_hold_time" + profPythonExceptionsPerSec = "prof_python_exceptions_per_sec" + profPythonExceptionsTotal = "prof_python_exceptions_total" + profPythonLifetimeHeapBytes = "prof_python_lifetime_heap_bytes" + profPythonWallTime = "prof_python_wall_time" +) + +var goMetricsNameMapping = map[string]string{ + profGoCPUCores: "go_cpu_cores", + profGoCPUCoresGcOverhead: "go_cpu_cores_gc_overhead", + profGoAllocBytesPerSec: "go_alloc_bytes_per_sec", + profGoAllocBytesTotal: "go_alloc_bytes_total", + profGoFreesPerSec: "go_frees_per_sec", + profGoHeapGrowthBytesPerSec: "go_heap_growth_bytes_per_sec", + profGoAllocsPerSec: "go_allocs_per_sec", + profGoBlockedTime: "go_blocked_time", + profGoMutexDelayTime: "go_mutex_delay_time", + profGoGCsPerSec: "go_gcs_per_sec", + profGoMaxGCPauseTime: "go_max_gc_pause_time", + profGoGCPauseTime: "go_gc_pause_time", + profGoNumGoroutine: "go_num_goroutine", + profGoLifetimeHeapBytes: "go_lifetime_heap_bytes", + profGoLifetimeHeapObjects: "go_lifetime_heap_objects", +} + +const ( + goCPUFile = "cpu" + goBlockFile = "block" + goHeapFile = "heap" + goMutexFile = "mutex" + goroutinesFile = "goroutines" +) + +const ( + cpuTimeMetric = "cpu-time" + wallTimeMetric = "wall-time" + exceptionSamplesMetric = "exception-samples" + lockAcquireMetric = "lock-acquire" + lockAcquireWaitMetric = "lock-acquire-wait" + allocSamplesMetric = "alloc-samples" + allocSpaceMetric = "alloc-space" + heapSpaceMetric = "heap-space" + lockReleaseHoldMetric = "lock-release-hold" +) + +var ( + log = logger.DefaultSLogger("profilingMetrics") + metricsFeeder = dkio.DefaultFeeder() +) + +func InitLog() { + log = logger.SLogger("profilingMetrics") +} + +func exportMetrics(pts []*point.Point) error { + if err := metricsFeeder.FeedV2(point.Metric, pts, dkio.WithInputName(metricsName)); err != nil { + return fmt.Errorf("unable to feed profiling metrics: %w", err) + } + return nil +} + +type metricKVs point.KVs + +func newMetricKVs() *metricKVs { + return toMetricKVs(nil) +} + +func toMetricKVs(kvs point.KVs) *metricKVs { + return (*metricKVs)(&kvs) +} + +func (m *metricKVs) toPointKVs() point.KVs { + if m == nil { + return nil + } + return point.KVs(*m) +} + +func (m *metricKVs) AddTag(k, v string) { + if m == nil { + return + } + *m = metricKVs(m.toPointKVs().AddTag(k, v)) +} + +func (m *metricKVs) EasyAdd(k string, v any) { + m.AddV2(k, v, false) +} + +func (m *metricKVs) AddV2(k string, v any, force bool, opts ...point.KVOption) { + if m == nil { + return + } + *m = metricKVs(m.toPointKVs().AddV2(k, v, force, opts...)) +} + +func ExportJVMMetrics(files map[string][]*multipart.FileHeader, metadata *ResolvedMetadata, customTags map[string]string) error { + jfrFile := func() *multipart.FileHeader { + for field, headers := range files { + if field == EventFile || field == EventJSONFile { + continue + } + + switch field { + case EventFile, EventJSONFile: + continue + case MainFile, MainJFRFile, AutoFile, AutoJFRFile: + for _, header := range headers { + if header.Size > 0 { + return header + } + } + } + + for _, header := range headers { + if strings.HasSuffix(header.Filename, ".jfr") && header.Size > 0 { + return 
+func ExportJVMMetrics(files map[string][]*multipart.FileHeader, metadata *ResolvedMetadata, customTags map[string]string) error {
+	jfrFile := func() *multipart.FileHeader {
+		for field, headers := range files {
+			if field == EventFile || field == EventJSONFile {
+				continue
+			}
+
+			switch field {
+			case EventFile, EventJSONFile:
+				continue
+			case MainFile, MainJFRFile, AutoFile, AutoJFRFile:
+				for _, header := range headers {
+					if header.Size > 0 {
+						return header
+					}
+				}
+			}
+
+			for _, header := range headers {
+				if strings.HasSuffix(header.Filename, ".jfr") && header.Size > 0 {
+					return header
+				}
+			}
+		}
+		return nil
+	}()
+
+	if jfrFile == nil {
+		return fmt.Errorf("unable to find jfr file")
+	}
+
+	f, err := jfrFile.Open()
+	if err != nil {
+		return fmt.Errorf("unable to open jfr file: %w", err)
+	}
+	defer f.Close() // nolint:errcheck
+
+	jfrStart, err := ResolveStartTime(metadata.FormValue)
+	if err != nil {
+		return fmt.Errorf("unable to resolve jfr start time: %w", err)
+	}
+	jfrEnd, err := ResolveEndTime(metadata.FormValue)
+	if err != nil {
+		return fmt.Errorf("unable to resolve jfr end time: %w", err)
+	}
+
+	jfrDurationNS, jfrDurationSeconds := jfrEnd.Sub(jfrStart).Nanoseconds(), jfrEnd.Sub(jfrStart).Seconds()
+
+	chunks, err := parser.Parse(f)
+	if err != nil {
+		return fmt.Errorf("unable to parse jfr: %w", err)
+	}
+
+	jc := jfrChunks(chunks)
+
+	commonTags := map[string]string{
+		"language": Java.String(),
+		"host":     metadata.GetTag("host"),
+		"service":  metadata.GetTag("service"),
+		"env":      metadata.GetTag("env"),
+		"version":  metadata.GetTag("version"),
+	}
+
+	for k, v := range customTags {
+		commonTags[k] = v
+	}
+
+	kVs := toMetricKVs(point.NewTags(commonTags))
+
+	if jvmStart, err := jc.jvmStartTime(); err == nil {
+		kVs.EasyAdd(profJVMUptimeNS, jfrEnd.Sub(jvmStart).Nanoseconds())
+	}
+
+	costCPUCores := float64(jc.cpuTimeDurationNS()) / float64(jfrDurationNS)
+	kVs.EasyAdd(profJVMCPUCores, costCPUCores)
+
+	allocBytes, allocCount := jc.allocations()
+	kVs.EasyAdd(profJVMAllocBytesTotal, allocBytes)
+	kVs.EasyAdd(profJVMAllocBytesPerSec, allocBytes/jfrDurationSeconds)
+	kVs.EasyAdd(profJVMAllocsPerSec, allocCount/jfrDurationSeconds)
+
+	directAllocBytes := jc.directAllocationBytes()
+	kVs.EasyAdd(profJVMDirectAllocBytesPerSec, float64(directAllocBytes)/jfrDurationSeconds)
+
+	classCount := jc.classLoaderCount()
+	kVs.EasyAdd(profJVMClassLoadsPerSec, float64(classCount)/jfrDurationSeconds)
+
+	kVs.EasyAdd(profJVMCompilationTime, jc.compilationDuration())
+
+	kVs.EasyAdd(profJVMContextSwitchesPerSec, jc.threadContextSwitchRate())
+
+	totalExceptions := jc.exceptionCount()
+	kVs.EasyAdd(profJVMThrowsTotal, totalExceptions)
+	kVs.EasyAdd(profJVMThrowsPerSec, float64(totalExceptions)/jfrDurationSeconds)
+
+	readMaxDurationNS, readMaxBytesRead, totalReadDurationNS, totalBytesRead := jc.fileRead()
+	kVs.EasyAdd(profJVMFileIOMaxReadTime, readMaxDurationNS)
+	kVs.EasyAdd(profJVMFileIOMaxReadBytes, readMaxBytesRead)
+	kVs.EasyAdd(profJVMFileIOReadTime, totalReadDurationNS)
+	kVs.EasyAdd(profJVMFileIOReadBytes, totalBytesRead)
+
+	maxWriteDurationNS, maxBytesWritten, totalWriteDurationNS, totalBytesWritten := jc.fileWrite()
+
+	kVs.EasyAdd(profJVMFileIOMaxWriteTime, maxWriteDurationNS)
+	kVs.EasyAdd(profJVMFileIOMaxWriteBytes, maxBytesWritten)
+	kVs.EasyAdd(profJVMFileIOWriteTime, totalWriteDurationNS)
+	kVs.EasyAdd(profJVMFileIOWriteBytes, totalBytesWritten)
+	kVs.EasyAdd(profJVMFileIOTime, totalReadDurationNS+totalWriteDurationNS)
+
+	durationNS, count := jc.gcDuration()
+	if count == 0 {
+		kVs.EasyAdd(profJVMAvgGcPauseTime, 0)
+	} else {
+		kVs.EasyAdd(profJVMAvgGcPauseTime, float64(durationNS)/float64(count))
+	}
+
+	maxPauseNanos, totalPauseNanos, pauseCount := jc.gcPauseDuration()
+	kVs.EasyAdd(profJVMMaxGcPauseTime, maxPauseNanos)
+	kVs.EasyAdd(profJVMGcPauseTime, totalPauseNanos)
+	kVs.EasyAdd(profJVMGcPausesPerSec, float64(pauseCount)/jfrDurationSeconds)
+
+	kVs.EasyAdd(profJVMLifetimeHeapObjects, jc.liveHeapSamples())
+	kVs.EasyAdd(profJVMLifetimeHeapBytes, jc.jvmHeapUsage())
+
+	maxLockDurationNS, _, lockCount := jc.monitorEnter()
+
+	kVs.EasyAdd(profJVMLocksMaxWaitTime, maxLockDurationNS)
+	kVs.EasyAdd(profJVMLocksPerSec, float64(lockCount)/jfrDurationSeconds)
+
+	kVs.EasyAdd(profJVMThreadsCreatedPerSec, float64(jc.threadStart())/jfrDurationSeconds)
+	kVs.EasyAdd(profJVMThreadsDeadlocked, jc.deadlockedThread())
+
+	maxReadTimeNS, maxBytesRead, totalReadTimeNS, totalBytesReadSocket := jc.socketIORead()
+
+	kVs.EasyAdd(profJVMSocketIOMaxReadTime, maxReadTimeNS)
+	kVs.EasyAdd(profJVMSocketIOMaxReadBytes, maxBytesRead)
+	kVs.EasyAdd(profJVMSocketIOReadTime, totalReadTimeNS)
+	kVs.EasyAdd(profJVMSocketIOReadBytes, totalBytesReadSocket)
+
+	maxWriteTimeNS, maxBytesWrittenSocket, totalWriteTimeNS, totalBytesWrittenSocket := jc.socketIOWrite()
+
+	kVs.EasyAdd(profJVMSocketIOMaxWriteTime, maxWriteTimeNS)
+	kVs.EasyAdd(profJVMSocketIOMaxWriteBytes, maxBytesWrittenSocket)
+	kVs.EasyAdd(profJVMSocketIOWriteTime, totalWriteTimeNS)
+	kVs.EasyAdd(profJVMSocketIOWriteBytes, totalBytesWrittenSocket)
+
+	pt := point.NewPointV2(metricsName, kVs.toPointKVs(), point.WithPrecision(point.PrecNS), point.WithTime(jfrEnd))
+	if err = exportMetrics([]*point.Point{pt}); err != nil {
+		return fmt.Errorf("unable to export profiling metrics: %w", err)
+	}
+	return nil
+}
+
+func pickProfileFile(files map[string][]*multipart.FileHeader) *multipart.FileHeader {
+	for fieldName, headers := range files {
+		if len(headers) > 0 {
+			if fieldName == AutoFile || fieldName == MainFile || fieldName == ProfFile {
+				return headers[0]
+			}
+			if path.Ext(fieldName) == PprofExt {
+				return headers[0]
+			}
+
+			for _, header := range headers {
+				if header.Filename == AutoFile || header.Filename == MainFile {
+					return header
+				}
+				if path.Ext(header.Filename) == PprofExt {
+					return header
+				}
+			}
+		}
+	}
+	return nil
+}
+
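+// ExportPythonMetrics reads the summary section of the pprof attachment
+// uploaded by dd-trace-py and derives the prof_python_* fields from it.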
unit, err := quantity.ParseUnit(quantity.Memory, allocSpace.Unit) + if err != nil { + log.Warnf("unable to resolve alloc space unit: %v", err) + } else { + if q := unit.Quantity(allocSpace.Value); q.Unit != quantity.Byte { + allocBytes, err = q.IntValueIn(quantity.Byte) + if err != nil { + log.Warnf("unable to change unit to byte: %v", err) + } + } + } + kVs.EasyAdd(profPythonAllocBytesTotal, allocBytes) + kVs.EasyAdd(profPythonAllocBytesPerSec, float64(allocBytes)/pprofDurationSeconds) + } + + if allocSample := summaries[allocSamplesMetric]; allocSample != nil { + kVs.EasyAdd(profPythonAllocsPerSec, float64(allocSample.Value)/pprofDurationSeconds) + } + + if lockCount := summaries[lockAcquireMetric]; lockCount != nil { + kVs.EasyAdd(profPythonLockAcquisitionsPerSec, float64(lockCount.Value)/pprofDurationSeconds) + } + + if lockWait := summaries[lockAcquireWaitMetric]; lockWait != nil { + waitDuration := lockWait.Value + unit, err := quantity.ParseUnit(quantity.Duration, lockWait.Unit) + if err != nil { + log.Warnf("unable to resolve lock wait duration unit: %v", err) + } else { + if q := unit.Quantity(lockWait.Value); q.Unit != quantity.NanoSecond { + waitDuration, err = q.IntValueIn(quantity.NanoSecond) + if err != nil { + log.Warnf("unable to change unit to nanosecond: %v", err) + } + } + } + kVs.EasyAdd(profPythonLockAcquisitionTime, waitDuration) + } + + if lockRelease := summaries[lockReleaseHoldMetric]; lockRelease != nil { + waitDuration := lockRelease.Value + unit, err := quantity.ParseUnit(quantity.Duration, lockRelease.Unit) + if err != nil { + log.Warnf("unable to resolve lock release duration unit: %v", err) + } else { + if q := unit.Quantity(lockRelease.Value); q.Unit != quantity.NanoSecond { + waitDuration, err = q.IntValueIn(quantity.NanoSecond) + if err != nil { + log.Warnf("unable to change unit to nanosecond: %v", err) + } + } + } + kVs.EasyAdd(profPythonLockHoldTime, waitDuration) + } + + if exception := summaries[exceptionSamplesMetric]; exception != nil { + kVs.EasyAdd(profPythonExceptionsTotal, exception.Value) + kVs.EasyAdd(profPythonExceptionsPerSec, float64(exception.Value)/pprofDurationSeconds) + } + + if wallTime := summaries[wallTimeMetric]; wallTime != nil { + wallDuration := wallTime.Value + + unit, err := quantity.ParseUnit(quantity.Duration, wallTime.Unit) + if err != nil { + log.Warnf("unable to resolve wall duration unit: %v", err) + } else { + if q := unit.Quantity(wallTime.Value); q.Unit != quantity.NanoSecond { + wallDuration, err = q.IntValueIn(quantity.NanoSecond) + if err != nil { + log.Warnf("unable to change unit to nanosecond: %v", err) + } + } + } + kVs.EasyAdd(profPythonWallTime, wallDuration) + } + + if heapSpace := summaries[heapSpaceMetric]; heapSpace != nil { + heapBytes := heapSpace.Value + + unit, err := quantity.ParseUnit(quantity.Memory, heapSpace.Unit) + if err != nil { + log.Warnf("unable to resolve heap space unit: %v", err) + } else { + if q := unit.Quantity(heapSpace.Value); q.Unit != quantity.Byte { + heapBytes, err = q.IntValueIn(quantity.Byte) + if err != nil { + log.Warnf("unable to change unit to byte: %v", err) + } + } + } + kVs.EasyAdd(profPythonLifetimeHeapBytes, heapBytes) + } + + pt := point.NewPointV2(metricsName, kVs.toPointKVs(), point.WithPrecision(point.PrecNS), point.WithTime(pprofEnd)) + if err = exportMetrics([]*point.Point{pt}); err != nil { + return fmt.Errorf("unable to export profiling metrics: %w", err) + } + return nil +} + +func ExportGoMetrics(files map[string][]*multipart.FileHeader, metadata
*ResolvedMetadata, customTags map[string]string) error { + commonTags := map[string]string{ + "language": Golang.String(), + "host": metadata.GetTag("host"), + "service": metadata.GetTag("service"), + "env": metadata.GetTag("env"), + "version": metadata.GetTag("version"), + } + + for k, v := range customTags { + commonTags[k] = v + } + + pprofStart, err := ResolveStartTime(metadata.FormValue) + if err != nil { + return fmt.Errorf("unable to resolve go profiling start time: %w", err) + } + pprofEnd, err := ResolveEndTime(metadata.FormValue) + if err != nil { + return fmt.Errorf("unable to resolve go profiling end time: %w", err) + } + + pprofDurationNS, pprofDurationSeconds := pprofEnd.Sub(pprofStart).Nanoseconds(), pprofEnd.Sub(pprofStart).Seconds() + + kVs := toMetricKVs(point.NewTags(commonTags)) + + metricsFile, ok := files[MetricFile] + if !ok { + metricsFile = files[MetricJSONFile] + } + + hasExported := make(map[string]bool) + + if len(metricsFile) > 0 { + mf, err := metricsFile[0].Open() + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("unable to open metrics.json file: %w", err) + } + } else { + defer mf.Close() // nolint:errcheck + + jsonMetering, err := parseMetricsJSONFile(mf) + if err != nil { + return fmt.Errorf("unable to resolve metrics.json: %w", err) + } + + for metricName, number := range jsonMetering { + hasExported[metricName] = true + kVs.EasyAdd(metricName, resolveJSONNumber(number)) + } + if allocBytesMetric, ok := jsonMetering[profGoAllocBytesPerSec]; ok && !hasExported[profGoAllocBytesTotal] { + allocPerSec, err := allocBytesMetric.Float64() + if err == nil && allocPerSec > 0 { + hasExported[profGoAllocBytesTotal] = true + kVs.EasyAdd(profGoAllocBytesTotal, allocPerSec*pprofDurationSeconds) + } + } + + if gcPauseMetric, ok := jsonMetering[profGoGCPauseTime]; ok && !hasExported[profGoCPUCoresGcOverhead] { + gcPauseDuration, err := gcPauseMetric.Float64() + if err == nil && gcPauseDuration > 0 { + hasExported[profGoCPUCoresGcOverhead] = true + kVs.EasyAdd(profGoCPUCoresGcOverhead, gcPauseDuration/pprofDurationSeconds) + } + } + } + } + + pprofFiles := make(map[string]*multipart.FileHeader, 5) + + for field, headers := range files { + switch { + case strings.Contains(field, goCPUFile) && len(headers) > 0: + pprofFiles[goCPUFile] = headers[0] + case strings.Contains(field, goBlockFile) && len(headers) > 0: + pprofFiles[goBlockFile] = headers[0] + case strings.Contains(field, goHeapFile) && len(headers) > 0: + pprofFiles[goHeapFile] = headers[0] + case strings.Contains(field, goMutexFile) && len(headers) > 0: + pprofFiles[goMutexFile] = headers[0] + case strings.Contains(field, goroutinesFile) && len(headers) > 0: + pprofFiles[goroutinesFile] = headers[0] + } + } + + if !hasExported[profGoCPUCores] { + if cpuFile, ok := pprofFiles[goCPUFile]; ok { + cpuNanos, err := pprofCPUDuration(cpuFile) + if err != nil { + log.Warnf("unable to resolve pprof cpu duration: %v", err) + } else { + hasExported[profGoCPUCores] = true + kVs.EasyAdd(profGoCPUCores, float64(cpuNanos)/float64(pprofDurationNS)) + } + } + } + + if !hasExported[profGoLifetimeHeapObjects] || !hasExported[profGoLifetimeHeapBytes] { + if heapFile, ok := pprofFiles[goHeapFile]; ok { + objects, size, err := liveHeapSummary(heapFile) + if err != nil { + log.Warnf("unable to resolve go pprof live heap metrics: %v", err) + } else { + if !hasExported[profGoLifetimeHeapObjects] { + hasExported[profGoLifetimeHeapObjects] = true + kVs.EasyAdd(profGoLifetimeHeapObjects, objects) + } + if 
!hasExported[profGoLifetimeHeapBytes] { + hasExported[profGoLifetimeHeapBytes] = true + kVs.EasyAdd(profGoLifetimeHeapBytes, size) + } + } + } + } + + if !hasExported[profGoBlockedTime] { + if blockFile, ok := pprofFiles[goBlockFile]; ok { + delayNS, err := delayDurationNS(blockFile) + if err != nil { + log.Warnf("unable to resolve go pprof block delay duration: %v", err) + } else { + hasExported[profGoBlockedTime] = true + kVs.EasyAdd(profGoBlockedTime, delayNS) + } + } + } + + if !hasExported[profGoMutexDelayTime] { + if mutexFile, ok := pprofFiles[goMutexFile]; ok { + delayNS, err := delayDurationNS(mutexFile) + if err != nil { + log.Warnf("unable to resolve go pprof mutex delay duration: %v", err) + } else { + hasExported[profGoMutexDelayTime] = true + kVs.EasyAdd(profGoMutexDelayTime, delayNS) + } + } + } + + if !hasExported[profGoNumGoroutine] { + if goroutineFile, ok := pprofFiles[goroutinesFile]; ok { + gCount, err := goroutinesCount(goroutineFile) + if err != nil { + log.Warnf("unable to resolve go pprof goroutines count metric: %v", err) + } else { + hasExported[profGoNumGoroutine] = true + kVs.EasyAdd(profGoNumGoroutine, gCount) + } + } + } + + pt := point.NewPointV2(metricsName, kVs.toPointKVs(), point.WithPrecision(point.PrecNS), point.WithTime(pprofEnd)) + if err = exportMetrics([]*point.Point{pt}); err != nil { + return fmt.Errorf("unable to export profiling metrics: %w", err) + } + return nil +} diff --git a/internal/plugins/inputs/profile/metrics/metrics_test.go b/internal/plugins/inputs/profile/metrics/metrics_test.go new file mode 100644 index 0000000000..58afe7e0ce --- /dev/null +++ b/internal/plugins/inputs/profile/metrics/metrics_test.go @@ -0,0 +1,69 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the MIT License. +// This product includes software developed at Guance Cloud (https://www.guance.com/). +// Copyright 2021-present Guance, Inc.
+ +package metrics + +import ( + "testing" + "time" + + "github.com/GuanceCloud/cliutils/point" + "github.com/grafana/jfr-parser/common/attributes" + "github.com/grafana/jfr-parser/common/filters" + "github.com/grafana/jfr-parser/common/types" + "github.com/grafana/jfr-parser/common/units" + "github.com/stretchr/testify/assert" +) + +func TestMetricKVs(t *testing.T) { + kvs := newMetricKVs() + kvs.AddTag("foo", "bar") + kvs.AddTag("hello", "world") + + kvs.AddV2("duration", 3.1415925, false) + kvs.AddV2("count", 789, false) + kvs.AddV2("bytes", 100000, false) + + ptKVs := kvs.toPointKVs() + + for _, tag := range ptKVs.InfluxTags() { + t.Logf("[tag] %s : %s", tag.Key, tag.Value) + } + for _, field := range ptKVs.Fields() { + t.Logf("[field] %s : %v", field.Key, field.Raw()) + } + + assert.Equal(t, 2, ptKVs.TagCount()) + assert.Equal(t, 3, ptKVs.FieldCount()) + + mKVs := toMetricKVs(point.NewTags(map[string]string{ + "tag1": "value1", + "tag2": "value2", + "tag3": "value3", + })) + mKVs.AddTag("language", "java") + mKVs.AddV2("foobar", 3.1415, false) + + assert.Equal(t, 4, mKVs.toPointKVs().TagCount()) + assert.Equal(t, 1, mKVs.toPointKVs().FieldCount()) +} + +func TestParseJFR(t *testing.T) { + for _, chunk := range chunks { + chunk.ShowClassMeta(types.VmInfo) + for _, event := range chunk.Apply(filters.VmInfo) { + value, err := attributes.JVMStartTime.GetValue(event) + if err != nil { + t.Fatal(err) + } + t.Log(value) + tm, err := units.ToTime(value) + if err != nil { + t.Fatal(err) + } + t.Logf("jvm start at: %v, uptime: %v", tm, time.Since(tm)) + } + } +} diff --git a/internal/plugins/inputs/profile/metrics/pprof.go b/internal/plugins/inputs/profile/metrics/pprof.go new file mode 100644 index 0000000000..b70ee4bf5a --- /dev/null +++ b/internal/plugins/inputs/profile/metrics/pprof.go @@ -0,0 +1,212 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the MIT License. +// This product includes software developed at Guance Cloud (https://www.guance.com/). +// Copyright 2021-present Guance, Inc. 
+ +package metrics + +import ( + "encoding/json" + "fmt" + "io" + "mime/multipart" + "strings" + + "github.com/GuanceCloud/cliutils/pprofparser/domain/quantity" + "github.com/GuanceCloud/cliutils/pprofparser/service/parsing" + "github.com/google/pprof/profile" +) + +func resolveJSONNumber(n json.Number) any { + if !strings.Contains(n.String(), ".") { + if x, err := n.Int64(); err == nil { + return x + } + } + if x, err := n.Float64(); err == nil { + return x + } + return int64(0) +} + +func rawMessage2String(message json.RawMessage) (string, error) { + if message == nil { + return "", nil + } + var s string + if err := json.Unmarshal(message, &s); err != nil { + return "", fmt.Errorf("illegal json string literal: %q", message) + } + return s, nil +} + +func rawMessage2Number(message json.RawMessage) (json.Number, error) { + var number json.Number + if err := json.Unmarshal(message, &number); err != nil { + return "", fmt.Errorf("illegal json number literal: %q", message) + } + return number, nil +} + +func parseMetricsJSONFile(r io.Reader) (map[string]json.Number, error) { + body, err := io.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("unable to read metrics.json: %w", err) + } + + var rawMetrics [][2]json.RawMessage + + if err = json.Unmarshal(body, &rawMetrics); err != nil { + log.Errorf("unable to unmarshal metrics.json, %q: %v", string(body), err) + return nil, fmt.Errorf("unable to unmarshal metrics.json: %w", err) + } + + rm := make(map[string]json.Number) + + for _, numbers := range rawMetrics { + name, err := rawMessage2String(numbers[0]) + if err != nil { + return nil, fmt.Errorf("invalid metric key: %w", err) + } + num, err := rawMessage2Number(numbers[1]) + if err != nil { + return nil, fmt.Errorf("invalid metric value: %w", err) + } + rm[name] = num + } + + jsonMetrics := make(map[string]json.Number, len(rm)) + for name, jsonField := range goMetricsNameMapping { + if v, ok := rm[jsonField]; ok { + jsonMetrics[name] = v + } + } + + return jsonMetrics, nil +} + +type pprofQuantity struct { + Unit string + Value int64 +} + +func pprofCPUDuration(fileHeader *multipart.FileHeader) (durationNS int64, err error) { + cpuMetrics, err := pprofSummaryHeader(fileHeader) + if err != nil { + return 0, fmt.Errorf("unable to resolve pprof file [%s] cpu metrics: %w", fileHeader.Filename, err) + } + + if cpuDuration, ok := cpuMetrics["cpu"]; ok { + unit, err := quantity.ParseUnit(quantity.Duration, cpuDuration.Unit) + if err != nil { + return 0, fmt.Errorf("unable to resolve cpu duration unit [%s]: %w", cpuDuration.Unit, err) + } + + cpuNanos, err := unit.Quantity(cpuDuration.Value).IntValueIn(quantity.NanoSecond) + if err != nil { + return 0, fmt.Errorf("unable to convert cpu duration to nanoseconds: %w", err) + } + return cpuNanos, nil + } + return 0, fmt.Errorf("cpu profiling metrics not found") +} + +func liveHeapSummary(fileHeader *multipart.FileHeader) (liveHeapObjects, liveHeapBytes int64, err error) { + allocMetrics, err := pprofSummaryHeader(fileHeader) + if err != nil { + return 0, 0, fmt.Errorf("unable to resolve pprof file [%s] allocs metrics: %w", fileHeader.Filename, err) + } + + if inuseObjects, ok := allocMetrics["inuse_objects"]; ok { + liveHeapObjects = inuseObjects.Value + } + + if inuseSpace, ok := allocMetrics["inuse_space"]; ok { + unit, err := quantity.ParseUnit(quantity.Memory, inuseSpace.Unit) + if err != nil { + return liveHeapObjects, 0, fmt.Errorf("unable to resolve inuse space memory unit [%s]: %w", inuseSpace.Unit, err) + } + +
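// convert the sampled in-use space to bytes so callers always receive a byte count +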
liveHeapBytes, err = unit.Quantity(inuseSpace.Value).IntValueIn(quantity.Byte) + if err != nil { + return liveHeapObjects, liveHeapBytes, fmt.Errorf("unable to convert inuse space value to bytes: %w", err) + } + } + return +} + +func delayDurationNS(fileHeader *multipart.FileHeader) (durationNS int64, err error) { + delayMetrics, err := pprofSummaryHeader(fileHeader) + if err != nil { + return 0, fmt.Errorf("unable to resolve go pprof block metrics: %w", err) + } + + if delay, ok := delayMetrics["delay"]; ok { + unit, err := quantity.ParseUnit(quantity.Duration, delay.Unit) + if err != nil { + return 0, fmt.Errorf("unable to resolve delay duration unit [%s]: %w", delay.Unit, err) + } + durationNS, err = unit.Quantity(delay.Value).IntValueIn(quantity.NanoSecond) + if err != nil { + return 0, fmt.Errorf("unable to convert go pprof blocked duration to nanoseconds: %w", err) + } + } + return +} + +func goroutinesCount(fileHeader *multipart.FileHeader) (int64, error) { + goroutineMetrics, err := pprofSummaryHeader(fileHeader) + if err != nil { + return 0, fmt.Errorf("unable to resolve goroutines count metrics: %w", err) + } + if goroutines, ok := goroutineMetrics["goroutines"]; ok { + return goroutines.Value, nil + } + return 0, nil +} + +func pprofSummaryHeader(mh *multipart.FileHeader) (map[string]*pprofQuantity, error) { + if mh == nil { + return nil, fmt.Errorf("nil FileHeader") + } + + f, err := mh.Open() + if err != nil { + return nil, fmt.Errorf("unable to open file [%s]: %w", mh.Filename, err) + } + defer f.Close() //nolint:errcheck + + summaries, err := pprofSummary(f) + if err != nil { + return nil, fmt.Errorf("unable to parse pprof file [%s]: %w", mh.Filename, err) + } + return summaries, nil +} + +func pprofSummary(r io.Reader) (map[string]*pprofQuantity, error) { + prof, err := profile.Parse(parsing.NewDecompressor(r)) + if err != nil { + return nil, fmt.Errorf("unable to parse pprof: %w", err) + } + + summaries := make(map[string]*pprofQuantity, len(prof.SampleType)) + + for _, valueType := range prof.SampleType { + summaries[valueType.Type] = &pprofQuantity{ + Unit: valueType.Unit, + Value: 0, + } + } + + for _, sample := range prof.Sample { + if len(sample.Value) != len(prof.SampleType) { + return nil, fmt.Errorf("malformed pprof, SampleType count: %d, Value count: %d", + len(prof.SampleType), len(sample.Value)) + } + for idx, v := range sample.Value { + summaries[prof.SampleType[idx].Type].Value += v + } + } + + return summaries, nil +} diff --git a/internal/plugins/inputs/profile/metrics/pprof_test.go b/internal/plugins/inputs/profile/metrics/pprof_test.go new file mode 100644 index 0000000000..e11fcea61b --- /dev/null +++ b/internal/plugins/inputs/profile/metrics/pprof_test.go @@ -0,0 +1,107 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the MIT License. +// This product includes software developed at Guance Cloud (https://www.guance.com/). +// Copyright 2021-present Guance, Inc.
+ +package metrics + +import ( + "encoding/json" + "os" + "testing" +) + +func TestResolveMetricsJSONFile(t *testing.T) { + f, err := os.Open("testdata/metrics.json") + if err != nil { + t.Fatal(err) + } + + defer f.Close() + + metering, err := parseMetricsJSONFile(f) + if err != nil { + t.Fatal(err) + } + + for name, number := range metering { + t.Logf("[%s]: [%s]", name, number) + } +} + +func TestResolveJSONNumber(t *testing.T) { + n1 := json.Number(`123`) + n2 := json.Number(`3e6`) + n3 := json.Number(`3.1415`) + n4 := json.Number(`1e-2`) + n5 := json.Number(`1e9`) + + x := resolveJSONNumber(n1) + t.Logf("%T, %v", x, x) + + x = resolveJSONNumber(n2) + t.Logf("%T, %v", x, x) + + x = resolveJSONNumber(n3) + t.Logf("%T, %v", x, x) + + x = resolveJSONNumber(n4) + t.Logf("%T, %v", x, x) + + x = resolveJSONNumber(n5) + t.Logf("%T, %v", x, x) +} + +func TestPprofSummary(t *testing.T) { + /* + pprof_test.go:64: metric name: samples, value: 607, unit: count + pprof_test.go:64: metric name: cpu, value: 6070000000, unit: nanoseconds + f, err := os.Open("testdata/cpu.pprof") + + pprof_test.go:67: metric name: contentions, value: 1089, unit: count + pprof_test.go:67: metric name: delay, value: 1136108787243, unit: nanoseconds + f, err := os.Open("testdata/delta-block.pprof") + + pprof_test.go:70: metric name: alloc_objects, value: 535, unit: count + pprof_test.go:70: metric name: alloc_space, value: 16699978, unit: bytes + pprof_test.go:70: metric name: inuse_objects, value: 55422, unit: count + pprof_test.go:70: metric name: inuse_space, value: 18585974, unit: bytes + f, err := os.Open("testdata/delta-heap.pprof") + + pprof_test.go:77: metric name: contentions, value: 570, unit: count + pprof_test.go:77: metric name: delay, value: 1603409, unit: nanoseconds + f, err := os.Open("testdata/delta-mutex.pprof") + + pprof_test.go:81: metric name: goroutines, value: 25, unit: count + */ + + /** + pprof_test.go:86: metric name: cpu-time, value: 7990188678, unit: nanoseconds + pprof_test.go:86: metric name: wall-time, value: 132095573542, unit: nanoseconds + pprof_test.go:86: metric name: exception-samples, value: 0, unit: count + pprof_test.go:86: metric name: lock-acquire, value: 0, unit: count + pprof_test.go:86: metric name: lock-acquire-wait, value: 0, unit: nanoseconds + pprof_test.go:86: metric name: lock-release, value: 0, unit: count + pprof_test.go:86: metric name: alloc-samples, value: 30720, unit: count + pprof_test.go:86: metric name: cpu-samples, value: 10661, unit: count + pprof_test.go:86: metric name: alloc-space, value: 151819986, unit: bytes + pprof_test.go:86: metric name: heap-space, value: 24367668, unit: bytes + pprof_test.go:86: metric name: lock-release-hold, value: 0, unit: nanoseconds + */ + + f, err := os.Open("testdata/python.pprof") + if err != nil { + t.Fatal(err) + } + + defer f.Close() + + summaries, err := pprofSummary(f) + if err != nil { + t.Fatal(err) + } + + for metricType, quantity := range summaries { + t.Logf("metric name: %s, value: %d, unit: %s", metricType, quantity.Value, quantity.Unit) + } +} diff --git a/internal/plugins/inputs/profile/resolve.go b/internal/plugins/inputs/profile/metrics/resolve.go similarity index 69% rename from internal/plugins/inputs/profile/resolve.go rename to internal/plugins/inputs/profile/metrics/resolve.go index f40151cfea..6ef08e722a 100644 --- a/internal/plugins/inputs/profile/resolve.go +++ b/internal/plugins/inputs/profile/metrics/resolve.go @@ -3,7 +3,7 @@ // This product includes software developed at Guance Cloud
(https://www.guance.com/). // Copyright 2021-present Guance, Inc. -package profile +package metrics import ( "encoding/json" @@ -17,11 +17,21 @@ import ( ) const ( - eventJSONFile = "event" - eventJSONFileWithSuffix = "event.json" - profileTagsKey = "tags[]" - eventFileTagsKey = "tags_profiler" - subCustomTagsKey = "sub_custom_tags" + EventFile = "event" + EventJSONFile = "event.json" + ProfFile = "prof" + MainFile = "main" + MainJFRFile = "main.jfr" + MainPprofFile = "main.pprof" + AutoFile = "auto" + AutoJFRFile = "auto.jfr" + MetricFile = "metrics" + MetricJSONFile = "metrics.json" + AutoPprofFile = "auto.pprof" + profileTagsKey = "tags[]" + eventFileTagsKey = "tags_profiler" + SubCustomTagsKey = "sub_custom_tags" + PprofExt = ".pprof" ) const ( @@ -113,7 +123,7 @@ type Tags map[string]string type rfc3339Time time.Time -func newRFC3339Time(t time.Time) *rfc3339Time { +func NewRFC3339Time(t time.Time) *rfc3339Time { return (*rfc3339Time)(&t) } @@ -151,29 +161,19 @@ type Metadata struct { End *rfc3339Time `json:"end"` } -func newTags(originTags []string) Tags { - pt := make(Tags) +func NewTags(originTags []string) Tags { + tags := make(Tags) for _, tag := range originTags { // has ":": split it into a key/value pair on the first ":" if strings.Index(tag, ":") > 0 { pairs := strings.SplitN(tag, ":", 2) - pt[pairs[0]] = pairs[1] + tags[pairs[0]] = pairs[1] } else { // no ":": use the whole value as the key with an empty value - pt[tag] = "" + tags[tag] = "" } } - return pt -} - -func (t Tags) Get(name string, defVal ...string) string { - if tag, ok := t[name]; ok { - return tag - } - if len(defVal) > 0 { - return defVal[0] - } - return "" + return tags } func ResolveLanguage(runtimes []string) Language { @@ -188,11 +188,11 @@ return UnKnown } -func resolveStartTime(formValue map[string][]string) (time.Time, error) { +func ResolveStartTime(formValue map[string][]string) (time.Time, error) { return resolveTime(formValue, []string{"recording-start", "start"}) } -func resolveEndTime(formValue map[string][]string) (time.Time, error) { +func ResolveEndTime(formValue map[string][]string) (time.Time, error) { return resolveTime(formValue, []string{"recording-end", "end"}) } @@ -224,84 +224,85 @@ func resolveTime(formValue map[string][]string, formFields []string) (time.Time, return tm, errors.New("there is not proper form time field") } -func getForm(field string, formValues map[string][]string) string { - if val := formValues[field]; len(val) > 0 { - return val[0] - } - return "" -} - -func resolveLang(formValue map[string][]string, pt Tags) Language { +func ResolveLang(metadata *ResolvedMetadata) Language { var runtimes []string - if v := pt.Get("language"); v != "" { - runtimes = append(runtimes, v) - } - - if v := pt.Get("runtime"); v != "" { - runtimes = append(runtimes, v) - } - - formKeys := []string{ - "runtime", + aliasNames := []string{ "language", + "runtime", "family", } - for _, field := range formKeys { - if v := getForm(field, formValue); v != "" { + for _, field := range aliasNames { + if v := metadata.GetTag(field); v != "" { runtimes = append(runtimes, v) } } + for _, field := range aliasNames { + if values := metadata.FormValue[field]; len(values) > 0 { + runtimes = append(runtimes, values...)
+ } + } + return ResolveLanguage(runtimes) } -func interface2String(i interface{}) (string, error) { - switch baseV := i.(type) { +func any2String(i interface{}) string { + switch ix := i.(type) { case string: - return baseV, nil + return ix + case []byte: + return string(ix) case float32, float64: - return strconv.FormatFloat(reflect.ValueOf(i).Float(), 'g', -1, 64), nil + return strconv.FormatFloat(reflect.ValueOf(i).Float(), 'g', -1, 64) case int, int8, int16, int32, int64: - return strconv.FormatInt(reflect.ValueOf(i).Int(), 10), nil + return strconv.FormatInt(reflect.ValueOf(i).Int(), 10) case uint, uint8, uint16, uint32, uint64, uintptr: - return strconv.FormatUint(reflect.ValueOf(i).Uint(), 10), nil + return strconv.FormatUint(reflect.ValueOf(i).Uint(), 10) case bool: - if baseV { - return "true", nil + if ix { + return "true" } - return "false", nil + return "false" + case json.Number: + return ix.String() } - return "", fmt.Errorf("not suppoerted interface type: %T", i) + return fmt.Sprintf("%v", i) } -func json2StringMap(m map[string]interface{}) map[string][]string { +func json2FormValues(m map[string]interface{}) map[string][]string { formatted := make(map[string][]string, len(m)) for k, v := range m { - switch baseV := v.(type) { + switch vx := v.(type) { case []interface{}: - for _, elem := range baseV { - if elemStr, err := interface2String(elem); err == nil { - formatted[k] = append(formatted[k], elemStr) - } + for _, elem := range vx { + formatted[k] = append(formatted[k], any2String(elem)) } default: - if vStr, err := interface2String(v); err == nil { - formatted[k] = append(formatted[k], vStr) - } + formatted[k] = append(formatted[k], any2String(v)) } } return formatted } -type resolvedMetadata struct { - formValue map[string][]string - tags Tags +type ResolvedMetadata struct { + FormValue map[string][]string + Tags Tags +} + +func (r *ResolvedMetadata) GetTag(name string, defValue ...string) string { + if tag, ok := r.Tags[name]; ok { + return tag + } + if len(defValue) > 0 { + return defValue[0] + } + return "" } -func parseMetadata(req *http.Request) (*resolvedMetadata, int64, error) { +func ParseMetadata(req *http.Request) (*ResolvedMetadata, int64, error) { filesize := int64(0) for _, files := range req.MultipartForm.File { for _, f := range files { @@ -311,16 +312,16 @@ func parseMetadata(req *http.Request) (*resolvedMetadata, int64, error) { if req.MultipartForm.Value != nil { if _, ok := req.MultipartForm.Value[profileTagsKey]; ok { - return &resolvedMetadata{ - formValue: req.MultipartForm.Value, - tags: newTags(req.MultipartForm.Value[profileTagsKey]), + return &ResolvedMetadata{ + FormValue: req.MultipartForm.Value, + Tags: NewTags(req.MultipartForm.Value[profileTagsKey]), }, filesize, nil } } - eventFiles, ok := req.MultipartForm.File[eventJSONFile] + eventFiles, ok := req.MultipartForm.File[EventFile] if !ok { - eventFiles, ok = req.MultipartForm.File[eventJSONFileWithSuffix] + eventFiles, ok = req.MultipartForm.File[EventJSONFile] } if ok && len(eventFiles) > 0 { @@ -337,15 +338,32 @@ func parseMetadata(req *http.Request) (*resolvedMetadata, int64, error) { if err := decoder.Decode(&events); err != nil { return nil, filesize, fmt.Errorf("resolve the event file fail: %w", err) } - eventFormValues := json2StringMap(events) + eventFormValues := json2FormValues(events) + var tags []string if len(eventFormValues[eventFileTagsKey]) > 0 && eventFormValues[eventFileTagsKey][0] != "" { - eventFormValues[profileTagsKey] = 
strings.Split(eventFormValues[eventFileTagsKey][0], ",") + tags = strings.Split(eventFormValues[eventFileTagsKey][0], ",") + } - return &resolvedMetadata{ - formValue: eventFormValues, - tags: newTags(eventFormValues[profileTagsKey]), + return &ResolvedMetadata{ + FormValue: eventFormValues, + Tags: NewTags(tags), }, filesize, nil } return nil, filesize, fmt.Errorf("the profiling data format not supported, check your datadog trace library version") } + +func JoinTags(m map[string]string) string { + if len(m) == 0 { + return "" + } + + var sb strings.Builder + + for k, v := range m { + sb.WriteString(k) + sb.WriteByte(':') + sb.WriteString(v) + sb.WriteByte(',') + } + return strings.TrimSuffix(sb.String(), ",") +} diff --git a/internal/plugins/inputs/profile/resolve_test.go b/internal/plugins/inputs/profile/metrics/resolve_test.go similarity index 86% rename from internal/plugins/inputs/profile/resolve_test.go rename to internal/plugins/inputs/profile/metrics/resolve_test.go index da7fef2ce6..af282be1d7 100644 --- a/internal/plugins/inputs/profile/resolve_test.go +++ b/internal/plugins/inputs/profile/metrics/resolve_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Guance Cloud (https://www.guance.com/). // Copyright 2021-present Guance, Inc. -package profile +package metrics import ( "bytes" @@ -25,8 +25,8 @@ func TestMetadata(t *testing.T) { Language: Golang, TagsProfiler: "process_id:31145,service:zy-profiling-test,profiler_version:0.102.0~b67f6e3380,host:zydeMacBook-Air.local,runtime-id:06dddda1-957b-4619-97cb-1a78fc7e3f07,language:jvm,env:test,version:v1.2", SubCustomTags: "foobar:hello-world", - Start: newRFC3339Time(time.Now()), - End: newRFC3339Time(time.Now().Add(time.Minute)), + Start: NewRFC3339Time(time.Now()), + End: NewRFC3339Time(time.Now().Add(time.Minute)), } out, err := json.MarshalIndent(md, "", " ") @@ -51,7 +51,7 @@ t.Fatal(err) } - headers := json2StringMap(m) + headers := json2FormValues(m) for k, v := range headers { fmt.Println(k, ":", v) @@ -82,7 +82,7 @@ func TestJson2StringMap(t *testing.T) { t.Fatal(err) } - strMap := json2StringMap(v) + strMap := json2FormValues(v) for key, val := range strMap { fmt.Println(key, ":", val) @@ -123,19 +123,19 @@ func TestParseMetadata(t *testing.T) { req.Header.Set("Content-Type", w.FormDataContentType()) - err = req.ParseMultipartForm(defaultInput().getBodySizeLimit()) + err = req.ParseMultipartForm(1e9) assert.NoError(t, err) - metadata, _, err := parseMetadata(req) + metadata, _, err := ParseMetadata(req) assert.NoError(t, err) - for k, v := range metadata.tags { + for k, v := range metadata.Tags { t.Logf("%s : %s \n", k, v) } - assert.Equal(t, "bar", metadata.tags["foo"]) - assert.Equal(t, "hello-world", metadata.tags["foobar"]) + assert.Equal(t, "bar", metadata.Tags["foo"]) + assert.Equal(t, "hello-world", metadata.Tags["foobar"]) - fmt.Println(joinTags(metadata.tags)) + fmt.Println(JoinTags(metadata.Tags)) } diff --git a/internal/plugins/inputs/profile/metrics/testdata/cpu.pprof b/internal/plugins/inputs/profile/metrics/testdata/cpu.pprof new file mode 100644 index 0000000000..6889c39ef9 Binary files /dev/null and b/internal/plugins/inputs/profile/metrics/testdata/cpu.pprof differ
- if n > g.input.getBodySizeLimit() { + if n > g.input.GetBodySizeLimit() { return nil, fmt.Errorf("exceed body max size") } diff --git a/internal/plugins/inputs/profile/pyroscope.go b/internal/plugins/inputs/profile/pyroscope.go index 01bda7650c..c3ed1f7e8c 100644 --- a/internal/plugins/inputs/profile/pyroscope.go +++ b/internal/plugins/inputs/profile/pyroscope.go @@ -17,6 +17,8 @@ import ( "strings" "time" + "gitlab.jiagouyun.com/cloudcare-tools/datakit/internal/plugins/inputs/profile/metrics" + "github.com/gin-gonic/gin" "github.com/prometheus/client_golang/prometheus" "github.com/pyroscope-io/pyroscope/pkg/agent/types" @@ -489,8 +491,8 @@ Put implements storage.Putter interface */ func (report *pyroscopeDatakitReport) Put(ctx context.Context, putInput *storage.PutInput) error { var profileDataSet []*profileData - var reportFamily Language - var reportFormat Format + var reportFamily metrics.Language + var reportFormat metrics.Format var startTime, endTime time.Time spyName := putInput.SpyName @@ -527,8 +529,8 @@ func (report *pyroscopeDatakitReport) Put(ctx context.Context, putInput *storage // Having cpu, inuse_objects, and inuse_space. Data is ready, start send.
- reportFamily = NodeJS - reportFormat = PPROF + reportFamily = metrics.NodeJS + reportFormat = metrics.PPROF // cpu cpuData, err := getBytesBufferByPut(detail.CPU) @@ -570,8 +572,8 @@ func (report *pyroscopeDatakitReport) Put(ctx context.Context, putInput *storage report.Delete(name) case eBPFSpyName: - reportFamily = CPP - reportFormat = Collapsed + reportFamily = metrics.CPP + reportFormat = metrics.Collapsed report.inputTags["sample_rate"] = fmt.Sprintf("%d", putInput.SampleRate) report.inputTags["units"] = putInput.Units.String() @@ -595,17 +597,17 @@ func (report *pyroscopeDatakitReport) Put(ctx context.Context, putInput *storage return fmt.Errorf("not supported format") } - event := &Metadata{ + event := &metrics.Metadata{ Language: reportFamily, Format: reportFormat, - Profiler: Pyroscope, - Start: newRFC3339Time(startTime), - End: newRFC3339Time(endTime), + Profiler: metrics.Pyroscope, + Start: metrics.NewRFC3339Time(startTime), + End: metrics.NewRFC3339Time(endTime), Attachments: []string{ withExtName(pyroscopeFilename, ".pprof"), }, - TagsProfiler: joinTags(report.inputTags), - SubCustomTags: joinTags(report.pyrs.Tags), + TagsProfiler: metrics.JoinTags(report.inputTags), + SubCustomTags: metrics.JoinTags(report.pyrs.Tags), } if err := pushProfileData( @@ -619,7 +621,7 @@ func (report *pyroscopeDatakitReport) Put(ctx context.Context, putInput *storage Input: report.pyrs.input, }, event, - report.pyrs.input.getBodySizeLimit(), + report.pyrs.input.GetBodySizeLimit(), ); err != nil { log.Errorf("unable to push pyroscope profile data: %s", err) return err diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/cfg/cfg.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/cfg/cfg.go new file mode 100644 index 0000000000..0eaaed52f2 --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/cfg/cfg.go @@ -0,0 +1,98 @@ +package cfg + +import ( + "fmt" + "os" + "path/filepath" + + "gopkg.in/yaml.v2" +) + +const ( + DefaultWorkDir = "/usr/local/pprofparser" + DefaultCfgName = "conf.yml" + DefaultCfgPath = DefaultWorkDir + "/" + DefaultCfgName +) + +const ( + EnvLocal = "local" + EnvDev = "dev" + EnvTest = "test" + EnvPre = "pre" + EnvProduction = "prod" +) + +var ( + Cfg *Config +) + +func Load(file string) error { + reader, err := os.Open(file) + if err != nil { + var wd string + if wd, err = os.Getwd(); err == nil { + cfgFile := filepath.Join(wd, "cfg", DefaultCfgName) + reader, err = os.Open(cfgFile) + } + if err != nil { + return fmt.Errorf("read config file fail: %w", err) + } + } + defer func() { + _ = reader.Close() + }() + + decoder := yaml.NewDecoder(reader) + var cfg Config + if err := decoder.Decode(&cfg); err != nil { + return fmt.Errorf("decode yaml config file fail: %w", err) + } + Cfg = &cfg + return nil +} + +type Config struct { + Serv Server `yaml:"server"` + Log Log `yaml:"log"` + Gin Gin `yaml:"gin"` + Oss Oss `yaml:"oss"` + Storage Storage `yaml:"storage"` +} + +// Server configuration +type Server struct { + Addr string `yaml:"addr"` + Port string `yaml:"port"` +} + +// Log log configuration +type Log struct { + Path string `yaml:"path"` + File string `yaml:"file"` + Level string `yaml:"level"` +} + +// Gin gin configuration +type Gin struct { + RunMode string `yaml:"run_mode"` + Log string `yaml:"log"` + ErrorLog string `yaml:"error_log"` +} + +// Oss aliyun oss configuration +type Oss struct { + Host string `yaml:"host"` + AccessKey string `yaml:"access_key"` + SecretKey string `yaml:"secret_key"` + ProfileBucket string `yaml:"profile_bucket"` 
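+ // ProfileDir is the directory (object key prefix) where profile files are stored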
+ ProfileDir string `yaml:"profile_dir"` +} + +type Disk struct { + ProfileDir string `yaml:"profile_dir"` +} + +type Storage struct { + Disk Disk `yaml:"disk"` + Oss Oss `yaml:"oss"` +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/events/type.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/events/type.go new file mode 100644 index 0000000000..e99d068787 --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/events/type.go @@ -0,0 +1,278 @@ +package events + +import ( + "github.com/GuanceCloud/cliutils/pprofparser/domain/languages" + "github.com/GuanceCloud/cliutils/pprofparser/domain/quantity" +) + +const ( + DefaultMetaFileName = "event" + DefaultMetaFileNameWithExt = "event.json" + DefaultProfileFilename = "prof" + DefaultProfileFilenameWithExt = "prof.pprof" +) + +const ( + CpuSamples Type = "cpu-samples" + CpuTime Type = "cpu-time" + WallTime Type = "wall-time" + HeapLiveSize Type = "heap-space" + HeapLiveObjects Type = "heap-live-objects" + Mutex Type = "mutex" + Block Type = "block" + Goroutines Type = "goroutines" + AllocatedMemory Type = "alloc-space" + Allocations Type = "alloc-samples" + ThrownExceptions Type = "exception-samples" + LockWaitTime Type = "lock-acquire-wait" + LockedTime Type = "lock-release-hold" + LockAcquires Type = "lock-acquire" + LockReleases Type = "lock-release" + Other Type = "other" + Unknown Type = "unknown" +) + +const ( + ShowNoWay ShowPlace = 0 + ShowInTrace ShowPlace = 1 + ShowInProfile ShowPlace = 2 +) + +var TypeProfileFilename = map[languages.Lang]map[Type]string{ + languages.Python: {}, + + languages.GoLang: {}, +} + +var Metas = map[Type]TypeMetadata{ + CpuSamples: { + Sort: sortMap{languages.Python: 0, languages.GoLang: 0, languages.NodeJS: 0}, + Name: "CPU Samples", + Description: descriptionMap{languages.Any: "This is the number of samples each method spent running on the CPU."}, + QuantityKind: quantity.Count, + ShowPlaces: ShowNoWay, + }, + + CpuTime: { + Sort: sortMap{languages.Python: 10, languages.GoLang: 10, languages.DotNet: 10}, //map[languages.Lang]int{languages.Python: 10, languages.GoLang: 10}, + Name: "CPU Time", + Description: descriptionMap{languages.Any: "This is the time each method spent running on the CPU."}, + QuantityKind: quantity.Duration, + ShowPlaces: ShowInTrace | ShowInProfile, + }, + + WallTime: { + Sort: sortMap{languages.Python: 20, languages.DotNet: 80}, //map[languages.Lang]int{languages.Python: 20, languages.GoLang: 20}, + Name: "Wall Time", + Description: descriptionMap{languages.Any: "This is the elapsed time spent in each method. Elapsed time includes time when code is running on CPU, waiting for I/O, and anything else that happens while the function is running."}, + QuantityKind: quantity.Duration, + ShowPlaces: ShowInProfile, + }, + + HeapLiveSize: { + Sort: sortMap{languages.Python: 30, languages.NodeJS: 30, languages.DotNet: 70}, //map[languages.Lang]int{languages.Python: 30, languages.GoLang: 30}, + Name: "Heap Live Size", + Description: descriptionMap{languages.Any: "This is the amount of heap memory allocated that remains in use.", + languages.GoLang: `This is the amount of heap memory allocated by each function that remains in use. 
(Go calls this "inuse_space").`, + }, + QuantityKind: quantity.Memory, + ShowPlaces: ShowInProfile, + }, + + HeapLiveObjects: { + Sort: sortMap{languages.Python: 31, languages.NodeJS: 31, languages.DotNet: 60}, //map[languages.Lang]int{languages.Python: 31, languages.GoLang: 31}, + Name: "Heap Live Objects", + Description: descriptionMap{languages.Any: `This is the number of objects allocated by each function that remain in use. `, + languages.GoLang: `This is the number of objects allocated by each function that remain in use. (Go calls this "inuse_objects").`, + }, + QuantityKind: quantity.Count, + ShowPlaces: ShowInProfile, + }, + + Mutex: { + Sort: sortMap{languages.Python: 32}, //map[languages.Lang]int{languages.Python: 32, languages.GoLang: 32}, + Name: "Mutex", + Description: descriptionMap{languages.GoLang: `This is the time each function spent waiting on mutexes during the profiling period.`}, + QuantityKind: quantity.Duration, + ShowPlaces: ShowInProfile, + }, + + Block: { + Sort: sortMap{languages.Python: 33}, //map[languages.Lang]int{languages.Python: 33, languages.GoLang: 33}, + Name: "Block", + Description: descriptionMap{languages.Any: `This is the time each function spent blocked since the start of the process.`}, + QuantityKind: quantity.Duration, + ShowPlaces: ShowInProfile, + }, + + Goroutines: { + Sort: sortMap{languages.Python: 34}, //map[languages.Lang]int{languages.Python: 34, languages.GoLang: 34}, + Name: "Goroutines", + Description: descriptionMap{languages.Any: `This is the number of goroutines.`}, + QuantityKind: quantity.Count, + ShowPlaces: ShowInProfile, + }, + + AllocatedMemory: { + Sort: sortMap{languages.Python: 40, languages.DotNet: 40}, //map[languages.Lang]int{languages.Python: 40, languages.GoLang: 40}, + Name: "Allocated Memory", + Description: descriptionMap{languages.Any: "This is the amount of heap memory allocated by each method, including allocations which were subsequently freed.", + languages.GoLang: `This is the amount of heap memory allocated by each function during the profiling period, including allocations which were subsequently freed. (Go calls this "alloc_space").`, + }, + QuantityKind: quantity.Memory, + ShowPlaces: ShowInProfile, + }, + + Allocations: { + Sort: sortMap{languages.Python: 50, languages.DotNet: 30}, //map[languages.Lang]int{languages.Python: 50, languages.GoLang: 50}, + Name: "Allocations", + Description: descriptionMap{languages.Any: "This is the number of heap allocations made by each method, including allocations which were subsequently freed.", + languages.GoLang: "This is the number of objects allocated by each function during the profiling period, including allocations which were subsequently freed. 
(Go calls this \"alloc_objects\").", + }, + QuantityKind: quantity.Count, + ShowPlaces: ShowInProfile, + }, + + ThrownExceptions: { + Sort: sortMap{languages.Python: 60, languages.DotNet: 20}, //map[languages.Lang]int{languages.Python: 60, languages.GoLang: 60}, + Name: "Thrown Exceptions", + Description: descriptionMap{languages.Any: "This is the number of exceptions thrown by each method."}, + QuantityKind: quantity.Count, + ShowPlaces: ShowInProfile, + }, + + LockWaitTime: { + Sort: sortMap{languages.Python: 70, languages.DotNet: 110}, //map[languages.Lang]int{languages.Python: 70, languages.GoLang: 70}, + Name: "Lock Wait Time", + Description: descriptionMap{languages.Any: "This is the time each function spent waiting for a lock."}, + QuantityKind: quantity.Duration, + ShowPlaces: ShowInTrace | ShowInProfile, + }, + + LockedTime: { + Sort: sortMap{languages.Python: 80}, //map[languages.Lang]int{languages.Python: 80, languages.GoLang: 80}, + Name: "Locked Time", + Description: descriptionMap{languages.Any: "This is the time each function spent holding a lock."}, + QuantityKind: quantity.Duration, + ShowPlaces: ShowInTrace | ShowInProfile, + }, + + LockAcquires: { + Sort: sortMap{languages.Python: 90, languages.DotNet: 100}, //map[languages.Lang]int{languages.Python: 90, languages.GoLang: 90}, + Name: "Lock Acquires", + Description: descriptionMap{languages.Any: "This is the number of lock acquisitions made by each method."}, + QuantityKind: quantity.Count, + ShowPlaces: ShowInProfile, + }, + + LockReleases: { + Sort: sortMap{languages.Python: 100}, //map[languages.Lang]int{languages.Python: 100, languages.GoLang: 100}, + Name: "Lock Releases", + Description: descriptionMap{languages.Any: "This is the number of times each function released a lock."}, + QuantityKind: quantity.Count, + ShowPlaces: ShowInProfile, + }, + Other: { + Sort: sortMap{languages.Python: 110}, //map[languages.Lang]int{languages.Python: 110, languages.GoLang: 110}, + Name: "Other", + Description: descriptionMap{languages.Any: "Methods that used the most uncategorized time."}, + QuantityKind: quantity.Duration, + ShowPlaces: ShowInTrace, + }, +} + +type Type string + +func ParseType(typ string) Type { + et := Type(typ) + if _, ok := Metas[et]; ok { + return et + } + return Unknown +} + +func (et Type) String() string { + return string(et) +} + +func (et Type) Equals(target Type) bool { + return et.String() == target.String() +} + +func (et Type) GetSort(lang languages.Lang) int { + if meta, ok := Metas[et]; ok { + if sort, ok := meta.Sort[lang]; ok { + return sort + } + if sort, ok := meta.Sort[languages.Any]; ok { + return sort + } + } + return 1 << 30 +} + +func (et Type) GetName() string { + if meta, ok := Metas[et]; ok { + return meta.Name + } + return "unknown" +} + +func (et Type) GetDescription(lang languages.Lang) string { + if meta, ok := Metas[et]; ok { + if desc, ok := meta.Description[lang]; ok { + return desc + } + if desc, ok := meta.Description[languages.Any]; ok { + return desc + } + } + return "unknown metric type" +} + +func (et Type) GetQuantityKind() *quantity.Kind { + if meta, ok := Metas[et]; ok { + return meta.QuantityKind + } + return quantity.UnknownKind +} + +func (et Type) GetShowPlaces() ShowPlace { + if meta, ok := Metas[et]; ok { + return meta.ShowPlaces + } + return ShowNoWay +} + +type TypeMetadata struct { + Sort sortMap + Name string + Description descriptionMap + QuantityKind *quantity.Kind + ShowPlaces ShowPlace +} + +type ShowPlace int + +// sortMap used to generate sort map for 
convenience +type sortMap map[languages.Lang]int + +func newSortMap() sortMap { + return make(sortMap) +} + +func (sm sortMap) put(lang languages.Lang, sort int) sortMap { + sm[lang] = sort + return sm +} + +type descriptionMap map[languages.Lang]string + +func newDescriptionMap() descriptionMap { + return make(descriptionMap) +} + +func (dm descriptionMap) put(lang languages.Lang, desc string) descriptionMap { + dm[lang] = desc + return dm +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/languages/lang.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/languages/lang.go new file mode 100644 index 0000000000..b0b5511180 --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/languages/lang.go @@ -0,0 +1,39 @@ +package languages + +import "strings" + +const ( + Any Lang = "any" // represents all languages + Java Lang = "java" + Python Lang = "python" + GoLang Lang = "golang" + Ruby Lang = "ruby" + Ebpf Lang = "ebpf" + NodeJS Lang = "nodejs" + DotNet Lang = "dotnet" + PHP Lang = "php" + Unknown Lang = "unknown" +) + +const ( + AnyID LangID = 1 << iota + PythonID + GolangID + JavaID + RubyID + EbpfID + NodeJSID + DotNetID + PhpID +) + +type Lang string +type LangID int + +func (l Lang) String() string { + return strings.ToLower(string(l)) +} + +func (l Lang) Is(target Lang) bool { + return l.String() == target.String() +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/parameter/parameter.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/parameter/parameter.go new file mode 100644 index 0000000000..85b7b0e510 --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/parameter/parameter.go @@ -0,0 +1,124 @@ +package parameter + +import ( + "errors" + "fmt" + "github.com/GuanceCloud/cliutils/pprofparser/domain/languages" + "github.com/GuanceCloud/cliutils/pprofparser/tools/jsontoolkit" +) + +const ( + FromTrace = "trace" + FromProfile = "profile" +) + +const ( + FilterBySpanTime = "spanTime" + FilterByFull = "full" +) + +const ( + MinTimestampMicro = 1640966400000000 + MaxTimestampMicro = 2147483647000000 + MinTimestampNano = 1640966400000000000 +) + +type BaseRequestParam struct { + WorkspaceUUID string `json:"workspace_uuid" binding:"required"` +} + +type WithTypeRequestParam struct { + BaseRequestParam + Type string `json:"type" binding:"required"` +} + +type Profile struct { + Language languages.Lang `json:"language" binding:"required"` + EsDocID string `json:"__docid"` + ProfileID string `json:"profile_id" binding:"required"` + ProfileStart interface{} `json:"profile_start" binding:"required"` // , min=1640966400000000000 + ProfileEnd interface{} `json:"profile_end" binding:"required"` // , min=1640966400000000000 + internalProfStart int64 + internalProfEnd int64 +} + +func (p *Profile) StartTime() (int64, error) { + if p.internalProfStart > 0 { + return p.internalProfStart, nil + } + start, err := jsontoolkit.IFaceCast2Int64(p.ProfileStart) + if err != nil { + return 0, err + } + if start <= 0 { + return 0, errors.New("illegal profile_start parameter") + } + p.internalProfStart = start + return start, err +} + +func (p *Profile) EndTime() (int64, error) { + if p.internalProfEnd > 0 { + return p.internalProfEnd, nil + } + end, err := jsontoolkit.IFaceCast2Int64(p.ProfileEnd) + if err != nil { + return 0, err + } + if end <= 0 { + return 0, errors.New("illegal profile_end parameter") + } + p.internalProfEnd = end + return end, err +} + +type Span struct { + TraceID string `json:"trace_id"` +
SpanID string `json:"span_id"` + SpanStart int64 `json:"span_start"` + SpanEnd int64 `json:"span_end"` +} + +type SummaryParam struct { + BaseRequestParam + Span + From string `json:"from"` + FilterBy string `json:"filter_by"` + Profiles []*Profile `json:"profiles" binding:"required"` +} + +type ParseParam struct { + WithTypeRequestParam + Profile +} + +type LookupParam struct { + WithTypeRequestParam + Span + FilterBy string `json:"filter_by"` + Profiles []*Profile `json:"profiles" binding:"required"` +} + +type DownloadParam struct { + BaseRequestParam + Profiles []*Profile `json:"profiles" binding:"required"` +} + +// VerifyLanguage checks that every profile in the list reports the same language +func VerifyLanguage(profiles []*Profile) (languages.Lang, error) { + + if len(profiles) == 0 { + return languages.Unknown, fmt.Errorf("empty profiles param") + } + + //lang := profiles[0].Language + + // compare whether the languages are identical + //for i := 1; i < len(profiles); i++ { + // if !lang.Is(profiles[i].Language) { - return languages.Unknown, fmt.Errorf("the languages are not same") - } - }
 + // return languages.Unknown, fmt.Errorf("the languages are not same") + // } + //} + + return profiles[0].Language, nil +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/pprof/frame.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/pprof/frame.go new file mode 100644 index 0000000000..b11f630b60 --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/pprof/frame.go @@ -0,0 +1,98 @@ +package pprof + +import ( + "encoding/json" + + "github.com/GuanceCloud/cliutils/pprofparser/domain/quantity" +) + +const ( + FieldQuantity = "quantity" + FieldValue = "value" + FieldUnit = "unit" + FieldPercent = "percent" + FieldFunctionName = "functionName" + FieldLine = "line" + FieldFile = "file" + FieldDirectory = "directory" + FieldThreadID = "threadID" + FieldThreadName = "threadName" + FieldClass = "class" + FieldNamespace = "namespace" + FieldAssembly = "assembly" + FieldPackage = "package" + FieldPrintString = "printString" + FieldSubFrames = "subFrames" +) + +func GetFrameJSONFields() []string { + return []string{ + FieldQuantity, + FieldValue, + FieldUnit, + FieldPercent, + FieldFunctionName, + FieldLine, + FieldFile, + FieldDirectory, + FieldThreadID, + FieldThreadName, + FieldClass, + FieldNamespace, + FieldAssembly, + FieldPackage, + FieldPrintString, + FieldSubFrames, + } +} + +type Frame struct { + Quantity *quantity.Quantity `json:"quantity"` + Value int64 `json:"value"` + Unit *quantity.Unit `json:"unit"` + Percent string `json:"percent"` + Function string `json:"functionName"` + Line int64 `json:"line,omitempty"` + File string `json:"file,omitempty"` + Directory string `json:"directory,omitempty"` + ThreadID string `json:"threadID"` + ThreadName string `json:"threadName"` + Class string `json:"class,omitempty"` + Namespace string `json:"namespace,omitempty"` + Assembly string `json:"assembly,omitempty"` + Package string `json:"package,omitempty"` + PrintString string `json:"printString"` + SubFrames SubFrames `json:"subFrames"` +} + +func (f *Frame) MarshalJSON() ([]byte, error) { + toArr := []interface{}{ + f.Quantity, + f.Value, + f.Unit, + f.Percent, + f.Function, + f.Line, + f.File, + f.Directory, + f.ThreadID, + f.ThreadName, + f.Class, + f.Namespace, + f.Assembly, + f.Package, + f.PrintString, + f.SubFrames, + } + return json.Marshal(toArr) +} + +type SubFrames map[string]*Frame + +func (sf SubFrames) MarshalJSON() ([]byte, error) { + frames := make([]*Frame, 0, len(sf)) + for _, frame := range sf { + frames = append(frames, frame) + } + return json.Marshal(frames) +}
diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/pprof/summary.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/pprof/summary.go
new file mode 100644
index 0000000000..6702343e89
--- /dev/null
+++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/pprof/summary.go
@@ -0,0 +1,68 @@
+package pprof
+
+import (
+	"sort"
+
+	"github.com/GuanceCloud/cliutils/pprofparser/domain/events"
+	"github.com/GuanceCloud/cliutils/pprofparser/domain/languages"
+	"github.com/GuanceCloud/cliutils/pprofparser/domain/quantity"
+)
+
+type SummaryValueType struct {
+	Type events.Type    `json:"type"`
+	Unit *quantity.Unit `json:"unit"`
+}
+
+type EventSummary struct {
+	*SummaryValueType
+	Value int64 `json:"value"`
+}
+
+func (es *EventSummary) ConvertToDefaultUnit() {
+	if es.Unit == nil {
+		return
+	}
+	es.Value = es.Unit.ConvertToDefaultUnit(es.Value)
+	es.Unit = es.Unit.Kind.DefaultUnit
+}
+
+type SummaryCollection []*EventSummary
+
+func (s SummaryCollection) SortByType(lang languages.Lang) {
+	cs := &CollectionSort{
+		collection: s,
+		lessFunc: func(a, b *EventSummary) bool {
+			return a.Type.GetSort(lang) < b.Type.GetSort(lang)
+		},
+	}
+	sort.Sort(cs)
+}
+
+func (s SummaryCollection) SortByValue() {
+	cs := &CollectionSort{
+		collection: s,
+		lessFunc: func(a, b *EventSummary) bool {
+			return a.Value > b.Value
+		},
+	}
+	sort.Sort(cs)
+}
+
+type LessFunc func(a, b *EventSummary) bool
+
+type CollectionSort struct {
+	collection SummaryCollection
+	lessFunc   LessFunc
+}
+
+func (c *CollectionSort) Len() int {
+	return len(c.collection)
+}
+
+func (c *CollectionSort) Less(i, j int) bool {
+	return c.lessFunc(c.collection[i], c.collection[j])
+}
+
+func (c *CollectionSort) Swap(i, j int) {
+	c.collection[i], c.collection[j] = c.collection[j], c.collection[i]
+}
diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/quantity/kind.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/quantity/kind.go
new file mode 100644
index 0000000000..ca205ff73c
--- /dev/null
+++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/quantity/kind.go
@@ -0,0 +1,21 @@
+package quantity
+
+var (
+	UnknownKind = &Kind{}
+	Count       = &Kind{}
+	Duration    = &Kind{}
+	Memory      = &Kind{}
+)
+
+func init() {
+	// assign here to avoid an initialization reference cycle between Kind and Unit
+	UnknownKind.DefaultUnit = UnknownUnit
+	Count.DefaultUnit = CountUnit
+	Duration.DefaultUnit = MicroSecond
+	Memory.DefaultUnit = Byte
+}
+
+// Kind is the kind of measurement a value represents: count, duration, memory, etc.
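+// A Kind's DefaultUnit is the unit summaries are normalized to: durations
+// normalize to MicroSecond and memory sizes to Byte (see init above).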
+type Kind struct { + DefaultUnit *Unit +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/quantity/quantity.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/quantity/quantity.go new file mode 100644 index 0000000000..4f349687f3 --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/quantity/quantity.go @@ -0,0 +1,121 @@ +package quantity + +import ( + "fmt" + "time" + + "github.com/GuanceCloud/cliutils/pprofparser/tools/mathtoolkit" +) + +type Quantity struct { + Value int64 + Unit *Unit +} + +func (q *Quantity) MarshalJSON() ([]byte, error) { + if q == nil { + return []byte("null"), nil + } + return []byte(fmt.Sprintf("\"%s\"", q.String())), nil +} + +func (q *Quantity) SwitchToDefaultUnit() { + if q == nil || q.Unit == nil { + return + } + if q.Unit != q.Unit.Kind.DefaultUnit { + q.Value, _ = q.IntValueIn(q.Unit.Kind.DefaultUnit) + q.Unit = q.Unit.Kind.DefaultUnit + } +} + +func (q *Quantity) String() string { + if q.Value == 0 { + return "No Data" + } + switch q.Unit.Kind { + case Count: + return fmt.Sprintf("%d", q.Value) + case Memory: + byt, _ := q.IntValueIn(Byte) + if byt >= GigaByte.Base { + gb, _ := q.DoubleValueIn(GigaByte) + return fmt.Sprintf("%.2f %s", gb, GigaByte) + } else if byt >= MegaByte.Base { + mb, _ := q.DoubleValueIn(MegaByte) + return fmt.Sprintf("%.2f %s", mb, MegaByte) + } else if byt >= KiloByte.Base { + kb, _ := q.DoubleValueIn(KiloByte) + return fmt.Sprintf("%.2f %s", kb, KiloByte) + } + return fmt.Sprintf("%d %s", byt, Byte) + case Duration: + td := q.toTimeDuration() + return td.String() + } + return fmt.Sprintf("%d %s", q.Value, q.Unit) +} + +func (q *Quantity) DoubleValueIn(target *Unit) (float64, error) { + if q.Unit == target { + return float64(q.Value), nil + } + if q.Unit.Kind != target.Kind { + return 0, fmt.Errorf("can not convert [%s] to [%s], the kinds of the unit should be same", q.Unit, target) + } + + return float64(q.Value) * (float64(q.Unit.Base) / float64(target.Base)), nil +} + +func (q *Quantity) IntValueIn(target *Unit) (int64, error) { + if q.Unit == target { + return q.Value, nil + } + v, err := q.DoubleValueIn(target) + if err != nil { + return 0, err + } + return mathtoolkit.Trunc(v), nil +} + +func (q *Quantity) Sub(sub *Quantity) *Quantity { + if q.Unit.Kind != sub.Unit.Kind { + panic("arithmetic operation between not matched unit kind") + } + + m, n := q.Value, sub.Value + + toUnit := q.Unit + if q.Unit.Base > sub.Unit.Base { + m, _ = q.IntValueIn(sub.Unit) + toUnit = sub.Unit + } else if q.Unit.Base < sub.Unit.Base { + n, _ = sub.IntValueIn(q.Unit) + } + + return toUnit.Quantity(m - n) +} + +func (q *Quantity) toTimeDuration() time.Duration { + if q.Unit.Kind != Duration { + panic("not kind of duration, can not convert") + } + + num := time.Duration(q.Value) + + switch q.Unit { + case NanoSecond: + return time.Nanosecond * num + case MicroSecond: + return time.Microsecond * num + case MilliSecond: + return time.Millisecond * num + case Second: + return time.Second * num + case Minute: + return time.Minute * num + case Hour: + return time.Hour * num + } + panic(fmt.Sprintf("not resolved duration unit: [%s]", q.Unit)) +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/quantity/unit.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/quantity/unit.go new file mode 100644 index 0000000000..46a552a84c --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/quantity/unit.go @@ -0,0 +1,213 @@ +package quantity + +import ( + 
"encoding/json" + "fmt" + "strings" + "time" +) + +var ( + MemoryDisplayName = "digital" + MemoryDefaultUnitFlag = "B" + + DurationDisplayName = "time" + DurationDefaultUnitFlag = "μs" +) + +var UnknownUnit = &Unit{Kind: UnknownKind, Name: "unknown"} + +var ( + NanoSecond = &Unit{Kind: Duration, Base: int64(time.Nanosecond), Name: "ns"} + MicroSecond = &Unit{Kind: Duration, Base: int64(time.Microsecond), Name: "us"} + MilliSecond = &Unit{Kind: Duration, Base: int64(time.Millisecond), Name: "ms"} + Second = &Unit{Kind: Duration, Base: int64(time.Second), Name: "s"} + Minute = &Unit{Kind: Duration, Base: int64(time.Minute), Name: "m"} + Hour = &Unit{Kind: Duration, Base: int64(time.Hour), Name: "h"} +) + +var ( + CountUnit = &Unit{Kind: Count, Base: 1, Name: ""} +) + +var ( + Byte = &Unit{Kind: Memory, Base: 1, Name: "Bytes"} + KiloByte = &Unit{Kind: Memory, Base: 1 << 10, Name: "KB"} + MegaByte = &Unit{Kind: Memory, Base: 1 << 20, Name: "MB"} + GigaByte = &Unit{Kind: Memory, Base: 1 << 30, Name: "GB"} + TeraByte = &Unit{Kind: Memory, Base: 1 << 40, Name: "TB"} + PetaByte = &Unit{Kind: Memory, Base: 1 << 50, Name: "PB"} +) + +var ( + memoryUnitMaps = map[string]*Unit{ + "bytes": Byte, + "byte": Byte, + "b": Byte, + + "kilobytes": KiloByte, + "kilobyte": KiloByte, // 1000 + "kibibytes": KiloByte, + "kibibyte": KiloByte, // 1024 + "kib": KiloByte, + "kb": KiloByte, + + "megabytes": MegaByte, + "megabyte": MegaByte, + "megibytes": MegaByte, + "megibyte": MegaByte, + "mib": MegaByte, + "mb": MegaByte, + + "gigabytes": GigaByte, + "gigabyte": GigaByte, + "gigibytes": GigaByte, + "gigibyte": GigaByte, + "gib": GigaByte, + "gb": GigaByte, + + "terabytes": TeraByte, + "terabyte": TeraByte, + "tebibytes": TeraByte, + "tebibyte": TeraByte, + "trillionbytes": TeraByte, + "trillionbyte": TeraByte, + "tib": TeraByte, + "tb": TeraByte, + + "petabytes": PetaByte, + "petabyte": PetaByte, + "pebibytes": PetaByte, + "pebibyte": PetaByte, + "pib": PetaByte, + "pb": PetaByte, + } + + durationUnitMaps = map[string]*Unit{ + "nanoseconds": NanoSecond, + "nanosecond": NanoSecond, + "ns": NanoSecond, + + "microseconds": MicroSecond, + "microsecond": MicroSecond, + "us": MicroSecond, + "µs": MicroSecond, + + "milliseconds": MilliSecond, + "millisecond": MilliSecond, + "ms": MilliSecond, + + "seconds": Second, + "second": Second, + "s": Second, + + "minutes": Minute, + "minute": Minute, + "m": Minute, + + "hours": Hour, + "hour": Hour, + "h": Hour, + } +) + +type Unit struct { + Kind *Kind + Base int64 + Name string +} + +func (u *Unit) String() string { + return u.Name +} + +// ConvertTo convert value for Unit u to Unit target +func (u *Unit) ConvertTo(target *Unit, value int64) (int64, error) { + if u.Kind != target.Kind { + return 0, fmt.Errorf("unit kinds are not compatiable") + } + return value * u.Base / target.Base, nil +} + +// ConvertToDefaultUnit convert value in Unit u to Unit Kind's default unit +func (u *Unit) ConvertToDefaultUnit(value int64) int64 { + v, _ := u.ConvertTo(u.Kind.DefaultUnit, value) + return v +} + +func (u *Unit) Quantity(v int64) *Quantity { + return &Quantity{ + Unit: u, + Value: v, + } +} + +func (u *Unit) MarshalJSON() ([]byte, error) { + var flags []string + + switch u.Kind { + case Memory: + flags = []string{MemoryDisplayName, MemoryDefaultUnitFlag} + case Duration: + flags = []string{DurationDisplayName, DurationDefaultUnitFlag} + default: + flags = make([]string, 0) + } + return json.Marshal(flags) +} + +func parseDuration(s string) (*Unit, error) { + unit := 
diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/tracing/tracing.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/tracing/tracing.go
new file mode 100644
index 0000000000..16c2509a80
--- /dev/null
+++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/domain/tracing/tracing.go
@@ -0,0 +1,41 @@
+package tracing
+
+var (
+	AllTraceSpanSet = NewSpanIDSet()
+)
+
+type SpanIDSet struct {
+	Set map[string]struct{}
+}
+
+func NewSpanIDSet() *SpanIDSet {
+	return &SpanIDSet{
+		Set: make(map[string]struct{}),
+	}
+}
+
+func (ss *SpanIDSet) Put(id string) {
+	ss.Set[id] = struct{}{}
+}
+
+func (ss *SpanIDSet) Contains(id string) bool {
+	// avoid nil pointer
+	if ss.Set == nil {
+		return false
+	}
+	_, ok := ss.Set[id]
+	return ok
+}
+
+// getSpanParentID returns the top-level parent ID of spanID and compresses
+// the lookup path along the way.
+func getSpanParentID(spanIDMaps map[string]string, topID string, spanID string) string {
+	pid, ok := spanIDMaps[spanID]
+	if !ok {
+		return ""
+	}
+	if pid != "0" && pid != topID {
+		spanIDMaps[spanID] = getSpanParentID(spanIDMaps, topID, pid)
+	}
+	return spanIDMaps[spanID]
+}
diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/aggregators.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/aggregators.go
new file mode 100644
index 0000000000..c34ab6e345
--- /dev/null
+++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/aggregators.go
@@ -0,0 +1,677 @@
+package parsing
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/GuanceCloud/cliutils/pprofparser/domain/languages"
+	"github.com/GuanceCloud/cliutils/pprofparser/domain/pprof"
+	"github.com/GuanceCloud/cliutils/pprofparser/domain/quantity"
+	"github.com/GuanceCloud/cliutils/pprofparser/tools/filepathtoolkit"
+	"github.com/GuanceCloud/cliutils/pprofparser/tools/logtoolkit"
+	"github.com/GuanceCloud/cliutils/pprofparser/tools/parsetoolkit"
+	"github.com/google/pprof/profile"
+)
+
+//
[|lm:System.Private.CoreLib;|ns:System.Diagnostics.Tracing;|ct:EventSource;|fn:DebugCheckEvent] + +type DDFieldTag string + +const UnknownInfo = "" + +const ( + AssemblyTag DDFieldTag = "|lm:" + NamespaceTag DDFieldTag = "|ns:" + ClassTag DDFieldTag = "|ct:" + MethodTag DDFieldTag = "|fn:" +) + +var ddDotnetFieldIdx = map[DDFieldTag]int{ + AssemblyTag: 0, + NamespaceTag: 1, + ClassTag: 2, + MethodTag: 3, +} + +type GetPropertyByLine func(lang languages.Lang, line profile.Line) string + +var ( + getFuncName = getPropertyCallable(getFuncNameByLine) + getMethod = getPropertyCallable(getMethodByLine) + getClass = getPropertyCallable(getClassByLine) + getNamespace = getPropertyCallable(getNamespaceByLine) + getAssembly = getPropertyCallable(getAssemblyByLine) + getFuncDisplayStr = getPropertyCallable(GetPrintStrByLine) + getDirectory = getPropertyCallable(getDirectoryByLine) + getFile = getPropertyCallable(getFileByLine) + getPackageName = getPropertyCallable(getPackageNameByLine) + getLine = getPropertyCallable(func(lang languages.Lang, line profile.Line) string { + return strconv.FormatInt(getLineByLine(lang, line), 10) + }) +) + +func getFuncNameByLine(lang languages.Lang, line profile.Line) string { + switch lang { + case languages.NodeJS: + segments := strings.Split(line.Function.Name, ":") + if len(segments) > 1 { + return segments[len(segments)-2] + } + return UnknownInfo + case languages.DotNet: + return getDDDotnetMethodName(line.Function.Name) + case languages.PHP: + return getPHPBaseFuncName(line.Function.Name) + } + return line.Function.Name +} + +func getDDDotnetMethodName(funcName string) string { + pieces := strings.Split(funcName, " ") + + var className, methodName string + + classIdx, fnIdx := ddDotnetFieldIdx[ClassTag], ddDotnetFieldIdx[MethodTag] + if classIdx < len(pieces) && strings.HasPrefix(pieces[classIdx], string(ClassTag)) { + className = strings.TrimPrefix(pieces[classIdx], string(ClassTag)) + } + if fnIdx < len(pieces) && strings.HasPrefix(pieces[fnIdx], string(MethodTag)) { + methodName = strings.TrimPrefix(pieces[fnIdx], string(MethodTag)) + } + + if className == "" || methodName == "" { + for _, piece := range pieces { + piece = strings.TrimSpace(piece) + if className == "" && strings.HasPrefix(piece, string(ClassTag)) { + className = strings.TrimPrefix(piece, string(ClassTag)) + } + if methodName == "" && strings.HasPrefix(piece, string(MethodTag)) { + methodName = strings.TrimPrefix(piece, string(MethodTag)) + } + } + } + if methodName == "" { + return "unknown" + } + + if className != "" { + return className + "." 
+ methodName + } + + return methodName +} + +func getDDDotnetField(funcName string, tag DDFieldTag) string { + pieces := strings.Split(funcName, " ") + idx := ddDotnetFieldIdx[tag] + + tagStr := string(tag) + if idx < len(pieces) && strings.HasPrefix(pieces[idx], tagStr) { + return strings.TrimPrefix(pieces[idx], tagStr) + } + for _, piece := range pieces { + piece = strings.TrimSpace(piece) + if strings.HasPrefix(piece, tagStr) { + return strings.TrimPrefix(piece, tagStr) + } + } + return "" +} + +func getFuncIdentifier(lang languages.Lang, smp *profile.Sample, reverse bool) string { + i := 0 + if reverse { + i = len(smp.Location) - 1 + } + if len(smp.Location) > 0 { + loc := smp.Location[i] + if len(loc.Line) > 0 { + return strconv.FormatUint(loc.Line[len(loc.Line)-1].Function.ID, 10) + } + } + return UnknownInfo +} + +func getMethodByLine(lang languages.Lang, line profile.Line) string { + return getDDDotnetMethodName(line.Function.Name) +} + +func getPropertyCallable(getPropertyByLine GetPropertyByLine) GetPropertyFunc { + return func(lang languages.Lang, sample *profile.Sample, reverse bool) string { + i := 0 + if reverse { + i = len(sample.Location) - 1 + } + + if len(sample.Location) > 0 { + loc := sample.Location[i] + if len(loc.Line) > 0 { + return getPropertyByLine(lang, loc.Line[len(loc.Line)-1]) + } + } + return UnknownInfo + } +} + +func getClassByLine(lang languages.Lang, line profile.Line) string { + funcName := line.Function.Name + switch lang { + case languages.DotNet: + return getDDDotnetField(funcName, ClassTag) + case languages.PHP: + funcName = getPHPBaseFuncName(funcName) + if pos := strings.LastIndex(funcName, "::"); pos >= 0 { + return funcName[:pos] + } + if pos := strings.Index(funcName, "|"); pos >= 0 { + return funcName[:pos] + } + filename := strings.ReplaceAll(line.Function.Filename, "\\", "/") + + if pos := strings.Index(filename, "/vendor/"); pos >= 0 { + filename = filename[pos+len("/vendor/"):] + if idx := strings.Index(filename, "/src/"); idx >= 0 { + filename = filename[:idx] + } else if idx := strings.LastIndexByte(filename, '/'); idx >= 0 { + filename = filename[:idx] + } + return filename + } + + return "standard" + } + + return UnknownInfo +} + +func getNamespaceByLine(lang languages.Lang, line profile.Line) string { + switch lang { + case languages.DotNet: + if namespace := getDDDotnetField(line.Function.Name, NamespaceTag); namespace != "" { + return namespace + } + } + return UnknownInfo +} + +func getAssemblyByLine(lang languages.Lang, line profile.Line) string { + switch lang { + case languages.DotNet: + if assembly := getDDDotnetField(line.Function.Name, AssemblyTag); assembly != "" { + return assembly + } + } + return UnknownInfo +} + +func getLineByLine(lang languages.Lang, line profile.Line) int64 { + switch lang { + case languages.NodeJS: + segments := strings.Split(line.Function.Name, ":") + if len(segments) > 0 { + lineNo := segments[len(segments)-1] + if lineNoRegExp.MatchString(lineNo) { + lineNum, _ := strconv.ParseInt(lineNo, 10, 64) + return lineNum + } + } + } + return line.Line +} + +func getFileByLine(lang languages.Lang, line profile.Line) string { + switch lang { + case languages.NodeJS: + funcName := line.Function.Name + segments := strings.Split(funcName, ":") + if len(segments) >= 3 { + filename := strings.TrimSpace(strings.Join(segments[:len(segments)-2], ":")) + if filename != "" { + return filename + } + } + return UnknownInfo + case languages.PHP: + filename := strings.TrimSpace(line.Function.Filename) + if filename == "" 
{ + filename = "standard" + } + return filename + } + return line.Function.Filename +} + +func getDirectoryByLine(lang languages.Lang, line profile.Line) string { + return filepathtoolkit.DirName(getFileByLine(lang, line)) +} + +func getThreadID(lang languages.Lang, smp *profile.Sample, reverse bool) string { + return getThreadIDBySample(smp) +} + +func getThreadIDBySample(smp *profile.Sample) string { + if tid := parsetoolkit.GetLabel(smp, LabelThreadID); tid != "" { + return tid + } + return UnknownInfo +} + +func getThreadName(lang languages.Lang, smp *profile.Sample, reverse bool) string { + return getThreadNameBySample(smp) +} + +func getThreadNameBySample(smp *profile.Sample) string { + if tName := parsetoolkit.GetLabel(smp, LabelThreadName); tName != "" { + return tName + } + return UnknownInfo +} + +func getPackageNameByLine(lang languages.Lang, line profile.Line) string { + switch lang { + case languages.GoLang: + packageName, _ := cutGoFuncName(line.Function.Name) + return packageName + } + return UnknownInfo +} + +// cutGoFuncName 切割pprof go func 为 package 和 func name +// return package name 和 func name +func cutGoFuncName(funcName string) (string, string) { + pos := strings.LastIndexByte(funcName, '/') + packageName := "" + if pos > -1 { + packageName, funcName = funcName[:pos+1], funcName[pos+1:] + } + cuts := strings.SplitN(funcName, ".", 2) + if len(cuts) < 2 { + logtoolkit.Errorf(`func name not contains ".": %s`, funcName) + return packageName, cuts[0] + } + return packageName + cuts[0], cuts[1] +} + +func GetPrintStrByLine(lang languages.Lang, line profile.Line) string { + switch lang { + case languages.GoLang: + _, funcName := cutGoFuncName(line.Function.Name) + return fmt.Sprintf("%s(%s)", funcName, filepathtoolkit.BaseName(line.Function.Filename)) + case languages.NodeJS: + // node:internal/timers:listOnTimeout:569 + // ./node_modules/@pyroscope/nodejs/dist/cjs/index.js:(anonymous):313 + // :(idle):0 + segments := strings.Split(line.Function.Name, ":") + funcName := "" + filename := "" + if len(segments) == 1 { + funcName = segments[0] + } else if len(segments) > 1 { + funcName = segments[len(segments)-2] + filename = strings.TrimSpace(strings.Join(segments[:len(segments)-2], ":")) + } + baseName := filepathtoolkit.BaseName(filename) + if baseName == "" || baseName == "." 
{ + return funcName + } + return fmt.Sprintf("%s(%s)", funcName, baseName) + + case languages.DotNet: + return getDDDotnetMethodName(line.Function.Name) + case languages.PHP: + filename := line.Function.Filename + if filename != "" { + filename = filepathtoolkit.BaseName(filename) + } + funcName := getPHPBaseFuncName(line.Function.Name) + if filename != "" { + return fmt.Sprintf("%s(%s)", funcName, filename) + } + return funcName + default: + return fmt.Sprintf("%s(%s)", line.Function.Name, filepathtoolkit.BaseName(line.Function.Filename)) + } +} + +func getPHPBaseFuncName(funcName string) string { + if funcName == "" { + return UnknownInfo + } + pos := strings.LastIndexByte(funcName, '\\') + if pos >= 0 && pos < len(funcName)-1 { + return funcName[pos+1:] + } + return funcName +} + +func GetSpyPrintStr(funcName, fileName string) string { + return fmt.Sprintf("%s(%s)", funcName, filepathtoolkit.BaseName(fileName)) +} + +func GetFuncAndLineDisplay(lang languages.Lang, smp *profile.Sample, reverse bool) string { + i := 0 + if reverse { + i = len(smp.Location) - 1 + } + if len(smp.Location) > 0 { + loc := smp.Location[i] + if len(loc.Line) > 0 { + line := loc.Line[len(loc.Line)-1] + switch lang { + case languages.PHP: + funcName := getPHPBaseFuncName(line.Function.Name) + filename := line.Function.Filename + if filename != "" { + filename = filepathtoolkit.BaseName(filename) + } + if filename != "" { + return fmt.Sprintf("%s(%s:L#%d)", funcName, filename, line.Line) + } + return funcName + case languages.GoLang: + _, funcName := cutGoFuncName(line.Function.Name) + return fmt.Sprintf("%s(%s:L#%d)", + funcName, filepathtoolkit.BaseName(line.Function.Filename), line.Line) + default: + return fmt.Sprintf("%s(%s:L#%d)", + line.Function.Name, filepathtoolkit.BaseName(line.Function.Filename), line.Line) + } + } + } + return "" +} + +var ( + Function = &Aggregator{ + Name: "Function", + Mapping: []string{pprof.FieldFunctionName}, + ShowLanguages: languages.PythonID | languages.GolangID, + GetIdentifier: getFuncIdentifier, + GetDisplayStr: getFuncDisplayStr, + GetMappingFuncs: []GetPropertyFunc{getFuncName}, + } + + PHPFunction = &Aggregator{ + Name: "Function", + Mapping: []string{pprof.FieldFunctionName}, + GetIdentifier: getFuncIdentifier, + GetDisplayStr: getFuncDisplayStr, + GetMappingFuncs: []GetPropertyFunc{getFuncName}, + } + + Method = &Aggregator{ + Name: "Method", + Mapping: []string{pprof.FieldFunctionName}, + ShowLanguages: languages.JavaID | languages.DotNetID, + GetIdentifier: getMethod, + GetDisplayStr: getMethod, + GetMappingFuncs: []GetPropertyFunc{getMethod}, + } + + Class = &Aggregator{ + Name: "Class", + Mapping: []string{pprof.FieldClass}, + ShowLanguages: languages.DotNetID, + GetIdentifier: getClass, + GetDisplayStr: getClass, + GetMappingFuncs: []GetPropertyFunc{getClass}, + } + + Namespace = &Aggregator{ + Name: "Namespace", + Mapping: []string{pprof.FieldNamespace}, + ShowLanguages: languages.DotNetID, + GetIdentifier: getNamespace, + GetDisplayStr: getNamespace, + GetMappingFuncs: []GetPropertyFunc{getNamespace}, + } + + Assembly = &Aggregator{ + Name: "Assembly", + Mapping: []string{pprof.FieldAssembly}, + ShowLanguages: languages.DotNetID, + GetIdentifier: getAssembly, + GetDisplayStr: getAssembly, + GetMappingFuncs: []GetPropertyFunc{getAssembly}, + } + + PyroNodeFunction = &Aggregator{ + Name: "Function", + Mapping: []string{pprof.FieldFunctionName}, + ShowLanguages: languages.NodeJSID, + GetIdentifier: getFuncIdentifier, + GetDisplayStr: getFuncDisplayStr, + 
GetMappingFuncs: []GetPropertyFunc{getFuncName},
+	}
+
+	FunctionLine = &Aggregator{
+		Name:          "Function + Line",
+		Mapping:       []string{pprof.FieldFunctionName, pprof.FieldLine},
+		ShowLanguages: languages.PythonID | languages.GolangID,
+		GetIdentifier: func(lang languages.Lang, smp *profile.Sample, reverse bool) string {
+			i := 0
+			if reverse {
+				i = len(smp.Location) - 1
+			}
+			if len(smp.Location) > 0 {
+				loc := smp.Location[i]
+				if len(loc.Line) > 0 {
+					return fmt.Sprintf("%s###%d###%d",
+						loc.Line[len(loc.Line)-1].Function.Filename, loc.Line[len(loc.Line)-1].Function.ID, loc.Line[len(loc.Line)-1].Line)
+				}
+			}
+			return ""
+		},
+		GetDisplayStr:   GetFuncAndLineDisplay,
+		GetMappingFuncs: []GetPropertyFunc{getFuncName, getLine},
+	}
+
+	Directory = &Aggregator{
+		Name:            "Directory",
+		Mapping:         []string{pprof.FieldDirectory},
+		ShowLanguages:   languages.PythonID | languages.GolangID,
+		GetIdentifier:   getDirectory,
+		GetDisplayStr:   getDirectory,
+		GetMappingFuncs: []GetPropertyFunc{getDirectory},
+	}
+
+	File = &Aggregator{
+		Name:            "File",
+		Mapping:         []string{pprof.FieldFile},
+		ShowLanguages:   languages.PythonID | languages.GolangID,
+		GetIdentifier:   getFile,
+		GetDisplayStr:   getFile,
+		GetMappingFuncs: []GetPropertyFunc{getFile},
+	}
+
+	PyroNodeFile = &Aggregator{
+		Name:            "File",
+		Mapping:         []string{pprof.FieldFile},
+		ShowLanguages:   languages.NodeJSID,
+		GetIdentifier:   getFile,
+		GetDisplayStr:   getFile,
+		GetMappingFuncs: []GetPropertyFunc{getFile},
+	}
+
+	ThreadID = &Aggregator{
+		Name:            "Thread ID",
+		Mapping:         []string{pprof.FieldThreadID},
+		ShowLanguages:   languages.PythonID | languages.DotNetID,
+		GetIdentifier:   getThreadID,
+		GetDisplayStr:   getThreadID,
+		GetMappingFuncs: []GetPropertyFunc{getThreadID},
+	}
+
+	ThreadName = &Aggregator{
+		Name:            "Thread Name",
+		Mapping:         []string{pprof.FieldThreadName},
+		ShowLanguages:   languages.PythonID | languages.DotNetID,
+		GetIdentifier:   getThreadName,
+		GetDisplayStr:   getThreadName,
+		GetMappingFuncs: []GetPropertyFunc{getThreadName},
+	}
+
+	Package = &Aggregator{
+		Name:            "Package",
+		Mapping:         []string{pprof.FieldPackage},
+		ShowLanguages:   languages.GolangID,
+		GetIdentifier:   getPackageName,
+		GetDisplayStr:   getPackageName,
+		GetMappingFuncs: []GetPropertyFunc{getPackageName},
+	}
+)
+
+type GetPropertyFunc func(lang languages.Lang, smp *profile.Sample, reverse bool) string
+
+type Aggregator struct {
+	Name    string
+	Mapping []string
+
+	ShowLanguages languages.LangID
+
+	// GetIdentifier returns the unique identifier of this dimension for a sample.
+	GetIdentifier GetPropertyFunc
+
+	// GetDisplayStr returns the display string of this dimension for a sample.
+	GetDisplayStr GetPropertyFunc
+
+	// GetMappingFuncs returns the value funcs corresponding to the Mapping fields.
+	GetMappingFuncs []GetPropertyFunc
+}
+
+type AggregatorSelectSlice []*AggregatorSelect
+
+func (asm AggregatorSelectSlice) CalcPercentAndQuantity(total int64) {
+	for _, aggregatorSelect := range asm {
+		for _, opt := range aggregatorSelect.Options {
+			opt.CalcPercentAndQuantity(total)
+		}
+	}
+}
+
+func (asm AggregatorSelectSlice) MarshalJSON() ([]byte, error) {
+
+	JSONMap := make([]*AggregatorSelectForJSON, 0, len(asm))
+
+	for _, aggregatorSelect := range asm {
+
+		selectForJSON := &AggregatorSelectForJSON{
+			Dimension: aggregatorSelect.Aggregator.Name,
+			Mapping:   aggregatorSelect.Mapping,
+		}
+
+		for _, opt := range aggregatorSelect.Options {
+			selectForJSON.Options = append(selectForJSON.Options, opt)
+		}
+
+		sort.Sort(selectForJSON.Options)
+
+		JSONMap = append(JSONMap, selectForJSON)
+	}
+	return json.Marshal(JSONMap)
+}
+
+type OptionSlice []*AggregatorOption
+
+func (os OptionSlice) Len() int {
+	return
len(os) +} + +func (os OptionSlice) Less(i, j int) bool { + return os[i].Value > os[j].Value +} + +func (os OptionSlice) Swap(i, j int) { + os[i], os[j] = os[j], os[i] +} + +type OptionMap map[string]*AggregatorOption + +type AggregatorSelect struct { + Aggregator *Aggregator + Mapping []string + Options OptionMap +} + +type AggregatorSelectForJSON struct { + Dimension string `json:"dimension"` + Mapping []string `json:"mapping"` + Options OptionSlice `json:"data"` +} + +type AggregatorOption struct { + Title string `json:"title"` + Quantity *quantity.Quantity `json:"quantity"` + Value int64 `json:"value"` + Unit *quantity.Unit `json:"unit"` + Percent string `json:"percent"` + MappingValues []string `json:"mappingValues"` +} + +func (ao *AggregatorOption) CalcPercentAndQuantity(total int64) { + + if total <= 0 { + ao.Percent = "100" + } else { + ao.Percent = fmt.Sprintf("%.2f", float64(ao.Value)/float64(total)*100) + } + + if ao.Unit != nil { + ao.Quantity = ao.Unit.Quantity(ao.Value) + + // 转成默认单位 + ao.Quantity.SwitchToDefaultUnit() + ao.Value = ao.Quantity.Value + ao.Unit = ao.Quantity.Unit + } +} + +var PythonAggregatorList = []*Aggregator{ + Function, + FunctionLine, + Directory, + File, + ThreadID, + ThreadName, +} + +var GoAggregatorList = []*Aggregator{ + Function, + FunctionLine, + Directory, + File, + Package, +} + +var SpyAggregatorList = []*Aggregator{ + Function, + FunctionLine, + Directory, + File, + ThreadName, +} + +var PyroscopeNodeJSAggregatorList = []*Aggregator{ + PyroNodeFunction, + PyroNodeFile, +} + +var DDTraceDotnetAggregatorList = []*Aggregator{ + Method, + Class, + Namespace, + Assembly, + ThreadID, + ThreadName, +} + +var DDTracePHPAggregatorList = []*Aggregator{ + PHPFunction, + FunctionLine, + Class, + File, + Directory, +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/collapse.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/collapse.go new file mode 100644 index 0000000000..8ed189a339 --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/collapse.go @@ -0,0 +1,462 @@ +package parsing + +import ( + "bufio" + "fmt" + "github.com/GuanceCloud/cliutils/pprofparser/domain/tracing" + "os" + "regexp" + "strconv" + "strings" + + "github.com/GuanceCloud/cliutils/pprofparser/domain/events" + "github.com/GuanceCloud/cliutils/pprofparser/domain/parameter" + "github.com/GuanceCloud/cliutils/pprofparser/domain/pprof" + "github.com/GuanceCloud/cliutils/pprofparser/domain/quantity" + "github.com/GuanceCloud/cliutils/pprofparser/service/storage" + "github.com/GuanceCloud/cliutils/pprofparser/tools/filepathtoolkit" + "github.com/GuanceCloud/cliutils/pprofparser/tools/logtoolkit" + "github.com/GuanceCloud/cliutils/pprofparser/tools/parsetoolkit" +) + +/* +py-spy profiler output is as below: + +process 95768:"/opt/homebrew/Cellar/python@3.10/3.10.5/Frameworks/Python.framework/Versions/3.10/Resources/Python.app/Contents/MacOS/Python fibobacci.py";thread (0x100850580); (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:14);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci 
(/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:5) 1 +process 95768:"/opt/homebrew/Cellar/python@3.10/3.10.5/Frameworks/Python.framework/Versions/3.10/Resources/Python.app/Contents/MacOS/Python fibobacci.py";thread (0x100850580); (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:14);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:7) 1 +process 95768:"/opt/homebrew/Cellar/python@3.10/3.10.5/Frameworks/Python.framework/Versions/3.10/Resources/Python.app/Contents/MacOS/Python fibobacci.py";thread (0x100850580); (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:14);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:5) 1 +process 95768:"/opt/homebrew/Cellar/python@3.10/3.10.5/Frameworks/Python.framework/Versions/3.10/Resources/Python.app/Contents/MacOS/Python fibobacci.py";thread (0x100850580); (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:14);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci 
(/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:8);fibonacci (/Users/zy/PycharmProjects/pyroscope-demo/fibobacci.py:5) 1 + +nginx;/usr/sbin/nginx+0x24678;__libc_start_main;main;ngx_master_process_cycle;/usr/sbin/nginx+0x4f0d8;ngx_spawn_process;/usr/sbin/nginx+0x4fa44;ngx_process_events_and_timers;/usr/sbin/nginx+0x51f54;epoll_pwait 1 + +*/ + +var processRegExp = regexp.MustCompile(`^process\s+\d+:"`) +var threadRexExp = regexp.MustCompile(`^thread\s+\([a-zA-Z\d]+\)$`) +var stackTraceRegExp = regexp.MustCompile(`^(\S+)(?: +\(([^:]+):(\d+)\))?`) + +type Collapse struct { + workspaceUUID string + profiles []*parameter.Profile + filterBySpan bool + spanIDSet *tracing.SpanIDSet +} + +func NewCollapse(workspaceUUID string, profiles []*parameter.Profile, + filterBySpan bool, spanIDSet *tracing.SpanIDSet) *Collapse { + return &Collapse{ + workspaceUUID: workspaceUUID, + profiles: profiles, + filterBySpan: filterBySpan, + spanIDSet: spanIDSet, + } +} + +func summary(filename string) (map[events.Type]*EventSummary, error) { + f, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("open profile file [%s] fail: %w", filename, err) + } + defer f.Close() + + sampleSummary := &EventSummary{ + SummaryValueType: &SummaryValueType{ + Type: events.CpuSamples, + Unit: quantity.CountUnit, + }, + Value: 0, + } + + spySummaries := map[events.Type]*EventSummary{ + events.CpuSamples: sampleSummary, + } + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if len(line) < 2 { + continue + } + + blankIdx := strings.LastIndexByte(line, ' ') + if blankIdx < 0 { + logtoolkit.Errorf("py-spy profile doesn't contain any blank [line: %s]", line) + continue + } + n, err := strconv.ParseInt(strings.TrimSpace(line[blankIdx+1:]), 10, 64) + if err != nil { + logtoolkit.Errorf("resolve sample count fail [line: %s]: %w", line, err) + continue + } + sampleSummary.Value += n + } + return spySummaries, nil +} + +func (p *Collapse) Summary() (map[events.Type]*EventSummary, int64, error) { + + prof := p.profiles[0] + + startNanos, err := prof.StartTime() + if err != nil { + return nil, 0, fmt.Errorf("resolve Profile start timestamp fail: %w", err) + } + endNanos, err := prof.EndTime() + if err != nil { + return nil, 0, fmt.Errorf("resolve Profile end timestamp fail: %w", err) + } + + filename := storage.DefaultDiskStorage.GetProfilePath(p.workspaceUUID, prof.ProfileID, startNanos, events.DefaultProfileFilename) + + summaries, err := summary(filename) + if err != nil { + return nil, 0, fmt.Errorf("resolve collapse summary fail: %w", err) + } + + return summaries, endNanos - startNanos, nil +} + +func IsCollapseProfile(profiles []*parameter.Profile, workspaceUUID string) (bool, error) { + // 当前 py-spy 一次只有一条profile数据 + if 
len(profiles) > 1 { + return false, nil + } + + metadata, err := ReadMetaData(profiles[0], workspaceUUID) + if err != nil { + return false, fmt.Errorf("read py-spy metadata file fail: %w", err) + } + + return metadata.Format == RawFlameGraph || metadata.Format == Collapsed, nil +} + +func (p *Collapse) ResolveFlameGraph(_ events.Type) (*pprof.Frame, AggregatorSelectSlice, error) { + + prof := p.profiles[0] + + startNanos, err := prof.StartTime() + if err != nil { + return nil, nil, fmt.Errorf("invalid profile start: %w", err) + } + file := storage.DefaultDiskStorage.GetProfilePath(p.workspaceUUID, prof.ProfileID, startNanos, events.DefaultProfileFilename) + + f, err := os.Open(file) + if err != nil { + return nil, nil, fmt.Errorf("open py-spy profile file fail: %w", err) + } + defer f.Close() + + scanner := bufio.NewScanner(f) + + rootFrame := &pprof.Frame{ + SubFrames: make(pprof.SubFrames), + } + totalValue := int64(0) + + aggregatorSelects := make(AggregatorSelectSlice, 0, len(SpyAggregatorList)) + + for _, aggregator := range SpyAggregatorList { + aggregatorSelects = append(aggregatorSelects, &AggregatorSelect{ + Aggregator: aggregator, + Mapping: aggregator.Mapping, + Options: make(map[string]*AggregatorOption), + }) + } + + for scanner.Scan() { + + line := strings.TrimSpace(scanner.Text()) + if len(line) == 0 { + continue + } + + stacks := strings.Split(line, ";") + if len(stacks) == 0 { + continue + } + + lastStack := strings.TrimSpace(stacks[len(stacks)-1]) + if !stackTraceRegExp.MatchString(lastStack) { + logtoolkit.Warnf("The last stacktrace not match with the regexp [%s], the stacktrace [%s]", stackTraceRegExp.String(), lastStack) + continue + } + blankIdx := strings.LastIndexByte(lastStack, ' ') + if blankIdx < 0 { + logtoolkit.Warnf("Can not find any blank from [%s]", lastStack) + continue + } + + sampleCount, err := strconv.ParseInt(lastStack[blankIdx+1:], 10, 64) + if err != nil { + logtoolkit.Warnf("Can not resolve sample count from [%s]", lastStack) + continue + } + totalValue += sampleCount + + currentFrame := rootFrame + threadName := "" + + for idx, stack := range stacks { + stack = strings.TrimSpace(stack) + matches := stackTraceRegExp.FindStringSubmatch(stack) + if len(matches) != 4 { + if processRegExp.MatchString(stack) { + continue + } else if threadRexExp.MatchString(stack) { + threadName = stack + continue + } else { + return nil, nil, fmt.Errorf("resolve stacktrace from profiling file fail") + } + } + + funcName, codeFile, lineNoStr := matches[1], matches[2], matches[3] + + if codeFile == "" { + codeFile = "" + } + + var lineNo int64 = -1 + if lineNoStr != "" { + lineNo, _ = strconv.ParseInt(lineNoStr, 10, 64) + } + + funcIdentifier := fmt.Sprintf("%s###%s###%s###%d", threadName, codeFile, funcName, lineNo) + + if idx == len(stacks)-1 { + + for _, aggregatorSelect := range aggregatorSelects { + + var identifier string + var displayStr string + var mappingValues []string + + switch aggregatorSelect.Aggregator { + case Function: + identifier = fmt.Sprintf("%s###%s", codeFile, funcName) + displayStr = GetSpyPrintStr(funcName, codeFile) + mappingValues = []string{funcName} + case FunctionLine: + identifier = fmt.Sprintf("%s###%s###%d", codeFile, funcName, lineNo) + displayStr = fmt.Sprintf("%s(%s:L#%d)", funcName, filepathtoolkit.BaseName(codeFile), lineNo) + mappingValues = []string{funcName, fmt.Sprintf("%d", lineNo)} + case Directory: + identifier = filepathtoolkit.DirName(codeFile) + displayStr = identifier + mappingValues = []string{identifier} + case File: + 
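+					// File dimension: identifier, title and mapping value are all the source file path.
+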
identifier = codeFile + displayStr = codeFile + mappingValues = []string{codeFile} + case ThreadName: + identifier = threadName + displayStr = threadName + mappingValues = []string{threadName} + } + + if _, ok := aggregatorSelect.Options[identifier]; ok { + aggregatorSelect.Options[identifier].Value += sampleCount + } else { + aggregatorSelect.Options[identifier] = &AggregatorOption{ + Title: displayStr, + Value: sampleCount, + Unit: quantity.CountUnit, + MappingValues: mappingValues, + } + } + } + } + + subFrame, ok := currentFrame.SubFrames[funcIdentifier] + + if ok { + subFrame.Value += sampleCount + } else { + subFrame = &pprof.Frame{ + Value: sampleCount, + Unit: quantity.CountUnit, + Function: funcName, + Line: lineNo, + File: codeFile, + Directory: filepathtoolkit.DirName(codeFile), + ThreadID: "", + ThreadName: threadName, + Package: "", + PrintString: GetSpyPrintStr(funcName, codeFile), + SubFrames: make(pprof.SubFrames), + } + currentFrame.SubFrames[funcIdentifier] = subFrame + } + + currentFrame = subFrame + } + } + + rootFrame.Value = totalValue + rootFrame.Unit = quantity.CountUnit + + parsetoolkit.CalcPercentAndQuantity(rootFrame, totalValue) + aggregatorSelects.CalcPercentAndQuantity(totalValue) + return rootFrame, aggregatorSelects, nil +} + +func ParseRawFlameGraph(filename string) (*pprof.Frame, AggregatorSelectSlice, error) { + f, err := os.Open(filename) + if err != nil { + return nil, nil, fmt.Errorf("open py-spy profile file fail: %w", err) + } + defer f.Close() + + scanner := bufio.NewScanner(f) + + rootFrame := &pprof.Frame{ + SubFrames: make(pprof.SubFrames), + } + totalValue := int64(0) + + aggregatorSelects := make(AggregatorSelectSlice, 0, len(SpyAggregatorList)) + + for _, aggregator := range SpyAggregatorList { + aggregatorSelects = append(aggregatorSelects, &AggregatorSelect{ + Aggregator: aggregator, + Mapping: aggregator.Mapping, + Options: make(map[string]*AggregatorOption), + }) + } + + for scanner.Scan() { + + line := strings.TrimSpace(scanner.Text()) + if len(line) == 0 { + continue + } + + stacks := strings.Split(line, ";") + if len(stacks) == 0 { + continue + } + + lastStack := strings.TrimSpace(stacks[len(stacks)-1]) + if !stackTraceRegExp.MatchString(lastStack) { + logtoolkit.Warnf("The last stacktrace not match with the regexp [%s], the stacktrace [%s]", stackTraceRegExp.String(), lastStack) + continue + } + blankIdx := strings.LastIndexByte(lastStack, ' ') + if blankIdx < 0 { + logtoolkit.Warnf("Can not find any blank from [%s]", lastStack) + continue + } + + sampleCount, err := strconv.ParseInt(lastStack[blankIdx+1:], 10, 64) + if err != nil { + logtoolkit.Warnf("Can not resolve sample count from [%s]", lastStack) + continue + } + totalValue += sampleCount + + currentFrame := rootFrame + threadName := "" + + for idx, stack := range stacks { + stack = strings.TrimSpace(stack) + matches := stackTraceRegExp.FindStringSubmatch(stack) + if len(matches) != 4 { + if processRegExp.MatchString(stack) { + continue + } else if threadRexExp.MatchString(stack) { + threadName = stack + continue + } else { + return nil, nil, fmt.Errorf("resolve stacktrace from profiling file fail") + } + } + + funcName, codeFile, lineNoStr := matches[1], matches[2], matches[3] + + if codeFile == "" { + codeFile = "" + } + + var lineNo int64 = -1 + if lineNoStr != "" { + lineNo, _ = strconv.ParseInt(lineNoStr, 10, 64) + } + + funcIdentifier := fmt.Sprintf("%s###%s###%s###%d", threadName, codeFile, funcName, lineNo) + + if idx == len(stacks)-1 { + + for _, aggregatorSelect := 
range aggregatorSelects { + + var identifier string + var displayStr string + var mappingValues []string + + switch aggregatorSelect.Aggregator { + case Function: + identifier = fmt.Sprintf("%s###%s", codeFile, funcName) + displayStr = GetSpyPrintStr(funcName, codeFile) + mappingValues = []string{funcName} + case FunctionLine: + identifier = fmt.Sprintf("%s###%s###%d", codeFile, funcName, lineNo) + displayStr = fmt.Sprintf("%s(%s:L#%d)", funcName, filepathtoolkit.BaseName(codeFile), lineNo) + mappingValues = []string{funcName, fmt.Sprintf("%d", lineNo)} + case Directory: + identifier = filepathtoolkit.DirName(codeFile) + displayStr = identifier + mappingValues = []string{identifier} + case File: + identifier = codeFile + displayStr = codeFile + mappingValues = []string{codeFile} + case ThreadName: + identifier = threadName + displayStr = threadName + mappingValues = []string{threadName} + } + + if _, ok := aggregatorSelect.Options[identifier]; ok { + aggregatorSelect.Options[identifier].Value += sampleCount + } else { + aggregatorSelect.Options[identifier] = &AggregatorOption{ + Title: displayStr, + Value: sampleCount, + Unit: quantity.CountUnit, + MappingValues: mappingValues, + } + } + } + } + + subFrame, ok := currentFrame.SubFrames[funcIdentifier] + + if ok { + subFrame.Value += sampleCount + } else { + subFrame = &pprof.Frame{ + Value: sampleCount, + Unit: quantity.CountUnit, + Function: funcName, + Line: lineNo, + File: codeFile, + Directory: filepathtoolkit.DirName(codeFile), + ThreadID: "", + ThreadName: threadName, + Package: "", + PrintString: GetSpyPrintStr(funcName, codeFile), + SubFrames: make(pprof.SubFrames), + } + currentFrame.SubFrames[funcIdentifier] = subFrame + } + + currentFrame = subFrame + } + } + + rootFrame.Value = totalValue + rootFrame.Unit = quantity.CountUnit + + parsetoolkit.CalcPercentAndQuantity(rootFrame, totalValue) + aggregatorSelects.CalcPercentAndQuantity(totalValue) + return rootFrame, aggregatorSelects, nil +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/display.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/display.go new file mode 100644 index 0000000000..09165a881c --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/display.go @@ -0,0 +1,29 @@ +package parsing + +import ( + "github.com/GuanceCloud/cliutils/pprofparser/domain/events" +) + +type DisplayCtl interface { + ShowInTrace(e events.Type) bool + ShowInProfile(e events.Type) bool +} + +type DDTrace struct{} + +func (D DDTrace) ShowInTrace(e events.Type) bool { + return e.GetShowPlaces()&events.ShowInTrace > 0 +} + +func (D DDTrace) ShowInProfile(e events.Type) bool { + return e.GetShowPlaces()&events.ShowInProfile > 0 +} + +type PyroscopeNodejs struct{} + +func (p *PyroscopeNodejs) ShowInTrace(_ events.Type) bool { + return false +} +func (p *PyroscopeNodejs) ShowInProfile(_ events.Type) bool { + return true +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/metadata.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/metadata.go new file mode 100644 index 0000000000..038e7bf9e8 --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/metadata.go @@ -0,0 +1,66 @@ +package parsing + +import ( + "encoding/json" + "fmt" + "github.com/GuanceCloud/cliutils/pprofparser/domain/events" + "github.com/GuanceCloud/cliutils/pprofparser/domain/parameter" + "github.com/GuanceCloud/cliutils/pprofparser/service/storage" + 
"github.com/GuanceCloud/cliutils/pprofparser/tools/filepathtoolkit" + "os" + "strings" +) + +const ( + FlameGraph Format = "flamegraph" // see https://github.com/brendangregg/FlameGraph + //Deprecated use Collapsed instead + RawFlameGraph Format = "rawflamegraph" // flamegraph collapse + Collapsed Format = "collapse" // flamegraph collapse format see https://github.com/brendangregg/FlameGraph/blob/master/stackcollapse.pl + SpeedScope Format = "speedscope" // see https://github.com/jlfwong/speedscope + JFR Format = "jfr" // see https://github.com/openjdk/jmc#core-api-example + PPROF Format = "pprof" // see https://github.com/google/pprof/blob/main/proto/profile.proto +) + +type Profiler string + +const ( + Unknown Profiler = "unknown" + DDtrace = "ddtrace" + AsyncProfiler = "async-profiler" + PySpy + Pyroscope = "pyroscope" +) + +type Format string + +type MetaData struct { + Format Format `json:"format"` + Profiler Profiler `json:"profiler"` + Attachments []string `json:"attachments"` +} + +func ReadMetaDataFile(f string) (*MetaData, error) { + if !filepathtoolkit.FileExists(f) && !strings.HasSuffix(f, ".json") { + f += ".json" + } + data, err := os.ReadFile(f) + if err != nil { + return nil, fmt.Errorf("open metadata file fail: %w", err) + } + m := new(MetaData) + if err := json.Unmarshal(data, m); err != nil { + return nil, fmt.Errorf("metadata json unmarshal fail: %w", err) + } + return m, nil +} + +func ReadMetaData(prof *parameter.Profile, workspaceUUID string) (*MetaData, error) { + startNanos, err := prof.StartTime() + if err != nil { + return nil, fmt.Errorf("resolve Profile start timestamp fail: %w", err) + } + + file := storage.DefaultDiskStorage.GetProfilePath(workspaceUUID, prof.ProfileID, startNanos, events.DefaultMetaFileName) + + return ReadMetaDataFile(file) +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/parser.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/parser.go new file mode 100644 index 0000000000..b471aeee3f --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/parser.go @@ -0,0 +1,52 @@ +package parsing + +import ( + "fmt" + "github.com/GuanceCloud/cliutils/pprofparser/domain/events" + "github.com/GuanceCloud/cliutils/pprofparser/domain/languages" + "github.com/GuanceCloud/cliutils/pprofparser/domain/parameter" + "github.com/GuanceCloud/cliutils/pprofparser/domain/pprof" + "github.com/GuanceCloud/cliutils/pprofparser/domain/tracing" + "github.com/GuanceCloud/cliutils/pprofparser/tools/logtoolkit" +) + +type Parser interface { + Summary() (map[events.Type]*EventSummary, int64, error) + ResolveFlameGraph(eventType events.Type) (*pprof.Frame, AggregatorSelectSlice, error) +} + +type SummaryValueType = pprof.SummaryValueType +type EventSummary = pprof.EventSummary +type SummaryCollection = pprof.SummaryCollection + +func GetSummary(param parameter.SummaryParam, filterBySpan bool, spanIDSet *tracing.SpanIDSet) (map[events.Type]*EventSummary, int64, error) { + + isCollapseProfile := false + meta, err := ReadMetaData(param.Profiles[0], param.WorkspaceUUID) + if err != nil { + logtoolkit.Warnf("unable to read profile metadata: %s", err) + } else { + if meta.Format == RawFlameGraph || meta.Format == Collapsed { + isCollapseProfile = true + } + } + + if isCollapseProfile { + return NewCollapse(param.WorkspaceUUID, param.Profiles, filterBySpan, spanIDSet).Summary() + } + + lang, err := parameter.VerifyLanguage(param.Profiles) + if err != nil { + return nil, 0, fmt.Errorf("invalid 
language: %w", err) + } + var ctl DisplayCtl + if meta != nil && meta.Profiler == Pyroscope && lang == languages.NodeJS { + ctl = new(PyroscopeNodejs) + } else { + ctl = new(DDTrace) + } + + parser := NewPProfParser(param.From, param.WorkspaceUUID, param.Profiles, + filterBySpan, ¶m.Span, spanIDSet, ctl) + return parser.Summary() +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/pprof.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/pprof.go new file mode 100644 index 0000000000..d70a16ca61 --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/pprof.go @@ -0,0 +1,527 @@ +package parsing + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "io/fs" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/GuanceCloud/cliutils/pprofparser/domain/events" + "github.com/GuanceCloud/cliutils/pprofparser/domain/languages" + "github.com/GuanceCloud/cliutils/pprofparser/domain/parameter" + "github.com/GuanceCloud/cliutils/pprofparser/domain/pprof" + "github.com/GuanceCloud/cliutils/pprofparser/domain/quantity" + "github.com/GuanceCloud/cliutils/pprofparser/domain/tracing" + "github.com/GuanceCloud/cliutils/pprofparser/service/storage" + "github.com/GuanceCloud/cliutils/pprofparser/tools/logtoolkit" + "github.com/GuanceCloud/cliutils/pprofparser/tools/parsetoolkit" + "github.com/google/pprof/profile" + "github.com/pierrec/lz4/v4" +) + +const ( + LabelExceptionType = "exception type" + LabelThreadID = "thread id" + LabelThreadNativeID = "thread native id" + LabelThreadName = "thread name" + LabelSpanID = "span id" + LabelLocalRootSpanID = "local root span id" +) + +var ( + ZIPMagic = []byte{0x50, 0x4b, 3, 4} + LZ4Magic = []byte{4, 34, 77, 24} + GZIPMagic = []byte{31, 139} +) + +var lineNoRegExp = regexp.MustCompile(`^\d+$`) + +type PProf struct { + from string + workspaceUUID string + profiles []*parameter.Profile + filterBySpan bool + span *parameter.Span + spanIDSet *tracing.SpanIDSet + DisplayCtl +} + +type Decompressor struct { + io.Reader + r io.Reader +} + +func NewDecompressor(r io.Reader) io.ReadCloser { + bufReader := bufio.NewReader(r) + + magics, err := bufReader.Peek(4) + if err != nil { + return &Decompressor{ + r: r, + Reader: bufReader, + } + } + + if bytes.Compare(LZ4Magic, magics) == 0 { + return &Decompressor{ + r: r, + Reader: lz4.NewReader(bufReader), + } + } + + return &Decompressor{ + r: r, + Reader: bufReader, + } +} + +func (d *Decompressor) Close() error { + var err error + if rc, ok := d.Reader.(io.Closer); ok { + if e := rc.Close(); e != nil { + err = e + } + } + + if rc, ok := d.r.(io.Closer); ok { + if e := rc.Close(); e != nil { + err = e + } + } + return err +} + +func NewPProfParser( + from string, + workspaceUUID string, + profiles []*parameter.Profile, + filterBySpan bool, + span *parameter.Span, + spanIDSet *tracing.SpanIDSet, + ctl DisplayCtl, +) *PProf { + return &PProf{ + from: from, + workspaceUUID: workspaceUUID, + profiles: profiles, + filterBySpan: filterBySpan, + span: span, + spanIDSet: spanIDSet, + DisplayCtl: ctl, + } +} + +func isGlobPattern(pattern string) bool { + return strings.ContainsAny(pattern, "?*") +} + +func (p *PProf) mergePProf(filename string) (*profile.Profile, error) { + if len(p.profiles) == 0 { + return nil, fmt.Errorf("empty profiles") + } + + client, err := storage.GetStorage(storage.LocalDisk) + if err != nil { + return nil, fmt.Errorf("init oss client err: %w", err) + } + + filenames := []string{filename} + + if 
diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/pprof.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/pprof.go
new file mode 100644
index 0000000000..d70a16ca61
--- /dev/null
+++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/pprof.go
@@ -0,0 +1,527 @@
+package parsing
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/GuanceCloud/cliutils/pprofparser/domain/events"
+	"github.com/GuanceCloud/cliutils/pprofparser/domain/languages"
+	"github.com/GuanceCloud/cliutils/pprofparser/domain/parameter"
+	"github.com/GuanceCloud/cliutils/pprofparser/domain/pprof"
+	"github.com/GuanceCloud/cliutils/pprofparser/domain/quantity"
+	"github.com/GuanceCloud/cliutils/pprofparser/domain/tracing"
+	"github.com/GuanceCloud/cliutils/pprofparser/service/storage"
+	"github.com/GuanceCloud/cliutils/pprofparser/tools/logtoolkit"
+	"github.com/GuanceCloud/cliutils/pprofparser/tools/parsetoolkit"
+	"github.com/google/pprof/profile"
+	"github.com/pierrec/lz4/v4"
+)
+
+const (
+	LabelExceptionType   = "exception type"
+	LabelThreadID        = "thread id"
+	LabelThreadNativeID  = "thread native id"
+	LabelThreadName      = "thread name"
+	LabelSpanID          = "span id"
+	LabelLocalRootSpanID = "local root span id"
+)
+
+var (
+	ZIPMagic  = []byte{0x50, 0x4b, 3, 4}
+	LZ4Magic  = []byte{4, 34, 77, 24}
+	GZIPMagic = []byte{31, 139}
+)
+
+var lineNoRegExp = regexp.MustCompile(`^\d+$`)
+
+type PProf struct {
+	from          string
+	workspaceUUID string
+	profiles      []*parameter.Profile
+	filterBySpan  bool
+	span          *parameter.Span
+	spanIDSet     *tracing.SpanIDSet
+	DisplayCtl
+}
+
+type Decompressor struct {
+	io.Reader
+	r io.Reader
+}
+
+// NewDecompressor sniffs the stream's magic bytes and transparently wraps
+// LZ4-compressed profiles; other streams pass through unchanged.
+func NewDecompressor(r io.Reader) io.ReadCloser {
+	bufReader := bufio.NewReader(r)
+
+	magics, err := bufReader.Peek(4)
+	if err != nil {
+		return &Decompressor{
+			r:      r,
+			Reader: bufReader,
+		}
+	}
+
+	if bytes.Equal(LZ4Magic, magics) {
+		return &Decompressor{
+			r:      r,
+			Reader: lz4.NewReader(bufReader),
+		}
+	}
+
+	return &Decompressor{
+		r:      r,
+		Reader: bufReader,
+	}
+}
+
+func (d *Decompressor) Close() error {
+	var err error
+	if rc, ok := d.Reader.(io.Closer); ok {
+		if e := rc.Close(); e != nil {
+			err = e
+		}
+	}
+
+	if rc, ok := d.r.(io.Closer); ok {
+		if e := rc.Close(); e != nil {
+			err = e
+		}
+	}
+	return err
+}
+
+func NewPProfParser(
+	from string,
+	workspaceUUID string,
+	profiles []*parameter.Profile,
+	filterBySpan bool,
+	span *parameter.Span,
+	spanIDSet *tracing.SpanIDSet,
+	ctl DisplayCtl,
+) *PProf {
+	return &PProf{
+		from:          from,
+		workspaceUUID: workspaceUUID,
+		profiles:      profiles,
+		filterBySpan:  filterBySpan,
+		span:          span,
+		spanIDSet:     spanIDSet,
+		DisplayCtl:    ctl,
+	}
+}
+
+func isGlobPattern(pattern string) bool {
+	return strings.ContainsAny(pattern, "?*")
+}
+
+func (p *PProf) mergePProf(filename string) (*profile.Profile, error) {
+	if len(p.profiles) == 0 {
+		return nil, fmt.Errorf("empty profiles")
+	}
+
+	client, err := storage.GetStorage(storage.LocalDisk)
+	if err != nil {
+		return nil, fmt.Errorf("init oss client err: %w", err)
+	}
+
+	filenames := []string{filename}
+
+	if strings.ContainsRune(filename, '|') {
+		filenames = strings.Split(filename, "|")
+	}
+
+	profSrc := make([]*profile.Profile, 0, len(p.profiles))
+
+	for _, prof := range p.profiles {
+		startTime, err := prof.StartTime()
+		if err != nil {
+			return nil, fmt.Errorf("cast ProfileStart to int64 fail: %w", err)
+		}
+
+		profilePath := ""
+
+	FilenameLoop:
+		for _, name := range filenames {
+			if name == "" {
+				continue
+			}
+
+			pattern := client.GetProfilePath(p.workspaceUUID, prof.ProfileID, startTime, name)
+			if ok, err := client.IsFileExists(pattern); ok && err == nil {
+				profilePath = pattern
+				break
+			}
+
+			// check whether the filename is a glob pattern
+			if isGlobPattern(name) {
+				matches, err := filepath.Glob(pattern)
+				if err != nil {
+					return nil, fmt.Errorf("illegal glob pattern [%s]; %w", pattern, err)
+				}
+
+				for _, match := range matches {
+					baseName := filepath.Base(match)
+					if baseName != events.DefaultMetaFileName && baseName != events.DefaultMetaFileNameWithExt {
+						profilePath = match
+						break FilenameLoop
+					}
+				}
+			}
+		}
+
+		if profilePath == "" {
+			return nil, fmt.Errorf("no available profile file found: [%s]: %w", filename, fs.ErrNotExist)
+		}
+
+		reader, err := client.ReadFile(profilePath)
+
+		if err != nil {
+			if errors.Is(err, fs.ErrNotExist) {
+				return nil, fmt.Errorf("profile file [%s] does not exist: %w", profilePath, err)
+			}
+			if ok, err := client.IsFileExists(profilePath); err == nil && !ok {
+				return nil, fmt.Errorf("profile file [%s] does not exist: %w", profilePath, fs.ErrNotExist)
+			}
+			return nil, fmt.Errorf("unable to read profile file [%s]: %w", profilePath, err)
+		}
+
+		parsedPProf, err := parseAndClose(NewDecompressor(reader))
+		if err != nil {
+			logtoolkit.Errorf("parse profile [path:%s] fail: %s", profilePath, err)
+			continue
+		}
+
+		profSrc = append(profSrc, parsedPProf)
+	}
+
+	if len(profSrc) == 0 {
+		return nil, fmt.Errorf("no available profile")
+	}
+
+	mergedPProf, err := profile.Merge(profSrc)
+	if err != nil {
+		return nil, fmt.Errorf("merge profile fail: %w", err)
+	}
+	if err := mergedPProf.CheckValid(); err != nil {
+		return nil, fmt.Errorf("invalid merged profile file: %w", err)
+	}
+	return mergedPProf, nil
+}
+
+func (p *PProf) Summary() (map[events.Type]*EventSummary, int64, error) {
+	lang, err := parameter.VerifyLanguage(p.profiles)
+	if err != nil {
+		return nil, 0, fmt.Errorf("unable to resolve language: %w", err)
+	}
+
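+	// An event type's samples live in per-language pprof files; resolve the
+	// filename -> sample-type mapping first, then merge and total each file.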
getFileSampleTypes(lang)
+	if len(fileSampleTypes) == 0 {
+		return nil, 0, fmt.Errorf("getFileSampleTypes: not supported language [%s]", lang)
+	}
+
+	summariesTypedMap := make(map[events.Type]*EventSummary)
+	var totalDurationNanos int64 = 0
+
+	filesCount := 0
+	for filename, sampleTypes := range fileSampleTypes {
+
+		mergedPProf, err := p.mergePProf(filename)
+		if err != nil {
+			if errors.Is(err, fs.ErrNotExist) {
+				continue
+			}
+			return nil, 0, fmt.Errorf("merge pprof: %w", err)
+		}
+		filesCount++
+
+		if mergedPProf.DurationNanos > totalDurationNanos {
+			totalDurationNanos = mergedPProf.DurationNanos
+		}
+
+		// entries in pprof.SampleType correspond one-to-one with pprof.Sample[*].Value
+		summaryMap := make(map[int]*EventSummary)
+
+		for i, st := range mergedPProf.SampleType {
+
+			if et, ok := sampleTypes[st.Type]; ok {
+
+				if p.from == parameter.FromTrace && !p.ShowInTrace(et) {
+					continue
+				}
+
+				if p.from == parameter.FromProfile && !p.ShowInProfile(et) {
+					continue
+				}
+
+				unit, err := quantity.ParseUnit(et.GetQuantityKind(), st.Unit)
+				if err != nil {
+					return nil, 0, fmt.Errorf("parseUnit error: %w", err)
+				}
+
+				summaryMap[i] = &EventSummary{
+					SummaryValueType: &SummaryValueType{
+						Type: et,
+						Unit: unit,
+					},
+					Value: 0,
+				}
+			}
+		}
+
+		for _, sample := range mergedPProf.Sample {
+			// apply span filtering when requested
+			if p.filterBySpan {
+				spanID := parsetoolkit.GetStringLabel(sample, LabelSpanID)
+				rootSpanID := parsetoolkit.GetStringLabel(sample, LabelLocalRootSpanID)
+				// drop samples that carry no span ID
+				if spanID == "" {
+					continue
+				}
+				if p.spanIDSet != nil {
+					if p.spanIDSet == tracing.AllTraceSpanSet {
+						if rootSpanID != p.span.SpanID {
+							continue
+						}
+					} else if !p.spanIDSet.Contains(spanID) {
+						continue
+					}
+				}
+			}
+			for i, v := range sample.Value {
+				if _, ok := summaryMap[i]; ok {
+					summaryMap[i].Value += v
+				}
+			}
+		}
+
+		for _, summary := range summaryMap {
+			summariesTypedMap[summary.Type] = summary
+		}
+	}
+
+	if filesCount == 0 {
+		sb := &strings.Builder{}
+		for i, pro := range p.profiles {
+			if i > 0 {
+				sb.WriteByte(';')
+			}
+			sb.WriteString(pro.ProfileID)
+		}
+		return nil, 0, fmt.Errorf("no corresponding profiling file exists, workspaceUUID [%s], profileID [%s]", p.workspaceUUID, sb.String())
+	}
+
+	return summariesTypedMap, totalDurationNanos, nil
+}
+
+// parseAndClose parses a profile from a readable stream and closes the reader when done.
+func parseAndClose(r io.Reader) (*profile.Profile, error) {
+	if r == nil {
+		return nil, fmt.Errorf("nil reader")
+	}
+
+	if closable, ok := r.(io.Closer); ok {
+		defer closable.Close()
+	}
+
+	goPprof, err := profile.Parse(r)
+
+	if err != nil {
+		return nil, fmt.Errorf("parse pprof err: %w", err)
+	}
+
+	return goPprof, nil
+}
+
+// ResolveFlameGraph builds the flame-graph frame tree and the aggregator
+// selections for the given event type.
+func (p *PProf) ResolveFlameGraph(eventType events.Type) (*pprof.Frame, AggregatorSelectSlice, error) {
+
+	lang, err := parameter.VerifyLanguage(p.profiles)
+	if err != nil {
+		return nil, nil, fmt.Errorf("VerifyLanguage fail: %s", err)
+	}
+
+	sampleFile, err := GetFileByEvent(lang, eventType)
+	if err != nil {
+		return nil, nil, fmt.Errorf("GetFileByEvent: %s", err)
+	}
+
+	mergedPProf, err := p.mergePProf(sampleFile.Filename)
+	if err != nil {
+		return nil, nil, fmt.Errorf("merge pprof: %w", err)
+	}
+
+	valueIdx, valueUnit, err := p.getIdxOfTypeAndUnit(sampleFile.SampleType, mergedPProf)
+	if err != nil {
+		return nil, nil, fmt.Errorf("render frame: %w", err)
+	}
+
+	unit, err := 
quantity.ParseUnit(eventType.GetQuantityKind(), valueUnit)
+	if err != nil {
+		return nil, nil, fmt.Errorf("ParseUnit fail: %w", err)
+	}
+
+	rootFrame := &pprof.Frame{
+		SubFrames: make(pprof.SubFrames),
+	}
+
+	aggregatorList := PythonAggregatorList
+	switch lang {
+	case languages.GoLang:
+		aggregatorList = GoAggregatorList
+	case languages.NodeJS:
+		aggregatorList = PyroscopeNodeJSAggregatorList
+	case languages.DotNet:
+		aggregatorList = DDTraceDotnetAggregatorList
+	case languages.PHP:
+		aggregatorList = DDTracePHPAggregatorList
+	}
+
+	aggregatorSelectMap := make(AggregatorSelectSlice, 0, len(aggregatorList))
+
+	for _, aggregator := range aggregatorList {
+		aggregatorSelectMap = append(aggregatorSelectMap, &AggregatorSelect{
+			Aggregator: aggregator,
+			Mapping:    aggregator.Mapping,
+			Options:    make(map[string]*AggregatorOption),
+		})
+	}
+
+	totalValue := int64(0)
+	for _, smp := range mergedPProf.Sample {
+		if smp.Value[valueIdx] == 0 {
+			// skip samples whose value is zero
+			continue
+		}
+
+		// span filtering: only samples that carry a span ID are displayed
+		if p.filterBySpan {
+			spanID := parsetoolkit.GetStringLabel(smp, LabelSpanID)
+			rootSpanID := parsetoolkit.GetStringLabel(smp, LabelLocalRootSpanID)
+			if spanID == "" {
+				continue
+			}
+			if p.spanIDSet == tracing.AllTraceSpanSet {
+				if rootSpanID != p.span.SpanID {
+					continue
+				}
+			} else if p.spanIDSet != nil {
+				if !p.spanIDSet.Contains(spanID) {
+					continue
+				}
+			}
+		}
+
+		currentFrame := rootFrame
+
+		totalValue += smp.Value[valueIdx]
+
+		for _, aggregatorSelect := range aggregatorSelectMap {
+			aggregator := aggregatorSelect.Aggregator
+			identifier := aggregator.GetIdentifier(lang, smp, false)
+
+			if _, ok := aggregatorSelect.Options[identifier]; ok {
+				aggregatorSelect.Options[identifier].Value += smp.Value[valueIdx]
+			} else {
+				mappingValues := make([]string, 0, len(aggregator.GetMappingFuncs))
+				for _, mFunc := range aggregator.GetMappingFuncs {
+					mappingValues = append(mappingValues, mFunc(lang, smp, false))
+				}
+				aggregatorSelect.Options[identifier] = &AggregatorOption{
+					Title:         aggregator.GetDisplayStr(lang, smp, false),
+					Value:         smp.Value[valueIdx],
+					Unit:          unit,
+					MappingValues: mappingValues,
+				}
+			}
+		}
+
+		for i := len(smp.Location) - 1; i >= 0; i-- {
+			location := smp.Location[i]
+			line := location.Line[len(location.Line)-1]
+
+			// Frames are keyed by location ID: the deepest frame must match
+			// exactly, while shallower frames may merge even if line numbers differ.
+			funcIdentifier := strconv.FormatUint(location.ID, 10)
+
+			subFrame, ok := currentFrame.SubFrames[funcIdentifier]
+
+			if ok {
+				subFrame.Value += smp.Value[valueIdx]
+			} else {
+				subFrame = &pprof.Frame{
+					Value:       smp.Value[valueIdx],
+					Unit:        unit,
+					Function:    getFuncNameByLine(lang, line),
+					Line:        getLineByLine(lang, line),
+					File:        getFileByLine(lang, line),
+					Directory:   getDirectoryByLine(lang, line),
+					ThreadID:    getThreadIDBySample(smp),
+					ThreadName:  getThreadNameBySample(smp),
+					Class:       getClassByLine(lang, line),
+					Namespace:   getNamespaceByLine(lang, line),
+					Assembly:    getAssemblyByLine(lang, line),
+					Package:     getPackageNameByLine(lang, line),
+					PrintString: GetPrintStrByLine(lang, line),
+					SubFrames:   make(pprof.SubFrames),
+				}
+				currentFrame.SubFrames[funcIdentifier] = subFrame
+			}
+
+			currentFrame = subFrame
+		}
+	}
+
+	rootFrame.Value = totalValue
+	rootFrame.Unit = unit
+
+	
parsetoolkit.CalcPercentAndQuantity(rootFrame, totalValue) + aggregatorSelectMap.CalcPercentAndQuantity(totalValue) + + return rootFrame, aggregatorSelectMap, nil +} + +func (p *PProf) getIdxOfTypeAndUnit(typeName string, pprof *profile.Profile) (int, string, error) { + for idx, st := range pprof.SampleType { + if st.Type == typeName { + return idx, st.Unit, nil + } + } + return 0, "", fmt.Errorf("the pprof does not contain the event type: %s", typeName) +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/sampletype.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/sampletype.go new file mode 100644 index 0000000000..bbf543aaae --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/parsing/sampletype.go @@ -0,0 +1,138 @@ +package parsing + +import ( + "fmt" + "github.com/GuanceCloud/cliutils/pprofparser/domain/events" + "github.com/GuanceCloud/cliutils/pprofparser/domain/languages" +) + +type SampleFile struct { + Filename string + SampleType string +} + +var typeSampleFileMap = generateDictByEvent(pprofTypeMaps) + +func generateDictByEvent(pprofTypeMaps map[languages.Lang]fileSampleTypesMap) map[languages.Lang]map[events.Type]*SampleFile { + result := make(map[languages.Lang]map[events.Type]*SampleFile, len(pprofTypeMaps)) + + for lang, sampleTypes := range pprofTypeMaps { + if _, ok := result[lang]; !ok { + result[lang] = make(map[events.Type]*SampleFile, len(sampleTypes)) + } + + for filename, eventsMap := range sampleTypes { + for typeName, typeID := range eventsMap { + result[lang][typeID] = &SampleFile{Filename: filename, SampleType: typeName} + } + } + } + + return result +} + +var pprofTypeMaps = map[languages.Lang]fileSampleTypesMap{ + languages.Python: pyPprofTypeMaps, + languages.GoLang: goPprofTypeMaps, + languages.NodeJS: nodejsEventMaps, + languages.DotNet: dotnetPProfEventMaps, + languages.PHP: phpEventMaps, +} + +type fileSampleTypesMap map[string]map[string]events.Type + +var pyPprofTypeMaps = fileSampleTypesMap{ + "prof|auto|*.pprof": { + // "cpu-samples": events.CpuSamples, + "cpu-time": events.CpuTime, + "wall-time": events.WallTime, + "exception-samples": events.ThrownExceptions, + "lock-acquire": events.LockAcquires, + "lock-acquire-wait": events.LockWaitTime, + "lock-release": events.LockReleases, + "lock-release-hold": events.LockedTime, + "alloc-samples": events.Allocations, + "alloc-space": events.AllocatedMemory, + "heap-space": events.HeapLiveSize, + }, +} +var dotnetPProfEventMaps = fileSampleTypesMap{ + "prof|auto|*.pprof": { + "cpu": events.CpuTime, + "exception": events.ThrownExceptions, + "alloc-samples": events.Allocations, + "alloc-size": events.AllocatedMemory, + "inuse-objects": events.HeapLiveObjects, + "inuse-space": events.HeapLiveSize, + "wall": events.WallTime, + "lock-count": events.LockAcquires, + "lock-time": events.LockWaitTime, + }, +} + +var phpEventMaps = fileSampleTypesMap{ + "prof|auto|*.pprof": { + "cpu-time": events.CpuTime, + "sample": events.CpuSamples, + "wall-time": events.WallTime, + "alloc-samples": events.Allocations, + "alloc-size": events.AllocatedMemory, + }, +} + +var nodejsEventMaps = fileSampleTypesMap{ + "cpu.pprof": { + "": events.CpuSamples, + }, + + "inuse_objects.pprof": { + "": events.HeapLiveObjects, + }, + + "inuse_space.pprof": { + "": events.HeapLiveSize, + }, +} + +var goPprofTypeMaps = fileSampleTypesMap{ + "cpu.pprof|*cpu.pprof*": { + "cpu": events.CpuTime, + }, + "delta-heap.pprof|*delta-heap.pprof*": { + "alloc_objects": 
events.Allocations, + "alloc_space": events.AllocatedMemory, + "inuse_objects": events.HeapLiveObjects, + "inuse_space": events.HeapLiveSize, + }, + "delta-mutex.pprof|*delta-mutex.pprof*": { + "delay": events.Mutex, + }, + "delta-block.pprof|*delta-block.pprof*": { + "delay": events.Block, + }, + "goroutines.pprof|*goroutines.pprof*": { + "goroutine": events.Goroutines, + }, +} + +func getFileSampleTypes(lang languages.Lang) fileSampleTypesMap { + if typesMap, ok := pprofTypeMaps[lang]; ok { + return typesMap + } + return nil +} + +func GetFileByEvent(lang languages.Lang, typ events.Type) (*SampleFile, error) { + typeFileMap, ok := typeSampleFileMap[lang] + + if !ok { + return nil, fmt.Errorf("not supported lang: %s", lang) + } + + sampleFile, ok := typeFileMap[typ] + if !ok { + return nil, fmt.Errorf("not supported event type: %s", typ) + } + + return sampleFile, nil +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/storage/disk.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/storage/disk.go new file mode 100644 index 0000000000..2e5f2246ae --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/storage/disk.go @@ -0,0 +1,61 @@ +package storage + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/GuanceCloud/cliutils/pprofparser/cfg" + "github.com/GuanceCloud/cliutils/pprofparser/domain/parameter" +) + +var _ Storage = &Disk{} + +type Disk struct { +} + +func (d *Disk) GetProfilePathOld(workspaceUUID string, profileID string, unixTimeNS int64, ossFilename string) string { + return filepath.Join(d.GetProfileDirOld(workspaceUUID, profileID, unixTimeNS), ossFilename) +} + +func (d *Disk) GetProfileDirOld(workspaceUUID string, profileID string, unixTimeNS int64) string { + if unixTimeNS >= parameter.MinTimestampMicro && unixTimeNS <= parameter.MaxTimestampMicro { + unixTimeNS *= 1000 + } + date := time.Unix(0, unixTimeNS).In(timeZoneCST).Format("20060102") + return filepath.Join(cfg.Cfg.Storage.Disk.ProfileDir, date, workspaceUUID, profileID) +} + +func (d *Disk) GetProfilePath(workspaceUUID string, profileID string, unixTimeNS int64, ossFilename string) string { + return d.GetProfileDir(workspaceUUID, profileID, unixTimeNS) + "/" + ossFilename +} + +func (d *Disk) GetProfileDir(workspaceUUID string, profileID string, unixTimeNS int64) string { + if unixTimeNS >= parameter.MinTimestampMicro && unixTimeNS <= parameter.MaxTimestampMicro { + unixTimeNS *= 1000 + } + date := time.Unix(0, unixTimeNS).In(timeZoneCST).Format("20060102") + return filepath.Join(cfg.Cfg.Storage.Disk.ProfileDir, date, workspaceUUID, profileID[:2], profileID) +} + +func (d *Disk) IsFileExists(path string) (bool, error) { + stat, err := os.Stat(path) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return false, nil + } + return false, fmt.Errorf("stat file [%s] err: %w", path, err) + } + return stat.Mode().IsRegular(), nil +} + +func (d *Disk) ReadFile(path string) (io.ReadCloser, error) { + f, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("open file [%s] fail: %w", path, err) + } + return f, nil +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/storage/oss.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/storage/oss.go new file mode 100644 index 0000000000..def14138b1 --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/storage/oss.go @@ -0,0 +1,90 @@ +package storage + +import ( + "fmt" + "io" + "path/filepath" + "strings" + 
"sync" + "time" + + "github.com/GuanceCloud/cliutils/pprofparser/cfg" + "github.com/GuanceCloud/cliutils/pprofparser/domain/parameter" + "github.com/aliyun/aliyun-oss-go-sdk/oss" +) + +var ( + timeZoneCST = time.FixedZone("CST", 3600*8) + ossClient *OSS + ossInitLock sync.Mutex +) + +type OSS struct { + client *oss.Client +} + +func InitOSS() (*OSS, error) { + if ossClient != nil { + return ossClient, nil + } + ossInitLock.Lock() + defer ossInitLock.Unlock() + if ossClient == nil { + client, err := oss.New(cfg.Cfg.Oss.Host, cfg.Cfg.Oss.AccessKey, + cfg.Cfg.Oss.SecretKey) + if err != nil { + return nil, fmt.Errorf("oss.New fail: %w", err) + } + ossClient = &OSS{client: client} + } + return ossClient, nil +} + +func (o *OSS) selectBucket() (*oss.Bucket, error) { + return o.client.Bucket(cfg.Cfg.Oss.ProfileBucket) +} + +func (o *OSS) IsFileExists(path string) (bool, error) { + bucket, err := o.selectBucket() + if err != nil { + return false, fmt.Errorf("call IsFileExists, path [%s]: %w", path, err) + } + return bucket.IsObjectExist(path) +} + +func (o *OSS) ReadFile(path string) (io.ReadCloser, error) { + bucket, err := o.selectBucket() + if err != nil { + return nil, fmt.Errorf("ReadFile selectBucket: %w", err) + } + return bucket.GetObject(path) +} + +func (o *OSS) GetProfileDir(workspaceUUID string, profileID string, unixTimeNS int64) string { + if unixTimeNS >= parameter.MinTimestampMicro && unixTimeNS <= parameter.MaxTimestampMicro { + unixTimeNS *= 1000 + } + + date := time.Unix(0, unixTimeNS).In(timeZoneCST).Format("20060102") + path := filepath.Join(cfg.Cfg.Oss.ProfileDir, date, workspaceUUID, profileID[:2], profileID) + return strings.ReplaceAll(path, "\\", "/") +} + +func (o *OSS) GetProfilePath(workspaceUUID string, profileID string, unixTimeNS int64, ossFilename string) string { + if unixTimeNS >= parameter.MinTimestampMicro && unixTimeNS <= parameter.MaxTimestampMicro { + unixTimeNS *= 1000 + } + + date := time.Unix(0, unixTimeNS).In(timeZoneCST).Format("20060102") + path := filepath.Join(cfg.Cfg.Oss.ProfileDir, date, workspaceUUID, profileID[:2], profileID, ossFilename) + return strings.ReplaceAll(path, "\\", "/") +} + +func (o *OSS) GetProfilePathOld(workspaceUUID string, profileID string, unixTimeNS int64, ossFilename string) string { + if unixTimeNS >= parameter.MinTimestampMicro && unixTimeNS <= parameter.MaxTimestampMicro { + unixTimeNS *= 1000 + } + date := time.Unix(0, unixTimeNS).In(timeZoneCST).Format("20060102") + path := filepath.Join(cfg.Cfg.Oss.ProfileDir, date, workspaceUUID, profileID, ossFilename) + return strings.ReplaceAll(path, "\\", "/") +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/storage/storage.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/storage/storage.go new file mode 100644 index 0000000000..59ae6f66fb --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/service/storage/storage.go @@ -0,0 +1,33 @@ +package storage + +import ( + "fmt" + "io" +) + +type Type string + +const ( + AliOSS Type = "oss" + LocalDisk Type = "disk" +) + +var DefaultDiskStorage = &Disk{} + +type Storage interface { + IsFileExists(path string) (bool, error) + GetProfilePath(workspaceUUID string, profileID string, unixTimeNS int64, ossFilename string) string + GetProfileDir(workspaceUUID string, profileID string, unixTimeNS int64) string + GetProfilePathOld(workspaceUUID string, profileID string, unixTimeNS int64, ossFilename string) string + ReadFile(path string) (io.ReadCloser, error) +} + +func GetStorage(typ 
Type) (Storage, error) {
+	switch typ {
+	case AliOSS:
+		return InitOSS()
+	case LocalDisk:
+		return DefaultDiskStorage, nil
+	}
+	return nil, fmt.Errorf("storage type [%s] not supported yet", typ)
+}
diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/filepathtoolkit/format.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/filepathtoolkit/format.go
new file mode 100644
index 0000000000..3ee033e7c9
--- /dev/null
+++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/filepathtoolkit/format.go
@@ -0,0 +1,45 @@
+package filepathtoolkit
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+var (
+	currentOsPathSep = string(os.PathSeparator)
+	otherOsPathSep   = otherOsPathSeparator()
+)
+
+func otherOsPathSeparator() string {
+	if os.PathSeparator == '/' {
+		return "\\"
+	}
+	return "/"
+}
+
+func BaseName(path string) string {
+	if !strings.Contains(path, currentOsPathSep) && strings.Contains(path, otherOsPathSep) {
+		path = strings.ReplaceAll(path, otherOsPathSep, currentOsPathSep)
+	}
+	return filepath.Base(path)
+}
+
+func DirName(path string) string {
+	dir := filepath.Dir(path)
+
+	// the path may have been produced on another OS: fall back to the other separator
+	if dir == "." && !strings.Contains(path, currentOsPathSep) {
+		idx := strings.LastIndex(path, otherOsPathSep)
+		if idx >= 0 {
+			if idx == 0 || path[idx-1] == ':' {
+				return path[:idx+1]
+			}
+			return path[:idx]
+		}
+	}
+	if dir == "." && len(path) >= 2 && path[0] == '<' && path[len(path)-1] == '>' {
+		return path
+	}
+	return dir
+}
diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/filepathtoolkit/stat.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/filepathtoolkit/stat.go
new file mode 100644
index 0000000000..ff23436355
--- /dev/null
+++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/filepathtoolkit/stat.go
@@ -0,0 +1,14 @@
+package filepathtoolkit
+
+import (
+	"errors"
+	"os"
+)
+
+func FileExists(path string) bool {
+	_, err := os.Stat(path)
+	if err != nil && errors.Is(err, os.ErrNotExist) {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/jsontoolkit/cast.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/jsontoolkit/cast.go
new file mode 100644
index 0000000000..d11ee142e8
--- /dev/null
+++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/jsontoolkit/cast.go
@@ -0,0 +1,58 @@
+package jsontoolkit
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+)
+
+func IFaceCast2Int64(x interface{}) (int64, error) {
+	if x == nil {
+		return 0, fmt.Errorf("cannot convert nil value to int64")
+	}
+
+	switch xx := x.(type) {
+	case string:
+		n, err := strconv.ParseInt(xx, 10, 64)
+		if err != nil {
+			return 0, fmt.Errorf("cannot convert string [%s] to int64: %w", xx, err)
+		}
+		return n, nil
+
+	case float64:
+		return int64(xx), nil
+	case json.Number:
+		return xx.Int64()
+	case int64:
+		return xx, nil
+	case uint64:
+		return int64(xx), nil
+	case int:
+		return int64(xx), nil
+	case uint:
+		return int64(xx), nil
+	case float32:
+		return int64(xx), nil
+	}
+
+	return 0, fmt.Errorf("cannot convert value of type %T to int64", x)
+}
+
+func IFaceCast2String(x interface{}) (string, error) {
+	if x == nil {
+		return "", fmt.Errorf("cannot convert nil value to string")
+	}
+
+	switch xx := x.(type) {
+	case string:
+		return xx, nil
+	case float64:
+		return strconv.FormatFloat(xx, 'g', -1, 64), nil
+	case json.Number:
+		return xx.String(), nil
+	case int64:
+		return strconv.FormatInt(xx, 10), nil
+	}
+
+	return "", 
fmt.Errorf("无法把interface{}类型: %T 转换为 string", x) +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/jsontoolkit/json.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/jsontoolkit/json.go new file mode 100644 index 0000000000..a5aeff3b11 --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/jsontoolkit/json.go @@ -0,0 +1,41 @@ +package jsontoolkit + +import ( + "fmt" + "net/http" + + "github.com/GuanceCloud/cliutils/pprofparser/tools/logtoolkit" + "github.com/gin-gonic/gin" +) + +type JSONResp struct { + Code int `json:"code"` + Message string `json:"message"` + Data interface{} `json:"data"` +} + +func JSONSuccess(ctx *gin.Context, data interface{}) { + ctx.JSON(http.StatusOK, &JSONResp{ + Code: 0, + Message: "success", + Data: data, + }) +} + +func JSONError(ctx *gin.Context, code int, message string) { + ctx.JSON(http.StatusOK, &JSONResp{ + Code: code, + Message: message, + Data: struct{}{}, + }) +} + +func JSONErrorf(ctx *gin.Context, code int, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + logtoolkit.Error(msg) + ctx.JSON(http.StatusOK, &JSONResp{ + Code: code, + Message: msg, + Data: struct{}{}, + }) +} diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/logtoolkit/logger.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/logtoolkit/logger.go new file mode 100644 index 0000000000..c034491541 --- /dev/null +++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/logtoolkit/logger.go @@ -0,0 +1,66 @@ +package logtoolkit + +import ( + "sync" + + "github.com/GuanceCloud/cliutils/logger" +) + +var ( + defaultLogger *logger.Logger + logInitOnce sync.Once + loggerPool = make(map[string]*logger.Logger) + loggerPoolLock = &sync.Mutex{} +) + +func Logger(name ...string) *logger.Logger { + if len(name) == 0 { + logInitOnce.Do(func() { + defaultLogger = logger.SLogger("global") + }) + return defaultLogger + } + + logName := name[0] + if logHandler, ok := loggerPool[logName]; ok { + return logHandler + } + loggerPoolLock.Lock() + defer loggerPoolLock.Unlock() + if _, ok := loggerPool[logName]; !ok { + loggerPool[logName] = logger.SLogger(logName) + } + return loggerPool[logName] +} + +func Info(args ...interface{}) { + Logger().Info(args...) +} + +func Warn(args ...interface{}) { + Logger().Warn(args...) +} + +func Error(args ...interface{}) { + Logger().Error(args...) +} + +func Fatal(args ...interface{}) { + Logger().Fatal(args...) +} + +func Infof(format string, args ...interface{}) { + Logger().Infof(format, args...) +} + +func Warnf(format string, args ...interface{}) { + Logger().Warnf(format, args...) +} + +func Errorf(format string, args ...interface{}) { + Logger().Errorf(format, args...) +} + +func Fatalf(format string, args ...interface{}) { + Logger().Fatalf(format, args...) 
+}
diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/mathtoolkit/math.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/mathtoolkit/math.go
new file mode 100644
index 0000000000..d7c416d0f3
--- /dev/null
+++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/mathtoolkit/math.go
@@ -0,0 +1,15 @@
+package mathtoolkit
+
+import "math"
+
+func Trunc(x float64) int64 {
+	return int64(math.Trunc(x))
+}
+
+func Floor(x float64) int64 {
+	return int64(math.Floor(x))
+}
+
+func Ceil(x float64) int64 {
+	return int64(math.Ceil(x))
+}
diff --git a/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/parsetoolkit/pprof.go b/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/parsetoolkit/pprof.go
new file mode 100644
index 0000000000..6df4bb9aa7
--- /dev/null
+++ b/vendor/github.com/GuanceCloud/cliutils/pprofparser/tools/parsetoolkit/pprof.go
@@ -0,0 +1,94 @@
+package parsetoolkit
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+
+	"github.com/GuanceCloud/cliutils/pprofparser/domain/pprof"
+	"github.com/GuanceCloud/cliutils/pprofparser/domain/quantity"
+	"github.com/google/pprof/profile"
+)
+
+func GetLabel(smp *profile.Sample, key string) string {
+	if labels, ok := smp.Label[key]; ok {
+		for _, label := range labels {
+			if label != "" {
+				return label
+			}
+		}
+	}
+	return ""
+}
+
+func GetStringLabel(smp *profile.Sample, key string) string {
+	if span := GetLabel(smp, key); span != "" {
+		return span
+	}
+	if span, ok := GetNumLabel(smp, key); ok {
+		return strconv.FormatUint(uint64(span), 10)
+	}
+	return ""
+}
+
+func GetNumLabel(smp *profile.Sample, key string) (int64, bool) {
+	if values, ok := smp.NumLabel[key]; ok {
+		if len(values) > 0 {
+			// find the first non-zero value
+			for _, v := range values {
+				if v != 0 {
+					return v, true
+				}
+			}
+			return values[0], true
+		}
+	}
+	return 0, false
+}
+
+// CalcPercentOfAggregator is currently a no-op placeholder.
+func CalcPercentOfAggregator() {
+
+}
+
+func CalcPercentAndQuantity(frame *pprof.Frame, total int64) {
+	if frame == nil {
+		return
+	}
+
+	if total <= 0 {
+		frame.Percent = "100"
+	} else {
+		frame.Percent = fmt.Sprintf("%.2f", float64(frame.Value)/float64(total)*100)
+	}
+
+	if frame.Unit != nil {
+		frame.Quantity = frame.Unit.Quantity(frame.Value)
+
+		// normalize to the default unit
+		if frame.Unit.Kind == quantity.Memory && frame.Unit != quantity.Byte {
+			frame.Value, _ = frame.Quantity.IntValueIn(quantity.Byte)
+			frame.Unit = quantity.Byte
+		} else if frame.Unit.Kind == quantity.Duration && frame.Unit != quantity.MicroSecond {
+			frame.Value, _ = frame.Quantity.IntValueIn(quantity.MicroSecond)
+			frame.Unit = quantity.MicroSecond
+		}
+	}
+
+	for _, subFrame := range frame.SubFrames {
+		CalcPercentAndQuantity(subFrame, total)
+	}
+}
+
+func FormatDuration(nanoseconds int64) string {
+	ms := int64(math.Round(float64(nanoseconds) / 1000_000))
+	if ms < 1000 {
+		return fmt.Sprintf("%d%s", ms, "ms")
+	}
+	if ms < 60_000 {
+		seconds := int64(math.Round(float64(ms) / 1000))
+		return fmt.Sprintf("%d%s", seconds, "s")
+	}
+
+	minutes := int64(math.Round(float64(ms) / 60_000))
+	return fmt.Sprintf("%d%s", minutes, "minute")
+}
diff --git a/vendor/github.com/GuanceCloud/zipstream/.gitignore b/vendor/github.com/GuanceCloud/zipstream/.gitignore
new file mode 100644
index 0000000000..ee770a66d6
--- /dev/null
+++ b/vendor/github.com/GuanceCloud/zipstream/.gitignore
@@ -0,0 +1,17 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# 
Dependency directories (remove the comment below to include it) +# vendor/ + +.idea/ diff --git a/vendor/github.com/GuanceCloud/zipstream/LICENSE b/vendor/github.com/GuanceCloud/zipstream/LICENSE new file mode 100644 index 0000000000..f288702d2f --- /dev/null +++ b/vendor/github.com/GuanceCloud/zipstream/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/vendor/github.com/GuanceCloud/zipstream/README.md b/vendor/github.com/GuanceCloud/zipstream/README.md
new file mode 100644
index 0000000000..7f7964e64c
--- /dev/null
+++ b/vendor/github.com/GuanceCloud/zipstream/README.md
@@ -0,0 +1,78 @@
+# zipstream
+Package zipstream is an on-the-fly stream extractor/reader for ZIP archives, similar to Java's `java.util.zip.ZipInputStream`. There is no need to provide an `io.ReaderAt` or the total archive size up front; a single `io.Reader` is all that is required.
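+
+Compared with the standard library, which needs random access to the archive, zipstream can consume entries as the bytes arrive. A minimal sketch of the difference (`archive.zip` is only a placeholder path):
+
+```go
+package main
+
+import (
+	"archive/zip"
+	"bytes"
+	"os"
+
+	"github.com/GuanceCloud/zipstream"
+)
+
+func main() {
+	// archive/zip requires an io.ReaderAt plus the total size, so the
+	// whole archive must already be on disk or in memory.
+	buf, _ := os.ReadFile("archive.zip") // placeholder path
+	_, _ = zip.NewReader(bytes.NewReader(buf), int64(len(buf)))
+
+	// zipstream needs only a forward-only io.Reader, so entries can be
+	// consumed while the archive is still arriving (socket, pipe, stdin...).
+	_ = zipstream.NewReader(os.Stdin)
+}
+```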
+
+## Implementation
+Most of the code in this package is copied directly from the Go standard library package [archive/zip](https://pkg.go.dev/archive/zip); the .ZIP archive format specification is available [here](https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT).
+
+## Usage
+> go get github.com/GuanceCloud/zipstream
+
+## Examples
+
+```go
+package main
+
+import (
+	"io"
+	"log"
+	"net/http"
+
+	"github.com/GuanceCloud/zipstream"
+)
+
+func main() {
+
+	resp, err := http.Get("https://github.com/golang/go/archive/refs/tags/go1.16.10.zip")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	zr := zipstream.NewReader(resp.Body)
+
+	for {
+		e, err := zr.GetNextEntry()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			log.Fatalf("unable to get next entry: %s", err)
+		}
+
+		log.Println("entry name: ", e.Name)
+		log.Println("entry comment: ", e.Comment)
+		log.Println("entry reader version: ", e.ReaderVersion)
+		log.Println("entry modify time: ", e.Modified)
+		log.Println("entry compressed size: ", e.CompressedSize64)
+		log.Println("entry uncompressed size: ", e.UncompressedSize64)
+		log.Println("entry is a dir: ", e.IsDir())
+
+		if !e.IsDir() {
+			rc, err := e.Open()
+			if err != nil {
+				log.Fatalf("unable to open zip file: %s", err)
+			}
+			content, err := io.ReadAll(rc)
+			if err != nil {
+				log.Fatalf("failed to read zip file content: %s", err)
+			}
+
+			log.Println("file length:", len(content))
+
+			if uint64(len(content)) != e.UncompressedSize64 {
+				log.Fatalf("read zip file length not equal with UncompressedSize64")
+			}
+			if err := rc.Close(); err != nil {
+				log.Fatalf("failed to close zip entry reader: %s", err)
+			}
+		}
+	}
+}
+```
+
+## Limitation
+
+- Each file in a zip archive can be read only once per `Reader`; repeated reads are unsupported.
+- Some `central directory header` fields are not resolved, such as `version made by`, `internal file attributes`, `external file attributes` and `relative offset of local header`, and some fields may differ from the `local file header`, such as `extra field`.
+- Files cannot be read concurrently.
\ No newline at end of file
diff --git a/vendor/github.com/GuanceCloud/zipstream/reader.go b/vendor/github.com/GuanceCloud/zipstream/reader.go
new file mode 100644
index 0000000000..49d292929c
--- /dev/null
+++ b/vendor/github.com/GuanceCloud/zipstream/reader.go
@@ -0,0 +1,463 @@
+package zipstream
+
+import (
+	"archive/zip"
+	"bytes"
+	"compress/flate"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"hash"
+	"hash/crc32"
+	"io"
+	"sync"
+	"time"
+)
+
+const (
+	headerIdentifierLen      = 4
+	fileHeaderLen            = 26
+	dataDescriptorLen        = 16 // four uint32: descriptor signature, crc32, compressed size, size
+	fileHeaderSignature      = 0x04034b50
+	directoryHeaderSignature = 0x02014b50
+	directoryEndSignature    = 0x06054b50
+	dataDescriptorSignature  = 0x08074b50
+
+	// Extra header IDs.
+	// See http://mdfs.net/Docs/Comp/Archiving/Zip/ExtraField
+
+	Zip64ExtraID       = 0x0001 // Zip64 extended information
+	NtfsExtraID        = 0x000a // NTFS
+	UnixExtraID        = 0x000d // UNIX
+	ExtTimeExtraID     = 0x5455 // Extended timestamp
+	InfoZipUnixExtraID = 0x5855 // Info-ZIP Unix extension
+
+)
+
+const (
+	CompressMethodStored   = 0
+	CompressMethodDeflated = 8
+)
+
+type Entry struct {
+	zip.FileHeader
+	r                          io.Reader
+	lr                         io.Reader // LimitReader
+	zip64                      bool
+	hasReadNum                 uint64
+	hasDataDescriptorSignature bool
+	eof                        bool
+}
+
+func (e *Entry) hasDataDescriptor() bool {
+	return e.Flags&8 != 0
+}
+
+// IsDir simply checks whether the entry name ends with "/"
+func (e *Entry) IsDir() bool {
+	return len(e.Name) > 0 && e.Name[len(e.Name)-1] == '/'
+}
+
+func (e *Entry) Open() (io.ReadCloser, error) {
+	if e.eof {
+		return nil, errors.New("this file has already been read to the end")
+	}
+	decomp := decompressor(e.Method)
+	if decomp == nil {
+		return nil, zip.ErrAlgorithm
+	}
+	rc := decomp(e.lr)
+
+	return &checksumReader{
+		rc:    rc,
+		hash:  crc32.NewIEEE(),
+		entry: e,
+	}, nil
+}
+
+type Reader struct {
+	r            io.Reader
+	localFileEnd bool
+	curEntry     *Entry
+}
+
+func NewReader(r io.Reader) *Reader {
+	return &Reader{
+		r: r,
+	}
+}
+
+func (z *Reader) readEntry() (*Entry, error) {
+
+	buf := make([]byte, fileHeaderLen)
+	if _, err := io.ReadFull(z.r, buf); err != nil {
+		return nil, fmt.Errorf("unable to read local file header: %w", err)
+	}
+
+	lr := readBuf(buf)
+
+	readerVersion := lr.uint16()
+	flags := lr.uint16()
+	method := lr.uint16()
+	modifiedTime := lr.uint16()
+	modifiedDate := lr.uint16()
+	crc32Sum := lr.uint32()
+	compressedSize := lr.uint32()
+	uncompressedSize := lr.uint32()
+	filenameLen := int(lr.uint16())
+	extraAreaLen := int(lr.uint16())
+
+	entry := &Entry{
+		FileHeader: zip.FileHeader{
+			ReaderVersion:      readerVersion,
+			Flags:              flags,
+			Method:             method,
+			ModifiedTime:       modifiedTime,
+			ModifiedDate:       modifiedDate,
+			CRC32:              crc32Sum,
+			CompressedSize:     compressedSize,
+			UncompressedSize:   uncompressedSize,
+			CompressedSize64:   uint64(compressedSize),
+			UncompressedSize64: uint64(uncompressedSize),
+		},
+		r:          z.r,
+		hasReadNum: 0,
+		eof:        false,
+	}
+
+	nameAndExtraBuf := make([]byte, filenameLen+extraAreaLen)
+	if _, err := io.ReadFull(z.r, nameAndExtraBuf); err != nil {
+		return nil, fmt.Errorf("unable to read entry name and extra area: %w", err)
+	}
+
+	entry.Name = string(nameAndExtraBuf[:filenameLen])
+	entry.Extra = nameAndExtraBuf[filenameLen:]
+
+	entry.NonUTF8 = flags&0x800 == 0
+	if flags&1 == 1 {
+		return nil, fmt.Errorf("encrypted ZIP entry not supported")
+	}
+	if flags&8 == 8 && method != CompressMethodDeflated {
+		return nil, fmt.Errorf("only DEFLATED entries can have a data descriptor")
+	}
+
+	needCSize := entry.CompressedSize == ^uint32(0)
+	needUSize := entry.UncompressedSize == ^uint32(0)
+
+	ler := readBuf(entry.Extra)
+	var modified time.Time
+parseExtras:
+	for len(ler) >= 4 { // need at least tag and size
+		fieldTag := ler.uint16()
+		fieldSize := int(ler.uint16())
+		if len(ler) < fieldSize {
+			break
+		}
+		fieldBuf := ler.sub(fieldSize)
+
+		switch fieldTag {
+		case Zip64ExtraID:
+			entry.zip64 = true
+
+			// update directory values from the zip64 extra block.
+			// They should only be consulted if the sizes read earlier
+			// are maxed out.
+			// See golang.org/issue/13367.
+ if needUSize { + needUSize = false + if len(fieldBuf) < 8 { + return nil, zip.ErrFormat + } + entry.UncompressedSize64 = fieldBuf.uint64() + } + if needCSize { + needCSize = false + if len(fieldBuf) < 8 { + return nil, zip.ErrFormat + } + entry.CompressedSize64 = fieldBuf.uint64() + } + case NtfsExtraID: + if len(fieldBuf) < 4 { + continue parseExtras + } + fieldBuf.uint32() // reserved (ignored) + for len(fieldBuf) >= 4 { // need at least tag and size + attrTag := fieldBuf.uint16() + attrSize := int(fieldBuf.uint16()) + if len(fieldBuf) < attrSize { + continue parseExtras + } + attrBuf := fieldBuf.sub(attrSize) + if attrTag != 1 || attrSize != 24 { + continue // Ignore irrelevant attributes + } + + const ticksPerSecond = 1e7 // Windows timestamp resolution + ts := int64(attrBuf.uint64()) // ModTime since Windows epoch + secs := ts / ticksPerSecond + nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond) + epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC) + modified = time.Unix(epoch.Unix()+secs, nsecs) + } + case UnixExtraID, InfoZipUnixExtraID: + if len(fieldBuf) < 8 { + continue parseExtras + } + fieldBuf.uint32() // AcTime (ignored) + ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch + modified = time.Unix(ts, 0) + case ExtTimeExtraID: + if len(fieldBuf) < 5 || fieldBuf.uint8()&1 == 0 { + continue parseExtras + } + ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch + modified = time.Unix(ts, 0) + } + } + + msDosModified := MSDosTimeToTime(entry.ModifiedDate, entry.ModifiedTime) + entry.Modified = msDosModified + + if !modified.IsZero() { + entry.Modified = modified.UTC() + + // If legacy MS-DOS timestamps are set, we can use the delta between + // the legacy and extended versions to estimate timezone offset. + // + // A non-UTC timezone is always used (even if offset is zero). + // Thus, FileHeader.Modified.Location() == time.UTC is useful for + // determining whether extended timestamps are present. + // This is necessary for users that need to do additional time + // calculations when dealing with legacy ZIP formats. 
		if entry.ModifiedTime != 0 || entry.ModifiedDate != 0 {
+			entry.Modified = modified.In(timeZone(msDosModified.Sub(modified)))
+		}
+	}
+
+	if needCSize {
+		return nil, zip.ErrFormat
+	}
+
+	entry.lr = io.LimitReader(z.r, int64(entry.CompressedSize64))
+
+	return entry, nil
+}
+
+func (z *Reader) GetNextEntry() (*Entry, error) {
+	if z.localFileEnd {
+		return nil, io.EOF
+	}
+	if z.curEntry != nil && !z.curEntry.eof {
+		if z.curEntry.hasReadNum <= z.curEntry.UncompressedSize64 {
+			if _, err := io.Copy(io.Discard, z.curEntry.lr); err != nil {
+				return nil, fmt.Errorf("failed to read previous file data: %w", err)
+			}
+			if z.curEntry.hasDataDescriptor() {
+				if err := readDataDescriptor(z.r, z.curEntry); err != nil {
+					return nil, fmt.Errorf("failed to read previous entry's data descriptor: %w", err)
+				}
+			}
+		} else {
+			if !z.curEntry.hasDataDescriptor() {
+				return nil, errors.New("parse error: read position exceeds entry")
+			}
+
+			readDataLen := z.curEntry.hasReadNum - z.curEntry.UncompressedSize64
+			if readDataLen > dataDescriptorLen {
+				return nil, errors.New("parse error: read position exceeds entry")
+			} else if readDataLen > dataDescriptorLen-4 {
+				if z.curEntry.hasDataDescriptorSignature {
+					if _, err := io.Copy(io.Discard, io.LimitReader(z.r, int64(dataDescriptorLen-readDataLen))); err != nil {
+						return nil, fmt.Errorf("failed to read previous entry's data descriptor: %w", err)
+					}
+				} else {
+					return nil, errors.New("parse error: read position exceeds entry")
+				}
+			} else {
+				buf := make([]byte, dataDescriptorLen-readDataLen)
+				if _, err := io.ReadFull(z.r, buf); err != nil {
+					return nil, fmt.Errorf("failed to read previous entry's data descriptor: %w", err)
+				}
+				buf = buf[len(buf)-4:]
+				headerID := binary.LittleEndian.Uint32(buf)
+
+				// The four bytes just read may already belong to the next
+				// record header; if so, push them back onto the stream.
+				if headerID == fileHeaderSignature ||
+					headerID == directoryHeaderSignature ||
+					headerID == directoryEndSignature {
+					z.r = io.MultiReader(bytes.NewReader(buf), z.r)
+				}
+			}
+		}
+		z.curEntry.eof = true
+	}
+	headerIDBuf := make([]byte, headerIdentifierLen)
+	if _, err := io.ReadFull(z.r, headerIDBuf); err != nil {
+		return nil, fmt.Errorf("unable to read header identifier: %w", err)
+	}
+	headerID := binary.LittleEndian.Uint32(headerIDBuf)
+	if headerID != fileHeaderSignature {
+		if headerID == directoryHeaderSignature || headerID == directoryEndSignature {
+			z.localFileEnd = true
+			return nil, io.EOF
+		}
+		return nil, zip.ErrFormat
+	}
+	entry, err := z.readEntry()
+	if err != nil {
+		return nil, fmt.Errorf("unable to read zip file header: %w", err)
+	}
+	z.curEntry = entry
+	return entry, nil
+}
+
+var (
+	decompressors sync.Map // map[uint16]Decompressor
+)
+
+func init() {
+	decompressors.Store(zip.Store, zip.Decompressor(io.NopCloser))
+	decompressors.Store(zip.Deflate, zip.Decompressor(newFlateReader))
+}
+
+func decompressor(method uint16) zip.Decompressor {
+	di, ok := decompressors.Load(method)
+	if !ok {
+		return nil
+	}
+	return di.(zip.Decompressor)
+}
+
+var flateReaderPool sync.Pool
+
+func newFlateReader(r io.Reader) io.ReadCloser {
+	fr, ok := flateReaderPool.Get().(io.ReadCloser)
+	if ok {
+		fr.(flate.Resetter).Reset(r, nil)
+	} else {
+		fr = flate.NewReader(r)
+	}
+	return &pooledFlateReader{fr: fr}
+}
+
+type pooledFlateReader struct {
+	mu sync.Mutex // guards Close and Read
+	fr io.ReadCloser
+}
+
+func (r *pooledFlateReader) Read(p []byte) (n int, err error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.fr == nil {
+		return 0, errors.New("Read after Close")
+	}
+	return r.fr.Read(p)
+}
+
+func (r *pooledFlateReader) Close() error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	var err error
+	if r.fr != nil {
+		err = r.fr.Close()
+		flateReaderPool.Put(r.fr)
+		r.fr = nil
+	}
+	return err
+}
+
+func readDataDescriptor(r io.Reader, entry *Entry) error {
+	var buf [dataDescriptorLen]byte
+	// The spec says: "Although not originally assigned a
+	// signature, the value 0x08074b50 has commonly been adopted
+	// as a signature value for the data descriptor record.
+	// Implementers should be aware that ZIP files may be
+	// encountered with or without this signature marking data
+	// descriptors and should account for either case when reading
+	// ZIP files to ensure compatibility."
+	//
+	// dataDescriptorLen includes the size of the signature but
+	// first read just those 4 bytes to see if it exists.
+	n, err := io.ReadFull(r, buf[:4])
+	entry.hasReadNum += uint64(n)
+	if err != nil {
+		return err
+	}
+	off := 0
+	maybeSig := readBuf(buf[:4])
+	if maybeSig.uint32() != dataDescriptorSignature {
+		// No data descriptor signature. Keep these four
+		// bytes.
+		off += 4
+	} else {
+		entry.hasDataDescriptorSignature = true
+	}
+	n, err = io.ReadFull(r, buf[off:12])
+	entry.hasReadNum += uint64(n)
+	if err != nil {
+		return err
+	}
+	entry.eof = true
+	b := readBuf(buf[:12])
+	if b.uint32() != entry.CRC32 {
+		return zip.ErrChecksum
+	}
+
+	// The two sizes that follow here can be either 32 bits or 64 bits
+	// but the spec is not very clear on this and different
+	// interpretations have been made, causing incompatibilities. We
+	// already have the sizes from the central directory so we can
+	// just ignore these.
+
+	return nil
+}
+
+type checksumReader struct {
+	rc    io.ReadCloser
+	hash  hash.Hash32
+	nread uint64 // number of bytes read so far
+	entry *Entry
+	err   error // sticky error
+}
+
+func (r *checksumReader) Read(b []byte) (n int, err error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	n, err = r.rc.Read(b)
+	r.hash.Write(b[:n])
+	r.nread += uint64(n)
+	r.entry.hasReadNum += uint64(n)
+	if err == nil {
+		return
+	}
+	if err == io.EOF {
+		if r.nread != r.entry.UncompressedSize64 {
+			return 0, io.ErrUnexpectedEOF
+		}
+		if r.entry.hasDataDescriptor() {
+			if err1 := readDataDescriptor(r.entry.r, r.entry); err1 != nil {
+				if err1 == io.EOF {
+					err = io.ErrUnexpectedEOF
+				} else {
+					err = err1
+				}
+			} else if r.hash.Sum32() != r.entry.CRC32 {
+				err = zip.ErrChecksum
+			}
+		} else {
+			// If there's not a data descriptor, we still compare
+			// the CRC32 of what we've read against the file header
+			// or TOC's CRC32, if it seems like it was set.
+			r.entry.eof = true
+			if r.entry.CRC32 != 0 && r.hash.Sum32() != r.entry.CRC32 {
+				err = zip.ErrChecksum
+			}
+		}
+	}
+	r.err = err
+	return
+}
+
+func (r *checksumReader) Close() error { return r.rc.Close() }
diff --git a/vendor/github.com/GuanceCloud/zipstream/utils.go b/vendor/github.com/GuanceCloud/zipstream/utils.go
new file mode 100644
index 0000000000..bc7482b81f
--- /dev/null
+++ b/vendor/github.com/GuanceCloud/zipstream/utils.go
@@ -0,0 +1,70 @@
+package zipstream
+
+import (
+	"encoding/binary"
+	"time"
+)
+
+func MSDosTimeToTime(dosDate, dosTime uint16) time.Time {
+	return time.Date(
+		// date bits 0-4: day of month; 5-8: month; 9-15: years since 1980
+		int(dosDate>>9+1980),
+		time.Month(dosDate>>5&0xf),
+		int(dosDate&0x1f),
+
+		// time bits 0-4: second/2; 5-10: minute; 11-15: hour
+		int(dosTime>>11),
+		int(dosTime>>5&0x3f),
+		int(dosTime&0x1f*2),
+		0, // nanoseconds
+
+		time.UTC,
+	)
+}
+
+// timeZone returns a *time.Location based on the provided offset.
+// If the offset is non-sensible, then this uses an offset of zero. +func timeZone(offset time.Duration) *time.Location { + const ( + minOffset = -12 * time.Hour // E.g., Baker island at -12:00 + maxOffset = +14 * time.Hour // E.g., Line island at +14:00 + offsetAlias = 15 * time.Minute // E.g., Nepal at +5:45 + ) + offset = offset.Round(offsetAlias) + if offset < minOffset || maxOffset < offset { + offset = 0 + } + return time.FixedZone("", int(offset/time.Second)) +} + +type readBuf []byte + +func (b *readBuf) uint8() uint8 { + v := (*b)[0] + *b = (*b)[1:] + return v +} + +func (b *readBuf) uint16() uint16 { + v := binary.LittleEndian.Uint16(*b) + *b = (*b)[2:] + return v +} + +func (b *readBuf) uint32() uint32 { + v := binary.LittleEndian.Uint32(*b) + *b = (*b)[4:] + return v +} + +func (b *readBuf) uint64() uint64 { + v := binary.LittleEndian.Uint64(*b) + *b = (*b)[8:] + return v +} + +func (b *readBuf) sub(n int) readBuf { + b2 := (*b)[:n] + *b = (*b)[n:] + return b2 +} diff --git a/vendor/github.com/grafana/jfr-parser/LICENSE b/vendor/github.com/grafana/jfr-parser/LICENSE new file mode 100644 index 0000000000..d5361db622 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 Pyroscope + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/grafana/jfr-parser/common/attributes/attribute.go b/vendor/github.com/grafana/jfr-parser/common/attributes/attribute.go
new file mode 100644
index 0000000000..75c856e75c
--- /dev/null
+++ b/vendor/github.com/grafana/jfr-parser/common/attributes/attribute.go
@@ -0,0 +1,163 @@
+package attributes
+
+import (
+	"fmt"
+	"github.com/grafana/jfr-parser/common/types"
+	"github.com/grafana/jfr-parser/common/units"
+	"github.com/grafana/jfr-parser/parser"
+	"reflect"
+)
+
+var (
+	Blocking     = Attr[bool]("blocking", "Blocking", types.Boolean, "Whether the thread calling the vm operation was blocked or not")
+	Safepoint    = Attr[bool]("safepoint", "Safepoint", types.Boolean, "Whether the vm operation occurred at a safepoint or not")
+	EventThread  = Attr[*parser.Thread]("eventThread", "Thread", types.Thread, "The thread in which the event occurred")
+	BytesRead    = Attr[units.IQuantity]("bytesRead", "Bytes Read", types.Long, "Number of bytes read from the file (possibly 0)")
+	BytesWritten = Attr[units.IQuantity]("bytesWritten", "Bytes Written", types.Long, "Number of bytes written to the file")
+	SwitchRate   = Attr[float64]("switchRate", "Switch Rate", types.Float, "Number of context switches per second")
+
+	StartTime           = AttrNoDesc[units.IQuantity]("startTime", "Start Time", types.Long)
+	GcWhen              = AttrNoDesc[string]("when", "When", types.String)
+	EventStacktrace     = AttrNoDesc[*parser.StackTrace]("stackTrace", "Stack Trace", types.StackTrace)
+	ThreadStat          = AttrNoDesc[string]("state", "Thread State", types.ThreadState)
+	CpuSamplingInterval = AttrNoDesc[units.IQuantity]("cpuInterval", "CPU Sampling Interval", types.Long)
+	SettingName         = AttrNoDesc[string]("name", "Setting Name", types.String)
+	SettingValue        = AttrNoDesc[string]("value", "Setting Value", types.String)
+	SettingUnit         = AttrNoDesc[string]("unit", "Setting Unit", types.String)
+	DatadogEndpoint     = AttrNoDesc[string]("endpoint", "Endpoint", types.String)
+	Duration            = AttrNoDesc[units.IQuantity]("duration", "Duration", types.Long)
+
+	JVMStartTime       = AttrSimple[units.IQuantity]("jvmStartTime", types.Long)
+	SampleWeight       = AttrSimple[int64]("weight", types.Long)
+	AllocWeight        = AttrSimple[float64]("weight", types.Float)
+	AllocSize          = AttrSimple[units.IQuantity]("size", types.Long)
+	WallSampleInterval = AttrSimple[units.IQuantity]("wallInterval", types.Long)
+	Allocated          = AttrSimple[units.IQuantity]("allocated", types.Long)
+	Size               = AttrSimple[units.IQuantity]("size", types.Long)
+	HeapWeight         = AttrSimple[float64]("weight", types.Long)
+)
+
+type Attribute[T any] struct {
+	Name        string // unique identifier for attribute
+	Label       string // human-readable name
+	ClassName   types.FieldClass
+	Description string
+}
+
+func Attr[T any](name, label string, className types.FieldClass, description string) *Attribute[T] {
+	return &Attribute[T]{
+		Name:        name,
+		Label:       label,
+		ClassName:   className,
+		Description: description,
+	}
+}
+
+func AttrSimple[T any](name string, className types.FieldClass) *Attribute[T] {
+	return &Attribute[T]{
+		Name:      name,
+		ClassName: className,
+	}
+}
+
+func AttrNoDesc[T any](name, label string, className types.FieldClass) *Attribute[T] {
+	return &Attribute[T]{
+		Name:      name,
+		Label:     label,
+		ClassName: className,
+	}
+}
+
+func (a *Attribute[T]) GetValue(event *parser.GenericEvent) (T, error) {
+	var t T
+	attr, ok := event.Attributes[a.Name]
+	if !ok {
+		return t, fmt.Errorf("attribute name [%s] is not found in the event", a.Name)
+	}
+
+	if x, ok := attr.(T); ok {
+		return x, nil
+	}
+
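+	// The direct type assertion above failed; fall back to reflection so that
+	// values whose concrete type is merely convertible to T (including
+	// pointer/value mismatches) can still be resolved.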
+	attrValue := reflect.ValueOf(attr)
+	attrType := attrValue.Type()
+	tValue := reflect.ValueOf(&t).Elem()
+	tType := tValue.Type()
+
+	if attrType.ConvertibleTo(tType) {
+		// t = t(attr)
+		tValue.Set(attrValue.Convert(tType))
+		return t, nil
+	} else if attrValue.Kind() == reflect.Pointer && attrValue.Elem().Type().ConvertibleTo(tType) {
+		// t = t(*attr)
+		tValue.Set(attrValue.Elem().Convert(tType))
+		return t, nil
+	} else if tType.Kind() == reflect.Pointer && attrType.ConvertibleTo(tType.Elem()) {
+		// t = t(&attr)
+		ap := reflect.New(attrType)
+		ap.Elem().Set(attrValue)
+		if ap.Type().ConvertibleTo(tType) {
+			tValue.Set(ap.Convert(tType))
+			return t, nil
+		}
+	}
+
+	fieldMeta := event.ClassMetadata.GetField(a.Name)
+	fieldUnit := fieldMeta.Unit(event.ClassMetadata.ClassMap)
+
+	if fieldUnit != nil || fieldMeta.TickTimestamp(event.ClassMetadata.ClassMap) {
+		var (
+			num      units.Number
+			quantity units.IQuantity
+		)
+
+		switch attr.(type) {
+		case *parser.Byte, *parser.Short, *parser.Int, *parser.Long:
+			if fieldMeta.Unsigned(event.ClassMetadata.ClassMap) {
+				var x any
+				switch ax := attr.(type) {
+				case *parser.Byte:
+					x = uint8(*ax)
+				case *parser.Short:
+					x = uint16(*ax)
+				case *parser.Int:
+					x = uint32(*ax)
+				case *parser.Long:
+					x = uint64(*ax)
+				}
+				num = units.I64(reflect.ValueOf(x).Uint())
+			} else {
+				num = units.I64(reflect.ValueOf(attr).Elem().Int())
+			}
+		case *parser.Float, *parser.Double:
+			num = units.F64(reflect.ValueOf(attr).Elem().Float())
+		}
+
+		if fieldMeta.TickTimestamp(event.ClassMetadata.ClassMap) {
+			ts := fieldMeta.ChunkHeader.StartTimeNanos + ((num.Int64() - fieldMeta.ChunkHeader.StartTicks) * 1e9 / fieldMeta.ChunkHeader.TicksPerSecond)
+			quantity = units.NewIntQuantity(ts, units.UnixNano)
+		} else {
+			if num.Float() {
+				quantity = units.NewFloatQuantity(num.Float64(), fieldUnit)
+			} else {
+				quantity = units.NewIntQuantity(num.Int64(), fieldUnit)
+			}
+		}
+
+		if q, ok := quantity.(T); ok {
+			return q, nil
+		}
+	}
+
+	switch any(t).(type) {
+	case string:
+		s, err := parser.ToString(attr)
+		if err != nil {
+			return t, fmt.Errorf("unable to resolve string: %w", err)
+		}
+		reflect.ValueOf(&t).Elem().SetString(s)
+		return t, nil
+	}
+
+	return t, fmt.Errorf("attribute is not of type %T", t)
+}
diff --git a/vendor/github.com/grafana/jfr-parser/common/filters/filter.go b/vendor/github.com/grafana/jfr-parser/common/filters/filter.go
new file mode 100644
index 0000000000..1ea8bd0738
--- /dev/null
+++ b/vendor/github.com/grafana/jfr-parser/common/filters/filter.go
@@ -0,0 +1,375 @@
+package filters
+
+import (
+	"github.com/grafana/jfr-parser/common/attributes"
+	"github.com/grafana/jfr-parser/common/types"
+	"github.com/grafana/jfr-parser/parser"
+	"reflect"
+)
+
+const (
+	Parked    = "PARKED"
+	Runnable  = "RUNNABLE"
+	Waiting   = "WAITING"
+	Contended = "CONTENDED"
+)
+
+var (
+	JavaMonitorInflate          = Types(types.JavaMonitorInflate)
+	DatadogProfilerSetting      = Types(types.ProfilerSetting)
+	DatadogEndpoint             = Types(types.DatadogEndpoint)
+	DatadogScope                = Types(types.SCOPE)
+	DatadogExceptionCount       = Types(types.ExceptionCount)
+	DatadogExceptionSample      = Types(types.ExceptionSample)
+	DatadogMethodSample         = Types(types.DatadogMethodSample)
+	ThreadParked                = AttributeEqual(attributes.ThreadStat, Parked)
+	ThreadWaiting               = AttributeEqual(attributes.ThreadStat, Waiting)
+	DdMethodSampleThreadParked  = AndFilters(DatadogMethodSample, ThreadParked)
+	DdMethodSampleThreadWaiting = AndFilters(DatadogMethodSample, ThreadWaiting)
+	FileIo                      = Types(types.FileRead, types.FileWrite)
+	SocketIO                    =
Types(types.SocketRead, types.SocketWrite) + StacktraceNotNull = NotNull(attributes.EventStacktrace) + ALLOCATION = AndFilters(AllocAll, StacktraceNotNull) + ObjAllocation = AndFilters(ObjAlloc, StacktraceNotNull) + MonitorEnterStacktrace = AndFilters(MonitorEnter, StacktraceNotNull) + MonitorWait = Types(types.MonitorWait) + ThreadPark = Types(types.ThreadPark) + ThreadSleep = Types(types.ThreadSleep) + AsyncProfilerLock = Types(types.MonitorEnter, types.ThreadPark) + GarbageCollectionStacktrace = AndFilters(GarbageCollection, StacktraceNotNull) + SYNCHRONIZATION = Types(types.MonitorWait, types.ThreadPark) + OldObjectSample = Types(types.OldObjectSample) + DatadogProfilerConfig = Types(types.DatadogProfilerConfig) + DatadogAllocationSample = Types(types.DatadogAllocationSample) + DatadogHeapLiveObject = Types(types.HeapLiveObject) +) + +var ( + SocketRead = Types(types.SocketRead) + SocketWrite = Types(types.SocketWrite) + SocketReadOrWrite = OrFilters(SocketRead, SocketWrite) + NoRmiSocketRead = AndFilters(SocketRead, NotFilter(MethodFilter("sun.rmi.transport.tcp.TCPTransport", "handleMessages")), + NotFilter(MethodFilter("javax.management.remote.rmi.RMIConnector$RMINotifClient", "fetchNotifs"))) + NoRmiSocketWrite = AndFilters(SocketWrite, + NotFilter(MethodFilter("sun.rmi.transport.tcp.TCPTransport$ConnectionHandler", "run")), + NotFilter(MethodFilter("sun.rmi.transport.tcp.TCPTransport$ConnectionHandler", "run0"))) + EnvironmentVariable = Types(types.EnvironmentVariable) + FileRead = Types(types.FileRead) + FileWrite = Types(types.FileWrite) + CodeCacheFull = Types(types.CodeCacheFull) + CodeCacheStatistics = Types(types.CodeCacheStatistics) + CodeCacheConfig = Types(types.CodeCacheConfig) + SweepCodeCache = Types(types.SweepCodeCache) + CodeCache = OrFilters(CodeCacheFull, CodeCacheStatistics, SweepCodeCache, CodeCacheConfig) + CpuInformation = Types(types.CPUInformation) + GcConfig = Types(types.GcConf) + HeapConfig = Types(types.HeapConf) + BeforeGc = AttributeEqual(attributes.GcWhen, "Before GC") //$NON-NLS-1$ + AfterGc = AttributeEqual(attributes.GcWhen, "After GC") //$NON-NLS-1$ + AllocOutsideTlab = Types(types.AllocOutsideTlab) + AllocInsideTlab = Types(types.AllocInsideTlab) + AllocAll = Types(types.AllocInsideTlab, types.AllocOutsideTlab) + ObjAlloc = Types(types.ObjAllocSample) + ReferenceStatistics = Types(types.GcReferenceStatistics) + GarbageCollection = Types(types.GarbageCollection) + OldGarbageCollection = Types(types.GcCollectorOldGarbageCollection) + YoungGarbageCollection = Types(types.GcCollectorYoungGarbageCollection) + ConcurrentModeFailure = Types(types.ConcurrentModeFailure) + ERRORS = Types(types.ErrorsThrown) + EXCEPTIONS = Types(types.ExceptionsThrown) + Throwables = OrFilters(EXCEPTIONS, ERRORS) + ThrowablesStatistics = Types(types.ThrowableStatistics) + ClassUnload = Types(types.ClassUnload) + ClassLoadStatistics = Types(types.ClassLoadStatistics) + ClassLoaderStatistics = Types(types.ClassLoaderStatistics) + ClassLoad = Types(types.ClassLoad) + ClassLoadOrUnload = OrFilters(ClassLoad, ClassUnload) + ClassDefine = Types(types.ClassDefine) + ClassLoaderEvents = OrFilters(ClassLoad, ClassUnload, ClassDefine, ClassLoaderStatistics) + MonitorEnter = Types(types.MonitorEnter) + FileOrSocketIo = Types(types.SocketRead, types.SocketWrite, types.FileRead, types.FileWrite) // NOTE: Are there more types to add (i.e. relevant types with duration)? 
+	ThreadLatencies = Types(types.MonitorEnter, types.MonitorWait, types.ThreadSleep, types.ThreadPark, types.SocketRead, types.SocketWrite, types.FileRead, types.FileWrite, types.ClassLoad, types.Compilation, types.ExecutionSamplingInfoEventId)
+	FilterExecutionSample = Types(types.ExecutionSample)
+	DatadogExecutionSample = Types(types.DatadogExecutionSample)
+	ContextSwitchRate = Types(types.ContextSwitchRate)
+	CpuLoad = Types(types.CpuLoad)
+	GcPause = Types(types.GcPause)
+	GcPausePhase = Types(types.GcPauseL1, types.GcPauseL2, types.GcPauseL3, types.GcPauseL4)
+	TimeConversion = Types(types.TimeConversion)
+	VmInfo = Types(types.VmInfo)
+	ThreadDump = Types(types.ThreadDump)
+	SystemProperties = Types(types.SystemProperties)
+	JfrDataLost = Types(types.JfrDataLost)
+	PROCESSES = Types(types.Processes)
+	ObjectCount = Types(types.ObjectCount)
+	MetaspaceOOM = Types(types.MetaspaceOom)
+	Compilation = Types(types.Compilation)
+	CompilerFailure = Types(types.CompilerFailure)
+	CompilerStats = Types(types.CompilerStats)
+	OsMemorySummary = Types(types.OSMemorySummary)
+	HeapSummary = Types(types.HeapSummary)
+	HeapSummaryBeforeGc = AndFilters(HeapSummary, BeforeGc)
+	HeapSummaryAfterGc = AndFilters(HeapSummary, AfterGc)
+	MetaspaceSummary = Types(types.MetaspaceSummary)
+	MetaspaceSummaryAfterGc = AndFilters(MetaspaceSummary, AfterGc)
+	RECORDINGS = Types(types.RECORDINGS)
+	RecordingSetting = Types(types.RecordingSetting)
+	SafePoints = Types(types.SafepointBegin,
+		types.SafepointCleanup, types.SafepointCleanupTask, types.SafepointStateSync,
+		types.SafepointWaitBlocked, types.SafepointEnd)
+	VmOperations = Types(types.VmOperations) // NOTE: Not sure if there are any VM events that are neither blocking nor safepoint, but just in case.
+	VmOperationsBlockingOrSafepoint = AndFilters(
+		Types(types.VmOperations), OrFilters(AttributeEqual(attributes.Blocking, true), AttributeEqual(attributes.Safepoint, true)))
+	// VmOperationsSafepoint NOTE: Are there any VM operations that are blocking but not safepoints? Should we include those in the VM thread?
+ VmOperationsSafepoint = AndFilters(Types(types.VmOperations), AttributeEqual(attributes.Safepoint, true)) + ApplicationPauses = OrFilters(GcPause, SafePoints, VmOperationsSafepoint) + BiasedLockingRevocations = Types(types.BiasedLockClassRevocation, types.BiasedLockRevocation, types.BiasedLockSelfRevocation) + ThreadCpuLoad = Types(types.ThreadCpuLoad) + NativeMethodSample = Types(types.NativeMethodSample) + ThreadStart = Types(types.JavaThreadStart) + ThreadEnd = Types(types.JavaThreadEnd) + DatadogDirectAllocationTotal = Types(types.DatadogDirectAllocationTotal) + DatadogHeapUsage = Types(types.DatadogHeapUsage) + DatadogDeadlockedThread = Types(types.DatadogDeadlockedThread) +) + +type EventFilterFunc func(metadata *parser.ClassMetadata) parser.Predicate[parser.Event] + +func (e EventFilterFunc) GetPredicate(metadata *parser.ClassMetadata) parser.Predicate[parser.Event] { + return e(metadata) +} + +type AndPredicate []parser.PredicateFunc + +func (a AndPredicate) Test(t parser.Event) bool { + for _, fn := range a { + if !fn(t) { + return false + } + } + return true +} + +type OrPredicate []parser.PredicateFunc + +func (o OrPredicate) Test(t parser.Event) bool { + for _, fn := range o { + if fn(t) { + return true + } + } + return false +} + +type NotPredicate parser.PredicateFunc + +func (n NotPredicate) Test(t parser.Event) bool { + return !n(t) +} + +func And(p ...parser.Predicate[parser.Event]) parser.Predicate[parser.Event] { + ap := make(AndPredicate, 0, len(p)) + for _, pp := range p { + ap = append(ap, pp.Test) + } + return ap +} + +func Or(p ...parser.Predicate[parser.Event]) parser.Predicate[parser.Event] { + op := make(OrPredicate, 0, len(p)) + for _, pp := range p { + op = append(op, pp.Test) + } + return op +} + +func Not(p parser.Predicate[parser.Event]) parser.Predicate[parser.Event] { + return NotPredicate(p.Test) +} + +func AndAlways(p ...parser.Predicate[parser.Event]) parser.Predicate[parser.Event] { + switch len(p) { + case 0: + return parser.AlwaysTrue + case 1: + return p[0] + } + + notAlwaysPred := make([]parser.Predicate[parser.Event], 0) + for _, pp := range p { + if parser.IsAlwaysFalse(pp) { + return parser.AlwaysFalse + } + if parser.IsAlwaysTrue(pp) { + continue + } + notAlwaysPred = append(notAlwaysPred, pp) + } + if len(notAlwaysPred) == 0 { + return parser.AlwaysTrue + } + return And(notAlwaysPred...) +} + +func OrAlways(p ...parser.Predicate[parser.Event]) parser.Predicate[parser.Event] { + switch len(p) { + case 0: + return parser.AlwaysFalse + case 1: + return p[0] + } + notAlwaysPred := make([]parser.Predicate[parser.Event], 0) + for _, pp := range p { + if parser.IsAlwaysTrue(pp) { + return parser.AlwaysTrue + } + if parser.IsAlwaysFalse(pp) { + continue + } + notAlwaysPred = append(notAlwaysPred, pp) + } + if len(notAlwaysPred) == 0 { + return parser.AlwaysFalse + } + return Or(notAlwaysPred...) 
+} + +func NotAlways(p parser.Predicate[parser.Event]) parser.Predicate[parser.Event] { + switch { + case parser.IsAlwaysTrue(p): + return parser.AlwaysFalse + case parser.IsAlwaysFalse(p): + return parser.AlwaysTrue + } + return Not(p) +} + +func Types(classNames ...string) parser.EventFilter { + if len(classNames) == 1 { + return EventFilterFunc(func(metadata *parser.ClassMetadata) parser.Predicate[parser.Event] { + if classNames[0] == metadata.Name { + return parser.AlwaysTrue + } + return parser.AlwaysFalse + }) + } else { + et := make(map[string]struct{}, len(classNames)) + for _, className := range classNames { + et[className] = struct{}{} + } + return EventFilterFunc(func(metadata *parser.ClassMetadata) parser.Predicate[parser.Event] { + if _, ok := et[metadata.Name]; ok { + return parser.AlwaysTrue + } + return parser.AlwaysFalse + }) + } +} + +func AttributeEqual[T comparable](attr *attributes.Attribute[T], target T) parser.EventFilter { + return EventFilterFunc(func(metadata *parser.ClassMetadata) parser.Predicate[parser.Event] { + + if parser.IsAlwaysFalse(HasAttribute(attr).GetPredicate(metadata)) { + return parser.AlwaysFalse + } + + return parser.PredicateFunc(func(e parser.Event) bool { + value, err := attr.GetValue(e.(*parser.GenericEvent)) + if err != nil { + return false + } + return value == target + }) + }) +} + +func HasAttribute[T any](attr *attributes.Attribute[T]) parser.EventFilter { + return EventFilterFunc(func(metadata *parser.ClassMetadata) parser.Predicate[parser.Event] { + f := metadata.GetField(attr.Name) + if f == nil { + return parser.AlwaysFalse + } + + if metadata.ClassMap[f.ClassID].Name != string(attr.ClassName) { + return parser.AlwaysFalse + } + + return parser.AlwaysTrue + }) +} + +func MethodFilter(typeName, method string) parser.EventFilter { + return EventFilterFunc(func(metadata *parser.ClassMetadata) parser.Predicate[parser.Event] { + methodFilter := EventFilterFunc(func(metadata *parser.ClassMetadata) parser.Predicate[parser.Event] { + + return parser.PredicateFunc(func(e parser.Event) bool { + stacktrace, err := attributes.EventStacktrace.GetValue(e.(*parser.GenericEvent)) + if err != nil { + return false + } + for _, frame := range stacktrace.Frames { + // todo check type full name + if frame.Method.Type.Name.String == typeName && frame.Method.Name.String == method { + return true + } + } + return false + }) + }) + + return AndFilters(HasAttribute[*parser.StackTrace](attributes.EventStacktrace), methodFilter).GetPredicate(metadata) + }) +} + +func AndFilters(filters ...parser.EventFilter) parser.EventFilter { + return EventFilterFunc(func(metadata *parser.ClassMetadata) parser.Predicate[parser.Event] { + predicates := make([]parser.Predicate[parser.Event], 0, len(filters)) + + for _, filter := range filters { + predicates = append(predicates, filter.GetPredicate(metadata)) + } + + return AndAlways(predicates...) + }) +} + +func OrFilters(filters ...parser.EventFilter) parser.EventFilter { + return EventFilterFunc(func(metadata *parser.ClassMetadata) parser.Predicate[parser.Event] { + + predicates := make([]parser.Predicate[parser.Event], 0, len(filters)) + + for _, filter := range filters { + predicates = append(predicates, filter.GetPredicate(metadata)) + } + + return OrAlways(predicates...) 
+	})
+}
+
+func NotFilter(filter parser.EventFilter) parser.EventFilter {
+	return EventFilterFunc(func(metadata *parser.ClassMetadata) parser.Predicate[parser.Event] {
+		return NotAlways(filter.GetPredicate(metadata))
+	})
+}
+
+func NotNull[T any](attr *attributes.Attribute[T]) parser.EventFilter {
+	return EventFilterFunc(func(metadata *parser.ClassMetadata) parser.Predicate[parser.Event] {
+		if parser.IsAlwaysFalse(HasAttribute[T](attr).GetPredicate(metadata)) {
+			return parser.AlwaysFalse
+		}
+
+		return parser.PredicateFunc(func(e parser.Event) bool {
+			value, err := attr.GetValue(e.(*parser.GenericEvent))
+			if err != nil {
+				return false
+			}
+			rv := reflect.ValueOf(value)
+			switch rv.Kind() {
+			case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Pointer, reflect.Slice:
+				// Only nillable kinds can hold nil: the event passes the
+				// filter when the attribute value is non-nil.
+				return !rv.IsNil()
+			default:
+				// Non-nillable kinds (numbers, strings, structs) can never
+				// be nil, so they always pass.
+				return true
+			}
+		})
+	})
+}
diff --git a/vendor/github.com/grafana/jfr-parser/common/types/eventtypes.go b/vendor/github.com/grafana/jfr-parser/common/types/eventtypes.go
new file mode 100644
index 0000000000..dc894b57e4
--- /dev/null
+++ b/vendor/github.com/grafana/jfr-parser/common/types/eventtypes.go
@@ -0,0 +1,185 @@
+package types
+
+const (
+	jdkTypePrefix     = "jdk."
+	datadogTypePrefix = "datadog."
+)
+
+const (
+	CpuLoad = jdkTypePrefix + "CPULoad"
+	ThreadCpuLoad = jdkTypePrefix + "ThreadCPULoad"
+	ExecutionSample = jdkTypePrefix + "ExecutionSample"
+	ExecutionSamplingInfoEventId = jdkTypePrefix + "ExecutionSampling"
+	NativeMethodSample = jdkTypePrefix + "NativeMethodSample"
+	Processes = jdkTypePrefix + "SystemProcess"
+	OSMemorySummary = jdkTypePrefix + "PhysicalMemory"
+	OSInformation = jdkTypePrefix + "OSInformation"
+	CPUInformation = jdkTypePrefix + "CPUInformation"
+	ThreadAllocationStatistics = jdkTypePrefix + "ThreadAllocationStatistics"
+	HeapConf = jdkTypePrefix + "GCHeapConfiguration"
+	GcConf = jdkTypePrefix + "GCConfiguration"
+	HeapSummary = jdkTypePrefix + "GCHeapSummary"
+	AllocInsideTlab = jdkTypePrefix + "ObjectAllocationInNewTLAB"
+	AllocOutsideTlab = jdkTypePrefix + "ObjectAllocationOutsideTLAB"
+	ObjAllocSample = jdkTypePrefix + "ObjectAllocationSample"
+	VmInfo = jdkTypePrefix + "JVMInformation"
+	ClassDefine = jdkTypePrefix + "ClassDefine"
+	ClassLoad = jdkTypePrefix + "ClassLoad"
+	ClassUnload = jdkTypePrefix + "ClassUnload"
+	ClassLoadStatistics = jdkTypePrefix + "ClassLoadingStatistics"
+	ClassLoaderStatistics = jdkTypePrefix + "ClassLoaderStatistics"
+	Compilation = jdkTypePrefix + "Compilation"
+	FileWrite = jdkTypePrefix + "FileWrite"
+	FileRead = jdkTypePrefix + "FileRead"
+	SocketWrite = jdkTypePrefix + "SocketWrite"
+	SocketRead = jdkTypePrefix + "SocketRead"
+	ThreadPark = jdkTypePrefix + "ThreadPark"
+	ThreadSleep = jdkTypePrefix + "ThreadSleep"
+	MonitorEnter = jdkTypePrefix + "JavaMonitorEnter"
+	MonitorWait = jdkTypePrefix + "JavaMonitorWait"
+	MetaspaceOom = jdkTypePrefix + "MetaspaceOOM"
+	CodeCacheFull = jdkTypePrefix + "CodeCacheFull"
+	CodeCacheStatistics = jdkTypePrefix + "CodeCacheStatistics"
+	CodeSweeperStatistics = jdkTypePrefix + "CodeSweeperStatistics"
+	SweepCodeCache = jdkTypePrefix + "SweepCodeCache"
+	EnvironmentVariable = jdkTypePrefix + "InitialEnvironmentVariable"
+	SystemProperties = jdkTypePrefix + "InitialSystemProperty"
+	ObjectCount = jdkTypePrefix + "ObjectCount"
+	GcReferenceStatistics = jdkTypePrefix + "GCReferenceStatistics"
+	OldObjectSample = jdkTypePrefix + "OldObjectSample"
+	GcPauseL4 = jdkTypePrefix + "GCPhasePauseLevel4"
+	GcPauseL3 = jdkTypePrefix + "GCPhasePauseLevel3"
+	GcPauseL2 =
jdkTypePrefix + "GCPhasePauseLevel2" + GcPauseL1 = jdkTypePrefix + "GCPhasePauseLevel1" + GcPause = jdkTypePrefix + "GCPhasePause" + MetaspaceSummary = jdkTypePrefix + "MetaspaceSummary" + GarbageCollection = jdkTypePrefix + "GarbageCollection" + ConcurrentModeFailure = jdkTypePrefix + "ConcurrentModeFailure" + ThrowableStatistics = jdkTypePrefix + "ExceptionStatistics" + ErrorsThrown = jdkTypePrefix + "JavaErrorThrow" + /* + * NOTE: The parser filters all JavaExceptionThrow events created from the Error constructor to + * avoid duplicates, so this event type represents 'non error throwables' rather than + * exceptions. See note in SyntheticAttributeExtension which does the duplicate filtering. + */ + ExceptionsThrown = jdkTypePrefix + "JavaExceptionThrow" + CompilerStats = jdkTypePrefix + "CompilerStatistics" + CompilerFailure = jdkTypePrefix + "CompilationFailure" + UlongFlag = jdkTypePrefix + "UnsignedLongFlag" + BooleanFlag = jdkTypePrefix + "BooleanFlag" + StringFlag = jdkTypePrefix + "StringFlag" + DoubleFlag = jdkTypePrefix + "DoubleFlag" + LongFlag = jdkTypePrefix + "LongFlag" + IntFlag = jdkTypePrefix + "IntFlag" + UintFlag = jdkTypePrefix + "UnsignedIntFlag" + UlongFlagChanged = jdkTypePrefix + "UnsignedLongFlagChanged" + BooleanFlagChanged = jdkTypePrefix + "BooleanFlagChanged" + StringFlagChanged = jdkTypePrefix + "StringFlagChanged" + DoubleFlagChanged = jdkTypePrefix + "DoubleFlagChanged" + LongFlagChanged = jdkTypePrefix + "LongFlagChanged" + IntFlagChanged = jdkTypePrefix + "IntFlagChanged" + UintFlagChanged = jdkTypePrefix + "UnsignedIntFlagChanged" + TimeConversion = jdkTypePrefix + "CPUTimeStampCounter" + ThreadDump = jdkTypePrefix + "ThreadDump" + JfrDataLost = jdkTypePrefix + "DataLoss" + DumpReason = jdkTypePrefix + "DumpReason" + GcConfYoungGeneration = jdkTypePrefix + "YoungGenerationConfiguration" + GcConfSurvivor = jdkTypePrefix + "GCSurvivorConfiguration" + GcConfTlab = jdkTypePrefix + "GCTLABConfiguration" + JavaThreadStart = jdkTypePrefix + "ThreadStart" + JavaThreadEnd = jdkTypePrefix + "ThreadEnd" + VmOperations = jdkTypePrefix + "ExecuteVMOperation" + VmShutdown = jdkTypePrefix + "Shutdown" + ThreadStatistics = jdkTypePrefix + "JavaThreadStatistics" + ContextSwitchRate = jdkTypePrefix + "ThreadContextSwitchRate" + CompilerConfig = jdkTypePrefix + "CompilerConfiguration" + CodeCacheConfig = jdkTypePrefix + "CodeCacheConfiguration" + CodeSweeperConfig = jdkTypePrefix + "CodeSweeperConfiguration" + CompilerPhase = jdkTypePrefix + "CompilerPhase" + GcCollectorG1GarbageCollection = jdkTypePrefix + "G1GarbageCollection" + GcCollectorOldGarbageCollection = jdkTypePrefix + "OldGarbageCollection" + GcCollectorParoldGarbageCollection = jdkTypePrefix + "ParallelOldGarbageCollection" + GcCollectorYoungGarbageCollection = jdkTypePrefix + "YoungGarbageCollection" + GcDetailedAllocationRequiringGc = jdkTypePrefix + "AllocationRequiringGC" + GcDetailedEvacuationFailed = jdkTypePrefix + "EvacuationFailed" + GcDetailedEvacuationInfo = jdkTypePrefix + "EvacuationInformation" + GcDetailedObjectCountAfterGc = jdkTypePrefix + "ObjectCountAfterGC" + GcDetailedPromotionFailed = jdkTypePrefix + "PromotionFailed" + GcHeapPsSummary = jdkTypePrefix + "PSHeapSummary" + GcMetaspaceAllocationFailure = jdkTypePrefix + "MetaspaceAllocationFailure" + GcMetaspaceChunkFreeListSummary = jdkTypePrefix + "MetaspaceChunkFreeListSummary" + GcMetaspaceGcThreshold = jdkTypePrefix + "MetaspaceGCThreshold" + GcG1mmu = jdkTypePrefix + "G1MMU" + GcG1EvacuationYoungStatistics = jdkTypePrefix + 
"G1EvacuationYoungStatistics" + GcG1EvacuationOldStatistics = jdkTypePrefix + "G1EvacuationOldStatistics" + GcG1BasicIHOP = jdkTypePrefix + "G1BasicIHOP" + GcG1HeapRegionTypeChange = jdkTypePrefix + "G1HeapRegionTypeChange" + GcG1HeapRegionInformation = jdkTypePrefix + "G1HeapRegionInformation" + BiasedLockSelfRevocation = jdkTypePrefix + "BiasedLockSelfRevocation" + BiasedLockRevocation = jdkTypePrefix + "BiasedLockRevocation" + BiasedLockClassRevocation = jdkTypePrefix + "BiasedLockClassRevocation" + GcG1AdaptiveIHOP = jdkTypePrefix + "G1AdaptiveIHOP" + RECORDINGS = jdkTypePrefix + "ActiveRecording" + RecordingSetting = jdkTypePrefix + "ActiveSetting" + + // SafepointBegin Safepointing begin + SafepointBegin = jdkTypePrefix + "SafepointBegin" + // SafepointStateSync Synchronize run state of threads + SafepointStateSync = jdkTypePrefix + "SafepointStateSynchronization" + // SafepointWaitBlocked SAFEPOINT_WAIT_BLOCKED Safepointing begin waiting on running threads to block + SafepointWaitBlocked = jdkTypePrefix + "SafepointWaitBlocked" + // SafepointCleanup SAFEPOINT_CLEANUP Safepointing begin running cleanup (parent) + SafepointCleanup = jdkTypePrefix + "SafepointCleanup" + // SafepointCleanupTask SAFEPOINT_CLEANUP_TASK Safepointing begin running cleanup task, individual subtasks + SafepointCleanupTask = jdkTypePrefix + "SafepointCleanupTask" + // SafepointEnd Safepointing end + SafepointEnd = jdkTypePrefix + "SafepointEnd" + ModuleExport = jdkTypePrefix + "ModuleExport" + ModuleRequire = jdkTypePrefix + "ModuleRequire" + NativeLibrary = jdkTypePrefix + "NativeLibrary" + HeapDump = jdkTypePrefix + "HeapDump" + ProcessStart = jdkTypePrefix + "ProcessStart" + + JavaMonitorInflate = jdkTypePrefix + "JavaMonitorInflate" +) + +const ( + ExceptionCount = datadogTypePrefix + "ExceptionCount" + ExceptionSample = datadogTypePrefix + "ExceptionSample" + ProfilerSetting = datadogTypePrefix + "ProfilerSetting" + + // DatadogProfilerConfig 标识符 名称 说明 内容类型 + //datadog.DatadogProfilerConfig Datadog Profiler Configuration [datadog.DatadogProfilerConfig] null + DatadogProfilerConfig = datadogTypePrefix + "DatadogProfilerConfig" + SCOPE = datadogTypePrefix + "Scope" + + // DatadogExecutionSample EXECUTION_SAMPLE 标识符 名称 说明 内容类型 + //datadog.ExecutionSample Method CPU Profiling Sample [datadog.ExecutionSample] null + DatadogExecutionSample = datadogTypePrefix + "ExecutionSample" + + // DatadogMethodSample MethodSample METHOD_SAMPLE 标识符 名称 说明 内容类型 + //datadog.MethodSample Method Wall Profiling Sample [datadog.MethodSample] null + DatadogMethodSample = datadogTypePrefix + "MethodSample" + + // DatadogAllocationSample ALLOCATION_SAMPLE 标识符 名称 说明 内容类型 + //datadog.ObjectSample Allocation sample [datadog.ObjectSample] null + DatadogAllocationSample = datadogTypePrefix + "ObjectSample" + + // HeapLiveObject 标识符 名称 说明 内容类型 + //datadog.HeapLiveObject Heap Live Object [datadog.HeapLiveObject] null + HeapLiveObject = datadogTypePrefix + "HeapLiveObject" + + DatadogEndpoint = datadogTypePrefix + "Endpoint" + + // DatadogDirectAllocationTotal 字段 值 详细模式 (Verbose) 值 + //事件类型 Direct Allocation Total Allocated Datadog direct allocation count event. 
[datadog.DirectAllocationTotal] + DatadogDirectAllocationTotal = datadogTypePrefix + "DirectAllocationTotal" + + // DatadogHeapUsage 标识符 名称 说明 内容类型 + //datadog.HeapUsage JVM Heap Usage [datadog.HeapUsage] null + DatadogHeapUsage = datadogTypePrefix + "HeapUsage" + + // DatadogDeadlockedThread 标识符 名称 说明 内容类型 + //datadog.DeadlockedThread Deadlocked Thread Datadog deadlock detection event - thread details. [datadog.DeadlockedThread] null + DatadogDeadlockedThread = datadogTypePrefix + "DeadlockedThread" +) diff --git a/vendor/github.com/grafana/jfr-parser/common/types/fieldtypes.go b/vendor/github.com/grafana/jfr-parser/common/types/fieldtypes.go new file mode 100644 index 0000000000..1f21785bbf --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/common/types/fieldtypes.go @@ -0,0 +1,46 @@ +package types + +type FieldClass string + +const ( + Boolean FieldClass = "boolean" + Byte FieldClass = "byte" + Char FieldClass = "char" + Short FieldClass = "short" + Int FieldClass = "int" + Long FieldClass = "long" + Float FieldClass = "float" + Double FieldClass = "double" + String FieldClass = "java.lang.String" + Class FieldClass = "java.lang.Class" + Thread FieldClass = "java.lang.Thread" + ClassLoader FieldClass = "jdk.types.ClassLoader" + CodeBlobType FieldClass = "jdk.types.CodeBlobType" + FlagValueOrigin FieldClass = "jdk.types.FlagValueOrigin" + FrameType FieldClass = "jdk.types.FrameType" + G1YCType FieldClass = "jdk.types.G1YCType" + GCName FieldClass = "jdk.types.GCName" + Method FieldClass = "jdk.types.Method" + Module FieldClass = "jdk.types.Module" + NarrowOopMode FieldClass = "jdk.types.NarrowOopMode" + NetworkInterfaceName FieldClass = "jdk.types.NetworkInterfaceName" + Package FieldClass = "jdk.types.Package" + StackFrame FieldClass = "jdk.types.StackFrame" + StackTrace FieldClass = "jdk.types.StackTrace" + Symbol FieldClass = "jdk.types.Symbol" + ThreadState FieldClass = "jdk.types.ThreadState" + InflateCause FieldClass = "jdk.types.InflateCause" + GCCause FieldClass = "jdk.types.GCCause" + CompilerPhaseType FieldClass = "jdk.types.CompilerPhaseType" + ThreadGroup FieldClass = "jdk.types.ThreadGroup" + GCThresholdUpdater FieldClass = "jdk.types.GCThresholdUpdater" + MetaspaceObjectType FieldClass = "jdk.types.MetaspaceObjectType" + ExecutionMode FieldClass = "datadog.types.ExecutionMode" + VMOperationType FieldClass = "jdk.types.VMOperationType" + G1HeapRegionType FieldClass = "jdk.types.G1HeapRegionType" + GCWhen FieldClass = "jdk.types.GCWhen" + ReferenceType FieldClass = "jdk.types.ReferenceType" + MetadataType FieldClass = "jdk.types.MetadataType" + LogLevel FieldClass = "profiler.types.LogLevel" + AttributeValue FieldClass = "profiler.types.AttributeValue" +) diff --git a/vendor/github.com/grafana/jfr-parser/common/units/quantity.go b/vendor/github.com/grafana/jfr-parser/common/units/quantity.go new file mode 100644 index 0000000000..70b1a93db8 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/common/units/quantity.go @@ -0,0 +1,102 @@ +package units + +import ( + "fmt" +) + +type IQuantity interface { + Unit() *Unit + In(unit *Unit) (IQuantity, error) + FloatValue() float64 + IntValue() int64 + Add(q IQuantity) (IQuantity, error) + String() string +} + +type IntQuantity struct { + num int64 + unit *Unit +} + +func (i *IntQuantity) String() string { + return fmt.Sprintf("%d%s", i.num, i.unit.Name) +} + +func (i *IntQuantity) Add(q IQuantity) (IQuantity, error) { + //TODO implement me + panic("implement me") +} + +func (i *IntQuantity) IntValue() int64 { + return 
i.num +} + +func (i *IntQuantity) FloatValue() float64 { + return float64(i.num) +} + +func (i *IntQuantity) Unit() *Unit { + return i.unit +} + +func (i *IntQuantity) In(u *Unit) (IQuantity, error) { + if i.unit.Kind != u.Kind { + return nil, fmt.Errorf("incompatible unit kinds between source [%q] and target [%q]", i.unit.Kind, u.Kind) + } + if i.num == 0 { + return NewIntQuantity(0, u), nil + } + if !i.unit.Base.Float() && !u.Base.Float() { + if iBase, uBase := i.unit.Base.Int64(), u.Base.Int64(); (i.num*iBase)%uBase == 0 { + return NewIntQuantity(i.num*iBase/uBase, u), nil + } + } + return NewFloatQuantity(i.FloatValue()*i.unit.Base.Float64()/u.Base.Float64(), u), nil +} + +func NewIntQuantity(number int64, unit *Unit) IQuantity { + return &IntQuantity{ + num: number, + unit: unit, + } +} + +type FloatQuantity struct { + num float64 + unit *Unit +} + +func (f *FloatQuantity) String() string { + return fmt.Sprintf("%f%s", f.num, f.unit.Name) +} + +func (f *FloatQuantity) Add(q IQuantity) (IQuantity, error) { + //TODO implement me + panic("implement me") +} + +func (f *FloatQuantity) Unit() *Unit { + return f.unit +} + +func (f *FloatQuantity) In(u *Unit) (IQuantity, error) { + if f.unit.Kind != u.Kind { + return nil, fmt.Errorf("incompatible unit kinds between source [%q] and target [%q]", f.unit.Kind, u.Kind) + } + return NewFloatQuantity(f.num*f.unit.Base.Float64()/u.Base.Float64(), u), nil +} + +func (f *FloatQuantity) FloatValue() float64 { + return f.num +} + +func (f *FloatQuantity) IntValue() int64 { + return int64(f.num) +} + +func NewFloatQuantity(number float64, unit *Unit) IQuantity { + return &FloatQuantity{ + num: number, + unit: unit, + } +} diff --git a/vendor/github.com/grafana/jfr-parser/common/units/toolkit.go b/vendor/github.com/grafana/jfr-parser/common/units/toolkit.go new file mode 100644 index 0000000000..d4069a1104 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/common/units/toolkit.go @@ -0,0 +1,23 @@ +package units + +import ( + "fmt" + "time" +) + +func ToTime(quantity IQuantity) (time.Time, error) { + var t time.Time + if quantity.Unit() == nil { + return t, fmt.Errorf("nil unit") + } + if kind := quantity.Unit().Kind; kind != TimeStamp { + return t, fmt.Errorf("not a timestamp kind: %q", kind.String()) + } + + quantity, err := quantity.In(UnixNano) + if err != nil { + return t, fmt.Errorf("unable to convert to unixnano: %w", err) + } + + return time.Unix(0, quantity.IntValue()), nil +} diff --git a/vendor/github.com/grafana/jfr-parser/common/units/units.go b/vendor/github.com/grafana/jfr-parser/common/units/units.go new file mode 100644 index 0000000000..cf597ab24d --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/common/units/units.go @@ -0,0 +1,152 @@ +package units + +type Kind int + +const ( + UnknownKind Kind = 0 + Duration Kind = 1 + Memory Kind = 2 + Numeric Kind = 3 + TimeStamp Kind = 4 + Frequency Kind = 5 + Percentage Kind = 6 +) + +var kindDesc = [...]string{ + UnknownKind: "unknown", + Duration: "duration", + Memory: "memory", + Numeric: "numeric", + TimeStamp: "timestamp", + Frequency: "frequency", + Percentage: "percentage", +} + +func (k Kind) String() string { + if int(k) < len(kindDesc) { + return kindDesc[k] + } + return "" +} + +type Number interface { + Float() bool + Int64() int64 + Float64() float64 + Add(n Number) Number + Multi(n Number) Number +} + +type I64 int64 + +func (i I64) Float() bool { + return false +} + +func (i I64) Int64() int64 { + return int64(i) +} + +func (i I64) Float64() float64 { + return 
float64(i) +} + +func (i I64) Add(n Number) Number { + if !n.Float() { + return I64(i.Int64() + n.Int64()) + } + return F64(i.Float64() + n.Float64()) +} + +func (i I64) Multi(n Number) Number { + if n.Float() { + return F64(n.Float64() * i.Float64()) + } + + return I64(i.Int64() * n.Int64()) +} + +type F64 float64 + +func (f F64) Float() bool { + return true +} + +func (f F64) Int64() int64 { + return int64(f) +} + +func (f F64) Float64() float64 { + return float64(f) +} + +func (f F64) Add(n Number) Number { + return F64(f.Float64() + n.Float64()) +} + +func (f F64) Multi(n Number) Number { + return F64(float64(f) * n.Float64()) +} + +var _ Number = F64(0) +var _ Number = I64(0) + +type Unit struct { + Name string + Kind Kind + Base Number +} + +func newUnit(Name string, kind Kind, Base Number) *Unit { + return &Unit{ + Kind: kind, + Name: Name, + Base: Base, + } +} + +func (u *Unit) Derived(Name string, times Number) *Unit { + return &Unit{ + Kind: u.Kind, + Name: Name, + Base: u.Base.Multi(times), + } +} + +func (u *Unit) IntQuantity(n int64) IQuantity { + return NewIntQuantity(n, u) +} + +func (u *Unit) FloatQuantity(n float64) IQuantity { + return NewFloatQuantity(n, u) +} + +var ( + Unknown = newUnit("unknown", UnknownKind, I64(0)) + + Nanosecond = newUnit("ns", Duration, I64(1)) + Microsecond = Nanosecond.Derived("μs", I64(1000)) + Millisecond = Microsecond.Derived("ms", I64(1000)) + Second = Millisecond.Derived("s", I64(1000)) + Minute = Second.Derived("min", I64(60)) + Hour = Minute.Derived("h", I64(60)) + Day = Hour.Derived("d", I64(24)) + Week = Day.Derived("w", I64(7)) + + Byte = newUnit("B", Memory, I64(1)) + Kilobyte = Byte.Derived("KB", I64(1024)) + Megabyte = Kilobyte.Derived("MB", I64(1024)) + Gigabyte = Megabyte.Derived("GB", I64(1024)) + Terabyte = Gigabyte.Derived("TB", I64(1024)) + Petabyte = Terabyte.Derived("PB", I64(1024)) + + UnixNano = newUnit("epoch_ns", TimeStamp, I64(1)) + UnixMicro = UnixNano.Derived("epoch_μs", I64(1000)) + UnixMilli = UnixMicro.Derived("epoch_ms", I64(1000)) + UnixSecond = UnixMilli.Derived("epoch_s", I64(1000)) + + Multiple = newUnit("", Percentage, I64(1)) // eg: 0.15 + Percent = newUnit("%", Percentage, I64(100)) // eg: 15% + + Hertz = newUnit("hz", Frequency, I64(1)) +) diff --git a/vendor/github.com/grafana/jfr-parser/internal/utils/utils.go b/vendor/github.com/grafana/jfr-parser/internal/utils/utils.go new file mode 100644 index 0000000000..a5f418f4df --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/internal/utils/utils.go @@ -0,0 +1,5 @@ +package utils + +func NewPointer[T any](t T) *T { + return &t +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/annotations.go b/vendor/github.com/grafana/jfr-parser/parser/annotations.go new file mode 100644 index 0000000000..98338c983e --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/annotations.go @@ -0,0 +1,119 @@ +package parser + +import ( + "github.com/grafana/jfr-parser/common/units" + "strconv" + + "github.com/grafana/jfr-parser/internal/utils" +) + +const ( + valueProperty = "value" + + annotationLabel = "jdk.jfr.Label" + annotationDescription = "jdk.jfr.Description" + annotationExperimental = "jdk.jfr.Experimental" + annotationCategory = "jdk.jfr.Category" + annotationTimestamp = "jdk.jfr.Timestamp" + annotationTimespan = "jdk.jfr.Timespan" + annotationMemoryAddress = "jdk.jfr.MemoryAddress" + annotationPercentage = "jdk.jfr.Percentage" + annotationMemoryAmount = "jdk.jfr.MemoryAmount" + annotationDataAmount = "jdk.jfr.DataAmount" + annotationFrequency = 
"jdk.jfr.Frequency" + annotationUnsigned = "jdk.jfr.Unsigned" +) + +const ( + unitS = "SECONDS" + unitMS = "MILLISECONDS" + unitNS = "NANOSECONDS" + unitTicks = "TICKS" + unitSSinceEpoch = "SECONDS_SINCE_EPOCH" + unitMSSinceEpoch = "MILLISECONDS_SINCE_EPOCH" + unitNSSinceEpoch = "NANOSECONDS_SINCE_EPOCH" +) + +type AnnotationMetadata struct { + ClassID int64 + Values map[string]string +} + +func (a *AnnotationMetadata) SetAttribute(key, value string) (err error) { + switch key { + case "class": + a.ClassID, err = strconv.ParseInt(value, 10, 64) + default: + if a.Values == nil { + a.Values = make(map[string]string) + } + a.Values[key] = value + } + return err +} + +func (a *AnnotationMetadata) AppendChild(string) Element { return nil } + +type BaseAnnotation struct { + label *string + description *string + experimental *bool + Annotations []*AnnotationMetadata +} + +func (b *BaseAnnotation) Label(classMap ClassMap) string { + if b.label == nil { + for _, annotation := range b.Annotations { + if classMap[annotation.ClassID].Name == annotationLabel { + b.label = utils.NewPointer(annotation.Values[valueProperty]) + break + } + } + if b.label == nil { + b.label = utils.NewPointer("") + } + } + return *b.label +} + +func (b *BaseAnnotation) Description(classMap ClassMap) string { + if b.description == nil { + for _, annotation := range b.Annotations { + if classMap[annotation.ClassID].Name == annotationDescription { + b.description = utils.NewPointer(annotation.Values[valueProperty]) + break + } + } + if b.description == nil { + b.description = utils.NewPointer("") + } + } + return *b.description +} + +func (b *BaseAnnotation) Experimental(classMap ClassMap) bool { + if b.experimental == nil { + if b.experimental == nil { + for _, annotation := range b.Annotations { + if classMap[annotation.ClassID].Name == annotationExperimental { + b.experimental = utils.NewPointer(true) + break + } + } + b.experimental = utils.NewPointer(false) + } + } + return *b.experimental +} + +type ClassAnnotation struct { + categories []string + BaseAnnotation +} + +type FieldAnnotation struct { + unsigned *bool + tickTimestamp *bool + unit *units.Unit + BaseAnnotation +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/chunk.go b/vendor/github.com/grafana/jfr-parser/parser/chunk.go new file mode 100644 index 0000000000..773d6b956a --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/chunk.go @@ -0,0 +1,257 @@ +package parser + +import ( + "bufio" + "bytes" + "fmt" + "io" +) + +const ( + MetadataEventType = 0 + ConstantPoolEventType = 1 + + EventSuperType = "jdk.jfr.Event" +) + +var magic = []byte{'F', 'L', 'R', 0} + +type Version struct { + Major uint16 + Minor uint16 +} + +type CPool struct { + Pool map[int64]ParseResolvable + resolved bool +} +type ClassMap map[int64]*ClassMetadata +type PoolMap map[int64]*CPool +type ChunkEvents map[string]*EventCollection + +func (c ChunkEvents) Apply(filter EventFilter) []*GenericEvent { + var filtered []*GenericEvent + + for _, collection := range c { + predicate := filter.GetPredicate(collection.ClassMetadata) + if IsAlwaysFalse(predicate) { + continue + } else if IsAlwaysTrue(predicate) { + filtered = append(filtered, collection.Events...) 
+ } else { + for _, event := range collection.Events { + if predicate.Test(event) { + filtered = append(filtered, event) + } + } + } + } + return filtered +} + +type Chunk struct { + Header Header + Metadata ChunkMetadata + ChunkEvents +} + +type EventCollection struct { + ClassMetadata *ClassMetadata + Events []*GenericEvent +} + +func (c *EventCollection) Add(e *GenericEvent) { + c.Events = append(c.Events, e) +} + +type ChunkParseOptions struct { + CPoolProcessor func(meta *ClassMetadata, cpool *CPool) +} + +func (c *Chunk) addEvent(e *GenericEvent) { + if c.ChunkEvents == nil { + c.ChunkEvents = make(ChunkEvents) + } + + classMeta := c.Metadata.ClassMap[e.ClassID] + + ec, ok := c.ChunkEvents[classMeta.Name] + if !ok { + ec = &EventCollection{ + ClassMetadata: classMeta, + } + c.ChunkEvents[classMeta.Name] = ec + } + ec.Add(e) +} + +func (c *Chunk) ShowClassMeta(name string) { + for _, classMeta := range c.Metadata.ClassMap { + if classMeta.Name == name { + fmt.Printf("simple type: %t, super type: %s\n", + classMeta.SimpleType, classMeta.SuperType) + + for _, field := range classMeta.Fields { + fmt.Printf("field name: %s, field label: %s, field class: %s, field description: %s, field constant pool: %t, field is array: %t, field unsigned: %t, field unit: %+#v\n", + field.Name, field.Label(c.Metadata.ClassMap), c.Metadata.ClassMap[field.ClassID].Name, field.Description(c.Metadata.ClassMap), + field.ConstantPool, field.IsArray(), field.Unsigned(c.Metadata.ClassMap), field.Unit(c.Metadata.ClassMap)) + } + break + } + } +} + +func (c *Chunk) Parse(r io.Reader, options *ChunkParseOptions) (err error) { + bufR, ok := r.(*bufio.Reader) + if !ok { + bufR = bufio.NewReader(r) + } + + buf := make([]byte, len(magic)) + if _, err = io.ReadFull(bufR, buf); err != nil { + if err == io.EOF { + return err + } + return fmt.Errorf("unable to read chunk's header: %w", err) + } + + // validate magic header + for i, m := range magic { + if m != buf[i] { + return fmt.Errorf("unexpected magic header: expected %v, found %v", magic, buf) + } + } + if _, err = io.ReadFull(bufR, buf); err != nil { + return fmt.Errorf("unable to read format version: %w", err) + } + + // TODO Check supported major / minor + + buf = make([]byte, headerSize) + if _, err = io.ReadFull(bufR, buf); err != nil { + return fmt.Errorf("unable to read chunk header: %w", err) + } + if err = c.Header.Parse(NewReader(bytes.NewReader(buf), false)); err != nil { + return fmt.Errorf("unable to parse chunk header: %w", err) + } + c.Header.ChunkSize -= headerSize + 8 + c.Header.MetadataOffset -= headerSize + 8 + c.Header.ConstantPoolOffset -= headerSize + 8 + useCompression := c.Header.Features&1 == 1 + // TODO: assert c.Header.ChunkSize is small enough + buf = make([]byte, c.Header.ChunkSize) + if _, err := io.ReadFull(r, buf); err != nil { + return fmt.Errorf("unable to read chunk contents: %w", err) + } + + br := bytes.NewReader(buf) + rd := NewReader(br, useCompression) + pointer := int64(0) + eventsOffset := make(map[int64]int32) + + // Parse metadata + if _, err := br.Seek(c.Header.MetadataOffset, io.SeekStart); err != nil { + return fmt.Errorf("unable to seek reader: %w", err) + } + metadataSize, err := rd.VarInt() + if err != nil { + return fmt.Errorf("unable to parse chunk metadata size: %w", err) + } + eventsOffset[c.Header.MetadataOffset] = metadataSize + + c.Metadata.Header = &c.Header // for resolving class fields unit ticks + + if err := c.Metadata.Parse(rd); err != nil { + return fmt.Errorf("unable to parse chunk metadata: %w", err) + } + + 
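// Checkpoint (constant pool) events are chained: each event's Delta field + // holds the relative offset to the next checkpoint to visit, and a Delta of + // zero terminates the chain. The loop below walks that chain starting at + // Header.ConstantPoolOffset, recording each checkpoint's size in eventsOffset + // so the later event-parsing pass can skip those byte ranges. + 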
// Parse checkpoint event(s) + if _, err := br.Seek(c.Header.ConstantPoolOffset, io.SeekStart); err != nil { + return fmt.Errorf("unable to seek reader: %w", err) + } + constantPoolSize := int32(0) + cpools := make(PoolMap) + delta := int64(0) + cp := new(ConstantPoolEvent) + for { + size, err := rd.VarInt() + if err != nil { + return fmt.Errorf("unable to parse checkpoint event size: %w", err) + } + eventsOffset[c.Header.ConstantPoolOffset+delta] = size + constantPoolSize += size + if err := cp.Parse(rd, c.Metadata.ClassMap, cpools); err != nil { + return fmt.Errorf("unable to parse constant pool event: %w", err) + } + if cp.Delta == 0 { + break + } + delta += cp.Delta + if _, err := br.Seek(c.Header.ConstantPoolOffset+delta, io.SeekStart); err != nil { + return fmt.Errorf("unable to seek reader: %w", err) + } + } + + if options.CPoolProcessor != nil { + for classID, pool := range cpools { + options.CPoolProcessor(c.Metadata.ClassMap[classID], pool) + } + } + + // Second pass over constant pools: resolve constants + if err = ResolveConstants(c.Metadata.ClassMap, cpools); err != nil { + return err + } + + // Parse the rest of events + if _, err := br.Seek(pointer, io.SeekStart); err != nil { + return fmt.Errorf("unable to seek reader: %w", err) + } + for pointer != c.Header.ChunkSize { + if size, ok := eventsOffset[pointer]; ok { + pointer += int64(size) + } else { + if _, err := br.Seek(pointer, io.SeekStart); err != nil { + return fmt.Errorf("unable to seek to position %d: %w", pointer, err) + } + size, err := rd.VarInt() + if err != nil { + return fmt.Errorf("unable to parse event size: %w", err) + } + if size == 0 { + return fmt.Errorf("found event with invalid size (0)") + } + eventsOffset[pointer] = size + ge, err := ParseEvent(rd, c.Metadata.ClassMap, cpools) + if err != nil { + return fmt.Errorf("unable to parse event: %w", err) + } + if ge != nil { + c.addEvent(ge) + } + pointer += int64(size) + } + } + return nil +} + +func ResolveConstants(classes ClassMap, poolMap PoolMap) (err error) { + + for classID, pool := range poolMap { + if pool.resolved { + continue + } + if classes[classID] != nil && classes[classID].SuperType == EventSuperType { + continue + } + + for _, t := range pool.Pool { + if err = t.Resolve(classes, poolMap); err != nil { + return fmt.Errorf("unable to resolve constants: %w", err) + } + } + pool.resolved = true + } + + return nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/constpool.go b/vendor/github.com/grafana/jfr-parser/parser/constpool.go new file mode 100644 index 0000000000..c1b2fe0703 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/constpool.go @@ -0,0 +1,65 @@ +package parser + +import ( + "fmt" +) + +type ConstantPoolEvent struct { + StartTime int64 + Duration int64 + Delta int64 + TypeMask int8 +} + +func (c *ConstantPoolEvent) Parse(r Reader, classes ClassMap, cpools PoolMap) (err error) { + eventType, err := r.VarLong() + if err != nil { + return fmt.Errorf("unable to retrieve event type: %w", err) + } + if eventType != ConstantPoolEventType { + return fmt.Errorf("unexpected checkpoint event type: %d", eventType) + } + if c.StartTime, err = r.VarLong(); err != nil { + return fmt.Errorf("unable to parse checkpoint event's start time: %w", err) + } + if c.Duration, err = r.VarLong(); err != nil { + return fmt.Errorf("unable to parse checkpoint event's duration: %w", err) + } + if c.Delta, err = r.VarLong(); err != nil { + return fmt.Errorf("unable to parse checkpoint event's delta: %w", err) + } + c.TypeMask, _ = 
r.Byte() + n, err := r.VarInt() + if err != nil { + return fmt.Errorf("unable to parse checkpoint event's number of constant pools: %w", err) + } + // TODO: assert n is small enough + for i := 0; i < int(n); i++ { + classID, err := r.VarLong() + if err != nil { + return fmt.Errorf("unable to parse constant pool class: %w", err) + } + cPool, ok := cpools[classID] + if !ok { + cPool = &CPool{Pool: make(map[int64]ParseResolvable)} + cpools[classID] = cPool + } + m, err := r.VarInt() + if err != nil { + return fmt.Errorf("unable to parse constant pool's number of constants: %w", err) + } + // TODO: assert m is small enough + for j := 0; j < int(m); j++ { + idx, err := r.VarLong() + if err != nil { + return fmt.Errorf("unable to parse constant's index: %w", err) + } + v, err := ParseClass(r, classes, cpools, classID) + if err != nil { + return fmt.Errorf("unable to parse constant type %d: %w", classID, err) + } + cPool.Pool[idx] = v + } + } + return nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/cpool.go b/vendor/github.com/grafana/jfr-parser/parser/cpool.go new file mode 100644 index 0000000000..20ca7e489a --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/cpool.go @@ -0,0 +1,129 @@ +package parser + +import ( + "fmt" + + gtypes "github.com/grafana/jfr-parser/parser/types" + "github.com/grafana/jfr-parser/parser/types/def" +) + +func (p *Parser) readConstantPool(pos int) error { + for { + if err := p.seek(pos); err != nil { + return err + } + sz, err := p.varLong() + if err != nil { + return err + } + typ, err := p.varLong() + if err != nil { + return err + } + startTimeTicks, err := p.varLong() + if err != nil { + return err + } + duration, err := p.varLong() + if err != nil { + return err + } + delta, err := p.varLong() + if err != nil { + return err + } + typeMask, err := p.varInt() // boolean flush + if err != nil { + return err + } + n, err := p.varInt() + if err != nil { + return err + } + _ = startTimeTicks + _ = duration + _ = delta + _ = sz + _ = typeMask + _ = typ + + id := int(int64(delta)) + + for i := 0; i < int(n); i++ { + typ, err := p.varLong() + if err != nil { + return err + } + c := p.TypeMap.IDMap[def.TypeID(typ)] + if c == nil { + return fmt.Errorf("unknown type %d", def.TypeID(typ)) + } + err = p.readConstants(c) + if err != nil { + return fmt.Errorf("error reading %+v %w", c, err) + } + } + if delta == 0 { + break + } else { + pos += id + if pos <= 0 { + break + } + } + } + return nil +} + +func (p *Parser) readConstants(c *def.Class) error { + switch c.Name { + case "jdk.types.ChunkHeader": + p.pos += chunkHeaderSize + return nil + case "jdk.types.FrameType": + o, err := p.FrameTypes.Parse(p.buf[p.pos:], p.bindFrameType, &p.TypeMap) + p.pos += o + return err + case "jdk.types.ThreadState": + o, err := p.ThreadStates.Parse(p.buf[p.pos:], p.bindThreadState, &p.TypeMap) + p.pos += o + return err + case "java.lang.Thread": + o, err := p.Threads.Parse(p.buf[p.pos:], p.bindThread, &p.TypeMap) + p.pos += o + return err + case "java.lang.Class": + o, err := p.Classes.Parse(p.buf[p.pos:], p.bindClass, &p.TypeMap) + p.pos += o + return err + case "jdk.types.Method": + o, err := p.Methods.Parse(p.buf[p.pos:], p.bindMethod, &p.TypeMap) + p.pos += o + return err + case "jdk.types.Package": + o, err := p.Packages.Parse(p.buf[p.pos:], p.bindPackage, &p.TypeMap) + p.pos += o + return err + case "jdk.types.Symbol": + o, err := p.Symbols.Parse(p.buf[p.pos:], p.bindSymbol, &p.TypeMap) + p.pos += o + return err + case "profiler.types.LogLevel": + if 
p.bindLogLevel == nil { + return fmt.Errorf("no \"profiler.types.LogLevel\"") + } + o, err := p.LogLevels.Parse(p.buf[p.pos:], p.bindLogLevel, &p.TypeMap) + p.pos += o + return err + case "jdk.types.StackTrace": + o, err := p.Stacktrace.Parse(p.buf[p.pos:], p.bindStackTrace, p.bindStackFrame, &p.TypeMap) + p.pos += o + return err + default: + b := gtypes.NewBindSkipConstantPool(c, &p.TypeMap) + skipper := gtypes.SkipConstantPoolList{} + o, err := skipper.Parse(p.buf[p.pos:], b, &p.TypeMap) + p.pos += o + return err + } +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/decompress.go b/vendor/github.com/grafana/jfr-parser/parser/decompress.go new file mode 100644 index 0000000000..76735a83f9 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/decompress.go @@ -0,0 +1,88 @@ +package parser + +import ( + "bytes" + "compress/gzip" + "errors" + "fmt" + "github.com/GuanceCloud/zipstream" + "github.com/pierrec/lz4/v4" + "io" +) + +type CompressionType uint8 + +const ( + Unknown CompressionType = iota + PlainJFR + GZip + ZIP + LZ4 +) + +var ( + JFRMagic = []byte{'F', 'L', 'R', 0} + ZIPMagic = []byte{0x50, 0x4b, 3, 4} + LZ4Magic = []byte{4, 34, 77, 24} + GZipMagic = []byte{31, 139} +) + +func hasMagic(buf []byte, magic []byte) bool { + if len(buf) < len(magic) { + return false + } + return bytes.Compare(buf[:len(magic)], magic) == 0 +} + +func GuessCompressionType(magic []byte) CompressionType { + if len(magic) == 4 { + if hasMagic(magic, ZIPMagic) { + return ZIP + } else if hasMagic(magic, LZ4Magic) { + return LZ4 + } else if hasMagic(magic, JFRMagic) { + return PlainJFR + } + } + if len(magic) >= 2 && hasMagic(magic[:2], GZipMagic) { + return GZip + } + return Unknown +} + +func Decompress(r io.Reader) (io.ReadCloser, error) { + buf := make([]byte, 4) + n, err := io.ReadFull(r, buf) + if n == 0 && err != nil { + return nil, fmt.Errorf("unable to read file magic: %w", err) + } + + buf = buf[:n] + typ := GuessCompressionType(buf) + r = io.MultiReader(bytes.NewReader(buf), r) + + switch typ { + case GZip: + return gzip.NewReader(r) + case ZIP: + zr := zipstream.NewReader(r) + for { + entry, err := zr.GetNextEntry() + if err != nil { + if err == io.EOF { + return nil, fmt.Errorf("the zip archive does not contain any regular file") + } + return nil, fmt.Errorf("unable to resolve zip entry: %w", err) + } + if !entry.IsDir() { + return entry.Open() + } + } + case LZ4: + return io.NopCloser(lz4.NewReader(r)), nil + case PlainJFR: + return io.NopCloser(r), nil + default: + return nil, errors.New("unsupported compression type") + } +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/event_types.go b/vendor/github.com/grafana/jfr-parser/parser/event_types.go new file mode 100644 index 0000000000..9941ffd760 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/event_types.go @@ -0,0 +1,1930 @@ +package parser + +import ( + "fmt" + "reflect" +) + +var events = map[string]func() Event{ + "jdk.ActiveRecording": func() Event { return new(ActiveRecording) }, + "jdk.ActiveSetting": func() Event { return new(ActiveSetting) }, + "jdk.BooleanFlag": func() Event { return new(BooleanFlag) }, + "jdk.CPUInformation": func() Event { return new(CPUInformation) }, + "jdk.CPULoad": func() Event { return new(CPULoad) }, + "jdk.CPUTimeStampCounter": func() Event { return new(CPUTimeStampCounter) }, + "jdk.ClassLoaderStatistics": func() Event { return new(ClassLoaderStatistics) }, + "jdk.ClassLoadingStatistics": func() Event { return new(ClassLoadingStatistics) }, + 
"jdk.CodeCacheConfiguration": func() Event { return new(CodeCacheConfiguration) }, + "jdk.CodeCacheStatistics": func() Event { return new(CodeCacheStatistics) }, + "jdk.CodeSweeperConfiguration": func() Event { return new(CodeSweeperConfiguration) }, + "jdk.CodeSweeperStatistics": func() Event { return new(CodeSweeperStatistics) }, + "jdk.CompilerConfiguration": func() Event { return new(CompilerConfiguration) }, + "jdk.CompilerStatistics": func() Event { return new(CompilerStatistics) }, + "jdk.DoubleFlag": func() Event { return new(DoubleFlag) }, + "jdk.ExceptionStatistics": func() Event { return new(ExceptionStatistics) }, + "jdk.ExecutionSample": func() Event { return new(ExecutionSample) }, + "jdk.GCConfiguration": func() Event { return new(GCConfiguration) }, + "jdk.GCHeapConfiguration": func() Event { return new(GCHeapConfiguration) }, + "jdk.GCSurvivorConfiguration": func() Event { return new(GCSurvivorConfiguration) }, + "jdk.GCTLABConfiguration": func() Event { return new(GCTLABConfiguration) }, + "jdk.InitialEnvironmentVariable": func() Event { return new(InitialEnvironmentVariable) }, + "jdk.InitialSystemProperty": func() Event { return new(InitialSystemProperty) }, + "jdk.IntFlag": func() Event { return new(IntFlag) }, + "jdk.JavaMonitorEnter": func() Event { return new(JavaMonitorEnter) }, + "jdk.JavaMonitorWait": func() Event { return new(JavaMonitorWait) }, + "jdk.JavaThreadStatistics": func() Event { return new(JavaThreadStatistics) }, + "jdk.JVMInformation": func() Event { return new(JVMInformation) }, + "jdk.LoaderConstraintsTableStatistics": func() Event { return new(LoaderConstraintsTableStatistics) }, + "jdk.LongFlag": func() Event { return new(LongFlag) }, + "jdk.ModuleExport": func() Event { return new(ModuleExport) }, + "jdk.ModuleRequire": func() Event { return new(ModuleRequire) }, + "jdk.NativeLibrary": func() Event { return new(NativeLibrary) }, + "jdk.NetworkUtilization": func() Event { return new(NetworkUtilization) }, + "jdk.ObjectAllocationInNewTLAB": func() Event { return new(ObjectAllocationInNewTLAB) }, + "jdk.ObjectAllocationOutsideTLAB": func() Event { return new(ObjectAllocationOutsideTLAB) }, + "jdk.OSInformation": func() Event { return new(OSInformation) }, + "jdk.PhysicalMemory": func() Event { return new(PhysicalMemory) }, + "jdk.PlaceholderTableStatistics": func() Event { return new(PlaceholderTableStatistics) }, + "jdk.ProtectionDomainCacheTableStatistics": func() Event { return new(ProtectionDomainCacheTableStatistics) }, + "jdk.StringFlag": func() Event { return new(StringFlag) }, + "jdk.StringTableStatistics": func() Event { return new(StringTableStatistics) }, + "jdk.SymbolTableStatistics": func() Event { return new(SymbolTableStatistics) }, + "jdk.SystemProcess": func() Event { return new(SystemProcess) }, + "jdk.ThreadAllocationStatistics": func() Event { return new(ThreadAllocationStatistics) }, + "jdk.ThreadCPULoad": func() Event { return new(ThreadCPULoad) }, + "jdk.ThreadContextSwitchRate": func() Event { return new(ThreadContextSwitchRate) }, + "jdk.ThreadDump": func() Event { return new(ThreadDump) }, + "jdk.ThreadPark": func() Event { return new(ThreadPark) }, + "jdk.ThreadStart": func() Event { return new(ThreadStart) }, + "jdk.UnsignedIntFlag": func() Event { return new(UnsignedIntFlag) }, + "jdk.UnsignedLongFlag": func() Event { return new(UnsignedLongFlag) }, + "jdk.VirtualizationInformation": func() Event { return new(VirtualizationInformation) }, + "jdk.YoungGenerationConfiguration": func() Event { return 
new(YoungGenerationConfiguration) }, +} + +func indirect(rv reflect.Value, isNil bool) reflect.Value { + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if rv.Kind() == reflect.Interface && !rv.IsNil() { + e := rv.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!isNil || e.Elem().Kind() == reflect.Ptr) { + rv = e + continue + } + } + + if rv.Kind() != reflect.Ptr { + break + } + + if isNil && rv.CanSet() { + return rv + } + + if rv.Elem().Kind() == reflect.Interface && rv.Elem().Elem() == rv { + return rv.Elem() + } + + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + + rv = rv.Elem() + } + + return rv +} + +func dereference(v interface{}) reflect.Value { + rv := reflect.ValueOf(v) + + for rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Interface { + + if rv.Elem().Kind() == reflect.Interface && rv.Elem().Elem() == rv { + return rv.Elem() + } + + rv = rv.Elem() + } + + return rv +} + +func isNilValue(v interface{}) bool { + rv := dereference(v) + + switch rv.Kind() { + case reflect.Invalid: + return true + case reflect.Ptr, reflect.Interface, reflect.Map, reflect.Slice, reflect.Chan, reflect.Func: + return rv.IsNil() + } + return false +} + +type GenericEvent struct { + ClassID int64 + ClassMetadata *ClassMetadata + Attributes map[string]ParseResolvable +} + +func NewGenericEvent(classID int64, classMeta *ClassMetadata) *GenericEvent { + return &GenericEvent{ + ClassID: classID, + ClassMetadata: classMeta, + Attributes: make(map[string]ParseResolvable), + } +} + +func (g *GenericEvent) setField(name string, p ParseResolvable) (err error) { + g.Attributes[name] = p + return nil +} + +func (g *GenericEvent) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, g.setField) +} + +func (g *GenericEvent) GetAttr(fieldName string, v interface{}) error { + rv := reflect.ValueOf(v) + + if rv.Kind() != reflect.Ptr { + return fmt.Errorf("v must be a pointer") + } + if rv.IsNil() { + return fmt.Errorf("v is a nil pointer") + } + + attr, ok := g.Attributes[fieldName] + if !ok { + return fmt.Errorf("field [%s] not exists in this event", fieldName) + } + + nilValue := isNilValue(attr) + + rv = indirect(rv, nilValue) + + if nilValue { + switch rv.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + rv.Set(reflect.Zero(rv.Type())) + } + return nil + } + + switch rv.Kind() { + case reflect.Bool: + x, err := toBoolean(attr) + if err != nil { + return fmt.Errorf("unable to resolve boolean: %w", err) + } + rv.SetBool(x) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var x int64 + switch v := attr.(type) { + case *Byte: + x = int64(*v) + case *Short: + x = int64(*v) + case *Int: + x = int64(*v) + case *Long: + x = int64(*v) + default: + return fmt.Errorf("unable to assign %T to number", attr) + } + + if rv.OverflowInt(x) { + return fmt.Errorf("unable to assign value to %s: number overflow", rv.Type().Name()) + } + rv.SetInt(x) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var x int64 + switch v := attr.(type) { + case *Byte: + x = int64(*v) + case *Short: + x = int64(*v) + case *Int: + x = int64(*v) + case *Long: + x = int64(*v) + default: + return fmt.Errorf("unable to assign %T to number", attr) + } + if x < 0 { + return fmt.Errorf("unable to assign negative number to unsigned number") + } + if rv.OverflowUint(uint64(x)) { + return fmt.Errorf("unable to 
assign value to %s: number overflow", rv.Type().Name()) + } + rv.SetUint(uint64(x)) + + case reflect.Float32, reflect.Float64: + var f64 float64 + switch v := attr.(type) { + case *Float: + f64 = float64(*v) + case *Double: + f64 = float64(*v) + default: + return fmt.Errorf("unable to assign %T to float", attr) + } + if rv.OverflowFloat(f64) { + return fmt.Errorf("unable to assign value to %s: number overflow", rv.Type().Name()) + } + rv.SetFloat(f64) + + case reflect.String: + x, err := ToString(attr) + if err != nil { + return fmt.Errorf("unable to resolve string: %w", err) + } + rv.SetString(x) + case reflect.Struct, reflect.Interface: + attrValue := dereference(attr) + if !attrValue.Type().AssignableTo(rv.Type()) { + return fmt.Errorf("unable to assign value of type %s to type %s", attrValue.Type().Name(), rv.Type().Name()) + } + rv.Set(attrValue) + } + + return nil +} + +func ParseEvent(r Reader, classes ClassMap, cpools PoolMap) (*GenericEvent, error) { + kind, err := r.VarLong() + if err != nil { + return nil, fmt.Errorf("failed to retrieve event type: %w", err) + } + if kind == MetadataEventType || kind == ConstantPoolEventType { + return nil, nil + } + return parseEvent(r, classes, cpools, kind) +} + +func parseEvent(r Reader, classMap ClassMap, cpools PoolMap, classID int64) (*GenericEvent, error) { + classMeta, ok := classMap[classID] + if !ok { + return nil, fmt.Errorf("unknown class %d", classID) + } + if classMeta.SuperType != EventSuperType { + return nil, nil + } + //var v Event + //if _, ok := events[class.Name]; ok { + // //v = typeFn() + // v = NewGenericEvent(class.Name) + //} else { + // v = new(UnsupportedEvent) + //} + v := NewGenericEvent(classID, classMeta) + if err := v.Parse(r, classMap, cpools, classMeta); err != nil { + return nil, fmt.Errorf("unable to parse event type of %s: %w", classMeta.Name, err) + } + return v, nil +} + +type ActiveRecording struct { + StartTime int64 + Duration int64 + EventThread *Thread + ID int64 + Name string + Destination string + MaxAge int64 + MaxSize int64 + RecordingStart int64 + RecordingDuration int64 +} + +func (ar *ActiveRecording) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + ar.StartTime, err = toLong(p) + case "duration": + ar.Duration, err = toLong(p) + case "eventThread": + ar.EventThread, err = toThread(p) + case "id": + ar.ID, err = toLong(p) + case "name": + ar.Name, err = ToString(p) + case "destination": + ar.Destination, err = ToString(p) + case "maxAge": + ar.MaxAge, err = toLong(p) + case "maxSize": + ar.MaxSize, err = toLong(p) + case "recordingStart": + ar.RecordingStart, err = toLong(p) + case "recordingDuration": + ar.RecordingDuration, err = toLong(p) + } + return err +} + +func (ar *ActiveRecording) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, ar.setField) +} + +type ActiveSetting struct { + StartTime int64 + Duration int64 + EventThread *Thread + ID int64 + Name string + Value string +} + +func (as *ActiveSetting) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + as.StartTime, err = toLong(p) + case "duration": + as.Duration, err = toLong(p) + case "eventThread": + as.EventThread, err = toThread(p) + case "id": + as.ID, err = toLong(p) + case "name": + as.Name, err = ToString(p) + case "value": + as.Value, err = ToString(p) + } + return err +} + +func (as *ActiveSetting) Parse(r Reader, classes ClassMap, cpools PoolMap, 
class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, as.setField) +} + +type BooleanFlag struct { + StartTime int64 + Name string + Value bool + Origin *FlagValueOrigin +} + +func (bf *BooleanFlag) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + bf.StartTime, err = toLong(p) + case "name": + bf.Name, err = ToString(p) + case "value": + bf.Value, err = toBoolean(p) + case "origin": + bf.Origin, err = toFlagValueOrigin(p) + } + return err +} + +func (bf *BooleanFlag) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, bf.setField) +} + +type CPUInformation struct { + StartTime int64 + CPU string + Description string + Sockets int32 + Cores int32 + HWThreads int32 +} + +func (ci *CPUInformation) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + ci.StartTime, err = toLong(p) + case "cpu": + ci.CPU, err = ToString(p) + case "description": + ci.Description, err = ToString(p) + case "sockets": + ci.Sockets, err = toInt(p) + case "cores": + ci.Cores, err = toInt(p) + case "hwThreads": + ci.HWThreads, err = toInt(p) + } + return err +} + +func (ci *CPUInformation) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, ci.setField) +} + +type CPULoad struct { + StartTime int64 + JVMUser float32 + JVMSystem float32 + MachineTotal float32 +} + +func (cl *CPULoad) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + cl.StartTime, err = toLong(p) + case "jvmUser": + cl.JVMUser, err = toFloat(p) + case "jvmSystem": + cl.JVMSystem, err = toFloat(p) + case "machineTotal": + cl.MachineTotal, err = toFloat(p) + } + return err +} + +func (cl *CPULoad) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, cl.setField) +} + +type CPUTimeStampCounter struct { + StartTime int64 + FastTimeEnabled bool + FastTimeAutoEnabled bool + OSFrequency int64 + FastTimeFrequency int64 +} + +func (ctsc *CPUTimeStampCounter) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + ctsc.StartTime, err = toLong(p) + case "fastTimeEnabled": + ctsc.FastTimeEnabled, err = toBoolean(p) + case "fastTimeAutoEnabled": + ctsc.FastTimeAutoEnabled, err = toBoolean(p) + case "osFrequency": + ctsc.OSFrequency, err = toLong(p) + case "fastTimeFrequency": + ctsc.FastTimeFrequency, err = toLong(p) + } + return err +} + +func (ctsc *CPUTimeStampCounter) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, ctsc.setField) +} + +type ClassLoaderStatistics struct { + StartTime int64 + ClassLoader *ClassLoader + ParentClassLoader *ClassLoader + ClassLoaderData int64 + ClassCount int64 + ChunkSize int64 + BlockSize int64 + AnonymousClassCount int64 + AnonymousChunkSize int64 + AnonymousBlockSize int64 + UnsafeAnonymousClassCount int64 + UnsafeAnonymousChunkSize int64 + UnsafeAnonymousBlockSize int64 + HiddenClassCount int64 + HiddenChunkSize int64 + HiddenBlockSize int64 +} + +func (cls *ClassLoaderStatistics) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + cls.StartTime, err = toLong(p) + case "classLoader": + cls.ClassLoader, err = toClassLoader(p) + case 
"parentClassLoader": + cls.ParentClassLoader, err = toClassLoader(p) + case "classLoaderData": + cls.ClassLoaderData, err = toLong(p) + case "classCount": + cls.ClassCount, err = toLong(p) + case "chunkSize": + cls.ChunkSize, err = toLong(p) + case "blockSize": + cls.BlockSize, err = toLong(p) + case "anonymousClassCount": + cls.AnonymousClassCount, err = toLong(p) + case "anonymousChunkSize": + cls.AnonymousChunkSize, err = toLong(p) + case "anonymousBlockSize": + cls.AnonymousBlockSize, err = toLong(p) + case "unsafeAnonymousClassCount": + cls.UnsafeAnonymousClassCount, err = toLong(p) + case "unsafeAnonymousChunkSize": + cls.UnsafeAnonymousChunkSize, err = toLong(p) + case "unsafeAnonymousBlockSize": + cls.UnsafeAnonymousBlockSize, err = toLong(p) + case "hiddenClassCount": + cls.HiddenClassCount, err = toLong(p) + case "hiddenChunkSize": + cls.HiddenChunkSize, err = toLong(p) + case "hiddenBlockSize": + cls.HiddenBlockSize, err = toLong(p) + } + return err +} + +func (cls *ClassLoaderStatistics) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, cls.setField) +} + +type ClassLoadingStatistics struct { + StartTime int64 + LoadedClassCount int64 + UnloadedClassCount int64 +} + +func (cls *ClassLoadingStatistics) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + cls.StartTime, err = toLong(p) + case "loadedClassCount": + cls.LoadedClassCount, err = toLong(p) + case "unloadedClassCount": + cls.UnloadedClassCount, err = toLong(p) + } + return err +} + +func (cls *ClassLoadingStatistics) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, cls.setField) +} + +type CodeCacheConfiguration struct { + StartTime int64 + InitialSize int64 + ReservedSize int64 + NonNMethodSize int64 + ProfiledSize int64 + NonProfiledSize int64 + ExpansionSize int64 + MinBlockLength int64 + StartAddress int64 + ReservedTopAddress int64 +} + +func (ccc *CodeCacheConfiguration) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + ccc.StartTime, err = toLong(p) + case "initialSize": + ccc.InitialSize, err = toLong(p) + case "reservedSize": + ccc.ReservedSize, err = toLong(p) + case "nonNMethodSize": + ccc.NonNMethodSize, err = toLong(p) + case "profiledSize": + ccc.ProfiledSize, err = toLong(p) + case "NonProfiledSize": + ccc.NonProfiledSize, err = toLong(p) + case "ExpansionSize": + ccc.ExpansionSize, err = toLong(p) + case "MinBlockLength": + ccc.MinBlockLength, err = toLong(p) + case "StartAddress": + ccc.StartAddress, err = toLong(p) + case "ReservedTopAddress": + ccc.ReservedTopAddress, err = toLong(p) + } + return err +} + +func (ccc *CodeCacheConfiguration) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, ccc.setField) +} + +type CodeCacheStatistics struct { + StartTime int64 + CodeBlobType *CodeBlobType + StartAddress int64 + ReservedTopAddress int64 + EntryCount int32 + MethodCount int32 + AdaptorCount int32 + UnallocatedCapacity int64 + FullCount int32 +} + +func (ccs *CodeCacheStatistics) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + ccs.StartTime, err = toLong(p) + case "codeBlobType": + ccs.CodeBlobType, err = toCodeBlobType(p) + case "startAddress": + ccs.StartAddress, err = toLong(p) + case "reservedTopAddress": + 
ccs.ReservedTopAddress, err = toLong(p) + case "entryCount": + ccs.EntryCount, err = toInt(p) + case "methodCount": + ccs.MethodCount, err = toInt(p) + case "adaptorCount": + ccs.AdaptorCount, err = toInt(p) + case "unallocatedCapacity": + ccs.UnallocatedCapacity, err = toLong(p) + case "fullCount": + ccs.FullCount, err = toInt(p) + } + return err +} + +func (ccs *CodeCacheStatistics) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, ccs.setField) +} + +type CodeSweeperConfiguration struct { + StartTime int64 + SweeperEnabled bool + FlushingEnabled bool + SweepThreshold int64 +} + +func (csc *CodeSweeperConfiguration) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + csc.StartTime, err = toLong(p) + case "sweeperEnabled": + csc.SweeperEnabled, err = toBoolean(p) + case "flushingEnabled": + csc.FlushingEnabled, err = toBoolean(p) + case "sweepThreshold": + csc.SweepThreshold, err = toLong(p) + } + return err +} + +func (csc *CodeSweeperConfiguration) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, csc.setField) +} + +type CodeSweeperStatistics struct { + StartTime int64 + SweepCount int32 + MethodReclaimedCount int32 + TotalSweepTime int64 + PeakFractionTime int64 + PeakSweepTime int64 +} + +func (css *CodeSweeperStatistics) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + css.StartTime, err = toLong(p) + case "sweepCount": + css.SweepCount, err = toInt(p) + case "methodReclaimedCount": + css.MethodReclaimedCount, err = toInt(p) + case "totalSweepTime": + css.TotalSweepTime, err = toLong(p) + case "peakFractionTime": + css.PeakFractionTime, err = toLong(p) + case "peakSweepTime": + css.PeakSweepTime, err = toLong(p) + } + return err +} + +func (css *CodeSweeperStatistics) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, css.setField) +} + +type CompilerConfiguration struct { + StartTime int64 + ThreadCount int32 + TieredCompilation bool +} + +func (cc *CompilerConfiguration) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + cc.StartTime, err = toLong(p) + case "threadCount": + cc.ThreadCount, err = toInt(p) + case "tieredCompilation": + cc.TieredCompilation, err = toBoolean(p) + } + return err +} + +func (cc *CompilerConfiguration) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, cc.setField) +} + +type CompilerStatistics struct { + StartTime int64 + CompileCount int32 + BailoutCount int32 + InvalidatedCount int32 + OSRCompileCount int32 + StandardCompileCount int32 + OSRBytesCompiled int64 + StandardBytesCompiled int64 + NMethodsSize int64 + NMethodCodeSize int64 + PeakTimeSpent int64 + TotalTimeSpent int64 +} + +func (cs *CompilerStatistics) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + cs.StartTime, err = toLong(p) + case "compileCount": + cs.CompileCount, err = toInt(p) + case "bailoutCount": + cs.BailoutCount, err = toInt(p) + case "invalidatedCount": + cs.InvalidatedCount, err = toInt(p) + case "osrCompileCount": + cs.OSRCompileCount, err = toInt(p) + case "standardCompileCount": + cs.StandardCompileCount, err = toInt(p) + case "osrBytesCompiled": + 
cs.OSRBytesCompiled, err = toLong(p) + case "standardBytesCompiled": + cs.StandardBytesCompiled, err = toLong(p) + case "nmethodsSize": + cs.NMethodsSize, err = toLong(p) + case "nmethodCodeSize": + cs.NMethodCodeSize, err = toLong(p) + case "peakTimeSpent": + cs.PeakTimeSpent, err = toLong(p) + case "totalTimeSpent": + cs.TotalTimeSpent, err = toLong(p) + } + return err +} + +func (cs *CompilerStatistics) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, cs.setField) +} + +type DoubleFlag struct { + StartTime int64 + Name string + Value float64 + Origin *FlagValueOrigin +} + +func (df *DoubleFlag) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + df.StartTime, err = toLong(p) + case "name": + df.Name, err = ToString(p) + case "value": + df.Value, err = toDouble(p) + case "origin": + df.Origin, err = toFlagValueOrigin(p) + } + return err +} + +func (df *DoubleFlag) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, df.setField) +} + +type ExceptionStatistics struct { + StartTime int64 + Duration int64 + EventThread *Thread + StackTrace *StackTrace + Throwable int64 +} + +func (es *ExceptionStatistics) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + es.StartTime, err = toLong(p) + case "duration": + es.Duration, err = toLong(p) + case "eventThread": + es.EventThread, err = toThread(p) + case "stackTrace": + es.StackTrace, err = toStackTrace(p) + case "throwable": + es.Throwable, err = toLong(p) + } + return err +} + +func (es *ExceptionStatistics) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, es.setField) +} + +type ExecutionSample struct { + StartTime int64 + SampledThread *Thread + StackTrace *StackTrace + State *ThreadState + ContextId int64 +} + +func (es *ExecutionSample) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + es.StartTime, err = toLong(p) + case "sampledThread": + es.SampledThread, err = toThread(p) + case "stackTrace": + es.StackTrace, err = toStackTrace(p) + case "state": + es.State, err = toThreadState(p) + case "contextId": + es.ContextId, err = toLong(p) + } + return err +} + +func (es *ExecutionSample) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, es.setField) +} + +type GCConfiguration struct { + StartTime int64 + YoungCollector *GCName + OldCollector *GCName + ParallelGCThreads int32 + ConcurrentGCThreads int32 + UsesDynamicGCThreads bool + IsExplicitGCConcurrent bool + IsExplicitGCDisabled bool + PauseTarget int64 + GCTimeRatio int32 +} + +func (gc *GCConfiguration) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + gc.StartTime, err = toLong(p) + case "youngCollector": + gc.YoungCollector, err = toGCName(p) + case "oldCollector": + gc.OldCollector, err = toGCName(p) + case "parallelGCThreads": + gc.ParallelGCThreads, err = toInt(p) + case "concurrentGCThreads": + gc.ConcurrentGCThreads, err = toInt(p) + case "usesDynamicGCThreads": + gc.UsesDynamicGCThreads, err = toBoolean(p) + case "isExplicitGCConcurrent": + gc.IsExplicitGCConcurrent, err = toBoolean(p) + case "isExplicitGCDisabled": + gc.IsExplicitGCDisabled, err = toBoolean(p) + case 
"pauseTarget": + gc.PauseTarget, err = toLong(p) + case "gcTimeRatio": + gc.GCTimeRatio, err = toInt(p) + } + return err +} + +func (gc *GCConfiguration) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, gc.setField) +} + +type GCHeapConfiguration struct { + StartTime int64 + MinSize int64 + MaxSize int64 + InitialSize int64 + UsesCompressedOops bool + CompressedOopsMode *NarrowOopMode + ObjectAlignment int64 + HeapAddressBits int8 +} + +func (ghc *GCHeapConfiguration) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + ghc.StartTime, err = toLong(p) + case "minSize": + ghc.MinSize, err = toLong(p) + case "maxSize": + ghc.MaxSize, err = toLong(p) + case "initialSize": + ghc.InitialSize, err = toLong(p) + case "usesCompressedOops": + ghc.UsesCompressedOops, err = toBoolean(p) + case "compressedOopsMode": + ghc.CompressedOopsMode, err = toNarrowOopMode(p) + case "objectAlignment": + ghc.ObjectAlignment, err = toLong(p) + case "heapAddressBits": + ghc.HeapAddressBits, err = toByte(p) + } + return err +} + +func (ghc *GCHeapConfiguration) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, ghc.setField) +} + +type GCSurvivorConfiguration struct { + StartTime int64 + MaxTenuringThreshold int8 + InitialTenuringThreshold int8 +} + +func (gcs *GCSurvivorConfiguration) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + gcs.StartTime, err = toLong(p) + case "maxTenuringThreshold": + gcs.MaxTenuringThreshold, err = toByte(p) + case "initialTenuringThreshold": + gcs.InitialTenuringThreshold, err = toByte(p) + } + return err +} + +func (gsc *GCSurvivorConfiguration) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, gsc.setField) +} + +type GCTLABConfiguration struct { + StartTime int64 + UsesTLABs bool + MinTLABSize int64 + TLABRefillWasteLimit int64 +} + +func (gtc *GCTLABConfiguration) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + gtc.StartTime, err = toLong(p) + case "usesTLABs": + gtc.UsesTLABs, err = toBoolean(p) + case "minTLABSize": + gtc.MinTLABSize, err = toLong(p) + case "tlabRefillWasteLimit": + gtc.TLABRefillWasteLimit, err = toLong(p) + } + return err +} + +func (gtc *GCTLABConfiguration) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, gtc.setField) +} + +type InitialEnvironmentVariable struct { + StartTime int64 + Key string + Value string +} + +func (iev *InitialEnvironmentVariable) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + iev.StartTime, err = toLong(p) + case "key": + iev.Key, err = ToString(p) + case "value": + iev.Value, err = ToString(p) + } + return err +} + +func (iev *InitialEnvironmentVariable) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, iev.setField) +} + +type InitialSystemProperty struct { + StartTime int64 + Key string + Value string +} + +func (isp *InitialSystemProperty) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + isp.StartTime, err = toLong(p) + case "key": + isp.Key, err = ToString(p) + case "value": + 
isp.Value, err = ToString(p) + } + return err +} + +func (isp *InitialSystemProperty) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, isp.setField) +} + +type IntFlag struct { + StartTime int64 + Name string + Value int32 + Origin *FlagValueOrigin +} + +func (f *IntFlag) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + f.StartTime, err = toLong(p) + case "name": + f.Name, err = ToString(p) + case "value": + f.Value, err = toInt(p) + case "origin": + f.Origin, err = toFlagValueOrigin(p) + } + return err +} + +func (f *IntFlag) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, f.setField) +} + +type JavaMonitorEnter struct { + StartTime int64 + Duration int64 + EventThread *Thread + StackTrace *StackTrace + MonitorClass *Class + PreviousOwner *Thread + Address int64 + ContextId int64 +} + +func (jme *JavaMonitorEnter) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + jme.StartTime, err = toLong(p) + case "duration": + jme.Duration, err = toLong(p) + case "eventThread": + jme.EventThread, err = toThread(p) + case "stackTrace": + jme.StackTrace, err = toStackTrace(p) + case "monitorClass": + jme.MonitorClass, err = toClass(p) + case "previousOwner": + jme.PreviousOwner, err = toThread(p) + case "address": + jme.Address, err = toLong(p) + case "contextId": + jme.ContextId, err = toLong(p) + } + return err +} + +func (jme *JavaMonitorEnter) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, jme.setField) +} + +type JavaMonitorWait struct { + StartTime int64 + Duration int64 + EventThread *Thread + StackTrace *StackTrace + MonitorClass *Class + Notifier *Thread + Timeout int64 + TimedOut bool + Address int64 +} + +func (jmw *JavaMonitorWait) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + jmw.StartTime, err = toLong(p) + case "duration": + jmw.Duration, err = toLong(p) + case "eventThread": + jmw.EventThread, err = toThread(p) + case "stackTrace": + jmw.StackTrace, err = toStackTrace(p) + case "monitorClass": + jmw.MonitorClass, err = toClass(p) + case "notifier": + jmw.Notifier, err = toThread(p) + case "timeout": + jmw.Timeout, err = toLong(p) + case "timedOut": + jmw.TimedOut, err = toBoolean(p) + case "address": + jmw.Address, err = toLong(p) + } + return err +} + +func (jmw *JavaMonitorWait) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, jmw.setField) +} + +type JavaThreadStatistics struct { + StartTime int64 + ActiveCount int64 + DaemonCount int64 + AccumulatedCount int64 + PeakCount int64 +} + +func (jts *JavaThreadStatistics) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + jts.StartTime, err = toLong(p) + case "activeCount": + jts.ActiveCount, err = toLong(p) + case "daemonCount": + jts.DaemonCount, err = toLong(p) + case "accumulatedCount": + jts.AccumulatedCount, err = toLong(p) + case "peakCount": + jts.PeakCount, err = toLong(p) + } + return err +} + +func (jts *JavaThreadStatistics) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, jts.setField) +} + +type 
JVMInformation struct { + StartTime int64 + JVMName string + JVMVersion string + JVMArguments string + JVMFlags string + JavaArguments string + JVMStartTime int64 + PID int64 +} + +func (ji *JVMInformation) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + ji.StartTime, err = toLong(p) + case "jvmName": + ji.JVMName, err = ToString(p) + case "jvmVersion": + ji.JVMVersion, err = ToString(p) + case "jvmArguments": + ji.JVMArguments, err = ToString(p) + case "jvmFlags": + ji.JVMFlags, err = ToString(p) + case "javaArguments": + ji.JavaArguments, err = ToString(p) + case "jvmStartTime": + ji.JVMStartTime, err = toLong(p) + case "pid": + ji.PID, err = toLong(p) + } + return err +} + +func (ji *JVMInformation) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, ji.setField) +} + +type LoaderConstraintsTableStatistics struct { + StartTime int64 + BucketCount int64 + EntryCount int64 + TotalFootprint int64 + BucketCountMaximum int64 + BucketCountAverage float32 + BucketCountVariance float32 + BucketCountStandardDeviation float32 + InsertionRate float32 + RemovalRate float32 +} + +func (lcts *LoaderConstraintsTableStatistics) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + lcts.StartTime, err = toLong(p) + case "bucketCount": + lcts.BucketCount, err = toLong(p) + case "entryCount": + lcts.EntryCount, err = toLong(p) + case "totalFootprint": + lcts.TotalFootprint, err = toLong(p) + case "bucketCountMaximum": + lcts.BucketCountMaximum, err = toLong(p) + case "bucketCountAverage": + lcts.BucketCountAverage, err = toFloat(p) + case "bucketCountVariance": + lcts.BucketCountVariance, err = toFloat(p) + case "bucketCountStandardDeviation": + lcts.BucketCountStandardDeviation, err = toFloat(p) + case "insertionRate": + lcts.InsertionRate, err = toFloat(p) + case "removalRate": + lcts.RemovalRate, err = toFloat(p) + } + return err +} + +func (lcts *LoaderConstraintsTableStatistics) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, lcts.setField) +} + +type LongFlag struct { + StartTime int64 + Name string + Value int64 + Origin *FlagValueOrigin +} + +func (lf *LongFlag) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + lf.StartTime, err = toLong(p) + case "name": + lf.Name, err = ToString(p) + case "value": + lf.Value, err = toLong(p) + case "origin": + lf.Origin, err = toFlagValueOrigin(p) + } + return err +} + +func (lf *LongFlag) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, lf.setField) +} + +type ModuleExport struct { + StartTime int64 + ExportedPackage *Package + TargetModule *Module +} + +func (me *ModuleExport) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + me.StartTime, err = toLong(p) + case "exportedPackage": + me.ExportedPackage, err = toPackage(p) + case "targetModule": + me.TargetModule, err = toModule(p) + } + return err +} + +func (me *ModuleExport) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, me.setField) +} + +type ModuleRequire struct { + StartTime int64 + Source *Module + RequiredModule *Module +} + +func (mr *ModuleRequire) setField(name string, p 
ParseResolvable) (err error) { + switch name { + case "startTime": + mr.StartTime, err = toLong(p) + case "sourced": + mr.Source, err = toModule(p) + case "requiredModule": + mr.RequiredModule, err = toModule(p) + } + return err +} + +func (mr *ModuleRequire) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, mr.setField) +} + +type NativeLibrary struct { + StartTime int64 + Name string + BaseAddress int64 + TopAddress int64 +} + +func (nl *NativeLibrary) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + nl.StartTime, err = toLong(p) + case "name": + nl.Name, err = ToString(p) + case "baseAddress": + nl.BaseAddress, err = toLong(p) + case "topAddress": + nl.TopAddress, err = toLong(p) + } + return err +} + +func (nl *NativeLibrary) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, nl.setField) +} + +type NetworkUtilization struct { + StartTime int64 + NetworkInterface *NetworkInterfaceName + ReadRate int64 + WriteRate int64 +} + +func (nu *NetworkUtilization) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + nu.StartTime, err = toLong(p) + case "networkInterface": + nu.NetworkInterface, err = toNetworkInterfaceName(p) + case "readRate": + nu.ReadRate, err = toLong(p) + case "writeRate": + nu.WriteRate, err = toLong(p) + } + return err +} + +func (nu *NetworkUtilization) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, nu.setField) +} + +type ObjectAllocationInNewTLAB struct { + StartTime int64 + EventThread *Thread + StackTrace *StackTrace + ObjectClass *Class + AllocationSize int64 + TLABSize int64 + ContextId int64 +} + +func (oa *ObjectAllocationInNewTLAB) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + oa.StartTime, err = toLong(p) + case "sampledThread": + oa.EventThread, err = toThread(p) + case "stackTrace": + oa.StackTrace, err = toStackTrace(p) + case "objectClass": + oa.ObjectClass, err = toClass(p) + case "allocationSize": + oa.AllocationSize, err = toLong(p) + case "tlabSize": + oa.TLABSize, err = toLong(p) + case "contextId": + oa.ContextId, err = toLong(p) + } + + return err +} + +func (oa *ObjectAllocationInNewTLAB) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, oa.setField) +} + +type ObjectAllocationOutsideTLAB struct { + StartTime int64 + EventThread *Thread + StackTrace *StackTrace + ObjectClass *Class + AllocationSize int64 + ContextId int64 +} + +func (oa *ObjectAllocationOutsideTLAB) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + oa.StartTime, err = toLong(p) + case "sampledThread": + oa.EventThread, err = toThread(p) + case "stackTrace": + oa.StackTrace, err = toStackTrace(p) + case "objectClass": + oa.ObjectClass, err = toClass(p) + case "allocationSize": + oa.AllocationSize, err = toLong(p) + case "contextId": + oa.ContextId, err = toLong(p) + } + return err +} + +func (oa *ObjectAllocationOutsideTLAB) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, oa.setField) +} + +type OSInformation struct { + StartTime int64 + OSVersion string +} + +func (os 
*OSInformation) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + os.StartTime, err = toLong(p) + case "osVersion": + os.OSVersion, err = ToString(p) + } + return err +} + +func (os *OSInformation) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, os.setField) +} + +type PhysicalMemory struct { + StartTime int64 + TotalSize int64 + UsedSize int64 +} + +func (pm *PhysicalMemory) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + pm.StartTime, err = toLong(p) + case "totalSize": + pm.TotalSize, err = toLong(p) + case "usedSize": + pm.UsedSize, err = toLong(p) + } + return err +} + +func (pm *PhysicalMemory) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, pm.setField) +} + +type PlaceholderTableStatistics struct { + StartTime int64 + BucketCount int64 + EntryCount int64 + TotalFootprint int64 + BucketCountMaximum int64 + BucketCountAverage float32 + BucketCountVariance float32 + BucketCountStandardDeviation float32 + InsertionRate float32 + RemovalRate float32 +} + +func (pts *PlaceholderTableStatistics) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + pts.StartTime, err = toLong(p) + case "bucketCount": + pts.BucketCount, err = toLong(p) + case "entryCount": + pts.EntryCount, err = toLong(p) + case "totalFootprint": + pts.TotalFootprint, err = toLong(p) + case "bucketCountMaximum": + pts.BucketCountMaximum, err = toLong(p) + case "bucketCountAverage": + pts.BucketCountAverage, err = toFloat(p) + case "bucketCountVariance": + pts.BucketCountVariance, err = toFloat(p) + case "bucketCountStandardDeviation": + pts.BucketCountStandardDeviation, err = toFloat(p) + case "insertionRate": + pts.InsertionRate, err = toFloat(p) + case "removalRate": + pts.RemovalRate, err = toFloat(p) + } + return err +} + +func (pts *PlaceholderTableStatistics) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, pts.setField) +} + +type ProtectionDomainCacheTableStatistics struct { + StartTime int64 + BucketCount int64 + EntryCount int64 + TotalFootprint int64 + BucketCountMaximum int64 + BucketCountAverage float32 + BucketCountVariance float32 + BucketCountStandardDeviation float32 + InsertionRate float32 + RemovalRate float32 +} + +func (pdcts *ProtectionDomainCacheTableStatistics) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + pdcts.StartTime, err = toLong(p) + case "bucketCount": + pdcts.BucketCount, err = toLong(p) + case "entryCount": + pdcts.EntryCount, err = toLong(p) + case "totalFootprint": + pdcts.TotalFootprint, err = toLong(p) + case "bucketCountMaximum": + pdcts.BucketCountMaximum, err = toLong(p) + case "bucketCountAverage": + pdcts.BucketCountAverage, err = toFloat(p) + case "bucketCountVariance": + pdcts.BucketCountVariance, err = toFloat(p) + case "bucketCountStandardDeviation": + pdcts.BucketCountStandardDeviation, err = toFloat(p) + case "insertionRate": + pdcts.InsertionRate, err = toFloat(p) + case "removalRate": + pdcts.RemovalRate, err = toFloat(p) + } + return err +} + +func (pdcts *ProtectionDomainCacheTableStatistics) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, 
pdcts.setField) +} + +type StringFlag struct { + StartTime int64 + Name string + Value string + Origin *FlagValueOrigin +} + +func (sf *StringFlag) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + sf.StartTime, err = toLong(p) + case "name": + sf.Name, err = ToString(p) + case "value": + sf.Value, err = ToString(p) + case "origin": + sf.Origin, err = toFlagValueOrigin(p) + } + return err +} + +func (sf *StringFlag) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, sf.setField) +} + +type StringTableStatistics struct { + StartTime int64 + BucketCount int64 + EntryCount int64 + TotalFootprint int64 + BucketCountMaximum int64 + BucketCountAverage float32 + BucketCountVariance float32 + BucketCountStandardDeviation float32 + InsertionRate float32 + RemovalRate float32 +} + +func (sts *StringTableStatistics) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + sts.StartTime, err = toLong(p) + case "bucketCount": + sts.BucketCount, err = toLong(p) + case "entryCount": + sts.EntryCount, err = toLong(p) + case "totalFootprint": + sts.TotalFootprint, err = toLong(p) + case "bucketCountMaximum": + sts.BucketCountMaximum, err = toLong(p) + case "bucketCountAverage": + sts.BucketCountAverage, err = toFloat(p) + case "bucketCountVariance": + sts.BucketCountVariance, err = toFloat(p) + case "bucketCountStandardDeviation": + sts.BucketCountStandardDeviation, err = toFloat(p) + case "insertionRate": + sts.InsertionRate, err = toFloat(p) + case "removalRate": + sts.RemovalRate, err = toFloat(p) + } + return err +} + +func (sts *StringTableStatistics) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, sts.setField) +} + +type SymbolTableStatistics struct { + StartTime int64 + BucketCount int64 + EntryCount int64 + TotalFootprint int64 + BucketCountMaximum int64 + BucketCountAverage float32 + BucketCountVariance float32 + BucketCountStandardDeviation float32 + InsertionRate float32 + RemovalRate float32 +} + +func (sts *SymbolTableStatistics) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + sts.StartTime, err = toLong(p) + case "bucketCount": + sts.BucketCount, err = toLong(p) + case "entryCount": + sts.EntryCount, err = toLong(p) + case "totalFootprint": + sts.TotalFootprint, err = toLong(p) + case "bucketCountMaximum": + sts.BucketCountMaximum, err = toLong(p) + case "bucketCountAverage": + sts.BucketCountAverage, err = toFloat(p) + case "bucketCountVariance": + sts.BucketCountVariance, err = toFloat(p) + case "bucketCountStandardDeviation": + sts.BucketCountStandardDeviation, err = toFloat(p) + case "insertionRate": + sts.InsertionRate, err = toFloat(p) + case "removalRate": + sts.RemovalRate, err = toFloat(p) + } + return err +} + +func (sts *SymbolTableStatistics) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, sts.setField) +} + +type SystemProcess struct { + StartTime int64 + PID string + CommandLine string +} + +func (sp *SystemProcess) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + sp.StartTime, err = toLong(p) + case "pid": + sp.PID, err = ToString(p) + case "commandLine": + sp.CommandLine, err = ToString(p) + } + return err +} + +func (sp *SystemProcess) Parse(r 
Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, sp.setField) +} + +type ThreadAllocationStatistics struct { + StartTime int64 + Allocated int64 + Thread *Thread +} + +func (tas *ThreadAllocationStatistics) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + tas.StartTime, err = toLong(p) + case "allocated": + tas.Allocated, err = toLong(p) + case "thread": + tas.Thread, err = toThread(p) + } + return err +} + +func (tas *ThreadAllocationStatistics) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, tas.setField) +} + +type ThreadCPULoad struct { + StartTime int64 + EventThread *Thread + User float32 + System float32 +} + +func (tcl *ThreadCPULoad) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + tcl.StartTime, err = toLong(p) + case "eventThread": + tcl.EventThread, err = toThread(p) + case "user": + tcl.User, err = toFloat(p) + case "system": + tcl.System, err = toFloat(p) + } + return err +} + +func (tcl *ThreadCPULoad) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, tcl.setField) +} + +type ThreadContextSwitchRate struct { + StartTime int64 + SwitchRate float32 +} + +func (tcsr *ThreadContextSwitchRate) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + tcsr.StartTime, err = toLong(p) + case "switchRate": + tcsr.SwitchRate, err = toFloat(p) + } + return err +} + +func (tcsr *ThreadContextSwitchRate) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, tcsr.setField) +} + +type ThreadDump struct { + StartTime int64 + Result string +} + +func (td *ThreadDump) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + td.StartTime, err = toLong(p) + case "result": + td.Result, err = ToString(p) + } + return err +} + +func (td *ThreadDump) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, td.setField) +} + +type ThreadPark struct { + StartTime int64 + Duration int64 + EventThread *Thread + StackTrace *StackTrace + ParkedClass *Class + Timeout int64 + Until int64 + Address int64 + ContextId int64 +} + +func (tp *ThreadPark) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + tp.StartTime, err = toLong(p) + case "duration": + tp.Duration, err = toLong(p) + case "eventThread": + tp.EventThread, err = toThread(p) + case "stackTrace": + tp.StackTrace, err = toStackTrace(p) + case "parkedClass": + tp.ParkedClass, err = toClass(p) + case "timeout": + tp.Timeout, err = toLong(p) + case "until": + tp.Until, err = toLong(p) + case "address": + tp.Address, err = toLong(p) + case "contextId": // todo this one seems to be unimplemented in the profiler yet + tp.ContextId, err = toLong(p) + } + return err +} + +func (tp *ThreadPark) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, tp.setField) +} + +type ThreadStart struct { + StartTime int64 + EventThread *Thread + StackTrace *StackTrace + Thread *Thread + ParentThread *Thread +} + +func (ts *ThreadStart) setField(name string, p 
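The StartTime and Duration fields in these events hold raw numbers; whether a value is in ticks or nanoseconds is decided per field by the unit annotations resolved in metadata.go further down. For tick-based values, the chunk Header (header.go below) carries the calibration needed to recover wall-clock time. A hypothetical helper, assuming the Header type from this diff:

    // ticksToUnixNanos converts a tick timestamp from this chunk into Unix nanoseconds.
    func ticksToUnixNanos(h *Header, ticks int64) int64 {
    	elapsed := ticks - h.StartTicks // ticks elapsed since the chunk started
    	return h.StartTimeNanos + int64(float64(elapsed)*1e9/float64(h.TicksPerSecond))
    }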
ParseResolvable) (err error) { + switch name { + case "startTime": + ts.StartTime, err = toLong(p) + case "eventThread": + ts.EventThread, err = toThread(p) + case "stackTrace": + ts.StackTrace, err = toStackTrace(p) + case "thread": + ts.Thread, err = toThread(p) + case "parentThread": + ts.ParentThread, err = toThread(p) + } + return err +} + +func (ts *ThreadStart) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, ts.setField) +} + +type UnsignedIntFlag struct { + StartTime int64 + Name string + Value int32 + Origin *FlagValueOrigin +} + +func (uif *UnsignedIntFlag) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + uif.StartTime, err = toLong(p) + case "name": + uif.Name, err = ToString(p) + case "value": + uif.Value, err = toInt(p) + case "origin": + uif.Origin, err = toFlagValueOrigin(p) + } + return err +} + +func (uif *UnsignedIntFlag) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, uif.setField) +} + +type UnsignedLongFlag struct { + StartTime int64 + Name string + Value int64 + Origin *FlagValueOrigin +} + +func (ulf *UnsignedLongFlag) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + ulf.StartTime, err = toLong(p) + case "name": + ulf.Name, err = ToString(p) + case "value": + ulf.Value, err = toLong(p) + case "origin": + ulf.Origin, err = toFlagValueOrigin(p) + } + return err +} + +func (ulf *UnsignedLongFlag) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, ulf.setField) +} + +type VirtualizationInformation struct { + StartTime int64 + Name string +} + +func (vi *VirtualizationInformation) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + vi.StartTime, err = toLong(p) + case "name": + vi.Name, err = ToString(p) + } + return err +} + +func (vi *VirtualizationInformation) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, vi.setField) +} + +type YoungGenerationConfiguration struct { + StartTime int64 + MinSize int64 + MaxSize int64 + NewRatio int32 +} + +func (ygc *YoungGenerationConfiguration) setField(name string, p ParseResolvable) (err error) { + switch name { + case "startTime": + ygc.StartTime, err = toLong(p) + case "minSize": + ygc.MinSize, err = toLong(p) + case "maxSize": + ygc.MaxSize, err = toLong(p) + case "newRatio": + ygc.NewRatio, err = toInt(p) + } + return err +} + +func (ygc *YoungGenerationConfiguration) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, ygc.setField) +} + +type UnsupportedEvent struct { +} + +func (ue *UnsupportedEvent) setField(name string, p ParseResolvable) error { + return nil +} + +func (ue *UnsupportedEvent) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return parseFields(r, classes, cpools, class, nil, true, ue.setField) +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/filter.go b/vendor/github.com/grafana/jfr-parser/parser/filter.go new file mode 100644 index 0000000000..4b96df4b8e --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/filter.go @@ -0,0 +1,47 @@ +package parser + +import ( + "reflect" +) + +var ( + 
AlwaysTrue Predicate[Event] = TrueFn + AlwaysFalse Predicate[Event] = FalseFn +) + +var ( + TrueFn PredicateFunc = func(Event) bool { return true } + FalseFn PredicateFunc = func(Event) bool { return false } +) + +type EventFilter interface { + GetPredicate(metadata *ClassMetadata) Predicate[Event] +} + +type Predicate[T any] interface { + Test(t T) bool +} + +type PredicateFunc func(Event) bool + +func (p PredicateFunc) Test(e Event) bool { + return p(e) +} + +func (p PredicateFunc) Equals(other PredicateFunc) bool { + return reflect.ValueOf(p).Pointer() == reflect.ValueOf(other).Pointer() +} + +func IsAlwaysTrue(p Predicate[Event]) bool { + if pf, ok := p.(PredicateFunc); ok { + return pf.Equals(AlwaysTrue.(PredicateFunc)) + } + return false +} + +func IsAlwaysFalse(p Predicate[Event]) bool { + if pf, ok := p.(PredicateFunc); ok { + return pf.Equals(AlwaysFalse.(PredicateFunc)) + } + return false +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/header.go b/vendor/github.com/grafana/jfr-parser/parser/header.go new file mode 100644 index 0000000000..ea482bb953 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/header.go @@ -0,0 +1,69 @@ +package parser + +import ( + "encoding/binary" + "fmt" + "io" +) + +const headerSize = 60 + +type Header struct { + ChunkSize int64 + ConstantPoolOffset int64 + MetadataOffset int64 + StartTimeNanos int64 + DurationNanos int64 + StartTicks int64 + TicksPerSecond int64 + Features int32 +} + +func (h *Header) Parse(rd Reader) (err error) { + h.ChunkSize, _ = rd.Long() + h.ConstantPoolOffset, _ = rd.Long() + h.MetadataOffset, _ = rd.Long() + h.StartTimeNanos, _ = rd.Long() + h.DurationNanos, _ = rd.Long() + h.StartTicks, _ = rd.Long() + h.TicksPerSecond, _ = rd.Long() + h.Features, err = rd.Int() + return err +} + +func (p *Parser) readChunkHeader(pos int) error { + if p.pos+chunkHeaderSize > len(p.buf) { + return io.ErrUnexpectedEOF + } + + p.pos = pos + h := ChunkHeader{} + h.Features = binary.BigEndian.Uint32(p.buf[pos+64:]) + h.Magic = binary.BigEndian.Uint32(p.buf[pos:]) + h.Version = binary.BigEndian.Uint32(p.buf[pos+4:]) + h.Size = int(binary.BigEndian.Uint64(p.buf[pos+8:])) + h.OffsetConstantPool = int(binary.BigEndian.Uint64(p.buf[pos+16:])) + h.OffsetMeta = int(binary.BigEndian.Uint64(p.buf[pos+24:])) + h.StartNanos = binary.BigEndian.Uint64(p.buf[pos+32:]) + h.DurationNanos = binary.BigEndian.Uint64(p.buf[pos+40:]) + h.StartTicks = binary.BigEndian.Uint64(p.buf[pos+48:]) + h.TicksPerSecond = binary.BigEndian.Uint64(p.buf[pos+56:]) + if h.Magic != chunkMagic { + return fmt.Errorf("invalid chunk magic: %x", h.Magic) + } + if h.Version < 0x20000 || h.Version > 0x2ffff { + return fmt.Errorf("unknown version %x", h.Version) + } + if h.OffsetConstantPool <= 0 || h.OffsetMeta <= 0 { + return fmt.Errorf("invalid offsets: cp %d meta %d", h.OffsetConstantPool, h.OffsetMeta) + } + if h.Size <= 0 { + return fmt.Errorf("invalid size: %d", h.Size) + } + if p.options.ChunkSizeLimit > 0 && h.Size > p.options.ChunkSizeLimit { + return fmt.Errorf("chunk size %d exceeds limit %d", h.Size, p.options.ChunkSizeLimit) + } + p.header = h + p.chunkEnd = pos + h.Size + return nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/metadata.go b/vendor/github.com/grafana/jfr-parser/parser/metadata.go new file mode 100644 index 0000000000..688e847dfb --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/metadata.go @@ -0,0 +1,621 @@ +package parser + +import ( + "fmt" + "github.com/grafana/jfr-parser/common/units" + 
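EventFilter hands back a Predicate per event class, so a filter can accept or reject a whole class up front with AlwaysTrue/AlwaysFalse, which IsAlwaysTrue/IsAlwaysFalse then recognize by comparing function pointers. A sketch of a class-name filter built from these pieces (ClassNameFilter is illustrative):

    // ClassNameFilter keeps only events whose class metadata matches Name.
    type ClassNameFilter struct{ Name string }

    func (f ClassNameFilter) GetPredicate(metadata *ClassMetadata) Predicate[Event] {
    	if metadata.Name == f.Name {
    		return AlwaysTrue // callers may skip per-event testing entirely
    	}
    	return AlwaysFalse // callers may skip the whole class
    }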
"github.com/grafana/jfr-parser/internal/utils" + "strconv" + + "github.com/grafana/jfr-parser/parser/types/def" +) + +type Element interface { + SetAttribute(key, value string) error + AppendChild(name string) Element +} + +type ElementWithHeader interface { + Element + SetHeader(header *Header) +} + +// SettingMetadata TODO: Proper attribute support for SettingMetadata +type SettingMetadata struct { + Values map[string]string +} + +func (s *SettingMetadata) SetAttribute(key, value string) error { + if s.Values == nil { + s.Values = make(map[string]string) + } + s.Values[key] = value + return nil +} + +func (s *SettingMetadata) AppendChild(string) Element { return nil } + +type FieldMetadata struct { + ClassID int64 + Name string + ConstantPool bool + Dimension int32 + ChunkHeader *Header + FieldAnnotation +} + +func (f *FieldMetadata) SetAttribute(key, value string) (err error) { + switch key { + case "name": + f.Name = value + case "class": + f.ClassID, err = strconv.ParseInt(value, 10, 64) + case "constantPool": + f.ConstantPool, err = parseBool(value) + case "dimension": + var n int64 + n, err = strconv.ParseInt(value, 10, 32) + f.Dimension = int32(n) + } + return nil +} + +func (f *FieldMetadata) AppendChild(name string) Element { + switch name { + case "annotation": + am := &AnnotationMetadata{} + f.Annotations = append(f.Annotations, am) + return am + } + return nil +} + +func (f *FieldMetadata) IsArray() bool { + switch f.Dimension { + case 0: + return false + case 1: + return true + default: + panic(fmt.Sprintf("dimension value [%d] is not supported", f.Dimension)) + } +} + +func (f *FieldMetadata) SetHeader(header *Header) { + f.ChunkHeader = header +} + +func (f *FieldMetadata) Unsigned(classMap ClassMap) bool { + if f.unsigned == nil { + f.resolve(classMap) + } + return *f.unsigned +} + +func (f *FieldMetadata) Unit(classMap ClassMap) *units.Unit { + if f.unit == nil { + f.resolve(classMap) + } + if f.unit == units.Unknown { + return nil + } + return f.unit +} + +func (f *FieldMetadata) TickTimestamp(classMap ClassMap) bool { + if f.tickTimestamp == nil { + f.resolve(classMap) + } + return *f.tickTimestamp +} + +func (f *FieldMetadata) resolve(classMap ClassMap) { + for _, annotation := range f.Annotations { + switch classMap[annotation.ClassID].Name { + case annotationUnsigned: + f.unsigned = utils.NewPointer(true) + case annotationMemoryAmount, annotationDataAmount: + f.unit = units.Byte + case annotationPercentage: + f.unit = units.Multiple + case annotationTimespan: + switch annotation.Values[valueProperty] { + case unitTicks: + f.unit = units.Nanosecond.Derived("tick", units.F64(1e9/float64(f.ChunkHeader.TicksPerSecond))) + case unitNS: + f.unit = units.Nanosecond + case unitMS: + f.unit = units.Millisecond + case unitS: + f.unit = units.Second + } + case annotationFrequency: + f.unit = units.Hertz + case annotationTimestamp: + switch annotation.Values[valueProperty] { + case unitTicks: + f.tickTimestamp = utils.NewPointer(true) + case unitSSinceEpoch: + f.unit = units.UnixSecond + case unitMSSinceEpoch: + f.unit = units.UnixMilli + case unitNSSinceEpoch: + f.unit = units.UnixNano + } + } + } + if f.unsigned == nil { + f.unsigned = utils.NewPointer(false) + } + if f.tickTimestamp == nil { + f.tickTimestamp = utils.NewPointer(false) + } + if f.unit == nil { + f.unit = units.Unknown + } +} + +type ClassMetadata struct { + ID int64 + Name string + SuperType string + SimpleType bool + Fields []*FieldMetadata + fieldsDict map[string]*FieldMetadata + Settings []*SettingMetadata 
+ ClassMap ClassMap // redundant ClassMap here is for getting field class more easily + ClassAnnotation +} + +func (c *ClassMetadata) Category() []string { + if c.categories == nil { + for _, annotation := range c.Annotations { + if c.ClassMap[annotation.ClassID].Name == annotationCategory { + categories := make([]string, 0, len(annotation.Values)) + idx := 0 + for { + cat, ok := annotation.Values[fmt.Sprintf("%s-%d", valueProperty, idx)] + if !ok { + break + } + categories = append(categories, cat) + idx++ + } + c.categories = categories + } + if c.categories == nil { + c.categories = []string{} + } + } + } + return c.categories +} + +func (c *ClassMetadata) Label() string { + return c.BaseAnnotation.Label(c.ClassMap) +} + +func (c *ClassMetadata) Unit(fieldName string) *units.Unit { + fieldMeta := c.GetField(fieldName) + if fieldMeta == nil { + return nil + } + return fieldMeta.Unit(c.ClassMap) +} + +func (c *ClassMetadata) Unsigned(fieldName string) bool { + fieldMeta := c.GetField(fieldName) + if fieldMeta == nil { + return false + } + return fieldMeta.Unsigned(c.ClassMap) +} + +func (c *ClassMetadata) buildFieldMap() { + if c.fieldsDict == nil { + c.fieldsDict = make(map[string]*FieldMetadata, len(c.Fields)) + for _, field := range c.Fields { + c.fieldsDict[field.Name] = field + } + } +} + +func (c *ClassMetadata) SetAttribute(key, value string) (err error) { + switch key { + case "id": + c.ID, err = strconv.ParseInt(value, 10, 64) + case "name": + c.Name = value + case "superType": + c.SuperType = value + case "simpleType": + c.SimpleType, err = parseBool(value) + } + return err +} + +func (c *ClassMetadata) ContainsField(fieldName, fieldClass string) bool { + md := c.GetField(fieldName) + if md == nil { + return false + } + + if c.ClassMap[md.ClassID].Name == fieldClass { + return true + } + return false +} + +func (c *ClassMetadata) GetField(fieldName string) *FieldMetadata { + if c.fieldsDict == nil { + c.buildFieldMap() + } + return c.fieldsDict[fieldName] +} + +func (c *ClassMetadata) AppendChild(name string) Element { + switch name { + case "field": + fm := &FieldMetadata{} + c.Fields = append(c.Fields, fm) + return fm + case "setting": + sm := &SettingMetadata{} + c.Settings = append(c.Settings, sm) + return sm + case "annotation": + am := &AnnotationMetadata{} + c.Annotations = append(c.Annotations, am) + return am + } + return nil +} + +type Metadata struct { + Classes []*ClassMetadata +} + +func (m *Metadata) SetAttribute(string, string) error { return nil } + +func (m *Metadata) AppendChild(name string) Element { + switch name { + case "class": + cm := &ClassMetadata{} + m.Classes = append(m.Classes, cm) + return cm + default: + } + return nil +} + +type Region struct { + Locale string + GMTOffset string + TicksToMillis string +} + +func (m *Region) SetAttribute(key, value string) error { + switch key { + case "locale": + m.Locale = value + case "gmtOffset": + // TODO int? + m.GMTOffset = value + case "ticksToMillis": + // TODO int? 
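ClassMetadata is the query surface for a class's schema: GetField/ContainsField answer shape questions, while Unit/Unsigned expose semantics derived from field annotations. A usage sketch (md is assumed to be the metadata for one of the allocation events earlier in this diff; whether allocationSize actually carries a data-amount annotation depends on the recording):

    func describeAllocation(md *ClassMetadata) {
    	// Shape check: a long-typed startTime field.
    	if md.ContainsField("startTime", "long") {
    		// ...
    	}
    	// Unit lookup resolves annotation-driven units; nil means unknown.
    	if u := md.Unit("allocationSize"); u != nil {
    		_ = u // typically bytes for allocation sizes
    	}
    }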
+ m.TicksToMillis = value + } + return nil +} + +func (m *Region) AppendChild(string) Element { return nil } + +type Root struct { + Metadata *Metadata + Region Region +} + +func (r *Root) SetAttribute(string, string) error { return nil } + +func (r *Root) AppendChild(name string) Element { + switch name { + case "metadata": + r.Metadata = &Metadata{} + return r.Metadata + case "region": + r.Region = Region{} + return &r.Region + } + return nil +} + +type ChunkMetadata struct { + StartTime int64 + Duration int64 + ID int64 + Root *Root + Header *Header + ClassMap ClassMap +} + +func (m *ChunkMetadata) buildClassMap() { + classMap := make(ClassMap, len(m.Root.Metadata.Classes)) + for _, class := range m.Root.Metadata.Classes { + class.ClassMap = classMap // assign all class to every class metadata + classMap[class.ID] = class + } + + m.ClassMap = classMap + m.Root.Metadata.Classes = nil +} + +func (m *ChunkMetadata) Parse(r Reader) (err error) { + if kind, err := r.VarLong(); err != nil { + return fmt.Errorf("unable to retrieve event type: %w", err) + } else if kind != 0 { + return fmt.Errorf("unexpected metadata event type: %d", kind) + } + + if m.StartTime, err = r.VarLong(); err != nil { + return fmt.Errorf("unable to parse metadata event's start time: %w", err) + } + if m.Duration, err = r.VarLong(); err != nil { + return fmt.Errorf("unable to parse metadata event's duration: %w", err) + } + if m.ID, err = r.VarLong(); err != nil { + return fmt.Errorf("unable to parse metadata event's ID: %w", err) + } + n, err := r.VarInt() + if err != nil { + return fmt.Errorf("unable to parse metadata event's number of strings: %w", err) + } + // TODO: assert n is small enough + strings := make([]string, n) + for i := 0; i < int(n); i++ { + if x, err := r.String(); err != nil { + return fmt.Errorf("unable to parse metadata event's string: %w", err) + } else { + strings[i] = x.s + } + } + + name, err := parseName(r, strings) + if err != nil { + return err + } + if name != "root" { + return fmt.Errorf("invalid root element name: %s", name) + } + + m.Root = &Root{} + if err = parseElement(r, strings, m.Header, m.Root); err != nil { + return fmt.Errorf("unable to parse metadata element tree: %w", err) + } + + m.buildClassMap() + + return nil +} + +func (p *Parser) readMeta(pos int) error { + p.TypeMap.IDMap = make(map[def.TypeID]*def.Class, 43+5) + p.TypeMap.NameMap = make(map[string]*def.Class, 43+5) + + if err := p.seek(pos); err != nil { + return err + } + sz, err := p.varInt() + if err != nil { + return err + } + p.metaSize = sz + _, err = p.varInt() + if err != nil { + return err + } + _, err = p.varLong() + if err != nil { + return err + } + _, err = p.varLong() + if err != nil { + return err + } + _, err = p.varLong() + if err != nil { + return err + } + nstr, err := p.varInt() + if err != nil { + return err + } + strings := make([]string, nstr) + for i := 0; i < int(nstr); i++ { + strings[i], err = p.string() + if err != nil { + return err + } + } + + e, err := p.readElement(strings, false) + if err != nil { + return err + } + if e.name != "root" { + return fmt.Errorf("expected root element, got %s", e.name) + } + for i := 0; i < e.childCount; i++ { + meta, err := p.readElement(strings, false) + if err != nil { + return err + } + //fmt.Println(meta.name) + switch meta.name { + case "metadata": + for j := 0; j < meta.childCount; j++ { + classElement, err := p.readElement(strings, true) + + if err != nil { + return err + } + cls, err := def.NewClass(classElement.attr, classElement.childCount) + if 
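ChunkMetadata.Parse above and the readMeta path below decode the same metadata event: a string table followed by a recursive element tree. Per the AppendChild implementations in this file, the tree the parser accepts is:

    root
      metadata
        class*          -> ClassMetadata
          field*        -> FieldMetadata
            annotation* -> AnnotationMetadata
          setting*      -> SettingMetadata
          annotation*   -> AnnotationMetadata
      region            (locale / gmtOffset / ticksToMillis attributes)

Any other child name makes AppendChild return nil, which parseElement reports as an unexpected child.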
err != nil { + return err + } + + for k := 0; k < classElement.childCount; k++ { + field, err := p.readElement(strings, true) + if err != nil { + return err + } + if field.name == "field" { + f, err := def.NewField(field.attr) + if err != nil { + return err + } + cls.Fields = append(cls.Fields, f) + } + for l := 0; l < field.childCount; l++ { + _, err := p.readElement(strings, false) + if err != nil { + return err + } + } + + } + //fmt.Println(cls.String()) + p.TypeMap.IDMap[cls.ID] = cls + p.TypeMap.NameMap[cls.Name] = cls + + } + case "region": + break + default: + return fmt.Errorf("unexpected element %s", meta.name) + } + } + if err := p.checkTypes(); err != nil { + return err + } + return nil +} +func parseElement(r Reader, s []string, chunkHeader *Header, e Element) error { + n, err := r.VarInt() + if err != nil { + return fmt.Errorf("unable to parse attribute count: %w", err) + } + + if ex, ok := e.(ElementWithHeader); ok { + ex.SetHeader(chunkHeader) + } + + for i := int64(0); i < int64(n); i++ { + k, err := parseName(r, s) + if err != nil { + return fmt.Errorf("unable to parse attribute key: %w", err) + } + v, err := parseName(r, s) + if err != nil { + return fmt.Errorf("unable to parse attribute value: %w", err) + } + if err := e.SetAttribute(k, v); err != nil { + return fmt.Errorf("unable to set element attribute: %w", err) + } + } + n, err = r.VarInt() + if err != nil { + return fmt.Errorf("unable to parse element count: %w", err) + } + // TODO: assert n is small enough + for i := 0; i < int(n); i++ { + name, err := parseName(r, s) + if err != nil { + return fmt.Errorf("unable to parse element name: %w", err) + } + child := e.AppendChild(name) + if child == nil { + return fmt.Errorf("unexpected child in metadata event: %s", name) + } + if err = parseElement(r, s, chunkHeader, child); err != nil { + return fmt.Errorf("unable to parse child element: %w", err) + } + } + return nil +} + +func (p *Parser) readElement(strings []string, needAttributes bool) (element, error) { + iname, err := p.varInt() + if err != nil { + return element{}, err + } + if iname < 0 || int(iname) >= len(strings) { + return element{}, def.ErrIntOverflow + } + name := strings[iname] + attributeCount, err := p.varInt() + if err != nil { + return element{}, err + } + var attributes map[string]string + if needAttributes { + attributes = make(map[string]string, attributeCount) + } + for i := 0; i < int(attributeCount); i++ { + attributeName, err := p.varInt() + if err != nil { + return element{}, err + } + if attributeName < 0 || int(attributeName) >= len(strings) { + return element{}, def.ErrIntOverflow + } + attributeValue, err := p.varInt() + if err != nil { + return element{}, err + } + if attributeValue < 0 || int(attributeValue) >= len(strings) { + return element{}, def.ErrIntOverflow + } + if needAttributes { + attributes[strings[attributeName]] = strings[attributeValue] + } else { + //fmt.Printf(" >>> skipping attribute %s=%s\n", strings[attributeName], strings[attributeValue]) + } + } + + childCount, err := p.varInt() + if err != nil { + return element{}, err + } + return element{ + name: name, + attr: attributes, + childCount: int(childCount), + }, nil + +} + +func parseName(r Reader, s []string) (string, error) { + n, err := r.VarInt() + if err != nil { + return "", fmt.Errorf("unable to parse string name index: %w", err) + } + if int(n) >= len(s) { + return "", fmt.Errorf("invalid name index %d, only %d names available", n, len(s)) + } + return s[int(n)], nil +} + +func parseBool(s string) (bool, 
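readMeta and readElement lean entirely on the varint wire format (implemented by Parser.varInt/varLong in parser.go below): each byte contributes 7 payload bits, least-significant group first, with the high bit as a continuation flag; varLong is the same except its ninth byte contributes all eight bits. A standalone reimplementation of the 32-bit case with a worked example (decodeVarUint32 is illustrative only):

    // decodeVarUint32 mirrors Parser.varInt: little-endian base-128 groups.
    func decodeVarUint32(buf []byte) (v uint32, n int) {
    	for i := 0; i < len(buf) && i < 5; i++ {
    		v |= uint32(buf[i]&0x7F) << (7 * uint(i))
    		if buf[i] < 0x80 { // continuation bit clear: last byte
    			return v, i + 1
    		}
    	}
    	return 0, 0 // truncated or over-long input
    }

    // decodeVarUint32([]byte{0x96, 0x01}) == (150, 2):
    // 0x96 contributes 0x16 (22) and continues; 0x01 contributes 1<<7 (128); 22+128 = 150.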
error) { + if s == "true" { + return true, nil + } + if s == "false" { + return false, nil + } + return false, fmt.Errorf("unable to parse '%s' as boolean", s) +} + +type element struct { + name string + attr map[string]string + childCount int +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/parser.go b/vendor/github.com/grafana/jfr-parser/parser/parser.go new file mode 100644 index 0000000000..b4f29dd94c --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/parser.go @@ -0,0 +1,595 @@ +package parser + +import ( + "bufio" + "fmt" + "io" + "os" + "unsafe" + + types2 "github.com/grafana/jfr-parser/parser/types" + "github.com/grafana/jfr-parser/parser/types/def" +) + +func ParseFile(p string) ([]*Chunk, error) { + f, err := os.Open(p) + if err != nil { + return nil, fmt.Errorf("unable to open file [%s]: %w", p, err) + } + defer f.Close() + return Parse(f) +} + +func Parse(r io.Reader) ([]*Chunk, error) { + rc, err := Decompress(r) + if err != nil { + return nil, fmt.Errorf("unable to decompress input stream: %w", err) + } + defer rc.Close() + return ParseWithOptions(bufio.NewReader(rc), &ChunkParseOptions{}) +} + +func ParseWithOptions(r io.Reader, options *ChunkParseOptions) ([]*Chunk, error) { + var chunks []*Chunk + for { + chunk := new(Chunk) + err := chunk.Parse(r, options) + if err == io.EOF { + return chunks, nil + } + if err != nil { + return chunks, fmt.Errorf("unable to parse chunk: %w", err) + } + chunks = append(chunks, chunk) + } +} + +const chunkHeaderSize = 68 +const bufferSize = 1024 * 1024 +const chunkMagic = 0x464c5200 + +type ChunkHeader struct { + Magic uint32 + Version uint32 + Size int + OffsetConstantPool int + OffsetMeta int + StartNanos uint64 + DurationNanos uint64 + StartTicks uint64 + TicksPerSecond uint64 + Features uint32 +} + +func (c *ChunkHeader) String() string { + return fmt.Sprintf("ChunkHeader{Magic: %x, Version: %x, Size: %d, OffsetConstantPool: %d, OffsetMeta: %d, StartNanos: %d, DurationNanos: %d, StartTicks: %d, TicksPerSecond: %d, Features: %d}", c.Magic, c.Version, c.Size, c.OffsetConstantPool, c.OffsetMeta, c.StartNanos, c.DurationNanos, c.StartTicks, c.TicksPerSecond, c.Features) +} + +type SymbolProcessor func(ref *types2.SymbolList) + +type Options struct { + ChunkSizeLimit int + SymbolProcessor SymbolProcessor +} + +type Parser struct { + FrameTypes types2.FrameTypeList + ThreadStates types2.ThreadStateList + Threads types2.ThreadList + Classes types2.ClassList + Methods types2.MethodList + Packages types2.PackageList + Symbols types2.SymbolList + LogLevels types2.LogLevelList + Stacktrace types2.StackTraceList + + ExecutionSample types2.ExecutionSample + ObjectAllocationInNewTLAB types2.ObjectAllocationInNewTLAB + ObjectAllocationOutsideTLAB types2.ObjectAllocationOutsideTLAB + JavaMonitorEnter types2.JavaMonitorEnter + ThreadPark types2.ThreadPark + LiveObject types2.LiveObject + ActiveSetting types2.ActiveSetting + + header ChunkHeader + options Options + buf []byte + pos int + metaSize uint32 + chunkEnd int + + TypeMap def.TypeMap + + bindFrameType *types2.BindFrameType + bindThreadState *types2.BindThreadState + bindThread *types2.BindThread + bindClass *types2.BindClass + bindMethod *types2.BindMethod + bindPackage *types2.BindPackage + bindSymbol *types2.BindSymbol + bindLogLevel *types2.BindLogLevel + bindStackFrame *types2.BindStackFrame + bindStackTrace *types2.BindStackTrace + + bindExecutionSample *types2.BindExecutionSample + + bindAllocInNewTLAB *types2.BindObjectAllocationInNewTLAB + bindAllocOutsideTLAB 
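parser.go offers two entry points: the chunk-oriented ParseFile/Parse/ParseWithOptions above, and the buffer-oriented Parser below. A minimal sketch of the chunk API ("profile.jfr" is a placeholder path; the Chunk type is defined elsewhere in this package):

    chunks, err := ParseFile("profile.jfr")
    if err != nil {
    	return fmt.Errorf("parse jfr: %w", err)
    }
    for _, chunk := range chunks {
    	_ = chunk // each Chunk carries its own header, metadata and constant pools
    }

Note that Parse transparently decompresses its input via Decompress before chunk parsing starts.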
*types2.BindObjectAllocationOutsideTLAB + bindMonitorEnter *types2.BindJavaMonitorEnter + bindThreadPark *types2.BindThreadPark + bindLiveObject *types2.BindLiveObject + bindActiveSetting *types2.BindActiveSetting +} + +func NewParser(buf []byte, options Options) *Parser { + p := &Parser{ + options: options, + buf: buf, + } + return p +} + +func (p *Parser) ParseEvent() (def.TypeID, error) { + for { + if p.pos == p.chunkEnd { + if p.pos == len(p.buf) { + return 0, io.EOF + } + if err := p.readChunk(p.pos); err != nil { + return 0, err + } + } + pp := p.pos + size, err := p.varLong() + if err != nil { + return 0, err + } + if size == 0 { + return 0, def.ErrIntOverflow + } + typ, err := p.varLong() + if err != nil { + return 0, err + } + _ = size + + ttyp := def.TypeID(typ) + switch ttyp { + case p.TypeMap.T_EXECUTION_SAMPLE: + if p.bindExecutionSample == nil { + p.pos = pp + int(size) // skip + continue + } + _, err := p.ExecutionSample.Parse(p.buf[p.pos:], p.bindExecutionSample, &p.TypeMap) + if err != nil { + return 0, err + } + p.pos = pp + int(size) + return ttyp, nil + case p.TypeMap.T_ALLOC_IN_NEW_TLAB: + if p.bindAllocInNewTLAB == nil { + p.pos = pp + int(size) // skip + continue + } + _, err := p.ObjectAllocationInNewTLAB.Parse(p.buf[p.pos:], p.bindAllocInNewTLAB, &p.TypeMap) + if err != nil { + return 0, err + } + p.pos = pp + int(size) + return ttyp, nil + case p.TypeMap.T_ALLOC_OUTSIDE_TLAB: + if p.bindAllocOutsideTLAB == nil { + p.pos = pp + int(size) // skip + continue + } + _, err := p.ObjectAllocationOutsideTLAB.Parse(p.buf[p.pos:], p.bindAllocOutsideTLAB, &p.TypeMap) + if err != nil { + return 0, err + } + p.pos = pp + int(size) + return ttyp, nil + case p.TypeMap.T_LIVE_OBJECT: + if p.bindLiveObject == nil { + p.pos = pp + int(size) // skip + continue + } + _, err := p.LiveObject.Parse(p.buf[p.pos:], p.bindLiveObject, &p.TypeMap) + if err != nil { + return 0, err + } + p.pos = pp + int(size) + return ttyp, nil + case p.TypeMap.T_MONITOR_ENTER: + if p.bindMonitorEnter == nil { + p.pos = pp + int(size) // skip + continue + } + _, err := p.JavaMonitorEnter.Parse(p.buf[p.pos:], p.bindMonitorEnter, &p.TypeMap) + if err != nil { + return 0, err + } + p.pos = pp + int(size) + return ttyp, nil + case p.TypeMap.T_THREAD_PARK: + if p.bindThreadPark == nil { + p.pos = pp + int(size) // skip + continue + } + _, err := p.ThreadPark.Parse(p.buf[p.pos:], p.bindThreadPark, &p.TypeMap) + if err != nil { + return 0, err + } + p.pos = pp + int(size) + return ttyp, nil + + case p.TypeMap.T_ACTIVE_SETTING: + if p.bindActiveSetting == nil { + p.pos = pp + int(size) // skip + continue + } + _, err := p.ActiveSetting.Parse(p.buf[p.pos:], p.bindActiveSetting, &p.TypeMap) + if err != nil { + return 0, err + } + p.pos = pp + int(size) + return ttyp, nil + default: + //fmt.Printf("skipping %s %v\n", def.TypeID2Sym(ttyp), ttyp) + p.pos = pp + int(size) + } + } +} + +func (p *Parser) ChunkHeader() ChunkHeader { + return p.header +} + +func (p *Parser) GetStacktrace(stID types2.StackTraceRef) *types2.StackTrace { + idx, ok := p.Stacktrace.IDMap[stID] + if !ok { + return nil + } + return &p.Stacktrace.StackTrace[idx] +} + +func (p *Parser) GetThreadState(ref types2.ThreadStateRef) *types2.ThreadState { + idx, ok := p.ThreadStates.IDMap[ref] + if !ok { + return nil + } + return &p.ThreadStates.ThreadState[idx] +} + +func (p *Parser) GetMethod(mID types2.MethodRef) *types2.Method { + if mID == 0 { + return nil + } + var idx int + + refIDX := int(mID) + if refIDX < len(p.Methods.IDMap.Slice) { + idx = 
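ParseEvent is the pull-style loop of the buffer-oriented parser: events with no bind target are skipped in place, and each decoded event is left in the matching exported field (ExecutionSample, ThreadPark, and so on) until the next call overwrites it. A usage sketch, assuming buf holds an already-decompressed recording:

    p := NewParser(buf, Options{})
    for {
    	typ, err := p.ParseEvent()
    	if err == io.EOF {
    		break // all chunks consumed
    	}
    	if err != nil {
    		return err
    	}
    	if typ == p.TypeMap.T_EXECUTION_SAMPLE {
    		sample := p.ExecutionSample // valid only until the next ParseEvent call
    		_ = sample
    	}
    }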
int(p.Methods.IDMap.Slice[mID]) + } else { + idx = p.Methods.IDMap.Get(mID) + } + + if idx == -1 { + return nil + } + return &p.Methods.Method[idx] +} + +func (p *Parser) GetClass(cID types2.ClassRef) *types2.Class { + idx, ok := p.Classes.IDMap[cID] + if !ok { + return nil + } + return &p.Classes.Class[idx] +} + +func (p *Parser) GetSymbol(sID types2.SymbolRef) *types2.Symbol { + idx, ok := p.Symbols.IDMap[sID] + if !ok { + return nil + } + return &p.Symbols.Symbol[idx] +} + +func (p *Parser) GetSymbolString(sID types2.SymbolRef) string { + idx, ok := p.Symbols.IDMap[sID] + if !ok { + return "" + } + return p.Symbols.Symbol[idx].String +} + +func (p *Parser) readChunk(pos int) error { + if err := p.readChunkHeader(pos); err != nil { + return fmt.Errorf("error reading chunk header: %w", err) + } + + if err := p.readMeta(pos + p.header.OffsetMeta); err != nil { + return fmt.Errorf("error reading metadata: %w", err) + } + if err := p.readConstantPool(pos + p.header.OffsetConstantPool); err != nil { + return fmt.Errorf("error reading CP: %w @ %d", err, pos+p.header.OffsetConstantPool) + } + pp := p.options.SymbolProcessor + if pp != nil { + pp(&p.Symbols) + } + p.pos = pos + chunkHeaderSize + return nil +} + +func (p *Parser) seek(pos int) error { + if pos < len(p.buf) { + p.pos = pos + return nil + } + return io.ErrUnexpectedEOF +} + +func (p *Parser) byte() (byte, error) { + if p.pos >= len(p.buf) { + return 0, io.ErrUnexpectedEOF + } + b := p.buf[p.pos] + p.pos++ + return b, nil +} +func (p *Parser) varInt() (uint32, error) { + v := uint32(0) + for shift := uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if p.pos >= len(p.buf) { + return 0, io.ErrUnexpectedEOF + } + b := p.buf[p.pos] + p.pos++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + return v, nil +} + +func (p *Parser) varLong() (uint64, error) { + v64_ := uint64(0) + for shift := uint(0); shift <= 56; shift += 7 { + if p.pos >= len(p.buf) { + return 0, io.ErrUnexpectedEOF + } + b_ := p.buf[p.pos] + p.pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + return v64_, nil +} + +func (p *Parser) string() (string, error) { + if p.pos >= len(p.buf) { + return "", io.ErrUnexpectedEOF + } + b := p.buf[p.pos] + p.pos++ + switch b { //todo implement 2 + case 0: + return "", nil //todo this should be nil + case 1: + return "", nil + case 3: + bs, err := p.bytes() + if err != nil { + return "", err + } + str := *(*string)(unsafe.Pointer(&bs)) + return str, nil + case 4: + return p.charArrayString() + default: + return "", fmt.Errorf("unknown string type %d", b) + } + +} + +func (p *Parser) charArrayString() (string, error) { + l, err := p.varInt() + if err != nil { + return "", err + } + if l < 0 { + return "", def.ErrIntOverflow + } + buf := make([]rune, int(l)) + for i := 0; i < int(l); i++ { + c, err := p.varInt() + if err != nil { + return "", err + } + buf[i] = rune(c) + } + + res := string(buf) + return res, nil +} + +func (p *Parser) bytes() ([]byte, error) { + l, err := p.varInt() + if err != nil { + return nil, err + } + if l < 0 { + return nil, def.ErrIntOverflow + } + if p.pos+int(l) > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + bs := p.buf[p.pos : p.pos+int(l)] + p.pos += int(l) + return bs, nil +} + +func (p *Parser) checkTypes() error { + + tint := p.TypeMap.NameMap["int"] + tlong := p.TypeMap.NameMap["long"] + tfloat := p.TypeMap.NameMap["float"] + tboolean := 
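readChunk above invokes the optional Options.SymbolProcessor on the chunk's symbol table right after the constant pool is loaded; the frame-name regexes in symbols.go (the next file in this diff) appear intended for exactly this hook. A sketch of a custom processor (the slash-to-dot rewrite is only an example; types2 is the package alias already used in this file, and strings is assumed to be imported):

    func dottedSymbols() Options {
    	return Options{
    		SymbolProcessor: func(lst *types2.SymbolList) {
    			for i := range lst.Symbol {
    				// Normalize JVM-internal names, e.g. java/lang/String -> java.lang.String.
    				lst.Symbol[i].String = strings.ReplaceAll(lst.Symbol[i].String, "/", ".")
    			}
    		},
    	}
    }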
p.TypeMap.NameMap["boolean"] + tstring := p.TypeMap.NameMap["java.lang.String"] + + if tint == nil { + return fmt.Errorf("missing \"int\"") + } + if tlong == nil { + return fmt.Errorf("missing \"long\"") + } + if tfloat == nil { + return fmt.Errorf("missing \"float\"") + } + if tboolean == nil { + return fmt.Errorf("missing \"boolean\"") + } + if tstring == nil { + return fmt.Errorf("missing \"java.lang.String\"") + } + p.TypeMap.T_INT = tint.ID + p.TypeMap.T_LONG = tlong.ID + p.TypeMap.T_FLOAT = tfloat.ID + p.TypeMap.T_BOOLEAN = tboolean.ID + p.TypeMap.T_STRING = tstring.ID + + typeCPFrameType := p.TypeMap.NameMap["jdk.types.FrameType"] + typeCPThreadState := p.TypeMap.NameMap["jdk.types.ThreadState"] + typeCPThread := p.TypeMap.NameMap["java.lang.Thread"] + typeCPClass := p.TypeMap.NameMap["java.lang.Class"] + typeCPMethod := p.TypeMap.NameMap["jdk.types.Method"] + typeCPPackage := p.TypeMap.NameMap["jdk.types.Package"] + typeCPSymbol := p.TypeMap.NameMap["jdk.types.Symbol"] + typeCPLogLevel := p.TypeMap.NameMap["profiler.types.LogLevel"] + typeCPStackTrace := p.TypeMap.NameMap["jdk.types.StackTrace"] + typeCPClassLoader := p.TypeMap.NameMap["jdk.types.ClassLoader"] + + if typeCPFrameType == nil { + return fmt.Errorf("missing \"jdk.types.FrameType\"") + } + if typeCPThreadState == nil { + return fmt.Errorf("missing \"jdk.types.ThreadState\"") + } + if typeCPThread == nil { + return fmt.Errorf("missing \"java.lang.Thread\"") + } + if typeCPClass == nil { + return fmt.Errorf("missing \"java.lang.Class\"") + } + if typeCPMethod == nil { + return fmt.Errorf("missing \"jdk.types.Method\"") + } + if typeCPPackage == nil { + return fmt.Errorf("missing \"jdk.types.Package\"") + } + if typeCPSymbol == nil { + return fmt.Errorf("missing \"jdk.types.Symbol\"") + } + if typeCPStackTrace == nil { + return fmt.Errorf("missing \"jdk.types.StackTrace\"") + } + if typeCPClassLoader == nil { + return fmt.Errorf("missing \"jdk.types.ClassLoader\"") + } + p.TypeMap.T_FRAME_TYPE = typeCPFrameType.ID + p.TypeMap.T_THREAD_STATE = typeCPThreadState.ID + p.TypeMap.T_THREAD = typeCPThread.ID + p.TypeMap.T_CLASS = typeCPClass.ID + p.TypeMap.T_METHOD = typeCPMethod.ID + p.TypeMap.T_PACKAGE = typeCPPackage.ID + p.TypeMap.T_SYMBOL = typeCPSymbol.ID + if typeCPLogLevel != nil { + p.TypeMap.T_LOG_LEVEL = typeCPLogLevel.ID + } else { + p.TypeMap.T_LOG_LEVEL = 0 + } + p.TypeMap.T_STACK_TRACE = typeCPStackTrace.ID + p.TypeMap.T_CLASS_LOADER = typeCPClassLoader.ID + + typeStackFrame := p.TypeMap.NameMap["jdk.types.StackFrame"] + + if typeStackFrame == nil { + return fmt.Errorf("missing \"jdk.types.StackFrame\"") + } + p.TypeMap.T_STACK_FRAME = typeStackFrame.ID + + p.bindFrameType = types2.NewBindFrameType(typeCPFrameType, &p.TypeMap) + p.bindThreadState = types2.NewBindThreadState(typeCPThreadState, &p.TypeMap) + p.bindThread = types2.NewBindThread(typeCPThread, &p.TypeMap) + p.bindClass = types2.NewBindClass(typeCPClass, &p.TypeMap) + p.bindMethod = types2.NewBindMethod(typeCPMethod, &p.TypeMap) + p.bindPackage = types2.NewBindPackage(typeCPPackage, &p.TypeMap) + p.bindSymbol = types2.NewBindSymbol(typeCPSymbol, &p.TypeMap) + if typeCPLogLevel != nil { + p.bindLogLevel = types2.NewBindLogLevel(typeCPLogLevel, &p.TypeMap) + } else { + p.bindLogLevel = nil + } + p.bindStackTrace = types2.NewBindStackTrace(typeCPStackTrace, &p.TypeMap) + p.bindStackFrame = types2.NewBindStackFrame(typeStackFrame, &p.TypeMap) + + typeExecutionSample := p.TypeMap.NameMap["jdk.ExecutionSample"] + typeAllocInNewTLAB := 
p.TypeMap.NameMap["jdk.ObjectAllocationInNewTLAB"] + typeALlocOutsideTLAB := p.TypeMap.NameMap["jdk.ObjectAllocationOutsideTLAB"] + typeMonitorEnter := p.TypeMap.NameMap["jdk.JavaMonitorEnter"] + typeThreadPark := p.TypeMap.NameMap["jdk.ThreadPark"] + typeLiveObject := p.TypeMap.NameMap["profiler.LiveObject"] + typeActiveSetting := p.TypeMap.NameMap["jdk.ActiveSetting"] + + if typeExecutionSample != nil { + p.TypeMap.T_EXECUTION_SAMPLE = typeExecutionSample.ID + p.bindExecutionSample = types2.NewBindExecutionSample(typeExecutionSample, &p.TypeMap) + } + if typeAllocInNewTLAB != nil { + p.TypeMap.T_ALLOC_IN_NEW_TLAB = typeAllocInNewTLAB.ID + p.bindAllocInNewTLAB = types2.NewBindObjectAllocationInNewTLAB(typeAllocInNewTLAB, &p.TypeMap) + } + if typeALlocOutsideTLAB != nil { + p.TypeMap.T_ALLOC_OUTSIDE_TLAB = typeALlocOutsideTLAB.ID + p.bindAllocOutsideTLAB = types2.NewBindObjectAllocationOutsideTLAB(typeALlocOutsideTLAB, &p.TypeMap) + } + if typeMonitorEnter != nil { + p.TypeMap.T_MONITOR_ENTER = typeMonitorEnter.ID + p.bindMonitorEnter = types2.NewBindJavaMonitorEnter(typeMonitorEnter, &p.TypeMap) + } + if typeThreadPark != nil { + p.TypeMap.T_THREAD_PARK = typeThreadPark.ID + p.bindThreadPark = types2.NewBindThreadPark(typeThreadPark, &p.TypeMap) + } + if typeLiveObject != nil { + p.TypeMap.T_LIVE_OBJECT = typeLiveObject.ID + p.bindLiveObject = types2.NewBindLiveObject(typeLiveObject, &p.TypeMap) + } + if typeActiveSetting != nil { + p.TypeMap.T_ACTIVE_SETTING = typeActiveSetting.ID + p.bindActiveSetting = types2.NewBindActiveSetting(typeActiveSetting, &p.TypeMap) + } + + p.FrameTypes.IDMap = nil + p.ThreadStates.IDMap = nil + p.Threads.IDMap = nil + p.Classes.IDMap = nil + p.Methods.IDMap.Slice = nil + p.Packages.IDMap = nil + p.Symbols.IDMap = nil + p.LogLevels.IDMap = nil + p.Stacktrace.IDMap = nil + return nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/reader.go b/vendor/github.com/grafana/jfr-parser/parser/reader.go new file mode 100644 index 0000000000..6301410675 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/reader.go @@ -0,0 +1,156 @@ +package parser + +import ( + "encoding/binary" + "fmt" + "io" + + reader2 "github.com/grafana/jfr-parser/reader" +) + +const ( + StringEncodingNull = 0 + StringEncodingEmptyString = 1 + StringEncodingConstantPool = 2 + StringEncodingUtf8ByteArray = 3 + StringEncodingCharArray = 4 + StringEncodingLatin1ByteArray = 5 +) + +type Reader interface { + Boolean() (bool, error) + Byte() (int8, error) + Short() (int16, error) + Char() (uint16, error) + Int() (int32, error) + Long() (int64, error) + Float() (float32, error) + Double() (float64, error) + String() (*String, error) + + reader2.VarReader + + // TODO: Support arrays +} + +type InputReader interface { + io.Reader + io.ByteReader +} + +type reader struct { + InputReader + varR reader2.VarReader +} + +func NewReader(r InputReader, compressed bool) Reader { + var varR reader2.VarReader + if compressed { + varR = reader2.NewCompressed(r) + } else { + varR = reader2.NewUncompressed(r) + } + return reader{ + InputReader: r, + varR: varR, + } +} + +func (r reader) Boolean() (bool, error) { + var n int8 + err := binary.Read(r, binary.BigEndian, &n) + if n == 0 { + return false, err + } + return true, err +} + +func (r reader) Byte() (int8, error) { + var n int8 + err := binary.Read(r, binary.BigEndian, &n) + return n, err +} + +func (r reader) Short() (int16, error) { + return reader2.Short(r) +} + +func (r reader) Char() (uint16, error) { + var n uint16 + err := 
binary.Read(r, binary.BigEndian, &n)
+    return n, err
+}
+
+func (r reader) Int() (int32, error) {
+    return reader2.Int(r)
+}
+
+func (r reader) Long() (int64, error) {
+    return reader2.Long(r)
+}
+
+func (r reader) Float() (float32, error) {
+    var n float32
+    err := binary.Read(r, binary.BigEndian, &n)
+    return n, err
+}
+
+func (r reader) Double() (float64, error) {
+    var n float64
+    err := binary.Read(r, binary.BigEndian, &n)
+    return n, err
+}
+
+// TODO: Should we differentiate between null and empty?
+func (r reader) String() (*String, error) {
+    s := new(String)
+    enc, err := r.Byte()
+    if err != nil {
+        return nil, err
+    }
+    switch enc {
+    case StringEncodingNull:
+        return s, nil
+    case StringEncodingEmptyString:
+        return s, nil
+    case StringEncodingConstantPool:
+        idx, err := r.VarLong()
+        if err != nil {
+            return nil, fmt.Errorf("unable to resolve constant reference index: %w", err)
+        }
+        s.constantRef = &constantReference{index: idx}
+        return s, nil
+    case StringEncodingUtf8ByteArray, StringEncodingCharArray, StringEncodingLatin1ByteArray:
+        str, err := r.utf8()
+        if err != nil {
+            return nil, err
+        }
+        s.s = str
+        return s, nil
+    default:
+        return nil, fmt.Errorf("unsupported string type: %d", enc)
+    }
+}
+
+func (r reader) VarShort() (int16, error) {
+    return r.varR.VarShort()
+}
+
+func (r reader) VarInt() (int32, error) {
+    return r.varR.VarInt()
+}
+
+func (r reader) VarLong() (int64, error) {
+    return r.varR.VarLong()
+}
+
+func (r reader) utf8() (string, error) {
+    n, err := r.varR.VarInt()
+    if err != nil {
+        return "", err
+    }
+    // TODO: make sure n is reasonable
+    b := make([]byte, n)
+    _, err = io.ReadFull(r, b)
+    return string(b), err
+}
diff --git a/vendor/github.com/grafana/jfr-parser/parser/symbols.go b/vendor/github.com/grafana/jfr-parser/parser/symbols.go
new file mode 100644
index 0000000000..c5aa45fae1
--- /dev/null
+++ b/vendor/github.com/grafana/jfr-parser/parser/symbols.go
@@ -0,0 +1,43 @@
+package parser
+
+import (
+    "regexp"
+
+    ptypes "github.com/grafana/jfr-parser/parser/types"
+)
+
+// jdk/internal/reflect/GeneratedMethodAccessor31
+var generatedMethodAccessor = regexp.MustCompile("^(jdk/internal/reflect/GeneratedMethodAccessor)(\\d+)$")
+
+// org/example/rideshare/OrderService$$Lambda$669.0x0000000800fd7318.run
+// Fib$$Lambda.0x00007ffa600c4da0.run
+var lambdaGeneratedEnclosingClass = regexp.MustCompile("^(.+\\$\\$Lambda)(\\$?\\d*[./](0x)?[\\da-f]+|\\d+)$")
+
+// libzstd-jni-1.5.1-16931311898282279136.so.Java_com_github_luben_zstd_ZstdInputStreamNoFinalizer_decompressStream
+var zstdJniSoLibName = regexp.MustCompile("^(\\.?/tmp/)?(libzstd-jni-\\d+\\.\\d+\\.\\d+-)(\\d+)(\\.so)( \\(deleted\\))?$")
+
+// ./tmp/libamazonCorrettoCryptoProvider109b39cf33c563eb.so
+// ./tmp/amazonCorrettoCryptoProviderNativeLibraries.7382c2f79097f415/libcrypto.so (deleted)
+var amazonCorrettoCryptoProvider = regexp.MustCompile("^(\\.?/tmp/)?(lib)?(amazonCorrettoCryptoProvider)(NativeLibraries\\.)?([0-9a-f]{16})" +
+    "(/libcrypto|/libamazonCorrettoCryptoProvider)?(\\.so)( \\(deleted\\))?$")
+
+// libasyncProfiler-linux-arm64-17b9a1d8156277a98ccc871afa9a8f69215f92.so
+var pyroscopeAsyncProfiler = regexp.MustCompile(
+    "^(\\.?/tmp/)?(libasyncProfiler)-(linux-arm64|linux-musl-x64|linux-x64|macos)-(17b9a1d8156277a98ccc871afa9a8f69215f92)(\\.so)( \\(deleted\\))?$")
+
+// TODO
+// ./tmp/snappy-1.1.8-6fb9393a-3093-4706-a7e4-837efe01d078-libsnappyjava.so
+func mergeJVMGeneratedClasses(frame string) string {
+    frame = generatedMethodAccessor.ReplaceAllString(frame, "${1}_")
+    frame =
lambdaGeneratedEnclosingClass.ReplaceAllString(frame, "${1}_") + frame = zstdJniSoLibName.ReplaceAllString(frame, "libzstd-jni-_.so") + frame = amazonCorrettoCryptoProvider.ReplaceAllString(frame, "libamazonCorrettoCryptoProvider_.so") + frame = pyroscopeAsyncProfiler.ReplaceAllString(frame, "libasyncProfiler-_.so") + return frame +} + +func ProcessSymbols(ref *ptypes.SymbolList) { + for i := range ref.Symbol { //todo regex replace inplace + ref.Symbol[i].String = mergeJVMGeneratedClasses(ref.Symbol[i].String) + } +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types.go b/vendor/github.com/grafana/jfr-parser/parser/types.go new file mode 100644 index 0000000000..387de79aa5 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types.go @@ -0,0 +1,1201 @@ +package parser + +import ( + "errors" + "fmt" + "reflect" + + types2 "github.com/grafana/jfr-parser/common/types" +) + +var types = map[types2.FieldClass]func() ParseResolvable{ + types2.Boolean: func() ParseResolvable { return SetPfFunc(new(Boolean)) }, + types2.Byte: func() ParseResolvable { return SetPfFunc(new(Byte)) }, + types2.Char: func() ParseResolvable { return SetPfFunc(new(Char)) }, + types2.Double: func() ParseResolvable { return SetPfFunc(new(Double)) }, + types2.Float: func() ParseResolvable { return SetPfFunc(new(Float)) }, + types2.Int: func() ParseResolvable { return SetPfFunc(new(Int)) }, + types2.Long: func() ParseResolvable { return SetPfFunc(new(Long)) }, + types2.Short: func() ParseResolvable { return SetPfFunc(new(Short)) }, + types2.Class: func() ParseResolvable { return SetPfFunc(new(Class)) }, + types2.String: func() ParseResolvable { return SetPfFunc(new(String)) }, + types2.Thread: func() ParseResolvable { return SetPfFunc(new(Thread)) }, + types2.ClassLoader: func() ParseResolvable { return SetPfFunc(new(ClassLoader)) }, + types2.CodeBlobType: func() ParseResolvable { return SetPfFunc(new(CodeBlobType)) }, + types2.FlagValueOrigin: func() ParseResolvable { return SetPfFunc(new(FlagValueOrigin)) }, + types2.FrameType: func() ParseResolvable { return SetPfFunc(new(FrameType)) }, + types2.G1YCType: func() ParseResolvable { return SetPfFunc(new(G1YCType)) }, + types2.GCName: func() ParseResolvable { return SetPfFunc(new(GCName)) }, + types2.Method: func() ParseResolvable { return SetPfFunc(new(Method)) }, + types2.Module: func() ParseResolvable { return SetPfFunc(new(Module)) }, + types2.NarrowOopMode: func() ParseResolvable { return SetPfFunc(new(NarrowOopMode)) }, + types2.NetworkInterfaceName: func() ParseResolvable { return SetPfFunc(new(NetworkInterfaceName)) }, + types2.Package: func() ParseResolvable { return SetPfFunc(new(Package)) }, + types2.StackFrame: func() ParseResolvable { return SetPfFunc(new(StackFrame)) }, + types2.StackTrace: func() ParseResolvable { return SetPfFunc(new(StackTrace)) }, + types2.Symbol: func() ParseResolvable { return SetPfFunc(new(Symbol)) }, + types2.ThreadState: func() ParseResolvable { return SetPfFunc(new(ThreadState)) }, + types2.InflateCause: func() ParseResolvable { return SetPfFunc(new(InflateCause)) }, + types2.GCCause: func() ParseResolvable { return SetPfFunc(new(GCCause)) }, + types2.CompilerPhaseType: func() ParseResolvable { return SetPfFunc(new(CompilerPhaseType)) }, + types2.ThreadGroup: func() ParseResolvable { return SetPfFunc(new(ThreadGroup)) }, + types2.GCThresholdUpdater: func() ParseResolvable { return SetPfFunc(new(GCThresholdUpdater)) }, + types2.MetaspaceObjectType: func() ParseResolvable { return 
SetPfFunc(new(MetaspaceObjectType)) }, + types2.ExecutionMode: func() ParseResolvable { return SetPfFunc(new(ExecutionMode)) }, + types2.VMOperationType: func() ParseResolvable { return SetPfFunc(new(VMOperationType)) }, + types2.G1HeapRegionType: func() ParseResolvable { return SetPfFunc(new(G1HeapRegionType)) }, + types2.GCWhen: func() ParseResolvable { return SetPfFunc(new(GCWhen)) }, + types2.ReferenceType: func() ParseResolvable { return SetPfFunc(new(ReferenceType)) }, + types2.MetadataType: func() ParseResolvable { return SetPfFunc(new(MetadataType)) }, + types2.LogLevel: func() ParseResolvable { return SetPfFunc(new(LogLevel)) }, + types2.AttributeValue: func() ParseResolvable { return SetPfFunc(new(AttributeValue)) }, +} + +var ( + _ ParseResolveFielder = (*Class)(nil) + _ ParseResolveFielder = (*Thread)(nil) + _ ParseResolveFielder = (*ClassLoader)(nil) + _ ParseResolveFielder = (*CodeBlobType)(nil) + _ ParseResolveFielder = (*FrameType)(nil) + _ ParseResolveFielder = (*G1YCType)(nil) + _ ParseResolveFielder = (*GCName)(nil) + _ ParseResolveFielder = (*Method)(nil) + _ ParseResolveFielder = (*Module)(nil) + _ ParseResolveFielder = (*NarrowOopMode)(nil) + _ ParseResolveFielder = (*NetworkInterfaceName)(nil) + _ ParseResolveFielder = (*Package)(nil) + _ ParseResolveFielder = (*Symbol)(nil) + _ ParseResolveFielder = (*StackTrace)(nil) + _ ParseResolveFielder = (*ThreadState)(nil) + _ ParseResolveFielder = (*InflateCause)(nil) + _ ParseResolveFielder = (*GCCause)(nil) + _ ParseResolveFielder = (*CompilerPhaseType)(nil) + _ ParseResolveFielder = (*ThreadGroup)(nil) + _ ParseResolveFielder = (*GCThresholdUpdater)(nil) + _ ParseResolveFielder = (*MetaspaceObjectType)(nil) + _ ParseResolveFielder = (*ExecutionMode)(nil) + _ ParseResolveFielder = (*VMOperationType)(nil) + _ ParseResolveFielder = (*G1HeapRegionType)(nil) + _ ParseResolveFielder = (*GCWhen)(nil) + _ ParseResolveFielder = (*ReferenceType)(nil) + _ ParseResolveFielder = (*MetadataType)(nil) + _ ParseResolveFielder = (*LogLevel)(nil) + _ ParseResolveFielder = (*AttributeValue)(nil) + _ ParseResolveFielder = (*InflateCause)(nil) +) + +func ParseClass(r Reader, classes ClassMap, cpools PoolMap, classID int64) (ParseResolvable, error) { + class, ok := classes[classID] + if !ok { + return nil, fmt.Errorf("unexpected class %d", classID) + } + var v ParseResolvable + if typeFn, ok := types[types2.FieldClass(class.Name)]; ok { + v = typeFn() + } else { + v = NewParseResolvable[*DefaultStructType]() + if vx, ok := v.(*DefaultStructType); ok { + classMeta := classes[classID] + classMeta.buildFieldMap() + vx.className = classMeta.Name + vx.fieldsDict = classMeta.fieldsDict + } + } + if err := v.Parse(r, classes, cpools, class); err != nil { + return nil, err + } + return v, nil +} + +type Event interface { + Parseable +} + +type Parseable interface { + Parse(Reader, ClassMap, PoolMap, *ClassMetadata) error +} + +type Resolvable interface { + Resolve(ClassMap, PoolMap) error +} + +type ParseFieldFunc func(name string, p ParseResolvable) error + +type setFielder interface { + setField(name string, p ParseResolvable) error +} + +type parseFieldFuncSetter interface { + setParseFieldFunc(fn ParseFieldFunc) +} + +type ParseResolvable interface { + Parseable + Resolvable +} + +type ParseResolveFielder interface { + ParseResolvable + setFielder +} + +type constantReference struct { + classID int64 + field string + index int64 +} + +type BaseStructType struct { + constants []constantReference + resolved bool + fieldAssign ParseFieldFunc + 
unresolved []ParseResolvable
+}
+
+func SetPfFunc(p ParseResolvable) ParseResolvable {
+    if setter, ok := p.(parseFieldFuncSetter); ok {
+        if pf, yes := p.(setFielder); yes {
+            setter.setParseFieldFunc(pf.setField)
+        }
+    }
+    return p
+}
+
+func NewParseResolvable[T ParseResolvable]() ParseResolvable {
+    var t T
+    rt := reflect.TypeOf(t)
+    if rt.Kind() != reflect.Pointer {
+        panic(fmt.Sprintf("generic parameter should be a pointer type: %v", rt.Kind()))
+    }
+
+    instance := reflect.New(rt.Elem()).Interface()
+
+    if setter, ok := instance.(parseFieldFuncSetter); ok {
+        if pf, ok := instance.(setFielder); ok {
+            setter.setParseFieldFunc(pf.setField)
+        }
+    }
+    return instance.(ParseResolvable)
+}
+
+func (b *BaseStructType) setParseFieldFunc(fn ParseFieldFunc) {
+    b.fieldAssign = fn
+}
+
+func (b *BaseStructType) Parse(r Reader, classMap ClassMap, poolMap PoolMap, metadata *ClassMetadata) error {
+    return b.parseFields(r, classMap, poolMap, metadata, b.resolved)
+}
+
+func (b *BaseStructType) parseFields(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata, resolved bool) error {
+    for _, f := range class.Fields {
+        if f.ConstantPool {
+            if !resolved {
+                if err := b.appendConstant(r, f.Name, f.ClassID); err != nil {
+                    return fmt.Errorf("failed to parse %s: unable to append constant: %w", class.Name, err)
+                }
+            } else {
+                cPool, ok := cpools[f.ClassID]
+                if !ok {
+                    continue
+                    //return fmt.Errorf("constant pool for class [%s] doesn't exists", class.Name)
+                }
+                idx, err := r.VarLong()
+                if err != nil {
+                    return fmt.Errorf("unable to read constant index")
+                }
+                p, ok := cPool.Pool[idx]
+                if !ok {
+                    continue
+                    //return fmt.Errorf("constant value of index [%d] doesn't exists", idx)
+                }
+                if err := b.fieldAssign(f.Name, p); err != nil {
+                    return fmt.Errorf("unable to parse constant field %s: %w", f.Name, err)
+                }
+            }
+        } else if f.Dimension == 1 {
+            n, err := r.VarInt()
+            if err != nil {
+                return fmt.Errorf("failed to parse %s: unable to read array length: %w", class.Name, err)
+            }
+            // done: assert n is small enough
+            for i := int64(0); i < int64(n); i++ {
+                p, err := ParseClass(r, classes, cpools, f.ClassID)
+                if err != nil {
+                    return fmt.Errorf("failed to parse %s: unable to read an array element: %w", class.Name, err)
+                }
+                if err := b.fieldAssign(f.Name, p); err != nil {
+                    return fmt.Errorf("failed to parse %s: unable to parse an array element: %w", class.Name, err)
+                }
+
+                b.unresolved = append(b.unresolved, p) // cache fields need to resolve
+            }
+        } else {
+            p, err := ParseClass(r, classes, cpools, f.ClassID)
+            if err != nil {
+                return fmt.Errorf("failed to parse %s: unable to read a field: %w", class.Name, err)
+            }
+            if err := b.fieldAssign(f.Name, p); err != nil {
+                return fmt.Errorf("failed to parse %s: unable to parse a field: %w", class.Name, err)
+            }
+            b.unresolved = append(b.unresolved, p) // cache fields need to resolve
+        }
+    }
+    return nil
+}
+
+func (b *BaseStructType) Resolve(classMap ClassMap, poolMap PoolMap) error {
+    if !b.resolved {
+        b.resolved = true
+        for _, c := range b.constants {
+            p, ok := poolMap[c.classID]
+            if !ok {
+                // Non-existent constant pool references seem to be used to mark no value
+                continue
+            }
+            it, ok := p.Pool[c.index]
+            if !ok {
+                // Non-existent constant pool references seem to be used to mark no value
+                continue
+            }
+            if b.fieldAssign != nil {
+                if err := b.fieldAssign(c.field, it); err != nil {
+                    return fmt.Errorf("unable to resolve constants for field %s: %w", c.field, err)
+                }
+            }
+        }
+        b.constants = nil
+    }
+
+    if len(b.unresolved) > 0 {
+        for _, needResolve := range b.unresolved {
+            if err := needResolve.Resolve(classMap, poolMap); err != nil {
+                return fmt.Errorf("unable to resolve field value %v: %w", needResolve, err)
+            }
+        }
+        b.unresolved = nil
+    }
+
+    return nil
+}
+
+func appendConstant(r Reader, constants *[]constantReference, name string, class int64) error {
+    i, err := r.VarLong()
+    if err != nil {
+        return fmt.Errorf("unable to read constant index")
+    }
+    *constants = append(*constants, constantReference{field: name, index: i, classID: class})
+    return nil
+}
+
+func (b *BaseStructType) appendConstant(r Reader, name string, class int64) error {
+    i, err := r.VarLong()
+    if err != nil {
+        return fmt.Errorf("unable to read constant index")
+    }
+    b.constants = append(b.constants, constantReference{field: name, index: i, classID: class})
+    return nil
+}
+
+func parseFields(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata, constants *[]constantReference, resolved bool, cb func(string, ParseResolvable) error) error {
+    for _, f := range class.Fields {
+        if f.ConstantPool {
+            if constants != nil && !resolved {
+                if err := appendConstant(r, constants, f.Name, f.ClassID); err != nil {
+                    return fmt.Errorf("failed to parse %s: unable to append constant: %w", class.Name, err)
+                }
+            } else {
+                cPool, ok := cpools[f.ClassID]
+                if !ok {
+                    continue
+                    //return fmt.Errorf("constant pool for class [%s] doesn't exists", class.Name)
+                }
+                idx, err := r.VarLong()
+                if err != nil {
+                    return fmt.Errorf("unable to read constant index")
+                }
+                p, ok := cPool.Pool[idx]
+                if !ok {
+                    continue
+                    //return fmt.Errorf("constant value of index [%d] doesn't exists", idx)
+                }
+                if err := cb(f.Name, p); err != nil {
+                    return fmt.Errorf("unable to parse constant field %s: %w", f.Name, err)
+                }
+            }
+        } else if f.Dimension == 1 {
+            n, err := r.VarInt()
+            if err != nil {
+                return fmt.Errorf("failed to parse %s: unable to read array length: %w", class.Name, err)
+            }
+            // done: assert n is small enough
+            for i := int64(0); i < int64(n); i++ {
+                p, err := ParseClass(r, classes, cpools, f.ClassID)
+                if err != nil {
+                    return fmt.Errorf("failed to parse %s: unable to read an array element: %w", class.Name, err)
+                }
+                if err := cb(f.Name, p); err != nil {
+                    return fmt.Errorf("failed to parse %s: unable to parse an array element: %w", class.Name, err)
+                }
+            }
+        } else {
+            p, err := ParseClass(r, classes, cpools, f.ClassID)
+            if err != nil {
+                return fmt.Errorf("failed to parse %s: unable to read a field: %w", class.Name, err)
+            }
+            if err := cb(f.Name, p); err != nil {
+                return fmt.Errorf("failed to parse %s: unable to parse a field: %w", class.Name, err)
+            }
+        }
+    }
+    return nil
+}
+
+func resolveConstants(classes ClassMap, cpools PoolMap, constants *[]constantReference, resolved *bool, cb func(string, ParseResolvable) error) error {
+    if *resolved {
+        return nil
+    }
+    *resolved = true
+    for _, c := range *constants {
+        if err := ResolveConstants(classes, cpools); err != nil {
+            return fmt.Errorf("unable to resolve constants: %w", err)
+        }
+        p, ok := cpools[c.classID]
+        if !ok {
+            // Non-existent constant pool references seem to be used to mark no value
+            continue
+        }
+        it, ok := p.Pool[c.index]
+        if !ok {
+            // Non-existent constant pool references seem to be used to mark no value
+            continue
+        }
+        if err := it.Resolve(classes, cpools); err != nil {
+            return err
+        }
+        if err := cb(c.field, it); err != nil {
+            return fmt.Errorf("unable to resolve constants for field %s: %w", c.field, err)
+        }
+    }
+    *constants = nil
+    return nil
+}
+
+type Boolean bool
+
+func (b
*Boolean) Parse(r Reader, _ ClassMap, _ PoolMap, _ *ClassMetadata) error { + // TODO: Assert simpletype, no fields, etc. + x, err := r.Boolean() + *b = Boolean(x) + return err +} + +func (*Boolean) Resolve(ClassMap, PoolMap) error { return nil } + +func toBoolean(p Parseable) (bool, error) { + x, ok := p.(*Boolean) + if !ok { + return false, errors.New("not a Boolean") + } + return bool(*x), nil +} + +type Byte int8 + +func (b *Byte) Parse(r Reader, _ ClassMap, _ PoolMap, _ *ClassMetadata) error { + x, err := r.Byte() + *b = Byte(x) + return err +} + +func (*Byte) Resolve(ClassMap, PoolMap) error { return nil } + +func toByte(p Parseable) (int8, error) { + x, ok := p.(*Byte) + if !ok { + return 0, errors.New("not a Byte") + } + return int8(*x), nil +} + +type Char uint16 + +func (c *Char) Parse(r Reader, _ ClassMap, _ PoolMap, _ *ClassMetadata) error { + x, err := r.Char() + if err != nil { + return fmt.Errorf("unable to resolve char: %w", err) + } + *c = Char(x) + return nil +} + +func (*Char) Resolve(ClassMap, PoolMap) error { + return nil +} + +type Double float64 + +func (d *Double) Parse(r Reader, _ ClassMap, _ PoolMap, _ *ClassMetadata) error { + x, err := r.Double() + *d = Double(x) + return err +} + +func (*Double) Resolve(ClassMap, PoolMap) error { return nil } + +func toDouble(p Parseable) (float64, error) { + x, ok := p.(*Double) + if !ok { + return 0, errors.New("not a Double") + } + return float64(*x), nil +} + +type Float float32 + +func (f *Float) Parse(r Reader, _ ClassMap, _ PoolMap, _ *ClassMetadata) error { + x, err := r.Float() + *f = Float(x) + return err +} + +func (*Float) Resolve(ClassMap, PoolMap) error { return nil } + +func toFloat(p Parseable) (float32, error) { + x, ok := p.(*Float) + if !ok { + return 0, errors.New("not a Float") + } + return float32(*x), nil +} + +type Int int32 + +func (i *Int) Parse(r Reader, _ ClassMap, _ PoolMap, _ *ClassMetadata) error { + x, err := r.VarInt() + *i = Int(x) + return err +} + +func (*Int) Resolve(ClassMap, PoolMap) error { return nil } + +func toInt(p Parseable) (int32, error) { + x, ok := p.(*Int) + if !ok { + return 0, errors.New("not an Int") + } + return int32(*x), nil +} + +type Long int64 + +func (l *Long) Parse(r Reader, _ ClassMap, _ PoolMap, _ *ClassMetadata) error { + x, err := r.VarLong() + *l = Long(x) + return err +} + +func (*Long) Resolve(ClassMap, PoolMap) error { return nil } + +func toLong(p Parseable) (int64, error) { + x, ok := p.(*Long) + if !ok { + return 0, errors.New("not a Long") + } + return int64(*x), nil +} + +type Short int16 + +func (s *Short) Parse(r Reader, _ ClassMap, _ PoolMap, _ *ClassMetadata) error { + x, err := r.VarShort() + *s = Short(x) + return err +} + +func (*Short) Resolve(ClassMap, PoolMap) error { return nil } + +type UShort uint16 + +func (u *UShort) Parse(r Reader, _ ClassMap, _ PoolMap, _ *ClassMetadata) error { + x, err := r.VarShort() + if err != nil { + return fmt.Errorf("unable to resolve unsigned short: %w", err) + } + *u = UShort(x) + return nil +} + +func (*UShort) Resolve(ClassMap, PoolMap) error { return nil } + +type Class struct { + ClassLoader *ClassLoader + Name *Symbol + Package *Package + Modifiers int64 + BaseStructType +} + +func (c *Class) setField(name string, p ParseResolvable) (err error) { + switch name { + case "classLoader": + c.ClassLoader, err = toClassLoader(p) + case "name": + c.Name, err = toSymbol(p) + case "package": + c.Package, err = toPackage(p) + case "modifers": + c.Modifiers, err = toLong(p) + } + return err +} + +func toClass(p 
ParseResolvable) (*Class, error) { + c, ok := p.(*Class) + if !ok { + // TODO + return nil, errors.New("") + } + return c, nil +} + +type String struct { + s string + constantRef *constantReference +} + +func (s *String) Parse(r Reader, classMap ClassMap, pools PoolMap, classMetadata *ClassMetadata) error { + if classMap[classMetadata.ID].Name != "java.lang.String" { + return fmt.Errorf("expect type of java.lang.String, got type %s", classMap[classMetadata.ID].Name) + } + + x, err := r.String() + if err != nil { + return fmt.Errorf("unable to parse string: %w", err) + } + + if x.constantRef != nil { + x.constantRef.classID = classMetadata.ID + } + + *s = *x + return nil +} + +func (s *String) Resolve(_ ClassMap, poolMap PoolMap) error { + if s.constantRef != nil { + cPool := poolMap[s.constantRef.classID] + if cPool == nil { + return errors.New("the string constant pool is nil") + } + v, ok := cPool.Pool[s.constantRef.index] + if !ok { + return fmt.Errorf("string not found in the pool") + } + str, ok := v.(*String) + if !ok { + return fmt.Errorf("not type of parser.String") + } + *s = *str + } + return nil +} + +func ToString(p Parseable) (string, error) { + s, ok := p.(*String) + if !ok { + return "", errors.New("not a String") + } + return s.s, nil +} + +type Thread struct { + BaseStructType + OsName string + OsThreadID int64 + JavaName string + JavaThreadID int64 +} + +func (t *Thread) setField(name string, p ParseResolvable) (err error) { + switch name { + case "osName": + t.OsName, err = ToString(p) + case "osThreadId": + t.OsThreadID, err = toLong(p) + case "javaName": + t.JavaName, err = ToString(p) + case "javaThreadId": + t.JavaThreadID, err = toLong(p) + } + return err +} + +func toThread(p ParseResolvable) (*Thread, error) { + t, ok := p.(*Thread) + if !ok { + return nil, errors.New("not a Thread") + } + return t, nil +} + +type ClassLoader struct { + Type *Class + Name *Symbol + BaseStructType +} + +func (cl *ClassLoader) setField(name string, p ParseResolvable) (err error) { + switch name { + case "type": + cl.Type, err = toClass(p) + case "name": + cl.Name, err = toSymbol(p) + } + return err +} + +func toClassLoader(p ParseResolvable) (*ClassLoader, error) { + c, ok := p.(*ClassLoader) + if !ok { + // TODO + return nil, errors.New("") + } + return c, nil +} + +type CodeBlobType struct { + String string + BaseStructType +} + +func (cbt *CodeBlobType) setField(name string, p ParseResolvable) (err error) { + switch name { + case "string": + cbt.String, err = ToString(p) + } + return err +} + +func toCodeBlobType(p ParseResolvable) (*CodeBlobType, error) { + cbt, ok := p.(*CodeBlobType) + if !ok { + return nil, errors.New("not a CodeBlobType") + } + return cbt, nil +} + +type FlagValueOrigin struct { + String string + BaseStructType +} + +func (fvo *FlagValueOrigin) setField(name string, p ParseResolvable) (err error) { + switch name { + case "description": + fvo.String, err = ToString(p) + } + return err +} + +func toFlagValueOrigin(p Parseable) (*FlagValueOrigin, error) { + fvo, ok := p.(*FlagValueOrigin) + if !ok { + return nil, errors.New("not a FlagValueOrigin") + } + return fvo, nil +} + +type FrameType struct { + BaseStructType + Description string +} + +func (ft *FrameType) setField(name string, p ParseResolvable) (err error) { + switch name { + case "description": + ft.Description, err = ToString(p) + } + return err +} + +func toFrameType(p Parseable) (*FrameType, error) { + ft, ok := p.(*FrameType) + if !ok { + return nil, errors.New("not a FrameType") + } + return ft, 
nil +} + +type G1YCType struct { + String string + BaseStructType +} + +func (gyt *G1YCType) setField(name string, p ParseResolvable) (err error) { + switch name { + case "string": + gyt.String, err = ToString(p) + } + return err +} + +func toG1YCType(p Parseable) (*G1YCType, error) { + gyt, ok := p.(*G1YCType) + if !ok { + return nil, errors.New("not a G1YCType") + } + return gyt, nil +} + +type GCName struct { + String string + BaseStructType +} + +func (gn *GCName) setField(name string, p ParseResolvable) (err error) { + switch name { + case "string": + gn.String, err = ToString(p) + } + return err +} + +func toGCName(p Parseable) (*GCName, error) { + gn, ok := p.(*GCName) + if !ok { + return nil, errors.New("not a GCName") + } + return gn, nil +} + +type Method struct { + Type *Class + Name *Symbol + Descriptor *Symbol + Modifiers int32 + Hidden bool + BaseStructType +} + +func (m *Method) setField(name string, p ParseResolvable) (err error) { + switch name { + case "type": + m.Type, err = toClass(p) + case "name": + m.Name, err = toSymbol(p) + case "descriptor": + m.Descriptor, err = toSymbol(p) + case "modifiers": + m.Modifiers, err = toInt(p) + case "hidden": + m.Hidden, err = toBoolean(p) + } + return err +} + +func toMethod(p ParseResolvable) (*Method, error) { + m, ok := p.(*Method) + if !ok { + return nil, errors.New("not a Method") + } + return m, nil +} + +type Module struct { + Name *Symbol + Version *Symbol + Location *Symbol + ClassLoader *ClassLoader + BaseStructType +} + +func (m *Module) setField(name string, p ParseResolvable) (err error) { + switch name { + case "name": + m.Name, err = toSymbol(p) + case "version": + m.Version, err = toSymbol(p) + case "location": + m.Location, err = toSymbol(p) + case "classLoader": + m.ClassLoader, err = toClassLoader(p) + } + return err +} + +func toModule(p ParseResolvable) (*Module, error) { + m, ok := p.(*Module) + if !ok { + return nil, errors.New("not a Module") + } + return m, nil +} + +type NarrowOopMode struct { + String string + BaseStructType +} + +func (nom *NarrowOopMode) setField(name string, p ParseResolvable) (err error) { + switch name { + case "string": + nom.String, err = ToString(p) + } + return err +} + +func toNarrowOopMode(p Parseable) (*NarrowOopMode, error) { + nom, ok := p.(*NarrowOopMode) + if !ok { + return nil, errors.New("not a NarrowOopMode") + } + return nom, nil +} + +type NetworkInterfaceName struct { + NetworkInterface string + BaseStructType +} + +func (nim *NetworkInterfaceName) setField(name string, p ParseResolvable) (err error) { + switch name { + case "networkInterface": + nim.NetworkInterface, err = ToString(p) + } + return err +} + +func toNetworkInterfaceName(p Parseable) (*NetworkInterfaceName, error) { + nim, ok := p.(*NetworkInterfaceName) + if !ok { + return nil, errors.New("not a NetworkInterfaceName") + } + return nim, nil +} + +type Package struct { + Name *Symbol + BaseStructType +} + +func (pkg *Package) setField(name string, p ParseResolvable) (err error) { + switch name { + case "name": + pkg.Name, err = toSymbol(p) + } + return err +} + +func toPackage(p ParseResolvable) (*Package, error) { + pkg, ok := p.(*Package) + if !ok { + // TODO + return nil, errors.New("") + } + return pkg, nil +} + +type StackFrame struct { + Method *Method + LineNumber int32 + ByteCodeIndex int32 + Type *FrameType + BaseStructType +} + +func (sf *StackFrame) setField(name string, p ParseResolvable) (err error) { + switch name { + case "method": + sf.Method, err = toMethod(p) + case "lineNumber": + 
sf.LineNumber, err = toInt(p) + case "byteCodeIndex": + sf.ByteCodeIndex, err = toInt(p) + case "type": + sf.Type, err = toFrameType(p) + } + return err +} + +func toStackFrame(p ParseResolvable) (*StackFrame, error) { + sf, ok := p.(*StackFrame) + if !ok { + return nil, errors.New("not a StackFrame") + } + return sf, nil +} + +type StackTrace struct { + Truncated bool + Frames []*StackFrame + BaseStructType +} + +func (st *StackTrace) setField(name string, p ParseResolvable) (err error) { + switch name { + case "truncated": + st.Truncated, err = toBoolean(p) + case "frames": + var sf *StackFrame + sf, err := toStackFrame(p) + if err != nil { + return err + } + st.Frames = append(st.Frames, sf) + } + return err +} + +func toStackTrace(p ParseResolvable) (*StackTrace, error) { + st, ok := p.(*StackTrace) + if !ok { + return nil, errors.New("not a StackTrace") + } + return st, nil +} + +type Symbol struct { + String string + BaseStructType +} + +func (s *Symbol) setField(name string, p ParseResolvable) (err error) { + switch name { + case "string": + s.String, err = ToString(p) + } + return err +} + +func toSymbol(p ParseResolvable) (*Symbol, error) { + s, ok := p.(*Symbol) + if !ok { + // TODO + return nil, errors.New("") + } + return s, nil +} + +type ThreadState struct { + Name string + BaseStructType +} + +func (ts *ThreadState) setField(name string, p ParseResolvable) (err error) { + switch name { + case "name": + ts.Name, err = ToString(p) + } + return err +} + +func toThreadState(p ParseResolvable) (*ThreadState, error) { + ts, ok := p.(*ThreadState) + if !ok { + return nil, errors.New("not a ThreadState") + } + return ts, nil +} + +type InflateCause struct { + BaseStructType + Cause string +} + +func (i *InflateCause) setField(name string, p ParseResolvable) error { + return setStringField(name, "cause", p, &i.Cause) +} + +type GCCause struct { + BaseStructType + Cause string +} + +func (g *GCCause) setField(name string, p ParseResolvable) error { + return setStringField(name, "cause", p, &g.Cause) +} + +func setStringField(name, expectedFieldName string, p ParseResolvable, ptr *string) (err error) { + if name != expectedFieldName { + return + } + *ptr, err = ToString(p) + if err != nil { + return fmt.Errorf("unable to resolve string from %v(type: %T)", p, p) + } + return +} + +type CompilerPhaseType struct { + BaseStructType + Phase string +} + +func (c *CompilerPhaseType) setField(name string, p ParseResolvable) error { + return setStringField(name, "phase", p, &c.Phase) +} + +type ThreadGroup struct { + BaseStructType + Name string + Parent *ThreadGroup +} + +func (t *ThreadGroup) setField(name string, p ParseResolvable) (err error) { + switch name { + case "name": + t.Name, err = ToString(p) + case "parent": + t.Parent, err = ToThreadGroup(p) + } + return +} + +func ToThreadGroup(p ParseResolvable) (*ThreadGroup, error) { + t, ok := p.(*ThreadGroup) + if !ok { + return nil, fmt.Errorf("type *ThreadGroup expected, got %T", p) + } + + return t, nil +} + +type GCThresholdUpdater struct { + BaseStructType + Updater string +} + +func (g *GCThresholdUpdater) setField(name string, p ParseResolvable) (err error) { + return setStringField(name, "updater", p, &g.Updater) +} + +// MetaspaceObjectType jdk.types.MetaspaceObjectType +type MetaspaceObjectType struct { + BaseStructType + Type string +} + +func (m *MetaspaceObjectType) setField(name string, p ParseResolvable) (err error) { + return setStringField(name, "type", p, &m.Type) +} + +// ExecutionMode datadog.types.ExecutionMode +type 
ExecutionMode struct { + BaseStructType + Name string +} + +func (e *ExecutionMode) setField(name string, p ParseResolvable) (err error) { + return setStringField(name, "name", p, &e.Name) +} + +// VMOperationType jdk.types.VMOperationType +type VMOperationType struct { + BaseStructType + Type string +} + +func (v *VMOperationType) setField(name string, p ParseResolvable) (err error) { + return setStringField(name, "type", p, &v.Type) +} + +// G1HeapRegionType jdk.types.G1HeapRegionType +type G1HeapRegionType struct { + BaseStructType + Type string +} + +func (g *G1HeapRegionType) setField(name string, p ParseResolvable) (err error) { + return setStringField(name, "type", p, &g.Type) +} + +// GCWhen jdk.types.GCWhen +type GCWhen struct { + BaseStructType + When string +} + +func (g *GCWhen) setField(name string, p ParseResolvable) error { + return setStringField(name, "when", p, &g.When) +} + +// ReferenceType jdk.types.ReferenceType +type ReferenceType struct { + BaseStructType + Type string +} + +func (r *ReferenceType) setField(name string, p ParseResolvable) error { + return setStringField(name, "type", p, &r.Type) +} + +// MetadataType jdk.types.MetadataType +type MetadataType struct { + BaseStructType + Type string +} + +func (m *MetadataType) setField(name string, p ParseResolvable) error { + return setStringField(name, "type", p, &m.Type) +} + +// LogLevel profiler.types.LogLevel +type LogLevel struct { + BaseStructType + Name string +} + +func (l *LogLevel) setField(name string, p ParseResolvable) error { + return setStringField(name, "name", p, &l.Name) +} + +// AttributeValue profiler.types.AttributeValue +type AttributeValue struct { + BaseStructType + Value string +} + +func (a *AttributeValue) setField(name string, p ParseResolvable) error { + return setStringField(name, "value", p, &a.Value) +} + +type ParseResolvableArray []ParseResolvable + +func (a ParseResolvableArray) Parse(r Reader, classes ClassMap, cpools PoolMap, class *ClassMetadata) error { + return nil +} + +func (a ParseResolvableArray) Resolve(classes ClassMap, cpools PoolMap) error { + for _, resolvable := range a { + if err := resolvable.Resolve(classes, cpools); err != nil { + return err + } + } + return nil +} + +// DefaultStructType represents any type that is not supported by the parser. +// This will allow to still read the unsupported type instead of returning an error. +type DefaultStructType struct { + BaseStructType + className string + fieldsDict map[string]*FieldMetadata + fields map[string]ParseResolvable +} + +func (d *DefaultStructType) setField(name string, p ParseResolvable) error { + if d.fields == nil { + d.fields = make(map[string]ParseResolvable) + } + + if d.fieldsDict[name].IsArray() { + if d.fields[name] == nil { + d.fields[name] = make(ParseResolvableArray, 0, 1) + } + d.fields[name] = append(d.fields[name].(ParseResolvableArray), p) + } else { + d.fields[name] = p + } + + return nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/active_settings.go b/vendor/github.com/grafana/jfr-parser/parser/types/active_settings.go new file mode 100644 index 0000000000..fdfbea585b --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/active_settings.go @@ -0,0 +1,400 @@ +// Code generated by gen/main.go. DO NOT EDIT. 
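The generated decoders that follow (BindActiveSetting and the other Bind* types) never go through the Reader interface from parser/reader.go; they walk a raw byte slice and inline the same LEB128-style varint loop for every integer field, with an explicit overflow guard at 32 bits. As a reading aid, here is a standalone sketch of that loop; readVarUint32, errIntOverflow, and the example bytes are illustrative names for this note, not part of the vendored package:

package main

import (
    "errors"
    "fmt"
)

var errIntOverflow = errors.New("varint overflows a 32-bit integer")

// readVarUint32 decodes the LEB128-style encoding the generated JFR
// parsers inline: 7 payload bits per byte, low-order group first, and
// the high bit set on every byte except the last.
func readVarUint32(data []byte, pos int) (v uint32, next int, err error) {
    for shift := uint(0); ; shift += 7 {
        if shift >= 32 {
            return 0, 0, errIntOverflow // same guard as the generated code
        }
        if pos >= len(data) {
            return 0, 0, errors.New("unexpected EOF")
        }
        b := data[pos]
        pos++
        v |= uint32(b&0x7F) << shift
        if b < 0x80 { // high bit clear: final byte
            return v, pos, nil
        }
    }
}

func main() {
    // 0x96 0x01 encodes 150: 0x16 | (0x01 << 7).
    v, next, err := readVarUint32([]byte{0x96, 0x01}, 0)
    fmt.Println(v, next, err) // 150 2 <nil>
}

The 64-bit variant in the generated code differs only in capping the shift at 56 and taking all eight bits of the ninth byte.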
+ +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindActiveSetting struct { + Temp ActiveSetting + Fields []BindFieldActiveSetting +} + +type BindFieldActiveSetting struct { + Field *def.Field + uint64 *uint64 + ThreadRef *ThreadRef + StackTraceRef *StackTraceRef + string *string +} + +func NewBindActiveSetting(typ *def.Class, typeMap *def.TypeMap) *BindActiveSetting { + res := new(BindActiveSetting) + res.Fields = make([]BindFieldActiveSetting, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "startTime": + if typ.Fields[i].Equals(&def.Field{Name: "startTime", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldActiveSetting{Field: &typ.Fields[i], uint64: &res.Temp.StartTime}) + } else { + res.Fields = append(res.Fields, BindFieldActiveSetting{Field: &typ.Fields[i]}) // skip changed field + } + case "duration": + if typ.Fields[i].Equals(&def.Field{Name: "duration", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldActiveSetting{Field: &typ.Fields[i], uint64: &res.Temp.Duration}) + } else { + res.Fields = append(res.Fields, BindFieldActiveSetting{Field: &typ.Fields[i]}) // skip changed field + } + case "eventThread": + if typ.Fields[i].Equals(&def.Field{Name: "eventThread", Type: typeMap.T_THREAD, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldActiveSetting{Field: &typ.Fields[i], ThreadRef: &res.Temp.EventThread}) + } else { + res.Fields = append(res.Fields, BindFieldActiveSetting{Field: &typ.Fields[i]}) // skip changed field + } + case "stackTrace": + if typ.Fields[i].Equals(&def.Field{Name: "stackTrace", Type: typeMap.T_STACK_TRACE, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldActiveSetting{Field: &typ.Fields[i], StackTraceRef: &res.Temp.StackTrace}) + } else { + res.Fields = append(res.Fields, BindFieldActiveSetting{Field: &typ.Fields[i]}) // skip changed field + } + case "id": + if typ.Fields[i].Equals(&def.Field{Name: "id", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldActiveSetting{Field: &typ.Fields[i], uint64: &res.Temp.Id}) + } else { + res.Fields = append(res.Fields, BindFieldActiveSetting{Field: &typ.Fields[i]}) // skip changed field + } + case "name": + if typ.Fields[i].Equals(&def.Field{Name: "name", Type: typeMap.T_STRING, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldActiveSetting{Field: &typ.Fields[i], string: &res.Temp.Name}) + } else { + res.Fields = append(res.Fields, BindFieldActiveSetting{Field: &typ.Fields[i]}) // skip changed field + } + case "value": + if typ.Fields[i].Equals(&def.Field{Name: "value", Type: typeMap.T_STRING, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldActiveSetting{Field: &typ.Fields[i], string: &res.Temp.Value}) + } else { + res.Fields = append(res.Fields, BindFieldActiveSetting{Field: &typ.Fields[i]}) // skip changed field + } + default: + res.Fields = append(res.Fields, BindFieldActiveSetting{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type ActiveSetting struct { + StartTime uint64 + Duration uint64 + EventThread ThreadRef + StackTrace StackTraceRef + Id uint64 + Name string + Value string +} + +func (this *ActiveSetting) Parse(data []byte, bind *BindActiveSetting, typeMap *def.TypeMap) (pos int, err 
error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + switch bind.Fields[bindFieldIndex].Field.Type { + case typeMap.T_THREAD: + if bind.Fields[bindFieldIndex].ThreadRef != nil { + *bind.Fields[bindFieldIndex].ThreadRef = ThreadRef(v32_) + } + case typeMap.T_STACK_TRACE: + if bind.Fields[bindFieldIndex].StackTraceRef != nil { + *bind.Fields[bindFieldIndex].StackTraceRef = StackTraceRef(v32_) + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + if bind.Fields[bindFieldIndex].string != nil { + *bind.Fields[bindFieldIndex].string = s_ + } + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + if bind.Fields[bindFieldIndex].uint64 != nil { + *bind.Fields[bindFieldIndex].uint64 = v64_ + } + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if 
bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. ") + } + } + } + } + } + } + } + *this = bind.Temp + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/allocation_in_new_tlab.go b/vendor/github.com/grafana/jfr-parser/parser/types/allocation_in_new_tlab.go new file mode 100644 index 0000000000..b50358aa56 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/allocation_in_new_tlab.go @@ -0,0 +1,402 @@ +// Code generated by gen/main.go. DO NOT EDIT. 
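As with BindActiveSetting above, each generated NewBind* constructor walks the event type's declared fields and compares every one against the exact field the generator expected: name, type ID, constant-pool flag, and array flag must all match before a destination pointer into Temp is wired up. Declared fields that do not match are still appended, just without a destination, so the Parse loop decodes and discards them; schema drift degrades to skipped fields rather than parse failures. A minimal standalone sketch of that matching strategy follows; Field, bindField, and bind are illustrative names for this note, not the vendored API:

package main

import "fmt"

// Field mirrors what the generated binders compare: a field name plus
// the encoding details that must all match for a binding to be kept.
type Field struct {
    Name         string
    Type         int
    ConstantPool bool
    Array        bool
}

// bindField pairs a declared field with an optional destination; a nil
// destination means "decode and discard" (the schema changed).
type bindField struct {
    field *Field
    dst   *uint64
}

// bind keeps a destination pointer only when the declared field equals
// the expected one; changed or unknown fields are bound without one.
func bind(declared []Field, expected map[string]Field, dests map[string]*uint64) []bindField {
    out := make([]bindField, 0, len(declared))
    for i := range declared {
        f := &declared[i]
        if want, ok := expected[f.Name]; ok && want == *f {
            out = append(out, bindField{field: f, dst: dests[f.Name]})
        } else {
            out = append(out, bindField{field: f}) // skip changed or unknown field
        }
    }
    return out
}

func main() {
    var startTime uint64
    declared := []Field{
        {Name: "startTime", Type: 7}, // matches the expected schema
        {Name: "newField", Type: 9},  // added by a newer recording
    }
    bound := bind(declared,
        map[string]Field{"startTime": {Name: "startTime", Type: 7}},
        map[string]*uint64{"startTime": &startTime})
    for _, b := range bound {
        fmt.Printf("%s bound=%v\n", b.field.Name, b.dst != nil)
    }
    // Prints: startTime bound=true, then newField bound=false.
}

Decoding then becomes a single pass over the bound fields, writing through the destination only when it is non-nil, which is exactly the shape of the ObjectAllocationInNewTLAB.Parse method below.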
+ +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindObjectAllocationInNewTLAB struct { + Temp ObjectAllocationInNewTLAB + Fields []BindFieldObjectAllocationInNewTLAB +} + +type BindFieldObjectAllocationInNewTLAB struct { + Field *def.Field + uint64 *uint64 + ThreadRef *ThreadRef + StackTraceRef *StackTraceRef + ClassRef *ClassRef +} + +func NewBindObjectAllocationInNewTLAB(typ *def.Class, typeMap *def.TypeMap) *BindObjectAllocationInNewTLAB { + res := new(BindObjectAllocationInNewTLAB) + res.Fields = make([]BindFieldObjectAllocationInNewTLAB, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "startTime": + if typ.Fields[i].Equals(&def.Field{Name: "startTime", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldObjectAllocationInNewTLAB{Field: &typ.Fields[i], uint64: &res.Temp.StartTime}) + } else { + res.Fields = append(res.Fields, BindFieldObjectAllocationInNewTLAB{Field: &typ.Fields[i]}) // skip changed field + } + case "eventThread": + if typ.Fields[i].Equals(&def.Field{Name: "eventThread", Type: typeMap.T_THREAD, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldObjectAllocationInNewTLAB{Field: &typ.Fields[i], ThreadRef: &res.Temp.EventThread}) + } else { + res.Fields = append(res.Fields, BindFieldObjectAllocationInNewTLAB{Field: &typ.Fields[i]}) // skip changed field + } + case "stackTrace": + if typ.Fields[i].Equals(&def.Field{Name: "stackTrace", Type: typeMap.T_STACK_TRACE, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldObjectAllocationInNewTLAB{Field: &typ.Fields[i], StackTraceRef: &res.Temp.StackTrace}) + } else { + res.Fields = append(res.Fields, BindFieldObjectAllocationInNewTLAB{Field: &typ.Fields[i]}) // skip changed field + } + case "objectClass": + if typ.Fields[i].Equals(&def.Field{Name: "objectClass", Type: typeMap.T_CLASS, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldObjectAllocationInNewTLAB{Field: &typ.Fields[i], ClassRef: &res.Temp.ObjectClass}) + } else { + res.Fields = append(res.Fields, BindFieldObjectAllocationInNewTLAB{Field: &typ.Fields[i]}) // skip changed field + } + case "allocationSize": + if typ.Fields[i].Equals(&def.Field{Name: "allocationSize", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldObjectAllocationInNewTLAB{Field: &typ.Fields[i], uint64: &res.Temp.AllocationSize}) + } else { + res.Fields = append(res.Fields, BindFieldObjectAllocationInNewTLAB{Field: &typ.Fields[i]}) // skip changed field + } + case "tlabSize": + if typ.Fields[i].Equals(&def.Field{Name: "tlabSize", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldObjectAllocationInNewTLAB{Field: &typ.Fields[i], uint64: &res.Temp.TlabSize}) + } else { + res.Fields = append(res.Fields, BindFieldObjectAllocationInNewTLAB{Field: &typ.Fields[i]}) // skip changed field + } + case "contextId": + if typ.Fields[i].Equals(&def.Field{Name: "contextId", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldObjectAllocationInNewTLAB{Field: &typ.Fields[i], uint64: &res.Temp.ContextId}) + } else { + res.Fields = append(res.Fields, BindFieldObjectAllocationInNewTLAB{Field: &typ.Fields[i]}) // skip changed field + } + default: + res.Fields = append(res.Fields, 
BindFieldObjectAllocationInNewTLAB{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type ObjectAllocationInNewTLAB struct { + StartTime uint64 + EventThread ThreadRef + StackTrace StackTraceRef + ObjectClass ClassRef + AllocationSize uint64 + TlabSize uint64 + ContextId uint64 +} + +func (this *ObjectAllocationInNewTLAB) Parse(data []byte, bind *BindObjectAllocationInNewTLAB, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + switch bind.Fields[bindFieldIndex].Field.Type { + case typeMap.T_THREAD: + if bind.Fields[bindFieldIndex].ThreadRef != nil { + *bind.Fields[bindFieldIndex].ThreadRef = ThreadRef(v32_) + } + case typeMap.T_STACK_TRACE: + if bind.Fields[bindFieldIndex].StackTraceRef != nil { + *bind.Fields[bindFieldIndex].StackTraceRef = StackTraceRef(v32_) + } + case typeMap.T_CLASS: + if bind.Fields[bindFieldIndex].ClassRef != nil { + *bind.Fields[bindFieldIndex].ClassRef = ClassRef(v32_) + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + // skipping + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + if bind.Fields[bindFieldIndex].uint64 != nil { + *bind.Fields[bindFieldIndex].uint64 = v64_ + } + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 
{ + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. 
") + } + } + } + } + } + } + } + *this = bind.Temp + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/allocation_outside_tlab.go b/vendor/github.com/grafana/jfr-parser/parser/types/allocation_outside_tlab.go new file mode 100644 index 0000000000..3ff5288b36 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/allocation_outside_tlab.go @@ -0,0 +1,395 @@ +// Code generated by gen/main.go. DO NOT EDIT. + +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindObjectAllocationOutsideTLAB struct { + Temp ObjectAllocationOutsideTLAB + Fields []BindFieldObjectAllocationOutsideTLAB +} + +type BindFieldObjectAllocationOutsideTLAB struct { + Field *def.Field + uint64 *uint64 + ThreadRef *ThreadRef + StackTraceRef *StackTraceRef + ClassRef *ClassRef +} + +func NewBindObjectAllocationOutsideTLAB(typ *def.Class, typeMap *def.TypeMap) *BindObjectAllocationOutsideTLAB { + res := new(BindObjectAllocationOutsideTLAB) + res.Fields = make([]BindFieldObjectAllocationOutsideTLAB, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "startTime": + if typ.Fields[i].Equals(&def.Field{Name: "startTime", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldObjectAllocationOutsideTLAB{Field: &typ.Fields[i], uint64: &res.Temp.StartTime}) + } else { + res.Fields = append(res.Fields, BindFieldObjectAllocationOutsideTLAB{Field: &typ.Fields[i]}) // skip changed field + } + case "eventThread": + if typ.Fields[i].Equals(&def.Field{Name: "eventThread", Type: typeMap.T_THREAD, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldObjectAllocationOutsideTLAB{Field: &typ.Fields[i], ThreadRef: &res.Temp.EventThread}) + } else { + res.Fields = append(res.Fields, BindFieldObjectAllocationOutsideTLAB{Field: &typ.Fields[i]}) // skip changed field + } + case "stackTrace": + if typ.Fields[i].Equals(&def.Field{Name: "stackTrace", Type: typeMap.T_STACK_TRACE, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldObjectAllocationOutsideTLAB{Field: &typ.Fields[i], StackTraceRef: &res.Temp.StackTrace}) + } else { + res.Fields = append(res.Fields, BindFieldObjectAllocationOutsideTLAB{Field: &typ.Fields[i]}) // skip changed field + } + case "objectClass": + if typ.Fields[i].Equals(&def.Field{Name: "objectClass", Type: typeMap.T_CLASS, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldObjectAllocationOutsideTLAB{Field: &typ.Fields[i], ClassRef: &res.Temp.ObjectClass}) + } else { + res.Fields = append(res.Fields, BindFieldObjectAllocationOutsideTLAB{Field: &typ.Fields[i]}) // skip changed field + } + case "allocationSize": + if typ.Fields[i].Equals(&def.Field{Name: "allocationSize", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldObjectAllocationOutsideTLAB{Field: &typ.Fields[i], uint64: &res.Temp.AllocationSize}) + } else { + res.Fields = append(res.Fields, BindFieldObjectAllocationOutsideTLAB{Field: &typ.Fields[i]}) // skip changed field + } + case "contextId": + if typ.Fields[i].Equals(&def.Field{Name: "contextId", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldObjectAllocationOutsideTLAB{Field: &typ.Fields[i], uint64: &res.Temp.ContextId}) + } else { + res.Fields = append(res.Fields, BindFieldObjectAllocationOutsideTLAB{Field: &typ.Fields[i]}) 
// skip changed field + } + default: + res.Fields = append(res.Fields, BindFieldObjectAllocationOutsideTLAB{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type ObjectAllocationOutsideTLAB struct { + StartTime uint64 + EventThread ThreadRef + StackTrace StackTraceRef + ObjectClass ClassRef + AllocationSize uint64 + ContextId uint64 +} + +func (this *ObjectAllocationOutsideTLAB) Parse(data []byte, bind *BindObjectAllocationOutsideTLAB, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + switch bind.Fields[bindFieldIndex].Field.Type { + case typeMap.T_THREAD: + if bind.Fields[bindFieldIndex].ThreadRef != nil { + *bind.Fields[bindFieldIndex].ThreadRef = ThreadRef(v32_) + } + case typeMap.T_STACK_TRACE: + if bind.Fields[bindFieldIndex].StackTraceRef != nil { + *bind.Fields[bindFieldIndex].StackTraceRef = StackTraceRef(v32_) + } + case typeMap.T_CLASS: + if bind.Fields[bindFieldIndex].ClassRef != nil { + *bind.Fields[bindFieldIndex].ClassRef = ClassRef(v32_) + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + // skipping + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + if bind.Fields[bindFieldIndex].uint64 != nil { + *bind.Fields[bindFieldIndex].uint64 = v64_ + } + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case 
typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. 
") + } + } + } + } + } + } + } + *this = bind.Temp + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/bind.go b/vendor/github.com/grafana/jfr-parser/parser/types/bind.go new file mode 100644 index 0000000000..ab1254f4c2 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/bind.go @@ -0,0 +1 @@ +package types diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/class.go b/vendor/github.com/grafana/jfr-parser/parser/types/class.go new file mode 100644 index 0000000000..5280af6d6e --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/class.go @@ -0,0 +1,412 @@ +// Code generated by gen/main.go. DO NOT EDIT. + +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindClass struct { + Temp Class + Fields []BindFieldClass +} + +type BindFieldClass struct { + Field *def.Field + ClassLoaderRef *ClassLoaderRef + SymbolRef *SymbolRef + PackageRef *PackageRef + uint32 *uint32 +} + +func NewBindClass(typ *def.Class, typeMap *def.TypeMap) *BindClass { + res := new(BindClass) + res.Fields = make([]BindFieldClass, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "classLoader": + res.Fields = append(res.Fields, BindFieldClass{Field: &typ.Fields[i]}) // skip to save mem + case "name": + if typ.Fields[i].Equals(&def.Field{Name: "name", Type: typeMap.T_SYMBOL, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldClass{Field: &typ.Fields[i], SymbolRef: &res.Temp.Name}) + } else { + res.Fields = append(res.Fields, BindFieldClass{Field: &typ.Fields[i]}) // skip changed field + } + case "package": + res.Fields = append(res.Fields, BindFieldClass{Field: &typ.Fields[i]}) // skip to save mem + case "modifiers": + res.Fields = append(res.Fields, BindFieldClass{Field: &typ.Fields[i]}) // skip to save mem + default: + res.Fields = append(res.Fields, BindFieldClass{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type ClassRef uint32 +type ClassList struct { + IDMap map[ClassRef]uint32 + Class []Class +} + +type Class struct { + // skip classLoader + Name SymbolRef + // skip package + // skip modifiers +} + +func (this *ClassList) Parse(data []byte, bind *BindClass, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + n := int(v32_) + this.IDMap = make(map[ClassRef]uint32, n) + this.Class = make([]Class, n) + for i := 0; i < n; i++ { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + id := ClassRef(v32_) + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + 
bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + switch bind.Fields[bindFieldIndex].Field.Type { + case typeMap.T_CLASS_LOADER: + if bind.Fields[bindFieldIndex].ClassLoaderRef != nil { + *bind.Fields[bindFieldIndex].ClassLoaderRef = ClassLoaderRef(v32_) + } + case typeMap.T_SYMBOL: + if bind.Fields[bindFieldIndex].SymbolRef != nil { + *bind.Fields[bindFieldIndex].SymbolRef = SymbolRef(v32_) + } + case typeMap.T_PACKAGE: + if bind.Fields[bindFieldIndex].PackageRef != nil { + *bind.Fields[bindFieldIndex].PackageRef = PackageRef(v32_) + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + // skipping + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if bind.Fields[bindFieldIndex].uint32 != nil { + *bind.Fields[bindFieldIndex].uint32 = v32_ + } + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + // skipping + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < 
len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. ") + } + } + } + } + } + } + } + this.Class[i] = bind.Temp + this.IDMap[id] = uint32(i) + } + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/classloader.go b/vendor/github.com/grafana/jfr-parser/parser/types/classloader.go new file mode 100644 index 0000000000..187434e984 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/classloader.go @@ -0,0 +1,402 @@ +// Code generated by gen/main.go. DO NOT EDIT. 
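Aside on the recurring decode loop: every generated Parse above inlines the same JFR compressed-integer (LEB128-style varint) reader instead of calling a helper, trading binary size for speed. A standalone sketch of what those loops do (the helper names readUvarint32/readUvarint64 are ours, not the library's) — note the 64-bit form reads at most nine bytes and takes all eight bits of the ninth:

package main

import (
	"errors"
	"fmt"
	"io"
)

var errIntOverflow = errors.New("int overflow")

// readUvarint32 mirrors the inlined loop: 7 data bits per byte,
// little-endian, high bit = continuation, at most 5 bytes (shift 0..28).
func readUvarint32(data []byte, pos int) (uint32, int, error) {
	var v uint32
	for shift := uint(0); ; shift += 7 {
		if shift >= 32 {
			return 0, 0, errIntOverflow
		}
		if pos >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[pos]
		pos++
		v |= uint32(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, pos, nil
}

// readUvarint64 is the 64-bit variant: at most 9 bytes, and the 9th byte
// (shift == 56) contributes all 8 bits with no continuation check.
func readUvarint64(data []byte, pos int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); shift <= 56; shift += 7 {
		if pos >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[pos]
		pos++
		if shift == 56 {
			v |= uint64(b) << shift
			break
		}
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, pos, nil
}

func main() {
	v, next, err := readUvarint32([]byte{0xAC, 0x02}, 0)
	fmt.Println(v, next, err) // 300 2 <nil>
}

The same loop also feeds constant-pool references: when Field.ConstantPool is true, the decoded value is not the field itself but an index (ThreadRef, StackTraceRef, ClassRef, ...) resolved later against the pools built by the *List.Parse methods.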
+ +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindClassLoader struct { + Temp ClassLoader + Fields []BindFieldClassLoader +} + +type BindFieldClassLoader struct { + Field *def.Field + ClassRef *ClassRef + SymbolRef *SymbolRef +} + +func NewBindClassLoader(typ *def.Class, typeMap *def.TypeMap) *BindClassLoader { + res := new(BindClassLoader) + res.Fields = make([]BindFieldClassLoader, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "type": + if typ.Fields[i].Equals(&def.Field{Name: "type", Type: typeMap.T_CLASS, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldClassLoader{Field: &typ.Fields[i], ClassRef: &res.Temp.Type}) + } else { + res.Fields = append(res.Fields, BindFieldClassLoader{Field: &typ.Fields[i]}) // skip changed field + } + case "name": + if typ.Fields[i].Equals(&def.Field{Name: "name", Type: typeMap.T_SYMBOL, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldClassLoader{Field: &typ.Fields[i], SymbolRef: &res.Temp.Name}) + } else { + res.Fields = append(res.Fields, BindFieldClassLoader{Field: &typ.Fields[i]}) // skip changed field + } + default: + res.Fields = append(res.Fields, BindFieldClassLoader{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type ClassLoaderRef uint32 +type ClassLoaderList struct { + IDMap map[ClassLoaderRef]uint32 + ClassLoader []ClassLoader +} + +type ClassLoader struct { + Type ClassRef + Name SymbolRef +} + +func (this *ClassLoaderList) Parse(data []byte, bind *BindClassLoader, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + n := int(v32_) + this.IDMap = make(map[ClassLoaderRef]uint32, n) + this.ClassLoader = make([]ClassLoader, n) + for i := 0; i < n; i++ { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + id := ClassLoaderRef(v32_) + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + switch bind.Fields[bindFieldIndex].Field.Type { + case typeMap.T_CLASS: + if bind.Fields[bindFieldIndex].ClassRef != nil { + *bind.Fields[bindFieldIndex].ClassRef = ClassRef(v32_) + } + case typeMap.T_SYMBOL: + if 
bind.Fields[bindFieldIndex].SymbolRef != nil { + *bind.Fields[bindFieldIndex].SymbolRef = SymbolRef(v32_) + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + // skipping + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + // skipping + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if 
pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. ") + } + } + } + } + } + } + } + this.ClassLoader[i] = bind.Temp + this.IDMap[id] = uint32(i) + } + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/def/meta.go b/vendor/github.com/grafana/jfr-parser/parser/types/def/meta.go new file mode 100644 index 0000000000..5412a719f2 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/def/meta.go @@ -0,0 +1,101 @@ +package def + +import ( + "fmt" + "strconv" +) + +var ErrIntOverflow = fmt.Errorf("int overflow") +var ErrNameEmpty = fmt.Errorf("class/field name is empty") + +type Class struct { + Name string + ID TypeID + Fields []Field +} + +func NewClass(attrs map[string]string, childCount int) (*Class, error) { + id, err := strconv.Atoi(attrs["id"]) + if err != nil { + return nil, err + } + name := attrs["name"] + if name == "" { + return nil, ErrNameEmpty + } + return &Class{ + Name: name, + ID: TypeID(id), + Fields: make([]Field, 0, childCount), + }, nil +} + +func (c *Class) String() string { + if c == nil { + return "class{nil}" + } + return fmt.Sprintf("class{name: %s, id: %d, fields: %+v}", c.Name, c.ID, c.Fields) +} + +func (c *Class) TrimLastField(fieldName string) []Field { + if len(c.Fields) > 0 && c.Fields[len(c.Fields)-1].Name == fieldName { + return c.Fields[:len(c.Fields)-1] + } else { + return c.Fields + } +} + +func (c *Class) Field(name string) *Field { + for i := range c.Fields { + if c.Fields[i].Name == name { + return &c.Fields[i] + } + } + return nil +} + +type Field struct { + Name string + Type TypeID + ConstantPool bool + Array bool +} + +func (f *Field) Equals(other *Field) bool { + return f.Name == other.Name && + f.Type == other.Type && + f.ConstantPool == other.ConstantPool && + f.Array == other.Array +} + +func (f *Field) String() string { + return fmt.Sprintf("field{name: %s, typ: %d, constantPool: %t}", f.Name, f.Type, f.ConstantPool) +} + +func NewField(attrs map[string]string) (Field, error) { + cls := attrs["class"] + typ, err := strconv.Atoi(cls) + if err != nil { + return Field{}, err + } + name := attrs["name"] + if name == "" { + return Field{}, ErrNameEmpty + } + dimen := attrs["dimension"] + array := false + if dimen 
!= "" { + if dimen == "1" { + array = true + } else { + return Field{}, fmt.Errorf("unsupported dimension %s", dimen) + } + } + + return Field{ + Name: name, + Type: TypeID(typ), + ConstantPool: attrs["constantPool"] == "true", + Array: array, + }, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/def/types.go b/vendor/github.com/grafana/jfr-parser/parser/types/def/types.go new file mode 100644 index 0000000000..545648f3a2 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/def/types.go @@ -0,0 +1,35 @@ +package def + +type TypeID uint64 + +type TypeMap struct { + IDMap map[TypeID]*Class + NameMap map[string]*Class + + T_STRING TypeID + T_INT TypeID + T_LONG TypeID + T_FLOAT TypeID + T_BOOLEAN TypeID + + T_CLASS TypeID + T_THREAD TypeID + T_FRAME_TYPE TypeID + T_THREAD_STATE TypeID + T_STACK_TRACE TypeID + T_METHOD TypeID + T_PACKAGE TypeID + T_SYMBOL TypeID + T_LOG_LEVEL TypeID + + T_STACK_FRAME TypeID + T_CLASS_LOADER TypeID + + T_EXECUTION_SAMPLE TypeID + T_ALLOC_IN_NEW_TLAB TypeID + T_ALLOC_OUTSIDE_TLAB TypeID + T_LIVE_OBJECT TypeID + T_MONITOR_ENTER TypeID + T_THREAD_PARK TypeID + T_ACTIVE_SETTING TypeID +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/execution_sample.go b/vendor/github.com/grafana/jfr-parser/parser/types/execution_sample.go new file mode 100644 index 0000000000..914c09d88e --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/execution_sample.go @@ -0,0 +1,388 @@ +// Code generated by gen/main.go. DO NOT EDIT. + +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindExecutionSample struct { + Temp ExecutionSample + Fields []BindFieldExecutionSample +} + +type BindFieldExecutionSample struct { + Field *def.Field + uint64 *uint64 + ThreadRef *ThreadRef + StackTraceRef *StackTraceRef + ThreadStateRef *ThreadStateRef +} + +func NewBindExecutionSample(typ *def.Class, typeMap *def.TypeMap) *BindExecutionSample { + res := new(BindExecutionSample) + res.Fields = make([]BindFieldExecutionSample, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "startTime": + if typ.Fields[i].Equals(&def.Field{Name: "startTime", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldExecutionSample{Field: &typ.Fields[i], uint64: &res.Temp.StartTime}) + } else { + res.Fields = append(res.Fields, BindFieldExecutionSample{Field: &typ.Fields[i]}) // skip changed field + } + case "sampledThread": + if typ.Fields[i].Equals(&def.Field{Name: "sampledThread", Type: typeMap.T_THREAD, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldExecutionSample{Field: &typ.Fields[i], ThreadRef: &res.Temp.SampledThread}) + } else { + res.Fields = append(res.Fields, BindFieldExecutionSample{Field: &typ.Fields[i]}) // skip changed field + } + case "stackTrace": + if typ.Fields[i].Equals(&def.Field{Name: "stackTrace", Type: typeMap.T_STACK_TRACE, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldExecutionSample{Field: &typ.Fields[i], StackTraceRef: &res.Temp.StackTrace}) + } else { + res.Fields = append(res.Fields, BindFieldExecutionSample{Field: &typ.Fields[i]}) // skip changed field + } + case "state": + if typ.Fields[i].Equals(&def.Field{Name: "state", Type: typeMap.T_THREAD_STATE, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldExecutionSample{Field: &typ.Fields[i], ThreadStateRef: 
&res.Temp.State}) + } else { + res.Fields = append(res.Fields, BindFieldExecutionSample{Field: &typ.Fields[i]}) // skip changed field + } + case "contextId": + if typ.Fields[i].Equals(&def.Field{Name: "contextId", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldExecutionSample{Field: &typ.Fields[i], uint64: &res.Temp.ContextId}) + } else { + res.Fields = append(res.Fields, BindFieldExecutionSample{Field: &typ.Fields[i]}) // skip changed field + } + default: + res.Fields = append(res.Fields, BindFieldExecutionSample{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type ExecutionSample struct { + StartTime uint64 + SampledThread ThreadRef + StackTrace StackTraceRef + State ThreadStateRef + ContextId uint64 +} + +func (this *ExecutionSample) Parse(data []byte, bind *BindExecutionSample, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + switch bind.Fields[bindFieldIndex].Field.Type { + case typeMap.T_THREAD: + if bind.Fields[bindFieldIndex].ThreadRef != nil { + *bind.Fields[bindFieldIndex].ThreadRef = ThreadRef(v32_) + } + case typeMap.T_STACK_TRACE: + if bind.Fields[bindFieldIndex].StackTraceRef != nil { + *bind.Fields[bindFieldIndex].StackTraceRef = StackTraceRef(v32_) + } + case typeMap.T_THREAD_STATE: + if bind.Fields[bindFieldIndex].ThreadStateRef != nil { + *bind.Fields[bindFieldIndex].ThreadStateRef = ThreadStateRef(v32_) + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + // skipping + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + 
return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + if bind.Fields[bindFieldIndex].uint64 != nil { + *bind.Fields[bindFieldIndex].uint64 = v64_ + } + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == 
typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. ") + } + } + } + } + } + } + } + *this = bind.Temp + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/frametype.go b/vendor/github.com/grafana/jfr-parser/parser/types/frametype.go new file mode 100644 index 0000000000..2bcf85e848 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/frametype.go @@ -0,0 +1,386 @@ +// Code generated by gen/main.go. DO NOT EDIT. + +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindFrameType struct { + Temp FrameType + Fields []BindFieldFrameType +} + +type BindFieldFrameType struct { + Field *def.Field + string *string +} + +func NewBindFrameType(typ *def.Class, typeMap *def.TypeMap) *BindFrameType { + res := new(BindFrameType) + res.Fields = make([]BindFieldFrameType, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "description": + if typ.Fields[i].Equals(&def.Field{Name: "description", Type: typeMap.T_STRING, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldFrameType{Field: &typ.Fields[i], string: &res.Temp.Description}) + } else { + res.Fields = append(res.Fields, BindFieldFrameType{Field: &typ.Fields[i]}) // skip changed field + } + default: + res.Fields = append(res.Fields, BindFieldFrameType{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type FrameTypeRef uint32 +type FrameTypeList struct { + IDMap map[FrameTypeRef]uint32 + FrameType []FrameType +} + +type FrameType struct { + Description string +} + +func (this *FrameTypeList) Parse(data []byte, bind *BindFrameType, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + n := int(v32_) + this.IDMap = make(map[FrameTypeRef]uint32, n) + this.FrameType = make([]FrameType, n) + for i := 0; i < n; i++ { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + id := FrameTypeRef(v32_) + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else { + bindFieldTypeID := 
bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + if bind.Fields[bindFieldIndex].string != nil { + *bind.Fields[bindFieldIndex].string = s_ + } + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + // skipping + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : 
pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. ") + } + } + } + } + } + } + } + this.FrameType[i] = bind.Temp + this.IDMap[id] = uint32(i) + } + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/idmap.go b/vendor/github.com/grafana/jfr-parser/parser/types/idmap.go new file mode 100644 index 0000000000..77f9cae771 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/idmap.go @@ -0,0 +1,50 @@ +package types + +type IDMap[REF interface{ MethodRef | FrameTypeRef }] struct { + Dict map[REF]uint32 + Slice []uint32 + Size int +} + +func NewIDMap[REF interface{ MethodRef | FrameTypeRef }](n int) IDMap[REF] { + return IDMap[REF]{ + Slice: make([]uint32, n+1), + } +} + +func (m *IDMap[REF]) Get(ref REF) int { + if m.Dict == nil { + if int(ref) < len(m.Slice) { + return int(m.Slice[ref]) + } + return -1 + } + return m.getDict(ref) +} + +func (m *IDMap[REF]) Set(ref REF, idx int) { + if m.Dict == nil && int(ref) < len(m.Slice) { + m.Slice[ref] = uint32(idx) + return + } + m.setSlow(ref, idx) +} + +func (m *IDMap[REF]) setSlow(ref REF, idx int) { + if m.Dict == nil { + m.Dict = make(map[REF]uint32, m.Size) + for i, v := range m.Slice { + m.Dict[REF(i)] = v + } + m.Slice = nil + } + m.Dict[ref] = uint32(idx) +} + +func (m *IDMap[REF]) getDict(ref REF) int { + u, ok := m.Dict[ref] + if ok { + return int(u) + } + return -1 +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/live_object.go b/vendor/github.com/grafana/jfr-parser/parser/types/live_object.go new file mode 100644 index 0000000000..dd0d8666b9 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/live_object.go @@ -0,0 +1,395 @@ +// Code generated by gen/main.go. DO NOT EDIT. 
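Unlike its neighbors, idmap.go carries no generated-code header; it is a small adaptive index. Constant-pool refs are typically small and dense, so lookups start on a preallocated slice, and Set only promotes storage to a map (setSlow copies the slice over and nils it) once it sees a ref beyond the slice. A usage sketch, assuming it sits in the same types package and using the FrameTypeRef defined in frametype.go (demoIDMap is our name):

package types

import "fmt"

func demoIDMap() {
	m := NewIDMap[FrameTypeRef](100) // slice-backed for refs 0..100
	m.Set(7, 3)
	m.Set(5000, 9) // out-of-range ref: promotes storage from slice to map
	fmt.Println(m.Get(7), m.Get(5000), m.Get(999999))
	// prints: 3 9 -1
}

One caveat visible in the code: on the slice path, an in-range ref that was never Set reads as 0 rather than -1. That is acceptable here because the parser assigns an index for every pool entry it stores.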
+ +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindLiveObject struct { + Temp LiveObject + Fields []BindFieldLiveObject +} + +type BindFieldLiveObject struct { + Field *def.Field + uint64 *uint64 + ThreadRef *ThreadRef + StackTraceRef *StackTraceRef + ClassRef *ClassRef +} + +func NewBindLiveObject(typ *def.Class, typeMap *def.TypeMap) *BindLiveObject { + res := new(BindLiveObject) + res.Fields = make([]BindFieldLiveObject, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "startTime": + if typ.Fields[i].Equals(&def.Field{Name: "startTime", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldLiveObject{Field: &typ.Fields[i], uint64: &res.Temp.StartTime}) + } else { + res.Fields = append(res.Fields, BindFieldLiveObject{Field: &typ.Fields[i]}) // skip changed field + } + case "eventThread": + if typ.Fields[i].Equals(&def.Field{Name: "eventThread", Type: typeMap.T_THREAD, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldLiveObject{Field: &typ.Fields[i], ThreadRef: &res.Temp.EventThread}) + } else { + res.Fields = append(res.Fields, BindFieldLiveObject{Field: &typ.Fields[i]}) // skip changed field + } + case "stackTrace": + if typ.Fields[i].Equals(&def.Field{Name: "stackTrace", Type: typeMap.T_STACK_TRACE, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldLiveObject{Field: &typ.Fields[i], StackTraceRef: &res.Temp.StackTrace}) + } else { + res.Fields = append(res.Fields, BindFieldLiveObject{Field: &typ.Fields[i]}) // skip changed field + } + case "objectClass": + if typ.Fields[i].Equals(&def.Field{Name: "objectClass", Type: typeMap.T_CLASS, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldLiveObject{Field: &typ.Fields[i], ClassRef: &res.Temp.ObjectClass}) + } else { + res.Fields = append(res.Fields, BindFieldLiveObject{Field: &typ.Fields[i]}) // skip changed field + } + case "allocationSize": + if typ.Fields[i].Equals(&def.Field{Name: "allocationSize", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldLiveObject{Field: &typ.Fields[i], uint64: &res.Temp.AllocationSize}) + } else { + res.Fields = append(res.Fields, BindFieldLiveObject{Field: &typ.Fields[i]}) // skip changed field + } + case "allocationTime": + if typ.Fields[i].Equals(&def.Field{Name: "allocationTime", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldLiveObject{Field: &typ.Fields[i], uint64: &res.Temp.AllocationTime}) + } else { + res.Fields = append(res.Fields, BindFieldLiveObject{Field: &typ.Fields[i]}) // skip changed field + } + default: + res.Fields = append(res.Fields, BindFieldLiveObject{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type LiveObject struct { + StartTime uint64 + EventThread ThreadRef + StackTrace StackTraceRef + ObjectClass ClassRef + AllocationSize uint64 + AllocationTime uint64 +} + +func (this *LiveObject) Parse(data []byte, bind *BindLiveObject, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 
7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + switch bind.Fields[bindFieldIndex].Field.Type { + case typeMap.T_THREAD: + if bind.Fields[bindFieldIndex].ThreadRef != nil { + *bind.Fields[bindFieldIndex].ThreadRef = ThreadRef(v32_) + } + case typeMap.T_STACK_TRACE: + if bind.Fields[bindFieldIndex].StackTraceRef != nil { + *bind.Fields[bindFieldIndex].StackTraceRef = StackTraceRef(v32_) + } + case typeMap.T_CLASS: + if bind.Fields[bindFieldIndex].ClassRef != nil { + *bind.Fields[bindFieldIndex].ClassRef = ClassRef(v32_) + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + // skipping + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + if bind.Fields[bindFieldIndex].uint64 != nil { + *bind.Fields[bindFieldIndex].uint64 = v64_ + } + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + 
bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. ") + } + } + } + } + } + } + } + *this = bind.Temp + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/loglevel.go b/vendor/github.com/grafana/jfr-parser/parser/types/loglevel.go new file mode 100644 index 0000000000..c3b22c5a5b --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/loglevel.go @@ -0,0 +1,386 @@ +// Code generated by gen/main.go. DO NOT EDIT. 
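The other block repeated throughout is the T_STRING case. The generated parsers accept three encodings — tags 0 and 1 both collapse to "" (null and empty string), and tag 3 is a varint byte length followed by that many UTF-8 bytes, viewed zero-copy through unsafe — and reject anything else with the "unknown string type" error. Pulled out into a helper (a sketch; decodeString is our name, and it assumes a readUvarint32 helper like the one sketched earlier is in scope):

package types

import (
	"fmt"
	"io"
	"unsafe"
)

func decodeString(data []byte, pos int) (string, int, error) {
	if pos >= len(data) {
		return "", 0, io.ErrUnexpectedEOF
	}
	enc := data[pos]
	pos++
	switch enc {
	case 0, 1: // null / empty string
		return "", pos, nil
	case 3: // varint byte length, then UTF-8 payload
		n, p, err := readUvarint32(data, pos)
		if err != nil {
			return "", 0, err
		}
		if p+int(n) > len(data) {
			return "", 0, io.ErrUnexpectedEOF
		}
		bs := data[p : p+int(n)]
		// Zero-copy view, as in the generated code: valid only while the
		// underlying chunk buffer is neither reused nor mutated.
		return *(*string)(unsafe.Pointer(&bs)), p + int(n), nil
	default:
		return "", 0, fmt.Errorf("unknown string type %d at %d", enc, pos)
	}
}

More broadly, the NewBind*/Parse pairs exist for schema drift: each expected field is matched by name and exact shape (Field.Equals), wired to a pointer into Temp on match, and left with nil pointers otherwise, so Parse still walks and discards the bytes of renamed, retyped, or unknown fields instead of failing on newer or older JFR recordings.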
+ +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindLogLevel struct { + Temp LogLevel + Fields []BindFieldLogLevel +} + +type BindFieldLogLevel struct { + Field *def.Field + string *string +} + +func NewBindLogLevel(typ *def.Class, typeMap *def.TypeMap) *BindLogLevel { + res := new(BindLogLevel) + res.Fields = make([]BindFieldLogLevel, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "name": + if typ.Fields[i].Equals(&def.Field{Name: "name", Type: typeMap.T_STRING, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldLogLevel{Field: &typ.Fields[i], string: &res.Temp.Name}) + } else { + res.Fields = append(res.Fields, BindFieldLogLevel{Field: &typ.Fields[i]}) // skip changed field + } + default: + res.Fields = append(res.Fields, BindFieldLogLevel{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type LogLevelRef uint32 +type LogLevelList struct { + IDMap map[LogLevelRef]uint32 + LogLevel []LogLevel +} + +type LogLevel struct { + Name string +} + +func (this *LogLevelList) Parse(data []byte, bind *BindLogLevel, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + n := int(v32_) + this.IDMap = make(map[LogLevelRef]uint32, n) + this.LogLevel = make([]LogLevel, n) + for i := 0; i < n; i++ { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + id := LogLevelRef(v32_) + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + 
return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + if bind.Fields[bindFieldIndex].string != nil { + *bind.Fields[bindFieldIndex].string = s_ + } + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + // skipping + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, 
io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. ") + } + } + } + } + } + } + } + this.LogLevel[i] = bind.Temp + this.IDMap[id] = uint32(i) + } + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/method.go b/vendor/github.com/grafana/jfr-parser/parser/types/method.go new file mode 100644 index 0000000000..64ce853a58 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/method.go @@ -0,0 +1,417 @@ +// Code generated by gen/main.go. DO NOT EDIT. + +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindMethod struct { + Temp Method + Fields []BindFieldMethod +} + +type BindFieldMethod struct { + Field *def.Field + ClassRef *ClassRef + SymbolRef *SymbolRef + uint32 *uint32 + bool *bool +} + +func NewBindMethod(typ *def.Class, typeMap *def.TypeMap) *BindMethod { + res := new(BindMethod) + res.Fields = make([]BindFieldMethod, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "type": + if typ.Fields[i].Equals(&def.Field{Name: "type", Type: typeMap.T_CLASS, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldMethod{Field: &typ.Fields[i], ClassRef: &res.Temp.Type}) + } else { + res.Fields = append(res.Fields, BindFieldMethod{Field: &typ.Fields[i]}) // skip changed field + } + case "name": + if typ.Fields[i].Equals(&def.Field{Name: "name", Type: typeMap.T_SYMBOL, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldMethod{Field: &typ.Fields[i], SymbolRef: &res.Temp.Name}) + } else { + res.Fields = append(res.Fields, BindFieldMethod{Field: &typ.Fields[i]}) // skip changed field + } + case "descriptor": + res.Fields = append(res.Fields, BindFieldMethod{Field: &typ.Fields[i]}) // skip to save mem + case "modifiers": + res.Fields = append(res.Fields, BindFieldMethod{Field: &typ.Fields[i]}) // skip to save mem + case "hidden": + res.Fields = append(res.Fields, BindFieldMethod{Field: &typ.Fields[i]}) // skip to save mem + default: + res.Fields = append(res.Fields, BindFieldMethod{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type MethodRef uint32 +type MethodList struct { + IDMap IDMap[MethodRef] + Method []Method +} + +type Method struct { + Type ClassRef + Name SymbolRef + // skip descriptor + // skip modifiers + // skip hidden +} + +func (this *MethodList) Parse(data []byte, bind *BindMethod, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + n := int(v32_) + this.IDMap = NewIDMap[MethodRef](n) + 
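+// Unlike the plain map[...]uint32 used by the smaller lists (LogLevelList,
+// PackageList), MethodList keeps its ids in the generic IDMap container
+// and fills it with IDMap.Set(id, i) after each element is parsed.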
this.Method = make([]Method, n) + for i := 0; i < n; i++ { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + id := MethodRef(v32_) + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + switch bind.Fields[bindFieldIndex].Field.Type { + case typeMap.T_CLASS: + if bind.Fields[bindFieldIndex].ClassRef != nil { + *bind.Fields[bindFieldIndex].ClassRef = ClassRef(v32_) + } + case typeMap.T_SYMBOL: + if bind.Fields[bindFieldIndex].SymbolRef != nil { + *bind.Fields[bindFieldIndex].SymbolRef = SymbolRef(v32_) + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + // skipping + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if bind.Fields[bindFieldIndex].uint32 != nil { + *bind.Fields[bindFieldIndex].uint32 = v32_ + } + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + // skipping + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if bind.Fields[bindFieldIndex].bool != nil { + *bind.Fields[bindFieldIndex].bool = b_ != 0 + } + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if 
bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. ") + } + } + } + } + } + } + } + this.Method[i] = bind.Temp + this.IDMap.Set(id, i) + } + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/monitor_enter.go b/vendor/github.com/grafana/jfr-parser/parser/types/monitor_enter.go new file mode 100644 index 0000000000..cf47e4f65f --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/monitor_enter.go @@ -0,0 +1,409 @@ +// Code generated by gen/main.go. DO NOT EDIT. 
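+//
+// Annotation (not part of the upstream file): every generated event follows
+// the same bind pattern. NewBind* walks the class declaration found in the
+// chunk metadata and attaches a destination pointer only when a field's
+// name, type, constant-pool flag and arity all match the compiled-in
+// expectation; mismatched or unknown fields keep a nil destination and are
+// decoded but discarded, so schema drift degrades to skipping rather than
+// failing. Hypothetical call sequence (classDef, typeMap and payload are
+// assumed names, not upstream API):
+//
+//	bind := NewBindJavaMonitorEnter(classDef, typeMap)
+//	var ev JavaMonitorEnter
+//	n, err := ev.Parse(payload, bind, typeMap) // n = bytes consumed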
+ +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindJavaMonitorEnter struct { + Temp JavaMonitorEnter + Fields []BindFieldJavaMonitorEnter +} + +type BindFieldJavaMonitorEnter struct { + Field *def.Field + uint64 *uint64 + ThreadRef *ThreadRef + StackTraceRef *StackTraceRef + ClassRef *ClassRef +} + +func NewBindJavaMonitorEnter(typ *def.Class, typeMap *def.TypeMap) *BindJavaMonitorEnter { + res := new(BindJavaMonitorEnter) + res.Fields = make([]BindFieldJavaMonitorEnter, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "startTime": + if typ.Fields[i].Equals(&def.Field{Name: "startTime", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i], uint64: &res.Temp.StartTime}) + } else { + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i]}) // skip changed field + } + case "duration": + if typ.Fields[i].Equals(&def.Field{Name: "duration", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i], uint64: &res.Temp.Duration}) + } else { + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i]}) // skip changed field + } + case "eventThread": + if typ.Fields[i].Equals(&def.Field{Name: "eventThread", Type: typeMap.T_THREAD, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i], ThreadRef: &res.Temp.EventThread}) + } else { + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i]}) // skip changed field + } + case "stackTrace": + if typ.Fields[i].Equals(&def.Field{Name: "stackTrace", Type: typeMap.T_STACK_TRACE, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i], StackTraceRef: &res.Temp.StackTrace}) + } else { + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i]}) // skip changed field + } + case "monitorClass": + if typ.Fields[i].Equals(&def.Field{Name: "monitorClass", Type: typeMap.T_CLASS, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i], ClassRef: &res.Temp.MonitorClass}) + } else { + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i]}) // skip changed field + } + case "previousOwner": + if typ.Fields[i].Equals(&def.Field{Name: "previousOwner", Type: typeMap.T_THREAD, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i], ThreadRef: &res.Temp.PreviousOwner}) + } else { + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i]}) // skip changed field + } + case "address": + if typ.Fields[i].Equals(&def.Field{Name: "address", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i], uint64: &res.Temp.Address}) + } else { + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i]}) // skip changed field + } + case "contextId": + if typ.Fields[i].Equals(&def.Field{Name: "contextId", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i], uint64: &res.Temp.ContextId}) + } else { + res.Fields = 
append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i]}) // skip changed field + } + default: + res.Fields = append(res.Fields, BindFieldJavaMonitorEnter{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type JavaMonitorEnter struct { + StartTime uint64 + Duration uint64 + EventThread ThreadRef + StackTrace StackTraceRef + MonitorClass ClassRef + PreviousOwner ThreadRef + Address uint64 + ContextId uint64 +} + +func (this *JavaMonitorEnter) Parse(data []byte, bind *BindJavaMonitorEnter, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + switch bind.Fields[bindFieldIndex].Field.Type { + case typeMap.T_THREAD: + if bind.Fields[bindFieldIndex].ThreadRef != nil { + *bind.Fields[bindFieldIndex].ThreadRef = ThreadRef(v32_) + } + case typeMap.T_STACK_TRACE: + if bind.Fields[bindFieldIndex].StackTraceRef != nil { + *bind.Fields[bindFieldIndex].StackTraceRef = StackTraceRef(v32_) + } + case typeMap.T_CLASS: + if bind.Fields[bindFieldIndex].ClassRef != nil { + *bind.Fields[bindFieldIndex].ClassRef = ClassRef(v32_) + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + // skipping + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + if bind.Fields[bindFieldIndex].uint64 != nil { + *bind.Fields[bindFieldIndex].uint64 = v64_ + } + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, 
io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. 
") + } + } + } + } + } + } + } + *this = bind.Temp + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/package.go b/vendor/github.com/grafana/jfr-parser/parser/types/package.go new file mode 100644 index 0000000000..69a35e6c9b --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/package.go @@ -0,0 +1,390 @@ +// Code generated by gen/main.go. DO NOT EDIT. + +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindPackage struct { + Temp Package + Fields []BindFieldPackage +} + +type BindFieldPackage struct { + Field *def.Field + SymbolRef *SymbolRef +} + +func NewBindPackage(typ *def.Class, typeMap *def.TypeMap) *BindPackage { + res := new(BindPackage) + res.Fields = make([]BindFieldPackage, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "name": + if typ.Fields[i].Equals(&def.Field{Name: "name", Type: typeMap.T_SYMBOL, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldPackage{Field: &typ.Fields[i], SymbolRef: &res.Temp.Name}) + } else { + res.Fields = append(res.Fields, BindFieldPackage{Field: &typ.Fields[i]}) // skip changed field + } + default: + res.Fields = append(res.Fields, BindFieldPackage{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type PackageRef uint32 +type PackageList struct { + IDMap map[PackageRef]uint32 + Package []Package +} + +type Package struct { + Name SymbolRef +} + +func (this *PackageList) Parse(data []byte, bind *BindPackage, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + n := int(v32_) + this.IDMap = make(map[PackageRef]uint32, n) + this.Package = make([]Package, n) + for i := 0; i < n; i++ { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + id := PackageRef(v32_) + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + switch bind.Fields[bindFieldIndex].Field.Type { + case typeMap.T_SYMBOL: + if bind.Fields[bindFieldIndex].SymbolRef != nil { + *bind.Fields[bindFieldIndex].SymbolRef = SymbolRef(v32_) + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case 
typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + // skipping + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + // skipping + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } 
else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. ") + } + } + } + } + } + } + } + this.Package[i] = bind.Temp + this.IDMap[id] = uint32(i) + } + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/skipper.go b/vendor/github.com/grafana/jfr-parser/parser/types/skipper.go new file mode 100644 index 0000000000..a49fbd1565 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/skipper.go @@ -0,0 +1,369 @@ +// Code generated by gen/main.go. DO NOT EDIT. + +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindSkipConstantPool struct { + Temp SkipConstantPool + Fields []BindFieldSkipConstantPool +} + +type BindFieldSkipConstantPool struct { + Field *def.Field +} + +func NewBindSkipConstantPool(typ *def.Class, typeMap *def.TypeMap) *BindSkipConstantPool { + res := new(BindSkipConstantPool) + res.Fields = make([]BindFieldSkipConstantPool, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + default: + res.Fields = append(res.Fields, BindFieldSkipConstantPool{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type SkipConstantPoolRef uint32 +type SkipConstantPoolList struct { +} + +type SkipConstantPool struct { +} + +func (this *SkipConstantPoolList) Parse(data []byte, bind *BindSkipConstantPool, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + n := int(v32_) + for i := 0; i < n; i++ { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= 
uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + // skipping + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + // skipping + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, 
io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. ") + } + } + } + } + } + } + } + } + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/stackframe.go b/vendor/github.com/grafana/jfr-parser/parser/types/stackframe.go new file mode 100644 index 0000000000..9a05fd9f6b --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/stackframe.go @@ -0,0 +1,368 @@ +// Code generated by gen/main.go. DO NOT EDIT. 
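+//
+// Annotation (not part of the upstream file): 64-bit fields reuse the 7-bit
+// groups but cap the encoding at nine bytes; at shift 56 the ninth byte
+// contributes all eight bits and terminates unconditionally. Sketch of the
+// inlined loops below (bounds checks elided for brevity):
+//
+//	var v uint64
+//	for shift := uint(0); shift <= 56; shift += 7 {
+//		b := data[pos]
+//		pos++
+//		if shift == 56 {
+//			v |= uint64(b&0xFF) << shift // final byte: full 8 bits
+//			break
+//		}
+//		v |= uint64(b&0x7F) << shift
+//		if b < 0x80 {
+//			break
+//		}
+//	}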
+ +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindStackFrame struct { + Temp StackFrame + Fields []BindFieldStackFrame +} + +type BindFieldStackFrame struct { + Field *def.Field + MethodRef *MethodRef + uint32 *uint32 + FrameTypeRef *FrameTypeRef +} + +func NewBindStackFrame(typ *def.Class, typeMap *def.TypeMap) *BindStackFrame { + res := new(BindStackFrame) + res.Fields = make([]BindFieldStackFrame, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "method": + if typ.Fields[i].Equals(&def.Field{Name: "method", Type: typeMap.T_METHOD, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldStackFrame{Field: &typ.Fields[i], MethodRef: &res.Temp.Method}) + } else { + res.Fields = append(res.Fields, BindFieldStackFrame{Field: &typ.Fields[i]}) // skip changed field + } + case "lineNumber": + if typ.Fields[i].Equals(&def.Field{Name: "lineNumber", Type: typeMap.T_INT, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldStackFrame{Field: &typ.Fields[i], uint32: &res.Temp.LineNumber}) + } else { + res.Fields = append(res.Fields, BindFieldStackFrame{Field: &typ.Fields[i]}) // skip changed field + } + case "bytecodeIndex": + res.Fields = append(res.Fields, BindFieldStackFrame{Field: &typ.Fields[i]}) // skip to save mem + case "type": + res.Fields = append(res.Fields, BindFieldStackFrame{Field: &typ.Fields[i]}) // skip to save mem + default: + res.Fields = append(res.Fields, BindFieldStackFrame{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type StackFrame struct { + Method MethodRef + LineNumber uint32 + // skip bytecodeIndex + // skip type +} + +func (this *StackFrame) Parse(data []byte, bind *BindStackFrame, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + switch bind.Fields[bindFieldIndex].Field.Type { + case typeMap.T_METHOD: + if bind.Fields[bindFieldIndex].MethodRef != nil { + *bind.Fields[bindFieldIndex].MethodRef = MethodRef(v32_) + } + case typeMap.T_FRAME_TYPE: + if bind.Fields[bindFieldIndex].FrameTypeRef != nil { + *bind.Fields[bindFieldIndex].FrameTypeRef = FrameTypeRef(v32_) + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } 
+ if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + // skipping + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if bind.Fields[bindFieldIndex].uint32 != nil { + *bind.Fields[bindFieldIndex].uint32 = v32_ + } + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + // skipping + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, 
io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. ") + } + } + } + } + } + } + } + *this = bind.Temp + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/stacktrace.go b/vendor/github.com/grafana/jfr-parser/parser/types/stacktrace.go new file mode 100644 index 0000000000..aac1f84408 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/stacktrace.go @@ -0,0 +1,696 @@ +// Code generated by gen/main.go. DO NOT EDIT. + +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindStackTrace struct { + Temp StackTrace + Fields []BindFieldStackTrace +} + +type BindFieldStackTrace struct { + Field *def.Field + bool *bool + StackFrame *[]StackFrame +} + +func NewBindStackTrace(typ *def.Class, typeMap *def.TypeMap) *BindStackTrace { + res := new(BindStackTrace) + res.Fields = make([]BindFieldStackTrace, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "truncated": + if typ.Fields[i].Equals(&def.Field{Name: "truncated", Type: typeMap.T_BOOLEAN, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldStackTrace{Field: &typ.Fields[i], bool: &res.Temp.Truncated}) + } else { + res.Fields = append(res.Fields, BindFieldStackTrace{Field: &typ.Fields[i]}) // skip changed field + } + case "frames": + if typ.Fields[i].Equals(&def.Field{Name: "frames", Type: typeMap.T_STACK_FRAME, ConstantPool: false, Array: true}) { + res.Fields = append(res.Fields, BindFieldStackTrace{Field: &typ.Fields[i], StackFrame: &res.Temp.Frames}) + } else { + res.Fields = append(res.Fields, BindFieldStackTrace{Field: &typ.Fields[i]}) // skip changed field + } + default: + res.Fields = append(res.Fields, BindFieldStackTrace{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type StackTraceRef uint32 +type StackTraceList struct { + IDMap map[StackTraceRef]uint32 + StackTrace []StackTrace +} + +type StackTrace struct { + Truncated bool + Frames []StackFrame +} + +func (this *StackTraceList) Parse(data []byte, bind *BindStackTrace, bindStackFrame *BindStackFrame, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + n := int(v32_) + this.IDMap = make(map[StackTraceRef]uint32, n) + 
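+// IDMap maps sparse constant-pool ids to dense indices into the
+// StackTrace slice populated below; event fields of type T_STACK_TRACE
+// carry the id and are resolved through this map after parsing.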
this.StackTrace = make([]StackTrace, n) + for i := 0; i < n; i++ { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + id := StackTraceRef(v32_) + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + if bind.Fields[bindFieldIndex].Field.Type == typeMap.T_STACK_FRAME { + *bind.Fields[bindFieldIndex].StackFrame = make([]StackFrame, 0, bindArraySize) + } + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + // skipping + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + // skipping + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if bind.Fields[bindFieldIndex].bool != nil { + *bind.Fields[bindFieldIndex].bool = b_ != 0 + } + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_STACK_FRAME: + for bindStackFrameFieldIndex := 0; bindStackFrameFieldIndex < len(bindStackFrame.Fields); bindStackFrameFieldIndex++ { + bindStackFrameArraySize := 1 + if bindStackFrame.Fields[bindStackFrameFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, 
def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindStackFrameArraySize = int(v32_) + } + for bindStackFrameArrayIndex := 0; bindStackFrameArrayIndex < bindStackFrameArraySize; bindStackFrameArrayIndex++ { + if bindStackFrame.Fields[bindStackFrameFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + switch bindStackFrame.Fields[bindStackFrameFieldIndex].Field.Type { + case typeMap.T_METHOD: + if bindStackFrame.Fields[bindStackFrameFieldIndex].MethodRef != nil { + *bindStackFrame.Fields[bindStackFrameFieldIndex].MethodRef = MethodRef(v32_) + } + case typeMap.T_FRAME_TYPE: + if bindStackFrame.Fields[bindStackFrameFieldIndex].FrameTypeRef != nil { + *bindStackFrame.Fields[bindStackFrameFieldIndex].FrameTypeRef = FrameTypeRef(v32_) + } + } + } else { + bindStackFrameFieldTypeID := bindStackFrame.Fields[bindStackFrameFieldIndex].Field.Type + switch bindStackFrameFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + // skipping + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if bindStackFrame.Fields[bindStackFrameFieldIndex].uint32 != nil { + *bindStackFrame.Fields[bindStackFrameFieldIndex].uint32 = v32_ + } + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + // skipping + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindStackFrameFieldType := typeMap.IDMap[bindStackFrame.Fields[bindStackFrameFieldIndex].Field.Type] + if bindStackFrameFieldType == nil || len(bindStackFrameFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bindStackFrame.Fields[bindStackFrameFieldIndex].Field.Type) + } + bindStackFrameSkipObjects := 1 + if bindStackFrame.Fields[bindStackFrameFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, 
def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindStackFrameSkipObjects = int(v32_) + } + for bindStackFrameSkipObjectIndex := 0; bindStackFrameSkipObjectIndex < bindStackFrameSkipObjects; bindStackFrameSkipObjectIndex++ { + for bindStackFrameskipFieldIndex := 0; bindStackFrameskipFieldIndex < len(bindStackFrameFieldType.Fields); bindStackFrameskipFieldIndex++ { + bindStackFrameSkipFieldType := bindStackFrameFieldType.Fields[bindStackFrameskipFieldIndex].Type + if bindStackFrameFieldType.Fields[bindStackFrameskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindStackFrameSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindStackFrameSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindStackFrameSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindStackFrameSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindStackFrameSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. 
") + } + } + } + } + } + } + } + if bind.Fields[bindFieldIndex].StackFrame != nil { + *bind.Fields[bindFieldIndex].StackFrame = append(*bind.Fields[bindFieldIndex].StackFrame, bindStackFrame.Temp) + } + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. 
") + } + } + } + } + } + } + } + this.StackTrace[i] = bind.Temp + this.IDMap[id] = uint32(i) + } + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/symbol.go b/vendor/github.com/grafana/jfr-parser/parser/types/symbol.go new file mode 100644 index 0000000000..4f6f5cfd6b --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/symbol.go @@ -0,0 +1,386 @@ +// Code generated by gen/main.go. DO NOT EDIT. + +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindSymbol struct { + Temp Symbol + Fields []BindFieldSymbol +} + +type BindFieldSymbol struct { + Field *def.Field + string *string +} + +func NewBindSymbol(typ *def.Class, typeMap *def.TypeMap) *BindSymbol { + res := new(BindSymbol) + res.Fields = make([]BindFieldSymbol, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "string": + if typ.Fields[i].Equals(&def.Field{Name: "string", Type: typeMap.T_STRING, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldSymbol{Field: &typ.Fields[i], string: &res.Temp.String}) + } else { + res.Fields = append(res.Fields, BindFieldSymbol{Field: &typ.Fields[i]}) // skip changed field + } + default: + res.Fields = append(res.Fields, BindFieldSymbol{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type SymbolRef uint32 +type SymbolList struct { + IDMap map[SymbolRef]uint32 + Symbol []Symbol +} + +type Symbol struct { + String string +} + +func (this *SymbolList) Parse(data []byte, bind *BindSymbol, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + n := int(v32_) + this.IDMap = make(map[SymbolRef]uint32, n) + this.Symbol = make([]Symbol, n) + for i := 0; i < n; i++ { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + id := SymbolRef(v32_) + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = 
uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + if bind.Fields[bindFieldIndex].string != nil { + *bind.Fields[bindFieldIndex].string = s_ + } + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + // skipping + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; 
shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. ") + } + } + } + } + } + } + } + this.Symbol[i] = bind.Temp + this.IDMap[id] = uint32(i) + } + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/thread.go b/vendor/github.com/grafana/jfr-parser/parser/types/thread.go new file mode 100644 index 0000000000..78ba708920 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/thread.go @@ -0,0 +1,410 @@ +// Code generated by gen/main.go. DO NOT EDIT. + +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindThread struct { + Temp Thread + Fields []BindFieldThread +} + +type BindFieldThread struct { + Field *def.Field + string *string + uint64 *uint64 +} + +func NewBindThread(typ *def.Class, typeMap *def.TypeMap) *BindThread { + res := new(BindThread) + res.Fields = make([]BindFieldThread, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "osName": + if typ.Fields[i].Equals(&def.Field{Name: "osName", Type: typeMap.T_STRING, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldThread{Field: &typ.Fields[i], string: &res.Temp.OsName}) + } else { + res.Fields = append(res.Fields, BindFieldThread{Field: &typ.Fields[i]}) // skip changed field + } + case "osThreadId": + if typ.Fields[i].Equals(&def.Field{Name: "osThreadId", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldThread{Field: &typ.Fields[i], uint64: &res.Temp.OsThreadId}) + } else { + res.Fields = append(res.Fields, BindFieldThread{Field: &typ.Fields[i]}) // skip changed field + } + case "javaName": + if typ.Fields[i].Equals(&def.Field{Name: "javaName", Type: typeMap.T_STRING, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldThread{Field: &typ.Fields[i], string: &res.Temp.JavaName}) + } else { + res.Fields = append(res.Fields, BindFieldThread{Field: &typ.Fields[i]}) // skip changed field + } + case "javaThreadId": + if typ.Fields[i].Equals(&def.Field{Name: "javaThreadId", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldThread{Field: &typ.Fields[i], uint64: &res.Temp.JavaThreadId}) + } else { + res.Fields = append(res.Fields, BindFieldThread{Field: &typ.Fields[i]}) // skip changed field + } + default: + res.Fields = append(res.Fields, BindFieldThread{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type ThreadRef 
uint32 +type ThreadList struct { + IDMap map[ThreadRef]uint32 + Thread []Thread +} + +type Thread struct { + OsName string + OsThreadId uint64 + JavaName string + JavaThreadId uint64 +} + +func (this *ThreadList) Parse(data []byte, bind *BindThread, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + n := int(v32_) + this.IDMap = make(map[ThreadRef]uint32, n) + this.Thread = make([]Thread, n) + for i := 0; i < n; i++ { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + id := ThreadRef(v32_) + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + if bind.Fields[bindFieldIndex].string != nil { + *bind.Fields[bindFieldIndex].string = s_ + } + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + if bind.Fields[bindFieldIndex].uint64 != nil { + *bind.Fields[bindFieldIndex].uint64 = v64_ + } + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + 
pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. 
") + } + } + } + } + } + } + } + this.Thread[i] = bind.Temp + this.IDMap[id] = uint32(i) + } + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/thread_park.go b/vendor/github.com/grafana/jfr-parser/parser/types/thread_park.go new file mode 100644 index 0000000000..492335ecd2 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/thread_park.go @@ -0,0 +1,416 @@ +// Code generated by gen/main.go. DO NOT EDIT. + +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindThreadPark struct { + Temp ThreadPark + Fields []BindFieldThreadPark +} + +type BindFieldThreadPark struct { + Field *def.Field + uint64 *uint64 + ThreadRef *ThreadRef + StackTraceRef *StackTraceRef + ClassRef *ClassRef +} + +func NewBindThreadPark(typ *def.Class, typeMap *def.TypeMap) *BindThreadPark { + res := new(BindThreadPark) + res.Fields = make([]BindFieldThreadPark, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "startTime": + if typ.Fields[i].Equals(&def.Field{Name: "startTime", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i], uint64: &res.Temp.StartTime}) + } else { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i]}) // skip changed field + } + case "duration": + if typ.Fields[i].Equals(&def.Field{Name: "duration", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i], uint64: &res.Temp.Duration}) + } else { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i]}) // skip changed field + } + case "eventThread": + if typ.Fields[i].Equals(&def.Field{Name: "eventThread", Type: typeMap.T_THREAD, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i], ThreadRef: &res.Temp.EventThread}) + } else { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i]}) // skip changed field + } + case "stackTrace": + if typ.Fields[i].Equals(&def.Field{Name: "stackTrace", Type: typeMap.T_STACK_TRACE, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i], StackTraceRef: &res.Temp.StackTrace}) + } else { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i]}) // skip changed field + } + case "parkedClass": + if typ.Fields[i].Equals(&def.Field{Name: "parkedClass", Type: typeMap.T_CLASS, ConstantPool: true, Array: false}) { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i], ClassRef: &res.Temp.ParkedClass}) + } else { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i]}) // skip changed field + } + case "timeout": + if typ.Fields[i].Equals(&def.Field{Name: "timeout", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i], uint64: &res.Temp.Timeout}) + } else { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i]}) // skip changed field + } + case "until": + if typ.Fields[i].Equals(&def.Field{Name: "until", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i], uint64: &res.Temp.Until}) + } else { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i]}) // skip 
changed field + } + case "address": + if typ.Fields[i].Equals(&def.Field{Name: "address", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i], uint64: &res.Temp.Address}) + } else { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i]}) // skip changed field + } + case "contextId": + if typ.Fields[i].Equals(&def.Field{Name: "contextId", Type: typeMap.T_LONG, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i], uint64: &res.Temp.ContextId}) + } else { + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i]}) // skip changed field + } + default: + res.Fields = append(res.Fields, BindFieldThreadPark{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type ThreadPark struct { + StartTime uint64 + Duration uint64 + EventThread ThreadRef + StackTrace StackTraceRef + ParkedClass ClassRef + Timeout uint64 + Until uint64 + Address uint64 + ContextId uint64 +} + +func (this *ThreadPark) Parse(data []byte, bind *BindThreadPark, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + switch bind.Fields[bindFieldIndex].Field.Type { + case typeMap.T_THREAD: + if bind.Fields[bindFieldIndex].ThreadRef != nil { + *bind.Fields[bindFieldIndex].ThreadRef = ThreadRef(v32_) + } + case typeMap.T_STACK_TRACE: + if bind.Fields[bindFieldIndex].StackTraceRef != nil { + *bind.Fields[bindFieldIndex].StackTraceRef = StackTraceRef(v32_) + } + case typeMap.T_CLASS: + if bind.Fields[bindFieldIndex].ClassRef != nil { + *bind.Fields[bindFieldIndex].ClassRef = ClassRef(v32_) + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + // skipping + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + 
return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + if bind.Fields[bindFieldIndex].uint64 != nil { + *bind.Fields[bindFieldIndex].uint64 = v64_ + } + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { 
+ return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. ") + } + } + } + } + } + } + } + *this = bind.Temp + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/parser/types/threadstate.go b/vendor/github.com/grafana/jfr-parser/parser/types/threadstate.go new file mode 100644 index 0000000000..1bfa51249e --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/parser/types/threadstate.go @@ -0,0 +1,386 @@ +// Code generated by gen/main.go. DO NOT EDIT. + +package types + +import ( + "fmt" + "github.com/grafana/jfr-parser/parser/types/def" + "io" + "unsafe" +) + +type BindThreadState struct { + Temp ThreadState + Fields []BindFieldThreadState +} + +type BindFieldThreadState struct { + Field *def.Field + string *string +} + +func NewBindThreadState(typ *def.Class, typeMap *def.TypeMap) *BindThreadState { + res := new(BindThreadState) + res.Fields = make([]BindFieldThreadState, 0, len(typ.Fields)) + for i := 0; i < len(typ.Fields); i++ { + switch typ.Fields[i].Name { + case "name": + if typ.Fields[i].Equals(&def.Field{Name: "name", Type: typeMap.T_STRING, ConstantPool: false, Array: false}) { + res.Fields = append(res.Fields, BindFieldThreadState{Field: &typ.Fields[i], string: &res.Temp.Name}) + } else { + res.Fields = append(res.Fields, BindFieldThreadState{Field: &typ.Fields[i]}) // skip changed field + } + default: + res.Fields = append(res.Fields, BindFieldThreadState{Field: &typ.Fields[i]}) // skip unknown new field + } + } + return res +} + +type ThreadStateRef uint32 +type ThreadStateList struct { + IDMap map[ThreadStateRef]uint32 + ThreadState []ThreadState +} + +type ThreadState struct { + Name string +} + +func (this *ThreadStateList) Parse(data []byte, bind *BindThreadState, typeMap *def.TypeMap) (pos int, err error) { + var ( + v64_ uint64 + v32_ uint32 + s_ string + b_ byte + shift = uint(0) + l = len(data) + ) + _ = v64_ + _ = v32_ + _ = s_ + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + n := int(v32_) + this.IDMap = make(map[ThreadStateRef]uint32, n) + this.ThreadState = make([]ThreadState, n) + for i := 0; i < n; i++ { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + id := ThreadStateRef(v32_) + for bindFieldIndex := 0; bindFieldIndex < len(bind.Fields); bindFieldIndex++ { + bindArraySize := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindArraySize = int(v32_) + } + for bindArrayIndex := 0; bindArrayIndex < bindArraySize; bindArrayIndex++ { + if bind.Fields[bindFieldIndex].Field.ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + 
if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else { + bindFieldTypeID := bind.Fields[bindFieldIndex].Field.Type + switch bindFieldTypeID { + case typeMap.T_STRING: + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + if bind.Fields[bindFieldIndex].string != nil { + *bind.Fields[bindFieldIndex].string = s_ + } + case typeMap.T_INT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + case typeMap.T_LONG: + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + // skipping + case typeMap.T_BOOLEAN: + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + // skipping + case typeMap.T_FLOAT: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + // skipping + default: + bindFieldType := typeMap.IDMap[bind.Fields[bindFieldIndex].Field.Type] + if bindFieldType == nil || len(bindFieldType.Fields) == 0 { + return 0, fmt.Errorf("unknown type %d", bind.Fields[bindFieldIndex].Field.Type) + } + bindSkipObjects := 1 + if bind.Fields[bindFieldIndex].Field.Array { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + bindSkipObjects = int(v32_) + } + for bindSkipObjectIndex := 0; bindSkipObjectIndex < bindSkipObjects; bindSkipObjectIndex++ { + for bindskipFieldIndex := 0; bindskipFieldIndex < len(bindFieldType.Fields); bindskipFieldIndex++ { + bindSkipFieldType := bindFieldType.Fields[bindskipFieldIndex].Type + if bindFieldType.Fields[bindskipFieldIndex].ConstantPool { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_STRING { + s_ = "" + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + switch b_ { + case 0: + break + case 1: + break + case 3: + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow 
+ } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + if pos+int(v32_) > l { + return 0, io.ErrUnexpectedEOF + } + bs := data[pos : pos+int(v32_)] + s_ = *(*string)(unsafe.Pointer(&bs)) + pos += int(v32_) + default: + return 0, fmt.Errorf("unknown string type %d at %d", b_, pos) + } + } else if bindSkipFieldType == typeMap.T_INT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_FLOAT { + v32_ = uint32(0) + for shift = uint(0); ; shift += 7 { + if shift >= 32 { + return 0, def.ErrIntOverflow + } + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + v32_ |= uint32(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } else if bindSkipFieldType == typeMap.T_LONG { + v64_ = 0 + for shift = uint(0); shift <= 56; shift += 7 { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + if shift == 56 { + v64_ |= uint64(b_&0xFF) << shift + break + } else { + v64_ |= uint64(b_&0x7F) << shift + if b_ < 0x80 { + break + } + } + } + } else if bindSkipFieldType == typeMap.T_BOOLEAN { + if pos >= l { + return 0, io.ErrUnexpectedEOF + } + b_ = data[pos] + pos++ + } else { + return 0, fmt.Errorf("nested objects not implemented. ") + } + } + } + } + } + } + } + this.ThreadState[i] = bind.Temp + this.IDMap[id] = uint32(i) + } + return pos, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/reader/compressed.go b/vendor/github.com/grafana/jfr-parser/reader/compressed.go new file mode 100644 index 0000000000..73466243a7 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/reader/compressed.go @@ -0,0 +1,67 @@ +package reader + +import ( + "fmt" + "io" +) + +type VarReader interface { + VarShort() (int16, error) + VarInt() (int32, error) + VarLong() (int64, error) +} + +type compressed struct { + io.ByteReader +} + +func NewCompressed(r io.ByteReader) VarReader { + return compressed{ByteReader: r} +} + +func (c compressed) VarShort() (int16, error) { + n, err := c.ulong() + if err != nil { + return 0, err + } + if (n >> 48) > 0 { + return 0, fmt.Errorf("overflow: %d bigger than 16 bits", n) + } + return int16(n), nil +} + +func (c compressed) VarInt() (int32, error) { + n, err := c.ulong() + if err != nil { + return 0, err + } + if (n >> 32) > 0 { + return 0, fmt.Errorf("overflow: %d bigger than 32 bits", n) + } + return int32(n), nil +} + +func (c compressed) VarLong() (int64, error) { + n, err := c.ulong() + return int64(n), err +} + +func (c compressed) ulong() (n uint64, err error) { + s := 0 + for i := 0; i < 9; i++ { + b, err := c.ReadByte() + if err != nil { + return 0, err + } + if b&0x80 == 0 { + n |= uint64(b) << s + return n, nil + } + if i < 8 { + b &= 0x7f + } + n |= uint64(b) << s + s += 7 + } + return n, nil +} diff --git a/vendor/github.com/grafana/jfr-parser/reader/int.go b/vendor/github.com/grafana/jfr-parser/reader/int.go new file mode 100644 index 0000000000..fbe2a02a41 --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/reader/int.go @@ -0,0 +1,24 @@ +package reader + +import ( + "encoding/binary" + "io" +) + +func Short(r io.Reader) (int16, error) { + var n int16 + err := binary.Read(r, binary.BigEndian, &n) + return n, err +} + +func Int(r io.Reader) (int32, error) { + var n int32 + err 
:= binary.Read(r, binary.BigEndian, &n) + return n, err +} + +func Long(r io.Reader) (int64, error) { + var n int64 + err := binary.Read(r, binary.BigEndian, &n) + return n, err +} diff --git a/vendor/github.com/grafana/jfr-parser/reader/uncompressed.go b/vendor/github.com/grafana/jfr-parser/reader/uncompressed.go new file mode 100644 index 0000000000..9758e50f1a --- /dev/null +++ b/vendor/github.com/grafana/jfr-parser/reader/uncompressed.go @@ -0,0 +1,25 @@ +package reader + +import ( + "io" +) + +type uncompressed struct { + io.Reader +} + +func NewUncompressed(r io.Reader) VarReader { + return uncompressed{Reader: r} +} + +func (c uncompressed) VarShort() (int16, error) { + return Short(c) +} + +func (c uncompressed) VarInt() (int32, error) { + return Int(c) +} + +func (c uncompressed) VarLong() (int64, error) { + return Long(c) +} diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh deleted file mode 100644 index b45ef68831..0000000000 --- a/vendor/github.com/json-iterator/go/build.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e -set -x - -if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then - mkdir -p /tmp/build-golang/src/github.com/json-iterator - ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go -fi -export GOPATH=/tmp/build-golang -go get -u github.com/golang/dep/cmd/dep -cd /tmp/build-golang/src/github.com/json-iterator/go -exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/ugorji/go/codec/build.sh b/vendor/github.com/ugorji/go/codec/build.sh deleted file mode 100644 index 023faf3d4c..0000000000 --- a/vendor/github.com/ugorji/go/codec/build.sh +++ /dev/null @@ -1,370 +0,0 @@ -#!/bin/bash - -# Run all the different permutations of all the tests and other things -# This helps ensure that nothing gets broken. - -_tests() { - local vet="" # TODO: make it off - local gover=$( ${gocmd} version | cut -f 3 -d ' ' ) - [[ $( ${gocmd} version ) == *"gccgo"* ]] && zcover=0 - [[ $( ${gocmd} version ) == *"gollvm"* ]] && zcover=0 - case $gover in - go1.[7-9]*|go1.1[0-9]*|go2.*|devel*) true ;; - *) return 1 - esac - # note that codecgen requires fastpath, so you cannot do "codecgen codec.notfastpath" - # we test the following permutations wnich all execute different code paths as below. - echo "TestCodecSuite: (fastpath/unsafe), (!fastpath/unsafe), (fastpath/!unsafe), (!fastpath/!unsafe), (codecgen/unsafe)" - local echo=1 - local nc=2 # count - local cpus="1,$(nproc)" - # if using the race detector, then set nc to - if [[ " ${zargs[@]} " =~ "-race" ]]; then - cpus="$(nproc)" - fi - local a=( "" "codec.notfastpath" "codec.safe" "codec.notfastpath codec.safe" "codecgen" ) - local b=() - local c=() - for i in "${a[@]}" - do - local i2=${i:-default} - [[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'alltests $i'; RUN: 'TestCodecSuite'" - [[ "$zcover" == "1" ]] && c=( -coverprofile "${i2// /-}.cov.out" ) - true && - ${gocmd} vet -printfuncs "errorf" "$@" && - if [[ "$echo" == 1 ]]; then set -o xtrace; fi && - ${gocmd} test ${zargs[*]} ${ztestargs[*]} -vet "$vet" -tags "alltests $i" -count $nc -cpu $cpus -run "TestCodecSuite" "${c[@]}" "$@" & - if [[ "$echo" == 1 ]]; then set +o xtrace; fi - b+=("${i2// /-}.cov.out") - [[ "$zwait" == "1" ]] && wait - - # if [[ "$?" 
!= 0 ]]; then return 1; fi - done - if [[ "$zextra" == "1" ]]; then - [[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'codec.notfastpath x'; RUN: 'Test.*X$'" - [[ "$zcover" == "1" ]] && c=( -coverprofile "x.cov.out" ) - ${gocmd} test ${zargs[*]} ${ztestargs[*]} -vet "$vet" -tags "codec.notfastpath x" -count $nc -run 'Test.*X$' "${c[@]}" & - b+=("x.cov.out") - [[ "$zwait" == "1" ]] && wait - fi - wait - # go tool cover is not supported for gccgo, gollvm, other non-standard go compilers - [[ "$zcover" == "1" ]] && - command -v gocovmerge && - gocovmerge "${b[@]}" > __merge.cov.out && - ${gocmd} tool cover -html=__merge.cov.out -} - -# is a generation needed? -_ng() { - local a="$1" - if [[ ! -e "$a" ]]; then echo 1; return; fi - for i in `ls -1 *.go.tmpl gen.go values_test.go` - do - if [[ "$a" -ot "$i" ]]; then echo 1; return; fi - done -} - -_prependbt() { - cat > ${2} <> ${2} - rm -f ${1} -} - -# _build generates fast-path.go and gen-helper.go. -_build() { - if ! [[ "${zforce}" || $(_ng "fast-path.generated.go") || $(_ng "gen-helper.generated.go") || $(_ng "gen.generated.go") ]]; then return 0; fi - - if [ "${zbak}" ]; then - _zts=`date '+%m%d%Y_%H%M%S'` - _gg=".generated.go" - [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak - [ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak - [ -e "gen${_gg}" ] && mv gen${_gg} gen${_gg}__${_zts}.bak - fi - rm -f gen-helper.generated.go fast-path.generated.go gen.generated.go \ - *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go - - cat > gen.generated.go <> gen.generated.go < gen-dec-map.go.tmpl - cat >> gen.generated.go <> gen.generated.go < gen-dec-array.go.tmpl - cat >> gen.generated.go <> gen.generated.go < gen-enc-chan.go.tmpl - cat >> gen.generated.go < gen-from-tmpl.codec.generated.go < gen-from-tmpl.sort-slice-stubs.generated.go <> gen-from-tmpl.sort-slice-stubs.generated.go < bench/shared_test.go - - # explicitly return 0 if this passes, else return 1 - local btags="codec.notfastpath codec.safe codecgen.exec" - rm -f sort-slice.generated.go fast-path.generated.go gen-helper.generated.go mammoth_generated_test.go mammoth2_generated_test.go - - cat > gen-from-tmpl.sort-slice.generated.go < gen-from-tmpl.generated.go < $f <>$f - if [[ "$i" != "master" ]]; then i="release-branch.go$i"; fi - (false || - (echo "===== BUILDING GO SDK for branch: $i ... =====" && - cd $GOROOT && - git checkout -f $i && git reset --hard && git clean -f . && - cd src && ./make.bash >>$f 2>&1 && sleep 1 ) ) && - echo "===== GO SDK BUILD DONE =====" && - _prebuild && - echo "===== PREBUILD DONE with exit: $? =====" && - _tests "$@" - if [[ "$?" 
!= 0 ]]; then return 1; fi - done - zforce=${makeforce} - echo "++++++++ RELEASE TEST SUITES ALL PASSED ++++++++" -} - -_usage() { - # hidden args: - # -pf [p=prebuild (f=force)] - - cat < t=tests [e=extra, s=short, o=cover, w=wait] - -[md] -> [m=make, d=race detector] - -[n l i] -> [n=inlining diagnostics, l=mid-stack inlining, i=check inlining for path (path)] - -v -> v=verbose -EOF - if [[ "$(type -t _usage_run)" = "function" ]]; then _usage_run ; fi -} - -_main() { - if [[ -z "$1" ]]; then _usage; return 1; fi - local x # determines the main action to run in this build - local zforce # force - local zcover # generate cover profile and show in browser when done - local zwait # run tests in sequence, not parallel ie wait for one to finish before starting another - local zextra # means run extra (python based tests, etc) during testing - - local ztestargs=() - local zargs=() - local zverbose=() - local zbenchflags="" - - local gocmd=${MYGOCMD:-go} - - OPTIND=1 - while getopts ":cetmnrgpfvldsowkxyzi" flag - do - case "x$flag" in - 'xo') zcover=1 ;; - 'xe') zextra=1 ;; - 'xw') zwait=1 ;; - 'xf') zforce=1 ;; - 'xs') ztestargs+=("-short") ;; - 'xv') zverbose+=(1) ;; - 'xl') zargs+=("-gcflags"); zargs+=("-l=4") ;; - 'xn') zargs+=("-gcflags"); zargs+=("-m=2") ;; - 'xd') zargs+=("-race") ;; - # 'xi') x='i'; zbenchflags=${OPTARG} ;; - x\?) _usage; return 1 ;; - *) x=$flag ;; - esac - done - shift $((OPTIND-1)) - # echo ">>>> _main: extra args: $@" - case "x$x" in - 'xt') _tests "$@" ;; - 'xm') _make "$@" ;; - 'xr') _release "$@" ;; - 'xg') _go ;; - 'xp') _prebuild "$@" ;; - 'xc') _clean "$@" ;; - 'xx') _analyze_checks "$@" ;; - 'xy') _analyze_debug_types "$@" ;; - 'xz') _analyze_do_inlining_and_more "$@" ;; - 'xk') _go_compiler_validation_suite ;; - 'xi') _check_inlining_one "$@" ;; - esac - # unset zforce zargs zbenchflags -} - -[ "." 
= `dirname $0` ] && _main "$@" - diff --git a/vendor/modules.txt b/vendor/modules.txt index 4bc0962315..5ae4af3563 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -64,6 +64,20 @@ github.com/GuanceCloud/cliutils/pipeline/ptinput/utils github.com/GuanceCloud/cliutils/pipeline/stats github.com/GuanceCloud/cliutils/pkg/hash github.com/GuanceCloud/cliutils/point +github.com/GuanceCloud/cliutils/pprofparser/cfg +github.com/GuanceCloud/cliutils/pprofparser/domain/events +github.com/GuanceCloud/cliutils/pprofparser/domain/languages +github.com/GuanceCloud/cliutils/pprofparser/domain/parameter +github.com/GuanceCloud/cliutils/pprofparser/domain/pprof +github.com/GuanceCloud/cliutils/pprofparser/domain/quantity +github.com/GuanceCloud/cliutils/pprofparser/domain/tracing +github.com/GuanceCloud/cliutils/pprofparser/service/parsing +github.com/GuanceCloud/cliutils/pprofparser/service/storage +github.com/GuanceCloud/cliutils/pprofparser/tools/filepathtoolkit +github.com/GuanceCloud/cliutils/pprofparser/tools/jsontoolkit +github.com/GuanceCloud/cliutils/pprofparser/tools/logtoolkit +github.com/GuanceCloud/cliutils/pprofparser/tools/mathtoolkit +github.com/GuanceCloud/cliutils/pprofparser/tools/parsetoolkit github.com/GuanceCloud/cliutils/system/rtpanic github.com/GuanceCloud/cliutils/tracer # github.com/GuanceCloud/confd v0.1.101 @@ -134,6 +148,9 @@ github.com/GuanceCloud/tracing-protos/skywalking-gen-go/language/profile/v3/comp github.com/GuanceCloud/tracing-protos/skywalking-gen-go/logging/v3 github.com/GuanceCloud/tracing-protos/skywalking-gen-go/management/v3 github.com/GuanceCloud/tracing-protos/skywalking-gen-go/management/v3/compat +# github.com/GuanceCloud/zipstream v0.1.0 +## explicit; go 1.12 +github.com/GuanceCloud/zipstream # github.com/IBM/sarama v1.41.2 ## explicit; go 1.17 github.com/IBM/sarama @@ -881,6 +898,17 @@ github.com/gorilla/websocket # github.com/gosnmp/gosnmp v1.35.0 ## explicit; go 1.17 github.com/gosnmp/gosnmp +# github.com/grafana/jfr-parser v0.0.1 => github.com/GuanceCloud/jfr-parser v0.8.6 +## explicit; go 1.21 +github.com/grafana/jfr-parser/common/attributes +github.com/grafana/jfr-parser/common/filters +github.com/grafana/jfr-parser/common/types +github.com/grafana/jfr-parser/common/units +github.com/grafana/jfr-parser/internal/utils +github.com/grafana/jfr-parser/parser +github.com/grafana/jfr-parser/parser/types +github.com/grafana/jfr-parser/parser/types/def +github.com/grafana/jfr-parser/reader # github.com/grafana/loki v1.6.2-0.20210806161513-f5fd02966003 ## explicit; go 1.15 github.com/grafana/loki/pkg/loghttp @@ -2481,6 +2509,7 @@ sigs.k8s.io/structured-merge-diff/v4/value sigs.k8s.io/yaml # github.com/c-bata/go-prompt => github.com/coanor/go-prompt v0.2.6 # github.com/google/gopacket => github.com/GuanceCloud/gopacket v0.0.1 +# github.com/grafana/jfr-parser => github.com/GuanceCloud/jfr-parser v0.8.6 # github.com/influxdata/influxdb1-client => github.com/GuanceCloud/influxdb1-client v0.1.8 # github.com/iovisor/gobpf => github.com/DataDog/gobpf v0.0.0-20210322155958-9866ef4cd22c # github.com/kardianos/service => github.com/GuanceCloud/service v1.2.4
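
Note on the varint scheme used throughout the vendored parser: every generated Parse method above inlines the same LEB128-style decoder (reader/compressed.go exposes the equivalent logic as a VarReader over an io.ByteReader). The sketch below is illustrative only (the helper names are not part of the jfr-parser API) and shows the two variants the generated code repeats: 32-bit values capped at five bytes, and 64-bit values capped at nine bytes, where the ninth byte contributes all eight of its bits.

    package main

    import (
    	"errors"
    	"fmt"
    	"io"
    )

    var errIntOverflow = errors.New("varint overflows 32 bits")

    // readUvarint32 mirrors the generated 32-bit path: seven payload bits per
    // byte, a set high bit means another byte follows, and anything past five
    // bytes (shift 28) is rejected as an overflow.
    func readUvarint32(data []byte, pos int) (uint32, int, error) {
    	var v uint32
    	for shift := uint(0); ; shift += 7 {
    		if shift >= 32 {
    			return 0, pos, errIntOverflow
    		}
    		if pos >= len(data) {
    			return 0, pos, io.ErrUnexpectedEOF
    		}
    		b := data[pos]
    		pos++
    		v |= uint32(b&0x7F) << shift
    		if b < 0x80 {
    			return v, pos, nil
    		}
    	}
    }

    // readUvarint64 mirrors the generated T_LONG path: at most nine bytes, and
    // the ninth byte (shift 56) is taken whole with no continuation check.
    func readUvarint64(data []byte, pos int) (uint64, int, error) {
    	var v uint64
    	for shift := uint(0); shift <= 56; shift += 7 {
    		if pos >= len(data) {
    			return 0, pos, io.ErrUnexpectedEOF
    		}
    		b := data[pos]
    		pos++
    		if shift == 56 {
    			v |= uint64(b) << shift
    			break
    		}
    		v |= uint64(b&0x7F) << shift
    		if b < 0x80 {
    			break
    		}
    	}
    	return v, pos, nil
    }

    func main() {
    	v, next, err := readUvarint32([]byte{0x96, 0x01}, 0) // 0x16 | 0x01<<7 = 150
    	fmt.Println(v, next, err)                            // 150 2 <nil>

    	w, _, _ := readUvarint64([]byte{0x80, 0x01}, 0) // 0 | 0x01<<7 = 128
    	fmt.Println(w)                                  // 128
    }

Strings follow the same convention in the generated files: a one-byte tag (tags 0 and 1 both leave the string empty; tag 3 is a varint length followed by that many UTF-8 bytes), with the byte slice converted to a string in place via unsafe.Pointer rather than copied.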