# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#  * Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#  * Neither the name of NVIDIA CORPORATION nor the names of its contributors
#    may be used to endorse or promote products derived from this software
#    without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
cmake_minimum_required(VERSION 3.17)
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules/set_ifndef.cmake)
set_ifndef(TRTLLM_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../tensorrt_llm)
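# set_ifndef only assigns a value when the variable is not already defined, so
# TRTLLM_DIR can be overridden at configure time, e.g. (path illustrative):
#   cmake -DTRTLLM_DIR=/opt/tensorrt_llm ..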
include_directories(${TRTLLM_DIR} ${TRTLLM_DIR}/cpp/include)
include(${TRTLLM_DIR}/cpp/cmake/modules/find_library_create_target.cmake)
project(tritontensorrtllmbackend LANGUAGES C CXX)
add_compile_options("-DENABLE_MULTI_DEVICE=1")
# https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_dual_abi.html
option(USE_CXX11_ABI "Use the CXX11 ABI of libstdc++" OFF)
message(STATUS "USE_CXX11_ABI: ${USE_CXX11_ABI}")
if(USE_CXX11_ABI)
add_compile_options("-D_GLIBCXX_USE_CXX11_ABI=1")
else()
add_compile_options("-D_GLIBCXX_USE_CXX11_ABI=0")
endif()
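# Note: this ABI choice must match the one used to build the prebuilt
# libraries linked below (notably libtensorrt_llm.so); a mismatch typically
# surfaces as undefined std::__cxx11 symbols at link time. To opt in, e.g.:
#   cmake -DUSE_CXX11_ABI=ON ..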
#
# Options
#
# Must include options required for this project as well as any projects
# included in this one by FetchContent.
#
# TRITON_ENABLE_GPU is set to OFF because this code does not use Triton's
# GPU-related features directly; the TRT-LLM backend manages GPU usage itself.
option(TRITON_ENABLE_GPU "Enable GPU support in backend" OFF)
option(TRITON_ENABLE_STATS "Include statistics collection in the backend" ON)
option(TRITON_ENABLE_METRICS "Include metrics support in server" ON)
option(BUILD_TESTS "Build Google tests" OFF)
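# Tests are wired up at the bottom of this file; a typical invocation is,
# e.g.:
#   cmake -DBUILD_TESTS=ON .. && cmake --build . && ctest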
if(TRITON_ENABLE_METRICS AND NOT TRITON_ENABLE_STATS)
message(
FATAL_ERROR "TRITON_ENABLE_METRICS=ON requires TRITON_ENABLE_STATS=ON")
endif()
set(TRITON_COMMON_REPO_TAG
"main"
CACHE STRING "Tag for triton-inference-server/common repo")
set(TRITON_CORE_REPO_TAG
"main"
CACHE STRING "Tag for triton-inference-server/core repo")
set(TRITON_BACKEND_REPO_TAG
"main"
CACHE STRING "Tag for triton-inference-server/backend repo")
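# To build against a pinned Triton release rather than main, pass matching
# tags for all three repos, e.g. (branch name illustrative):
#   cmake -DTRITON_COMMON_REPO_TAG=r24.08 -DTRITON_CORE_REPO_TAG=r24.08 \
#         -DTRITON_BACKEND_REPO_TAG=r24.08 ..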
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
endif()
set(COMMON_HEADER_DIRS ${PROJECT_SOURCE_DIR} ${CUDA_PATH}/include)
message(STATUS "COMMON_HEADER_DIRS: ${COMMON_HEADER_DIRS}")
#
# Dependencies
#
# FetchContent requires us to include the transitive closure of all repos that
# we depend on so that we can override the tags.
#
include(FetchContent)
FetchContent_Declare(
repo-common
GIT_REPOSITORY https://github.com/triton-inference-server/common.git
GIT_TAG ${TRITON_COMMON_REPO_TAG}
GIT_SHALLOW ON)
FetchContent_Declare(
repo-core
GIT_REPOSITORY https://github.com/triton-inference-server/core.git
GIT_TAG ${TRITON_CORE_REPO_TAG}
GIT_SHALLOW ON)
FetchContent_Declare(
repo-backend
GIT_REPOSITORY https://github.com/triton-inference-server/backend.git
GIT_TAG ${TRITON_BACKEND_REPO_TAG}
GIT_SHALLOW ON)
FetchContent_MakeAvailable(repo-common repo-core repo-backend)
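# FetchContent_MakeAvailable clones each repo at the requested tag and adds it
# to the build via add_subdirectory(), which defines the triton-common-*,
# triton-core-* and triton-backend-* targets linked against below.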
#
# The backend must be built into a shared library. Use an ldscript to hide all
# symbols except for the TRITONBACKEND API.
#
configure_file(src/libtriton_tensorrtllm.ldscript
libtriton_tensorrtllm.ldscript COPYONLY)
set(SRCS src/libtensorrtllm.cc src/model_instance_state.cc src/model_state.cc
src/utils.cc)
add_library(triton-tensorrt-llm-backend SHARED ${SRCS})
enable_language(CUDA)
find_package(CUDA ${CUDA_REQUIRED_VERSION} REQUIRED)
find_package(Python3 COMPONENTS Interpreter Development)
find_library(
tensorrt_llm libtensorrt_llm.so REQUIRED
PATHS ${Python3_SITEARCH}/tensorrt_llm/libs
${TRTLLM_DIR}/cpp/build/tensorrt_llm
${CMAKE_CURRENT_SOURCE_DIR}/../tensorrt_llm/cpp/build/tensorrt_llm)
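# The search prefers a pip-installed tensorrt_llm wheel (Python3_SITEARCH) and
# falls back to an in-tree C++ build. Since find_library caches its result,
# the location can also be forced explicitly, e.g. (path illustrative):
#   cmake -Dtensorrt_llm=/path/to/libtensorrt_llm.so ..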
find_library(
nvinfer_plugin_tensorrt_llm libnvinfer_plugin_tensorrt_llm.so REQUIRED
PATHS
${Python3_SITEARCH}/tensorrt_llm/libs
${TRTLLM_DIR}/cpp/build/tensorrt_llm/plugins
${CMAKE_CURRENT_SOURCE_DIR}/../tensorrt_llm/cpp/build/tensorrt_llm/plugins)
find_program(
TRTLLM_EXECUTOR_WORKER executorWorker REQUIRED
PATHS
${Python3_SITEARCH}/tensorrt_llm/bin
${TRTLLM_DIR}/cpp/build/tensorrt_llm/executor_worker
${CMAKE_CURRENT_SOURCE_DIR}/../tensorrt_llm/cpp/build/tensorrt_llm/executor_worker
)
install(
PROGRAMS ${TRTLLM_EXECUTOR_WORKER}
DESTINATION ${CMAKE_BINARY_DIR}
RENAME trtllmExecutorWorker)
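# The worker binary is staged into the build tree under the name
# trtllmExecutorWorker; the backend launches it when TRT-LLM runs in
# orchestrator (multi-process) mode, so it must ship alongside the backend
# library.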
find_library(
CUDNN_LIB cudnn
HINTS ${CUDA_TOOLKIT_ROOT_DIR} ${CUDNN_ROOT_DIR}
PATH_SUFFIXES lib64 lib)
find_library(
CUBLAS_LIB cublas
HINTS ${CUDA_TOOLKIT_ROOT_DIR}
PATH_SUFFIXES lib64 lib lib/stubs)
find_library(
CUBLASLT_LIB cublasLt
HINTS ${CUDA_TOOLKIT_ROOT_DIR}
PATH_SUFFIXES lib64 lib lib/stubs)
find_library(
CUDART_LIB cudart
HINTS ${CUDA_TOOLKIT_ROOT_DIR}
PATH_SUFFIXES lib lib64)
find_library(
CUDA_DRV_LIB cuda
HINTS ${CUDA_TOOLKIT_ROOT_DIR}
PATH_SUFFIXES lib lib64 lib/stubs lib64/stubs)
find_library(
NVIDIA_ML_LIB nvidia-ml
HINTS ${CUDA_TOOLKIT_ROOT_DIR}
PATH_SUFFIXES lib lib64 lib/stubs lib64/stubs)
set(CUDA_LIBRARIES ${CUDART_LIB} ${NVIDIA_ML_LIB})
find_package(MPI REQUIRED)
message(STATUS "Using MPI_INCLUDE_PATH: ${MPI_INCLUDE_PATH}")
message(STATUS "Using MPI_LIBRARIES: ${MPI_LIBRARIES}")
# NCCL dependencies
set_ifndef(NCCL_LIB_DIR /usr/lib/x86_64-linux-gnu/)
set_ifndef(NCCL_INCLUDE_DIR /usr/include/)
find_library(NCCL_LIB nccl HINTS ${NCCL_LIB_DIR})
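# The NCCL defaults above assume an x86_64 Debian/Ubuntu layout; on other
# platforms pass the paths explicitly, e.g. (paths illustrative):
#   cmake -DNCCL_LIB_DIR=/usr/lib/aarch64-linux-gnu \
#         -DNCCL_INCLUDE_DIR=/usr/include ..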
# TRT_LIB_DIR and TRT_INCLUDE_DIR should be aligned with the path in the
# environment_setup.sh script
set_ifndef(TRT_LIB_DIR
/usr/local/tensorrt/targets/${CMAKE_SYSTEM_PROCESSOR}-linux-gnu/lib)
set_ifndef(
TRT_INCLUDE_DIR
/usr/local/tensorrt/targets/${CMAKE_SYSTEM_PROCESSOR}-linux-gnu/include)
set(TRT_LIB nvinfer)
find_library_create_target(${TRT_LIB} nvinfer SHARED ${TRT_LIB_DIR})
file(STRINGS "${TRT_INCLUDE_DIR}/NvInferVersion.h" VERSION_STRINGS
REGEX "#define NV_TENSORRT_.*")
foreach(TYPE MAJOR MINOR PATCH BUILD)
string(REGEX MATCH "NV_TENSORRT_${TYPE} [0-9]+" TRT_TYPE_STRING
${VERSION_STRINGS})
string(REGEX MATCH "[0-9]+" TRT_${TYPE} ${TRT_TYPE_STRING})
endforeach(TYPE)
foreach(TYPE MAJOR MINOR PATCH)
string(REGEX MATCH "NV_TENSORRT_${TYPE} [0-9]+" TRT_TYPE_STRING
${VERSION_STRINGS})
string(REGEX MATCH "[0-9]+" TRT_SO_${TYPE} ${TRT_TYPE_STRING})
endforeach(TYPE)
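# NvInferVersion.h defines the TensorRT version as a set of macros, e.g.
# (values illustrative):
#   #define NV_TENSORRT_MAJOR 10
#   #define NV_TENSORRT_MINOR 0
#   #define NV_TENSORRT_PATCH 1
# The regexes above extract the numbers into TRT_MAJOR/MINOR/PATCH/BUILD and
# the TRT_SO_* variants used for the shared-library soversion below.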
set(TRT_VERSION
"${TRT_MAJOR}.${TRT_MINOR}.${TRT_PATCH}"
CACHE STRING "TensorRT project version")
set(TRT_SOVERSION
"${TRT_SO_MAJOR}"
CACHE STRING "TensorRT library so version")
message(
STATUS
"Building for TensorRT version: ${TRT_VERSION}, library version: ${TRT_SOVERSION}"
)
if(${TRT_MAJOR} GREATER_EQUAL 10)
add_definitions("-DTRT_LLM_USE_DIM64")
message(
STATUS "TensorRT version ${TRT_MAJOR} >= 10, int64 dimension is enabled")
endif()
list(APPEND COMMON_HEADER_DIRS ${TORCH_INCLUDE_DIRS} ${TRT_INCLUDE_DIR})
include_directories(${COMMON_HEADER_DIRS})
target_include_directories(
triton-tensorrt-llm-backend
PRIVATE ${TRTLLM_DIR}/cpp
${TRTLLM_DIR}/cpp/include
${CMAKE_CURRENT_SOURCE_DIR}/src
${CUDA_INCLUDE_DIRS}
${CUDNN_ROOT_DIR}/include
${NCCL_INCLUDE_DIR}
${3RDPARTY_DIR}/cutlass/include
${MPI_INCLUDE_PATH}
${COMMON_HEADER_DIRS})
target_compile_features(triton-tensorrt-llm-backend PRIVATE cxx_std_17)
set(COMPILE_OPTIONS
$<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-Wall
-Wextra
-Wno-unused-parameter
-Wno-deprecated-declarations
-Wno-type-limits>
$<$<CXX_COMPILER_ID:MSVC>:/Wall
/D_WIN32_WINNT=0x0A00
/EHsc>)
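# The generator expressions above select warning flags per compiler at
# generation time: -Wall/-Wextra (with a few noisy warnings disabled) for
# GCC/Clang/AppleClang, and /Wall plus Windows defines for MSVC.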
target_compile_options(triton-tensorrt-llm-backend PRIVATE ${COMPILE_OPTIONS})
if(TRITON_ENABLE_METRICS)
list(APPEND REPORTER_SRCS
src/custom_metrics_reporter/custom_metrics_reporter.cc)
list(APPEND REPORTER_HDRS
src/custom_metrics_reporter/custom_metrics_reporter.h)
add_library(triton-custom-metrics-reporter-library EXCLUDE_FROM_ALL
${REPORTER_SRCS} ${REPORTER_HDRS})
target_compile_features(triton-custom-metrics-reporter-library
PRIVATE cxx_std_17)
if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
target_compile_options(triton-custom-metrics-reporter-library
PRIVATE /W1 /D_WIN32_WINNT=0x0A00 /EHsc)
else()
target_compile_options(
triton-custom-metrics-reporter-library
PRIVATE -Wall -Wextra -Wno-unused-parameter -Wno-deprecated-declarations
-Werror)
endif()
set_target_properties(triton-custom-metrics-reporter-library
PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_link_libraries(
triton-custom-metrics-reporter-library
PUBLIC triton-common-json # from repo-common
triton-common-logging # from repo-common
triton-core-serverapi # from repo-core
triton-core-serverstub # from repo-core
triton-backend-utils # from repo-backend
${tensorrt_llm})
target_compile_definitions(triton-tensorrt-llm-backend
PRIVATE TRITON_ENABLE_METRICS=1)
target_link_libraries(triton-tensorrt-llm-backend
PRIVATE triton-custom-metrics-reporter-library)
endif()
target_link_libraries(
triton-tensorrt-llm-backend
PUBLIC ${tensorrt_llm}
triton-core-serverapi # from repo-core
triton-core-backendapi # from repo-core
triton-core-serverstub # from repo-core
triton-backend-utils # from repo-backend
${MPI_LIBRARIES}
${CUDA_LIBRARIES}
nvinfer
${nvinfer_plugin_tensorrt_llm})
FetchContent_Declare(
json
GIT_REPOSITORY https://github.com/nlohmann/json.git
GIT_TAG v3.11.2)
FetchContent_MakeAvailable(json)
target_link_libraries(triton-tensorrt-llm-backend
PRIVATE nlohmann_json::nlohmann_json)
if(WIN32)
set_target_properties(
triton-tensorrt-llm-backend PROPERTIES POSITION_INDEPENDENT_CODE ON
OUTPUT_NAME triton_tensorrtllm)
else()
set_target_properties(
triton-tensorrt-llm-backend
PROPERTIES
POSITION_INDEPENDENT_CODE ON
OUTPUT_NAME triton_tensorrtllm
LINK_DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/libtriton_tensorrtllm.ldscript
LINK_FLAGS
"-Wl,--version-script libtriton_tensorrtllm.ldscript -Wl,-rpath,'$ORIGIN' -Wl,--no-undefined"
)
endif()
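# On non-Windows builds the ldscript restricts the exported symbols to the
# TRITONBACKEND_* entry points, and the $ORIGIN rpath lets the dynamic loader
# resolve libtensorrt_llm.so and friends placed next to
# libtriton_tensorrtllm.so.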
if(BUILD_TESTS)
enable_testing()
add_subdirectory(tests)
endif()