Cleanup/Minimalization of C++ Reflection patch #4209

Closed
3 changes: 3 additions & 0 deletions .gitignore
@@ -99,3 +99,6 @@ tests/test-tokenizer-0-llama
tests/test-tokenizer-0-falcon
tests/test-tokenizer-1-llama
tests/test-tokenizer-1-bpe
+/#llama.cpp#
+#*
+\\#*
31 changes: 22 additions & 9 deletions CMakeLists.txt
@@ -104,7 +104,7 @@ option(LLAMA_BUILD_SERVER "llama: build server example"
# Compile flags
#

-set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED true)
set(CMAKE_C_STANDARD 11)
set(CMAKE_C_STANDARD_REQUIRED true)
@@ -230,7 +230,12 @@ if (LLAMA_BLAS)

message(STATUS "BLAS found, Includes: ${BLAS_INCLUDE_DIRS}")
add_compile_options(${BLAS_LINKER_FLAGS})
-add_compile_definitions(GGML_USE_OPENBLAS)
+
+# from https://github.com/NVIDIA/cutlass
+make_directory("${PROJECT_BINARY_DIR}/nvcc_tmp")
+set(cuda_flags --keep "SHELL:--keep-dir ${PROJECT_BINARY_DIR}/nvcc_tmp" ${cuda_flags})
+
+# add_compile_definitions(GGML_USE_OPENBLAS)
if (${BLAS_INCLUDE_DIRS} MATCHES "mkl" AND (${LLAMA_BLAS_VENDOR} MATCHES "Generic" OR ${LLAMA_BLAS_VENDOR} MATCHES "Intel"))
add_compile_definitions(GGML_BLAS_USE_MKL)
endif()
@@ -312,7 +317,7 @@ if (LLAMA_MPI)
if (MPI_C_FOUND)
message(STATUS "MPI found")
set(GGML_HEADERS_MPI ggml-mpi.h)
-set(GGML_SOURCES_MPI ggml-mpi.c ggml-mpi.h)
+set(GGML_SOURCES_MPI ggml-mpi.cpp ggml-mpi.h)
add_compile_definitions(GGML_USE_MPI)
add_compile_definitions(${MPI_C_COMPILE_DEFINITIONS})
if (NOT MSVC)
@@ -438,6 +443,9 @@ if (NOT cuda_host_flags STREQUAL "")
set(cuda_flags ${cuda_flags} -Xcompiler ${cuda_host_flags})
endif()

+#
+set(cuda_flags --verbose -G ${cuda_flags})
+
add_compile_options("$<$<COMPILE_LANGUAGE:CUDA>:${cuda_flags}>")

if (WIN32)
@@ -485,8 +493,10 @@ if (NOT MSVC)
add_link_options(-static-libgcc -static-libstdc++)
endif()
endif()
+add_link_options("-Wl,-Map=${TARGET}.map")
+
if (LLAMA_GPROF)
-add_compile_options(-pg)
+add_compile_options(-pg)
endif()
endif()

@@ -645,13 +655,16 @@ if (GGML_USE_CPU_HBM)
endif()

add_library(ggml OBJECT
-ggml.c
+ggml.cpp
ggml.h
-ggml-alloc.c
+print.hpp
+ggml-internal.hpp
+llama-internal.hpp
+ggml-alloc.cpp
ggml-alloc.h
-ggml-backend.c
+ggml-backend.cpp
ggml-backend.h
-ggml-quants.c
+ggml-quants.cpp
ggml-quants.h
${GGML_SOURCES_CUDA} ${GGML_HEADERS_CUDA}
${GGML_SOURCES_OPENCL} ${GGML_HEADERS_OPENCL}
@@ -683,7 +696,7 @@ add_library(llama
)

target_include_directories(llama PUBLIC .)
-target_compile_features(llama PUBLIC cxx_std_11) # don't bump
+target_compile_features(llama PUBLIC cxx_std_20) # don't bump
target_link_libraries(llama PRIVATE
ggml
${LLAMA_EXTRA_LIBS}
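
A brief aside on the CMakeLists.txt changes above (commentary, not part of the patch): the standard moves from C++11 to C++20 so that the former C sources (ggml.c, ggml-alloc.c, ggml-backend.c, ggml-quants.c) can be built as C++, and one practical reason the bump matters is that C code commonly uses C99-style designated initializers, which C++ compilers only accept from C++20 onward. A minimal, hypothetical sketch of that language issue, not taken from ggml:

// Hypothetical example: designated initializers are valid C11, but only
// become valid C++ with C++20, which additionally requires the designators
// to follow the declaration order of the struct members.
struct init_params {
    int  n_threads;
    bool use_mmap;
};

int main() {
    init_params p = { .n_threads = 4, .use_mmap = true };  // ill-formed before C++20
    return p.n_threads == 4 ? 0 : 1;
}

Compiled with g++ -std=c++20 this is accepted as-is; under -std=c++11 the initializer is rejected, which is consistent with the CMAKE_CXX_STANDARD and cxx_std_20 changes in the diff.
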
24 changes: 12 additions & 12 deletions Makefile
@@ -116,7 +116,7 @@ endif
# keep standard at C11 and C++11
MK_CPPFLAGS = -I. -Icommon
MK_CFLAGS = -std=c11 -fPIC
-MK_CXXFLAGS = -std=c++11 -fPIC
+MK_CXXFLAGS = -std=c++20 -fPIC -fpermissive -DCPP_ONLY

# -Ofast tends to produce faster code, but may not be available for some compilers.
ifdef LLAMA_FAST
@@ -502,7 +502,7 @@ ggml-metal.o: ggml-metal.m ggml-metal.h
endif # LLAMA_METAL

ifdef LLAMA_MPI
-ggml-mpi.o: ggml-mpi.c ggml-mpi.h
+ggml-mpi.o: ggml-mpi.cpp ggml-mpi.h
$(CC) $(CFLAGS) -c $< -o $@
endif # LLAMA_MPI

@@ -537,17 +537,17 @@ $(info )
# Build library
#

-ggml.o: ggml.c ggml.h ggml-cuda.h
-$(CC) $(CFLAGS) -c $< -o $@
+ggml.o: ggml.cpp ggml.h ggml-cuda.h
+$(CXX) $(CXXFLAGS) -c $< -o $@

-ggml-alloc.o: ggml-alloc.c ggml.h ggml-alloc.h
-$(CC) $(CFLAGS) -c $< -o $@
+ggml-alloc.o: ggml-alloc.cpp ggml.h ggml-alloc.h
+$(CXX) $(CXXFLAGS) -c $< -o $@

-ggml-backend.o: ggml-backend.c ggml.h ggml-backend.h
-$(CC) $(CFLAGS) -c $< -o $@
+ggml-backend.o: ggml-backend.cpp ggml.h ggml-backend.h
+$(CXX) $(CXXFLAGS) -c $< -o $@

-ggml-quants.o: ggml-quants.c ggml.h ggml-quants.h
-$(CC) $(CFLAGS) -c $< -o $@
+ggml-quants.o: ggml-quants.cpp ggml.h ggml-quants.h
+$(CXX) $(CXXFLAGS) -c $< -o $@

OBJS += ggml-alloc.o ggml-backend.o ggml-quants.o

@@ -734,5 +734,5 @@ tests/test-tokenizer-1-bpe: tests/test-tokenizer-1-bpe.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
tests/test-tokenizer-1-llama: tests/test-tokenizer-1-llama.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

-tests/test-c.o: tests/test-c.c llama.h
-$(CC) $(CFLAGS) -c $(filter-out %.h,$^) -o $@
+tests/test-c.o: tests/test-c.cpp llama.h
+$(CXX) $(CXXFLAGS) -c $(filter-out %.h,$^) -o $@
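
A similar aside on the Makefile changes (commentary, not part of the patch): the rules that used to invoke $(CC) with CFLAGS now invoke $(CXX) with CXXFLAGS on the renamed .cpp sources, and MK_CXXFLAGS gains -fpermissive, presumably because a fair amount of idiomatic C is ill-formed C++. A minimal, hypothetical example of the kind of construct affected, not taken from ggml:

// Hypothetical example: C implicitly converts malloc()'s void* result to any
// object pointer type, while C++ rejects that conversion. g++ reports it as
// an error tagged [-fpermissive], so the flag downgrades it to a warning; the
// portable alternative is an explicit cast, as used here.
#include <cstdlib>

int main() {
    // int *data = malloc(4 * sizeof(int));  // valid C, rejected by g++ without -fpermissive
    int *data = static_cast<int *>(std::malloc(4 * sizeof(int)));
    std::free(data);
    return 0;
}
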