cmake_minimum_required(VERSION 3.13) # for add_link_options
project("llama.cpp" C CXX)

set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
# Backend / acceleration selection (cache options; all project-prefixed).
option(LLAMA_BLAS                          "llama: use BLAS"                                OFF)
set(LLAMA_BLAS_VENDOR "Generic" CACHE STRING "llama: BLAS library vendor")
option(LLAMA_CUBLAS                        "llama: use CUDA"                                OFF)
#option(LLAMA_CUDA_CUBLAS                  "llama: use cuBLAS for prompt processing"        OFF)
option(LLAMA_CUDA_FORCE_DMMV               "llama: use dmmv instead of mmvq CUDA kernels"   OFF)
set(LLAMA_CUDA_DMMV_X      "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
set(LLAMA_CUDA_MMV_Y        "1" CACHE STRING "llama: y block size for mmv CUDA kernels")
set(LLAMA_CUDA_KQUANTS_ITER "2" CACHE STRING "llama: iters./thread per block for Q2_K/Q6_K")
set(LLAMA_CUDA_PEER_MAX_BATCH_SIZE "128" CACHE STRING
                                             "llama: max. batch size for using peer access")
option(LLAMA_HIPBLAS                       "llama: use hipBLAS"                             OFF)
option(LLAMA_MINGW_COMPAT                  "llama: use MinGW compatibility headers"         OFF)
option(LLAMA_CLBLAST                       "llama: use CLBlast"                             OFF)
option(LLAMA_METAL                         "llama: use Metal"                               ${LLAMA_METAL_DEFAULT})
option(LLAMA_METAL_NDEBUG                  "llama: disable Metal debugging"                 OFF)
option(LLAMA_MPI                           "llama: use MPI"                                 OFF)
option(LLAMA_K_QUANTS                      "llama: use k-quants"                            ON)
option(LLAMA_QKK_64                        "llama: use super-block size of 64 for k-quants" OFF)

# What to build besides the library (defaults depend on standalone vs. subproject).
option(LLAMA_BUILD_TESTS                   "llama: build tests"          ${LLAMA_STANDALONE})
option(LLAMA_BUILD_EXAMPLES                "llama: build examples"       ${LLAMA_STANDALONE})
option(LLAMA_BUILD_SERVER                  "llama: build server example" ON)
#
# Build info header
if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD")
    add_compile_definitions(_BSD_SOURCE)
endif()

# Opt-in shim for MinGW toolchains (see the LLAMA_MINGW_COMPAT option).
if (MINGW AND LLAMA_MINGW_COMPAT)
    add_compile_definitions(_USE_MINGW_COMPAT)
endif()

#
# libraries
@@ -695,6 +700,10 @@ add_library(llama
695700 )
696701
target_include_directories(llama PUBLIC .)
if (MINGW AND LLAMA_MINGW_COMPAT)
    # BUGFIX: `include_directories(PRIVATE ...)` is invalid — PRIVATE is not a
    # keyword of include_directories() (only AFTER/BEFORE/SYSTEM are), so it
    # would be added as a literal directory named "PRIVATE". Use the
    # target-scoped form so the compat headers apply only to the llama target.
    # Also use CMAKE_CURRENT_SOURCE_DIR instead of CMAKE_SOURCE_DIR so the
    # path stays correct if llama.cpp is built as a subproject.
    target_include_directories(llama PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/compat/mingw)
endif()

target_compile_features(llama PUBLIC cxx_std_11) # don't bump
699708target_link_libraries (llama PRIVATE
700709 ggml
set(GGML_PUBLIC_HEADERS "ggml.h"
    "${GGML_HEADERS_METAL}" "${GGML_HEADERS_MPI}" "${GGML_HEADERS_EXTRA}")

set_target_properties(ggml PROPERTIES PUBLIC_HEADER "${GGML_PUBLIC_HEADERS}")
# BUGFIX: CMAKE_INSTALL_INCDIR is not a variable CMake defines — the
# GNUInstallDirs module provides CMAKE_INSTALL_INCLUDEDIR. The misspelled
# name expands to "", sending public headers to the install prefix root.
# NOTE(review): assumes include(GNUInstallDirs) appears earlier in this
# file — confirm, otherwise add it.
install(TARGETS ggml
        PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
        ARCHIVE       DESTINATION ${CMAKE_INSTALL_LIBDIR}
        LIBRARY       DESTINATION ${CMAKE_INSTALL_LIBDIR})

set_target_properties(llama PROPERTIES PUBLIC_HEADER ${CMAKE_CURRENT_SOURCE_DIR}/llama.h)
install(TARGETS llama
        PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
        ARCHIVE       DESTINATION ${CMAKE_INSTALL_LIBDIR}
        LIBRARY       DESTINATION ${CMAKE_INSTALL_LIBDIR})
757766
758767install (
759768 FILES convert.py
0 commit comments