Mirror of https://gitlab.com/niansa/libjustlm.git

Final fixup step #3

niansa committed 2023-05-19 16:20:51 +02:00
parent 4974338e41
commit b17cc6ffbd
4 changed files with 12 additions and 2 deletions

.gitmodules (3 additions)

@@ -4,3 +4,6 @@
 [submodule "llama.cpp-alibi"]
 	path = llama.cpp-alibi
 	url = https://github.com/manyoso/llama.cpp.git
+[submodule "llama.cpp-mainline"]
+	path = llama.cpp-mainline
+	url = https://github.com/ggerganov/llama.cpp.git

llama.cpp-mainline (new submodule, 1 addition)

@@ -0,0 +1 @@
+Subproject commit 5ea43392731040b454c293123839b90e159cbb99
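Note that a fresh clone will need the new submodule fetched before configuring, e.g. with git submodule update --init --recursive.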

CMakeLists.txt (7 additions, 1 deletion)

@@ -332,10 +332,16 @@ function(include_ggml DIRECTORY SUFFIX WITH_LLAMA)
     endif()
 
     if (WITH_LLAMA)
+        # Backwards compatibility with old llama.cpp versions
+        set(LLAMA_UTIL_SOURCE_FILE llama-util.h)
+        if (NOT EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${DIRECTORY}/${LLAMA_UTIL_SOURCE_FILE})
+            set(LLAMA_UTIL_SOURCE_FILE llama_util.h)
+        endif()
+
         add_library(llama${SUFFIX}
                     ${DIRECTORY}/llama.cpp
                     ${DIRECTORY}/llama.h
-                    ${DIRECTORY}/llama_util.h)
+                    ${DIRECTORY}/${LLAMA_UTIL_SOURCE_FILE})
 
         target_include_directories(llama${SUFFIX} PUBLIC ${DIRECTORY})
         target_compile_features(llama${SUFFIX} PUBLIC cxx_std_11) # don't bump
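Context for this hunk: newer llama.cpp revisions ship the helper header as llama-util.h, while older trees (such as the pinned alibi fork) still name it llama_util.h, so the probe above picks whichever exists and both submodules build through the same function.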

LLaMA backend source (1 addition, 1 deletion)

@@ -20,6 +20,6 @@ bool magic_match(uint32_t magic) {
 LM::Inference *construct(const std::string &weights_path, std::ifstream& f, const LM::Inference::Params &p) {
     f.close();
-    return new LM::LLaMaInference(weights_path, p);
+    return new LM::LLaMAInference(weights_path, p);
 }
 }
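These two functions read like the exported entry points of justlm's pluggable backend scheme: the host reads the leading magic value from the weights file, asks each backend whether it recognizes it, and lets the first match construct the inference object. Below is a minimal sketch of such a probing loader. Only the two signatures above come from the diff; the dlopen-based lookup, the justlm.hpp include, and the extern "C" export are assumptions made for illustration.

#include <cstdint>
#include <fstream>
#include <string>
#include <dlfcn.h>
#include <justlm.hpp> // assumed public header declaring LM::Inference and LM::Inference::Params

// Signatures copied from the hunk above; assumed to be exported unmangled (extern "C").
using magic_match_fn = bool (*)(uint32_t);
using construct_fn = LM::Inference *(*)(const std::string &, std::ifstream &,
                                        const LM::Inference::Params &);

// Try one backend plugin; returns nullptr if it does not recognize the weights.
LM::Inference *try_backend(const char *plugin_so, const std::string &weights_path,
                           const LM::Inference::Params &params) {
    void *lib = dlopen(plugin_so, RTLD_NOW); // handle intentionally kept open in this sketch
    if (!lib) return nullptr;

    auto magic_match = reinterpret_cast<magic_match_fn>(dlsym(lib, "magic_match"));
    auto construct = reinterpret_cast<construct_fn>(dlsym(lib, "construct"));
    if (!magic_match || !construct) return nullptr;

    std::ifstream f(weights_path, std::ios::binary);
    uint32_t magic = 0;
    f.read(reinterpret_cast<char *>(&magic), sizeof(magic)); // first four bytes identify the format
    f.seekg(0);

    if (!magic_match(magic)) return nullptr;
    return construct(weights_path, f, params); // construct() closes f itself, as the hunk shows
}

A caller would then probe each built plugin in turn, e.g. try_backend("libjustlm_llama.so", "ggml-model.bin", LM::Inference::Params{}); the plugin file name here is hypothetical.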