Mirror of https://gitlab.com/niansa/libjustlm.git

Renamed llama-mainline to llama_old

niansa 2023-05-19 15:57:17 +02:00
parent b5e10d1fa3
commit 9bf70e3f5d
13 changed files with 17 additions and 16 deletions

.gitmodules

@@ -1,5 +1,5 @@
 [submodule "llama.cpp"]
-path = llama.cpp-mainline
+path = llama.cpp-old
 url = https://github.com/ggerganov/llama.cpp.git
 [submodule "llama.cpp-alibi"]
 path = llama.cpp-alibi

CMakeLists.txt

@@ -12,6 +12,7 @@ set(LM_PYBIND No CACHE BOOL "If justlm Python bindings should be build")
 set(LM_COSCHED No CACHE BOOL "If justlm should make use of CoSched")
 set(LM_NOEXCEPT No CACHE BOOL "If justlm exceptions should be disabled")
 set(LM_LLAMA Yes CACHE BOOL "If LLaMa model support should be built into justlm")
+set(LM_LLAMA_OLD Yes CACHE BOOL "If old LLaMa model support should be built into justlm")
 set(LM_GPTJ Yes CACHE BOOL "If GPT-J model support should be built into justlm")
 set(LM_MPT Yes CACHE BOOL "If MPT model support should be built into justlm")
@@ -35,11 +36,11 @@ endfunction()
 include(llama.cpp.cmake)
-include_ggml(llama.cpp-mainline _mainline Yes)
+include_ggml(llama.cpp-old _old Yes)
 include_ggml(llama.cpp-alibi _alibi No)
-add_library(justlm_g4a_common SHARED g4a-common.cpp g4a-common.hpp)
+add_library(justlm_g4a_common SHARED g4a_common.cpp g4a_common.hpp)
 if (LM_MPT)
@@ -50,14 +51,14 @@ endif()
 if (LM_GPTJ)
 add_library(justlm_gptj SHARED gptj.cpp justlm_gptj.hpp gptj/gptj.cpp gptj/gptj.hpp)
-target_link_libraries(justlm_gptj PRIVATE ggml_mainline justlm_g4a_common)
+target_link_libraries(justlm_gptj PRIVATE ggml_old justlm_g4a_common)
 target_justlm_setup(justlm_gptj)
 endif()
-if (LM_LLAMA)
-add_library(justlm_llama SHARED llama.cpp justlm_llama.hpp)
-target_link_libraries(justlm_llama PRIVATE ggml_mainline llama_mainline)
-target_justlm_setup(justlm_llama)
+if (LM_LLAMA_OLD)
+add_library(justlm_llama_old SHARED llama_old.cpp justlm_llama_old.hpp)
+target_link_libraries(justlm_llama_old PRIVATE ggml_old llama_old)
+target_justlm_setup(justlm_llama_old)
 endif()
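
For context, LM_LLAMA_OLD is an ordinary CMake cache option like the existing LM_* switches, so a parent project can pin it before justlm's CMakeLists.txt is processed. The snippet below is a minimal consumer-side sketch, assuming libjustlm is vendored as a subdirectory; the directory name is illustrative, and only LM_LLAMA_OLD and the justlm_llama_old target come from this commit:

    # Override the cache default before justlm's CMakeLists.txt runs.
    set(LM_LLAMA_OLD Yes CACHE BOOL "If old LLaMa model support should be built into justlm" FORCE)
    # Assumed vendoring layout; this defines justlm_llama_old next to justlm_gptj and justlm_mpt.
    add_subdirectory(libjustlm)

Without the override, the option simply takes the Yes default set in the hunk above.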

g4a_common.cpp

@@ -1,4 +1,4 @@
-#include "g4a-common.hpp"
+#include "g4a_common.hpp"
 #include <fstream>
 #include <regex>

gptj/gptj.cpp

@@ -1,6 +1,6 @@
 #include "gptj.hpp"
-#include "../g4a-common.hpp"
+#include "../g4a_common.hpp"
 #include <cassert>
 #include <cmath>

gptj/gptj.hpp

@@ -5,7 +5,7 @@
 #include <map>
 #include <ggml.h>
-#include "../g4a-common.hpp"
+#include "../g4a_common.hpp"
 // default hparams (GPT-J 6B)

justlm_gptj.hpp

@@ -4,7 +4,7 @@
 #include <random>
 #include <cstring>
 #include "gptj/gptj.hpp"
-#include "g4a-common.hpp"
+#include "g4a_common.hpp"
 namespace LM {

justlm_mpt.hpp

@@ -4,7 +4,7 @@
 #include <random>
 #include <cstring>
 #include "mpt/mpt.hpp"
-#include "g4a-common.hpp"
+#include "g4a_common.hpp"
 namespace LM {

llama_old.cpp

@@ -1,4 +1,4 @@
-#include "justlm_llama.hpp"
+#include "justlm_llama_old.hpp"
 #include "justlm.hpp"
 #include <string>

mpt/mpt.cpp

@@ -1,5 +1,5 @@
 #include "mpt.hpp"
-#include "../g4a-common.hpp"
+#include "../g4a_common.hpp"
 #include <cassert>
 #include <cmath>

mpt/mpt.hpp

@@ -1,6 +1,6 @@
 #ifndef MPT_H
 #define MPT_H
-#include "../g4a-common.hpp"
+#include "../g4a_common.hpp"
 #include <string>
 #include <vector>