Mirror of https://github.com/nomic-ai/gpt4all.git
Synced 2025-08-14 00:04:03 -04:00

Compare commits: 18ca8901f0 ... 6200900677 (12 commits)
Commits in this comparison:

6200900677
4963db8f43
0efdbfcffe
315a1f2aa2
ae8eb297ac
1f749d7633
33557b1f39
64b409e0b8
e59946f05d
b72b409d40
59cae1132c
a0dae86a95
@@ -444,9 +444,6 @@ jobs:
  build-csharp-linux:
    docker:
      - image: mcr.microsoft.com/dotnet/sdk:7.0-jammy # Ubuntu 22.04
    steps:
      - when:
          condition: << pipeline.parameters.run-csharp-workflow >>
          steps:
            - checkout
            - attach_workspace:
@@ -495,9 +492,6 @@ jobs:
      name: win/default
      size: large
    shell: powershell.exe -ExecutionPolicy Bypass
    steps:
      - when:
          condition: << pipeline.parameters.run-csharp-workflow >>
          steps:
            - checkout
            - restore_cache:
@@ -544,9 +538,6 @@ jobs:
  build-csharp-macos:
    macos:
      xcode: "14.0.0"
    steps:
      - when:
          condition: << pipeline.parameters.run-csharp-workflow >>
          steps:
            - checkout
            - restore_cache:
@@ -717,7 +708,6 @@ workflows:
              only:
          requires:
            - hold
      # CSharp Jobs
      - build-csharp-linux:
          filters:
            branches:
@@ -745,4 +735,3 @@ workflows:
            - build-csharp-windows
            - build-csharp-linux
            - build-csharp-macos
@@ -20,7 +20,7 @@ endif()
 include_directories("${CMAKE_CURRENT_BINARY_DIR}")

 set(LLMODEL_VERSION_MAJOR 0)
-set(LLMODEL_VERSION_MINOR 2)
+set(LLMODEL_VERSION_MINOR 3)
 set(LLMODEL_VERSION_PATCH 0)
 set(LLMODEL_VERSION "${LLMODEL_VERSION_MAJOR}.${LLMODEL_VERSION_MINOR}.${LLMODEL_VERSION_PATCH}")
 project(llmodel VERSION ${LLMODEL_VERSION} LANGUAGES CXX C)
@@ -125,6 +125,10 @@ foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)
         add_library(mpt-${BUILD_VARIANT} SHARED
             mpt.cpp utils.h utils.cpp llmodel_shared.cpp llmodel_shared.h)
         prepare_target(mpt ggml-230511)
+
+        add_library(bert-${BUILD_VARIANT} SHARED
+            bert.cpp utils.h utils.cpp llmodel_shared.cpp llmodel_shared.h)
+        prepare_target(bert llama-mainline)
     endif()
 endforeach()
gpt4all-backend/bert.cpp (new file, 1067 lines): file diff suppressed because it is too large.
gpt4all-backend/bert_impl.h (new file, 44 lines)

@@ -0,0 +1,44 @@
#ifndef BERT_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
#error This file is NOT meant to be included outside of bert.cpp. Doing so is DANGEROUS. Be sure to know what you are doing before proceeding to #define BERT_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
#endif
#ifndef BERT_H
#define BERT_H

#include <string>
#include <functional>
#include <vector>
#include <memory>
#include "llmodel.h"

struct BertPrivate;
class Bert : public LLModel {
public:
    Bert();
    ~Bert();

    bool supportsEmbedding() const override { return true; }
    bool supportsCompletion() const override { return true; }
    bool loadModel(const std::string &modelPath) override;
    bool isModelLoaded() const override;
    size_t requiredMem(const std::string &modelPath) override;
    size_t stateSize() const override;
    size_t saveState(uint8_t *dest) const override;
    size_t restoreState(const uint8_t *src) override;
    void setThreadCount(int32_t n_threads) override;
    int32_t threadCount() const override;

    std::vector<float> embedding(const std::string &text) override;

private:
    std::unique_ptr<BertPrivate> d_ptr;

protected:
    std::vector<Token> tokenize(PromptContext &, const std::string&) const override;
    Token sampleToken(PromptContext &ctx) const override;
    std::string tokenToString(Token) const override;
    bool evalTokens(PromptContext &ctx, const std::vector<int32_t> &tokens) const override;
    int32_t contextLength() const override;
    const std::vector<Token>& endTokens() const override;
};

#endif // BERT_H
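(Not part of the diff: a minimal sketch of how the new Bert backend is reached through the public LLModel interface changed below. The model path, search path, and error handling are illustrative assumptions, not something this patch ships.)

    // embed_sketch.cpp: illustrative caller, not part of this changeset
    #include <iostream>
    #include <string>
    #include <vector>
    #include "llmodel.h"

    int main() {
        // The search path must point at the directory holding the built backend libraries
        // (libbert-default, libbert-avxonly, ...); "." is just an assumption here.
        LLModel::Implementation::setImplementationsSearchPath(".");

        // construct() sniffs the file magic and returns the matching backend; for a converted
        // MiniLM file such as ggml-all-MiniLM-L6-v2-f16.bin that is the Bert implementation.
        const std::string path = "./ggml-all-MiniLM-L6-v2-f16.bin";
        LLModel *model = LLModel::Implementation::construct(path);
        if (!model || !model->loadModel(path)) {
            std::cerr << "failed to load " << path << '\n';
            return 1;
        }

        std::vector<float> emb = model->embedding("The quick brown fox jumps over the lazy dog");
        std::cout << "embedding dimension: " << emb.size() << '\n';
        delete model;
        return 0;
    }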
@@ -16,6 +16,8 @@ public:
     Falcon();
     ~Falcon();

+    bool supportsEmbedding() const override { return false; }
+    bool supportsCompletion() const override { return true; }
     bool loadModel(const std::string &modelPath) override;
     bool isModelLoaded() const override;
     size_t requiredMem(const std::string &modelPath) override;

@@ -15,6 +15,8 @@ public:
     GPTJ();
     ~GPTJ();

+    bool supportsEmbedding() const override { return false; }
+    bool supportsCompletion() const override { return true; }
     bool loadModel(const std::string &modelPath) override;
     bool isModelLoaded() const override;
     size_t requiredMem(const std::string &modelPath) override;

@@ -15,6 +15,8 @@ public:
     LLamaModel();
     ~LLamaModel();

+    bool supportsEmbedding() const override { return false; }
+    bool supportsCompletion() const override { return true; }
     bool loadModel(const std::string &modelPath) override;
     bool isModelLoaded() const override;
     size_t requiredMem(const std::string &modelPath) override;
@@ -18,7 +18,7 @@
 std::string s_implementations_search_path = ".";

 static bool has_at_least_minimal_hardware() {
-#ifdef __x86_64__
+#if defined(__x86_64__) || defined(_M_X64)
 #ifndef _MSC_VER
     return __builtin_cpu_supports("avx");
 #else
@@ -30,7 +30,7 @@ static bool has_at_least_minimal_hardware() {
 }

 static bool requires_avxonly() {
-#ifdef __x86_64__
+#if defined(__x86_64__) || defined(_M_X64)
 #ifndef _MSC_VER
     return !__builtin_cpu_supports("avx2");
 #else
@@ -41,41 +41,42 @@ static bool requires_avxonly() {
 #endif
 }

-LLModel::Implementation::Implementation(Dlhandle &&dlhandle_) : dlhandle(new Dlhandle(std::move(dlhandle_))) {
-    auto get_model_type = dlhandle->get<const char *()>("get_model_type");
+LLModel::Implementation::Implementation(Dlhandle &&dlhandle_)
+    : m_dlhandle(new Dlhandle(std::move(dlhandle_))) {
+    auto get_model_type = m_dlhandle->get<const char *()>("get_model_type");
     assert(get_model_type);
-    modelType = get_model_type();
-    auto get_build_variant = dlhandle->get<const char *()>("get_build_variant");
+    m_modelType = get_model_type();
+    auto get_build_variant = m_dlhandle->get<const char *()>("get_build_variant");
     assert(get_build_variant);
-    buildVariant = get_build_variant();
-    magicMatch = dlhandle->get<bool(std::ifstream&)>("magic_match");
-    assert(magicMatch);
-    construct_ = dlhandle->get<LLModel *()>("construct");
-    assert(construct_);
+    m_buildVariant = get_build_variant();
+    m_magicMatch = m_dlhandle->get<bool(std::ifstream&)>("magic_match");
+    assert(m_magicMatch);
+    m_construct = m_dlhandle->get<LLModel *()>("construct");
+    assert(m_construct);
 }

 LLModel::Implementation::Implementation(Implementation &&o)
-    : construct_(o.construct_)
-    , modelType(o.modelType)
-    , buildVariant(o.buildVariant)
-    , magicMatch(o.magicMatch)
-    , dlhandle(o.dlhandle) {
-    o.dlhandle = nullptr;
+    : m_magicMatch(o.m_magicMatch)
+    , m_construct(o.m_construct)
+    , m_modelType(o.m_modelType)
+    , m_buildVariant(o.m_buildVariant)
+    , m_dlhandle(o.m_dlhandle) {
+    o.m_dlhandle = nullptr;
 }

 LLModel::Implementation::~Implementation() {
-    if (dlhandle) delete dlhandle;
+    if (m_dlhandle) delete m_dlhandle;
 }

 bool LLModel::Implementation::isImplementation(const Dlhandle &dl) {
     return dl.get<bool(uint32_t)>("is_g4a_backend_model_implementation");
 }

-const std::vector<LLModel::Implementation> &LLModel::implementationList() {
+const std::vector<LLModel::Implementation> &LLModel::Implementation::implementationList() {
     // NOTE: allocated on heap so we leak intentionally on exit so we have a chance to clean up the
     // individual models without the cleanup of the static list interfering
-    static auto* libs = new std::vector<LLModel::Implementation>([] () {
-        std::vector<LLModel::Implementation> fres;
+    static auto* libs = new std::vector<Implementation>([] () {
+        std::vector<Implementation> fres;

         auto search_in_directory = [&](const std::string& paths) {
             std::stringstream ss(paths);
@@ -107,17 +108,17 @@ const std::vector<LLModel::Implementation> &LLModel::implementationList() {
     return *libs;
 }

-const LLModel::Implementation* LLModel::implementation(std::ifstream& f, const std::string& buildVariant) {
+const LLModel::Implementation* LLModel::Implementation::implementation(std::ifstream& f, const std::string& buildVariant) {
     for (const auto& i : implementationList()) {
         f.seekg(0);
-        if (!i.magicMatch(f)) continue;
-        if (buildVariant != i.buildVariant) continue;
+        if (!i.m_magicMatch(f)) continue;
+        if (buildVariant != i.m_buildVariant) continue;
         return &i;
     }
     return nullptr;
 }

-LLModel *LLModel::construct(const std::string &modelPath, std::string buildVariant) {
+LLModel *LLModel::Implementation::construct(const std::string &modelPath, std::string buildVariant) {

     if (!has_at_least_minimal_hardware())
         return nullptr;
@@ -126,14 +127,15 @@ LLModel *LLModel::construct(const std::string &modelPath, std::string buildVaria
     std::ifstream f(modelPath, std::ios::binary);
     if (!f) return nullptr;
     // Get correct implementation
-    const LLModel::Implementation* impl = nullptr;
+    const Implementation* impl = nullptr;

 #if defined(__APPLE__) && defined(__arm64__) // FIXME: See if metal works for intel macs
     if (buildVariant == "auto") {
         size_t total_mem = getSystemTotalRAMInBytes();
         impl = implementation(f, "metal");
         if(impl) {
-            LLModel* metalimpl = impl->construct();
+            LLModel* metalimpl = impl->m_construct();
+            metalimpl->m_implementation = impl;
             size_t req_mem = metalimpl->requiredMem(modelPath);
             float req_to_total = (float) req_mem / (float) total_mem;
             // on a 16GB M2 Mac a 13B q4_0 (0.52) works for me but a 13B q4_K_M (0.55) does not
@@ -160,14 +162,17 @@ LLModel *LLModel::construct(const std::string &modelPath, std::string buildVaria
         if (!impl) return nullptr;
     }
     f.close();

     // Construct and return llmodel implementation
-    return impl->construct();
+    auto fres = impl->m_construct();
+    fres->m_implementation = impl;
+    return fres;
 }

-void LLModel::setImplementationsSearchPath(const std::string& path) {
+void LLModel::Implementation::setImplementationsSearchPath(const std::string& path) {
     s_implementations_search_path = path;
 }

-const std::string& LLModel::implementationsSearchPath() {
+const std::string& LLModel::Implementation::implementationsSearchPath() {
     return s_implementations_search_path;
 }
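(Not part of the diff: the Implementation constructor above resolves four symbols from every backend library through Dlhandle. The sketch below shows the shape of the extern "C" surface a backend such as bert.cpp is expected to export; the return values and the magic check are assumptions, since the real definitions live in the suppressed bert.cpp.)

    // Illustrative plugin tail, e.g. at the bottom of a backend .cpp built as bert-<variant>
    #define BERT_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
    #include "bert_impl.h"
    #include <cstdint>
    #include <fstream>

    extern "C" {

    // Presence of this symbol is what Implementation::isImplementation() checks.
    bool is_g4a_backend_model_implementation() { return true; }

    // Stored as m_modelType / m_buildVariant by the Implementation constructor.
    const char *get_model_type() { return "Bert"; }
    const char *get_build_variant() { return "default"; }

    // Used by Implementation::implementation() to pick the backend for a model file.
    bool magic_match(std::ifstream &f) {
        uint32_t magic = 0;
        f.read(reinterpret_cast<char *>(&magic), sizeof(magic));
        return magic == 0x62657274; // assumed: the magic written by convert_bert_hf_to_ggml.py below
    }

    // Wrapped by Implementation::m_construct; ownership passes to the caller.
    LLModel *construct() { return new Bert; }

    }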
@@ -12,32 +12,34 @@
 #define LLMODEL_MAX_PROMPT_BATCH 128

 class Dlhandle;

 class LLModel {
 public:
     using Token = int32_t;

     class Implementation {
-        LLModel *(*construct_)();
-
     public:
         Implementation(Dlhandle&&);
         Implementation(const Implementation&) = delete;
         Implementation(Implementation&&);
         ~Implementation();

+        std::string_view modelType() const { return m_modelType; }
+        std::string_view buildVariant() const { return m_buildVariant; }
+
         static bool isImplementation(const Dlhandle&);
+        static const std::vector<Implementation>& implementationList();
+        static const Implementation *implementation(std::ifstream& f, const std::string& buildVariant);
+        static LLModel *construct(const std::string &modelPath, std::string buildVariant = "auto");
+        static void setImplementationsSearchPath(const std::string& path);
+        static const std::string& implementationsSearchPath();

-        std::string_view modelType, buildVariant;
-        bool (*magicMatch)(std::ifstream& f);
-        Dlhandle *dlhandle;
+    private:
+        bool (*m_magicMatch)(std::ifstream& f);
+        LLModel *(*m_construct)();

-        // The only way an implementation should be constructed
-        LLModel *construct() const {
-            auto fres = construct_();
-            fres->m_implementation = this;
-            return fres;
-        }
+    private:
+        std::string_view m_modelType;
+        std::string_view m_buildVariant;
+        Dlhandle *m_dlhandle;
     };

     struct PromptContext {
@@ -59,18 +61,25 @@ public:
     explicit LLModel() {}
     virtual ~LLModel() {}

+    virtual bool supportsEmbedding() const = 0;
+    virtual bool supportsCompletion() const = 0;
     virtual bool loadModel(const std::string &modelPath) = 0;
     virtual bool isModelLoaded() const = 0;
     virtual size_t requiredMem(const std::string &modelPath) = 0;
     virtual size_t stateSize() const { return 0; }
     virtual size_t saveState(uint8_t */*dest*/) const { return 0; }
     virtual size_t restoreState(const uint8_t */*src*/) { return 0; }

+    // This method requires the model to return true from supportsCompletion otherwise it will throw
+    // an error
     virtual void prompt(const std::string &prompt,
                         std::function<bool(int32_t)> promptCallback,
                         std::function<bool(int32_t, const std::string&)> responseCallback,
                         std::function<bool(bool)> recalculateCallback,
                         PromptContext &ctx);

+    virtual std::vector<float> embedding(const std::string &text);
+
     virtual void setThreadCount(int32_t /*n_threads*/) {}
     virtual int32_t threadCount() const { return 1; }

@@ -78,13 +87,6 @@ public:
         return *m_implementation;
     }

-    static const std::vector<Implementation>& implementationList();
-    static const Implementation *implementation(std::ifstream& f, const std::string& buildVariant);
-    static LLModel *construct(const std::string &modelPath, std::string buildVariant = "auto");
-
-    static void setImplementationsSearchPath(const std::string& path);
-    static const std::string& implementationsSearchPath();
-
 protected:
     // These are pure virtual because subclasses need to implement as the default implementation of
     // 'prompt' above calls these functions
@@ -100,5 +102,9 @@ protected:
     void recalculateContext(PromptContext &promptCtx, std::function<bool(bool)> recalculate);

     const Implementation *m_implementation = nullptr;
+
+private:
+    friend class LLMImplementation;
 };

 #endif // LLMODEL_H
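(Not part of the diff: the new supportsCompletion() query gates the existing prompt() path, which is the other half of the reorganized interface. A minimal sketch of that path, under the assumption that PromptContext is default-constructible with usable member defaults; the model path and prompt text are placeholders.)

    // prompt_sketch.cpp: illustrative use of the reorganized header, not part of this changeset
    #include <iostream>
    #include <string>
    #include "llmodel.h"

    int main() {
        LLModel *model = LLModel::Implementation::construct("./model.bin"); // buildVariant defaults to "auto"
        if (!model || !model->loadModel("./model.bin"))
            return 1;

        if (model->supportsCompletion()) {
            LLModel::PromptContext ctx; // assumed: sane defaults for n_ctx, sampling params, etc.
            model->prompt("Hello, world",
                          [](int32_t) { return true; },               // prompt-token callback
                          [](int32_t, const std::string &piece) {     // streaming response callback
                              std::cout << piece;
                              return true;
                          },
                          [](bool) { return true; },                  // recalculate callback
                          ctx);
        }
        delete model;
        return 0;
    }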
@@ -29,7 +29,7 @@ llmodel_model llmodel_model_create2(const char *model_path, const char *build_va
     int error_code = 0;

     try {
-        wrapper->llModel = LLModel::construct(model_path, build_variant);
+        wrapper->llModel = LLModel::Implementation::construct(model_path, build_variant);
     } catch (const std::exception& e) {
         error_code = EINVAL;
         last_error_message = e.what();
@@ -166,6 +166,25 @@ void llmodel_prompt(llmodel_model model, const char *prompt,
     ctx->context_erase = wrapper->promptContext.contextErase;
 }

+float *llmodel_embedding(llmodel_model model, const char *text, size_t *embedding_size)
+{
+    LLModelWrapper *wrapper = reinterpret_cast<LLModelWrapper*>(model);
+    std::vector<float> embeddingVector = wrapper->llModel->embedding(text);
+    float *embedding = (float *)malloc(embeddingVector.size() * sizeof(float));
+    if(embedding == nullptr) {
+        *embedding_size = 0;
+        return nullptr;
+    }
+    std::copy(embeddingVector.begin(), embeddingVector.end(), embedding);
+    *embedding_size = embeddingVector.size();
+    return embedding;
+}
+
+void llmodel_free_embedding(float *ptr)
+{
+    free(ptr);
+}
+
 void llmodel_setThreadCount(llmodel_model model, int32_t n_threads)
 {
     LLModelWrapper *wrapper = reinterpret_cast<LLModelWrapper*>(model);
@@ -180,10 +199,10 @@ int32_t llmodel_threadCount(llmodel_model model)

 void llmodel_set_implementation_search_path(const char *path)
 {
-    LLModel::setImplementationsSearchPath(path);
+    LLModel::Implementation::setImplementationsSearchPath(path);
 }

 const char *llmodel_get_implementation_search_path()
 {
-    return LLModel::implementationsSearchPath().c_str();
+    return LLModel::Implementation::implementationsSearchPath().c_str();
 }
@@ -171,6 +171,23 @@ void llmodel_prompt(llmodel_model model, const char *prompt,
                     llmodel_recalculate_callback recalculate_callback,
                     llmodel_prompt_context *ctx);

+/**
+ * Generate an embedding using the model.
+ * @param model A pointer to the llmodel_model instance.
+ * @param text A string representing the text to generate an embedding for.
+ * @param embedding_size A pointer to a size_t type that will be set by the call indicating the length
+ * of the returned floating point array.
+ * @return A pointer to an array of floating point values passed to the calling method which then will
+ * be responsible for lifetime of this memory.
+ */
+float *llmodel_embedding(llmodel_model model, const char *text, size_t *embedding_size);
+
+/**
+ * Frees the memory allocated by the llmodel_embedding function.
+ * @param ptr A pointer to the embedding as returned from llmodel_embedding.
+ */
+void llmodel_free_embedding(float *ptr);
+
 /**
  * Set the number of threads to be used by the model.
  * @param model A pointer to the llmodel_model instance.
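(Not part of the diff: how a C-API consumer is expected to pair the two new functions. Creation and loading of the model handle are omitted because they are unchanged by this patch; the helper name and text are illustrative.)

    // c_api_sketch.cpp: illustrative, not part of this changeset
    #include <cstddef>
    #include <cstdio>
    #include "llmodel_c.h"

    // Prints the embedding produced for `text` by an already created and loaded model handle.
    void print_embedding(llmodel_model model, const char *text) {
        size_t embedding_size = 0;
        float *embedding = llmodel_embedding(model, text, &embedding_size);
        if (embedding == nullptr || embedding_size == 0) {
            std::fprintf(stderr, "embedding failed\n");
            return;
        }
        std::printf("dimension: %zu, first component: %f\n", embedding_size, (double)embedding[0]);
        // The caller owns the returned buffer and must hand it back to the library.
        llmodel_free_embedding(embedding);
    }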
@@ -33,7 +33,14 @@ void LLModel::prompt(const std::string &prompt,
                      PromptContext &promptCtx)
 {
     if (!isModelLoaded()) {
-        std::cerr << implementation().modelType << " ERROR: prompt won't work with an unloaded model!\n";
+        std::cerr << implementation().modelType() << " ERROR: prompt won't work with an unloaded model!\n";
         return;
     }

+    if (!supportsCompletion()) {
+        std::string errorMessage = "ERROR: this model does not support text completion or chat!\n";
+        responseCallback(-1, errorMessage);
+        std::cerr << implementation().modelType() << errorMessage;
+        return;
+    }
+
@@ -45,8 +52,8 @@ void LLModel::prompt(const std::string &prompt,

     if ((int) embd_inp.size() > promptCtx.n_ctx - 4) {
         responseCallback(-1, "ERROR: The prompt size exceeds the context window size and cannot be processed.");
-        std::cerr << implementation().modelType << " ERROR: The prompt is" << embd_inp.size() <<
-            "tokens and the context window is" << promptCtx.n_ctx << "!\n";
+        std::cerr << implementation().modelType() << " ERROR: The prompt is " << embd_inp.size() <<
+            " tokens and the context window is " << promptCtx.n_ctx << "!\n";
         return;
     }

@@ -64,7 +71,7 @@ void LLModel::prompt(const std::string &prompt,
         if (promptCtx.n_past + int32_t(batch.size()) > promptCtx.n_ctx) {
             const int32_t erasePoint = promptCtx.n_ctx * promptCtx.contextErase;
             // Erase the first percentage of context from the tokens...
-            std::cerr << implementation().modelType << ": reached the end of the context window so resizing\n";
+            std::cerr << implementation().modelType() << ": reached the end of the context window so resizing\n";
             promptCtx.tokens.erase(promptCtx.tokens.begin(), promptCtx.tokens.begin() + erasePoint);
             promptCtx.n_past = promptCtx.tokens.size();
             recalculateContext(promptCtx, recalculateCallback);
@@ -72,7 +79,7 @@ void LLModel::prompt(const std::string &prompt,
         }

         if (!evalTokens(promptCtx, batch)) {
-            std::cerr << implementation().modelType << " ERROR: Failed to process prompt\n";
+            std::cerr << implementation().modelType() << " ERROR: Failed to process prompt\n";
             return;
         }

@@ -103,7 +110,7 @@ void LLModel::prompt(const std::string &prompt,
         if (promptCtx.n_past + 1 > promptCtx.n_ctx) {
             const int32_t erasePoint = promptCtx.n_ctx * promptCtx.contextErase;
             // Erase the first percentage of context from the tokens...
-            std::cerr << implementation().modelType << ": reached the end of the context window so resizing\n";
+            std::cerr << implementation().modelType() << ": reached the end of the context window so resizing\n";
             promptCtx.tokens.erase(promptCtx.tokens.begin(), promptCtx.tokens.begin() + erasePoint);
             promptCtx.n_past = promptCtx.tokens.size();
             recalculateContext(promptCtx, recalculateCallback);
@@ -111,7 +118,7 @@ void LLModel::prompt(const std::string &prompt,
         }

         if (!evalTokens(promptCtx, { id })) {
-            std::cerr << implementation().modelType << " ERROR: Failed to predict next token\n";
+            std::cerr << implementation().modelType() << " ERROR: Failed to predict next token\n";
             return;
         }

@@ -158,3 +165,12 @@ void LLModel::prompt(const std::string &prompt,
             cachedTokens.clear();
         }
     }
 }
+
+std::vector<float> LLModel::embedding(const std::string &/*text*/)
+{
+    if (!supportsCompletion()) {
+        std::string errorMessage = "ERROR: this model does not support generating embeddings!\n";
+        std::cerr << implementation().modelType() << errorMessage;
+    }
+    return std::vector<float>();
+}
@@ -15,6 +15,8 @@ public:
     MPT();
     ~MPT();

+    bool supportsEmbedding() const override { return false; }
+    bool supportsCompletion() const override { return true; }
     bool loadModel(const std::string &modelPath) override;
     bool isModelLoaded() const override;
     size_t requiredMem(const std::string &modelPath) override;

@@ -17,6 +17,8 @@ public:
     Replit();
     ~Replit();

+    bool supportsEmbedding() const override { return false; }
+    bool supportsCompletion() const override { return true; }
     bool loadModel(const std::string &modelPath) override;
     bool isModelLoaded() const override;
     size_t requiredMem(const std::string & modelPath) override;
gpt4all-backend/scripts/convert_bert_hf_to_ggml.py (new file, 102 lines)

@@ -0,0 +1,102 @@
import sys
import struct
import json
import torch
import numpy as np

from transformers import AutoModel, AutoTokenizer

if len(sys.argv) < 3:
    print("Usage: convert-h5-to-ggml.py dir-model [use-f32]\n")
    print(" ftype == 0 -> float32")
    print(" ftype == 1 -> float16")
    sys.exit(1)

# output in the same directory as the model
dir_model = sys.argv[1]
fname_out = sys.argv[1] + "/ggml-model.bin"

with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f:
    encoder = json.load(f)

with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
    hparams = json.load(f)

with open(dir_model + "/vocab.txt", "r", encoding="utf-8") as f:
    vocab = f.readlines()

# possible data types
# ftype == 0 -> float32
# ftype == 1 -> float16
#
# map from ftype to string
ftype_str = ["f32", "f16"]

ftype = 1
if len(sys.argv) > 2:
    ftype = int(sys.argv[2])
    if ftype < 0 or ftype > 1:
        print("Invalid ftype: " + str(ftype))
        sys.exit(1)
    fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin"


tokenizer = AutoTokenizer.from_pretrained(dir_model)
model = AutoModel.from_pretrained(dir_model, low_cpu_mem_usage=True)
print (model)

print(tokenizer.encode('I believe the meaning of life is'))

list_vars = model.state_dict()
for name in list_vars.keys():
    print(name, list_vars[name].shape, list_vars[name].dtype)

fout = open(fname_out, "wb")

print(hparams)

fout.write(struct.pack("i", 0x62657274)) # magic: ggml in hex
fout.write(struct.pack("i", hparams["vocab_size"]))
fout.write(struct.pack("i", hparams["max_position_embeddings"]))
fout.write(struct.pack("i", hparams["hidden_size"]))
fout.write(struct.pack("i", hparams["intermediate_size"]))
fout.write(struct.pack("i", hparams["num_attention_heads"]))
fout.write(struct.pack("i", hparams["num_hidden_layers"]))
fout.write(struct.pack("i", ftype))

for i in range(hparams["vocab_size"]):
    text = vocab[i][:-1] # strips newline at the end
    #print(f"{i}:{text}")
    data = bytes(text, 'utf-8')
    fout.write(struct.pack("i", len(data)))
    fout.write(data)

for name in list_vars.keys():
    data = list_vars[name].squeeze().numpy()
    if name in ['embeddings.position_ids', 'pooler.dense.weight', 'pooler.dense.bias']:
        continue
    print("Processing variable: " + name + " with shape: ", data.shape)

    n_dims = len(data.shape);

    # ftype == 0 -> float32, ftype == 1 -> float16
    if ftype == 1 and name[-7:] == ".weight" and n_dims == 2:
        print("  Converting to float16")
        data = data.astype(np.float16)
        l_type = 1
    else:
        l_type = 0

    # header
    str = name.encode('utf-8')
    fout.write(struct.pack("iii", n_dims, len(str), l_type))
    for i in range(n_dims):
        fout.write(struct.pack("i", data.shape[n_dims - 1 - i]))
    fout.write(str);

    # data
    data.tofile(fout)

fout.close()

print("Done. Output file: " + fname_out)
print("")
@@ -1,2 +1,2 @@
-from .gpt4all import GPT4All # noqa
+from .gpt4all import GPT4All, embed # noqa
 from .pyllmodel import LLModel # noqa
@@ -15,6 +15,20 @@ from . import pyllmodel
 # TODO: move to config
 DEFAULT_MODEL_DIRECTORY = os.path.join(str(Path.home()), ".cache", "gpt4all").replace("\\", "\\\\")

+def embed(
+    text: str
+) -> list[float]:
+    """
+    Generate an embedding for all GPT4All.
+
+    Args:
+        text: The text document to generate an embedding for.
+
+    Returns:
+        An embedding of your document of text.
+    """
+    model = GPT4All(model_name='ggml-all-MiniLM-L6-v2-f16.bin')
+    return model.model.generate_embedding(text)
+
 class GPT4All:
     """
@@ -112,6 +112,19 @@ llmodel.llmodel_prompt.argtypes = [

 llmodel.llmodel_prompt.restype = None

+llmodel.llmodel_embedding.argtypes = [
+    ctypes.c_void_p,
+    ctypes.c_char_p,
+    ctypes.POINTER(ctypes.c_size_t),
+]
+
+llmodel.llmodel_embedding.restype = ctypes.POINTER(ctypes.c_float)
+
+llmodel.llmodel_free_embedding.argtypes = [
+    ctypes.POINTER(ctypes.c_float)
+]
+llmodel.llmodel_free_embedding.restype = None
+
 llmodel.llmodel_setThreadCount.argtypes = [ctypes.c_void_p, ctypes.c_int32]
 llmodel.llmodel_setThreadCount.restype = None

@@ -233,6 +246,17 @@ class LLModel:
         self.context.repeat_last_n = repeat_last_n
         self.context.context_erase = context_erase

+    def generate_embedding(
+        self,
+        text: str
+    ) -> list[float]:
+        embedding_size = ctypes.c_size_t()
+        c_text = ctypes.c_char_p(text.encode('utf-8'))
+        embedding_ptr = llmodel.llmodel_embedding(self.model, c_text, ctypes.byref(embedding_size))
+        embedding_array = ctypes.cast(embedding_ptr, ctypes.POINTER(ctypes.c_float * embedding_size.value)).contents
+        llmodel.llmodel_free_embedding(embedding_ptr)
+        return list(embedding_array)
+
     def prompt_model(
         self,
         prompt: str,
File diff suppressed because one or more lines are too long
@@ -61,7 +61,7 @@ copy_prebuilt_C_lib(SRC_CLIB_DIRECtORY,

 setup(
     name=package_name,
-    version="1.0.3",
+    version="1.0.4",
     description="Python bindings for GPT4All",
     author="Richard Guo",
     author_email="richard@nomic.ai",
@@ -206,6 +206,8 @@ install(TARGETS replit-mainline-default DESTINATION lib COMPONENT ${COMPONENT_NA
 if(APPLE)
 install(TARGETS replit-mainline-metal DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
 endif()
+install(TARGETS bert-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
+install(TARGETS bert-default DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})

 set(CPACK_GENERATOR "IFW")
 set(CPACK_VERBATIM_VARIABLES YES)
@@ -46,6 +46,8 @@ public:
     ChatGPT();
     virtual ~ChatGPT();

+    bool supportsEmbedding() const override { return false; }
+    bool supportsCompletion() const override { return true; }
     bool loadModel(const std::string &modelPath) override;
     bool isModelLoaded() const override;
     size_t requiredMem(const std::string &modelPath) override;
@@ -14,6 +14,7 @@
 #define REPLIT_INTERNAL_STATE_VERSION 0
 #define LLAMA_INTERNAL_STATE_VERSION 0
 #define FALCON_INTERNAL_STATE_VERSION 0
+#define BERT_INTERNAL_STATE_VERSION 0

 class LLModelStore {
 public:
@@ -240,11 +241,11 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)

 #if defined(Q_OS_MAC) && defined(__arm__)
         if (m_forceMetal)
-            m_llModelInfo.model = LLModel::construct(filePath.toStdString(), "metal");
+            m_llModelInfo.model = LLMImplementation::construct(filePath.toStdString(), "metal");
         else
-            m_llModelInfo.model = LLModel::construct(filePath.toStdString(), "auto");
+            m_llModelInfo.model = LLMImplementation::construct(filePath.toStdString(), "auto");
 #else
-        m_llModelInfo.model = LLModel::construct(filePath.toStdString(), "auto");
+        m_llModelInfo.model = LLModel::Implementation::construct(filePath.toStdString(), "auto");
 #endif

         if (m_llModelInfo.model) {
@@ -258,12 +259,13 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
             m_llModelInfo = LLModelInfo();
             emit modelLoadingError(QString("Could not load model due to invalid model file for %1").arg(modelInfo.filename()));
         } else {
-            switch (m_llModelInfo.model->implementation().modelType[0]) {
+            switch (m_llModelInfo.model->implementation().modelType()[0]) {
             case 'L': m_llModelType = LLModelType::LLAMA_; break;
             case 'G': m_llModelType = LLModelType::GPTJ_; break;
             case 'M': m_llModelType = LLModelType::MPT_; break;
             case 'R': m_llModelType = LLModelType::REPLIT_; break;
             case 'F': m_llModelType = LLModelType::FALCON_; break;
+            case 'B': m_llModelType = LLModelType::BERT_; break;
             default:
                 {
                     delete std::exchange(m_llModelInfo.model, nullptr);
@@ -628,8 +630,8 @@ bool ChatLLM::handleNameRecalculate(bool isRecalc)
     qDebug() << "name recalc" << m_llmThread.objectName() << isRecalc;
 #endif
     Q_UNUSED(isRecalc);
-    Q_UNREACHABLE();
-    return false;
+    qt_noop();
+    return true;
 }

 bool ChatLLM::handleSystemPrompt(int32_t token)
@@ -669,7 +671,8 @@ bool ChatLLM::serialize(QDataStream &stream, int version)
         case MPT_: stream << MPT_INTERNAL_STATE_VERSION; break;
         case GPTJ_: stream << GPTJ_INTERNAL_STATE_VERSION; break;
         case LLAMA_: stream << LLAMA_INTERNAL_STATE_VERSION; break;
-        case FALCON_: stream << LLAMA_INTERNAL_STATE_VERSION; break;
+        case FALCON_: stream << FALCON_INTERNAL_STATE_VERSION; break;
+        case BERT_: stream << BERT_INTERNAL_STATE_VERSION; break;
         default: Q_UNREACHABLE();
     }
 }

@@ -16,6 +16,7 @@ enum LLModelType {
     CHATGPT_,
     REPLIT_,
     FALCON_,
+    BERT_
 };

 struct LLModelInfo {
@@ -7,16 +7,19 @@ file(GLOB MYMPTLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NA
 file(GLOB MYLLAMALIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libllama*)
 file(GLOB MYREPLITLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libreplit*)
 file(GLOB MYFALCONLLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libfalcon*)
+file(GLOB MYBERTLLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libbert*)
 file(GLOB MYLLMODELLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libllmodel.*)
 file(COPY ${MYGPTJLIBS}
     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks)
 file(COPY ${MYMPTLIBS}
     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks)
 file(COPY ${MYLLAMALIBS}
     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks)
 file(COPY ${MYREPLITLIBS}
     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks)
+file(COPY ${MYFALCONLLIBS}
+    DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks)
-file(COPY ${MYLLAMALIBS}
+file(COPY ${MYBERTLLIBS}
     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks)
 file(COPY ${MYLLMODELLIBS}
     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks)
@@ -34,7 +34,7 @@ LLM::LLM()
     if (directoryExists(frameworksDir))
         llmodelSearchPaths += ";" + frameworksDir;
 #endif
-    LLModel::setImplementationsSearchPath(llmodelSearchPaths.toStdString());
+    LLModel::Implementation::setImplementationsSearchPath(llmodelSearchPaths.toStdString());

 #if defined(__x86_64__)
 #ifndef _MSC_VER
@@ -258,5 +258,20 @@
     "systemPrompt": " ",
     "description": "<strong>Trained on subset of the Stack</strong><br><ul><li>Code completion based<li>Licensed for commercial use</ul>",
     "url": "https://huggingface.co/nomic-ai/ggml-replit-code-v1-3b/resolve/main/ggml-replit-code-v1-3b.bin"
   },
+  {
+    "order": "t",
+    "md5sum": "031bb5d5722c08d13e3e8eaf55c37391",
+    "disableGUI": "true",
+    "name": "Bert",
+    "filename": "ggml-all-MiniLM-L6-v2-f16.bin",
+    "filesize": "45521167",
+    "requires": "2.4.14",
+    "ramrequired": "1",
+    "parameters": "1 million",
+    "quant": "f16",
+    "type": "Bert",
+    "systemPrompt": " ",
+    "description": "<strong>Sbert</strong><br><ul><li>For embeddings"
+  }
 ]