Compare commits

96cee4f9ace334e237b385a73c624d8ca063db00..c72c73a94fcf653ecf0c8969a88068dd0e0d416f

No commits in common. "96cee4f9ace334e237b385a73c624d8ca063db00" and "c72c73a94fcf653ecf0c8969a88068dd0e0d416f" have entirely different histories.

6 changed files with 17 additions and 15 deletions

View File

@@ -53,8 +53,6 @@ public:
     }
 };
 #else
-#include <algorithm>
-#include <filesystem>
 #include <string>
 #include <exception>
 #include <stdexcept>
@@ -77,9 +75,7 @@ public:
     Dlhandle() : chandle(nullptr) {}
     Dlhandle(const std::string& fpath) {
-        std::string afpath = std::filesystem::absolute(fpath).string();
-        std::replace(afpath.begin(), afpath.end(), '/', '\\');
-        chandle = LoadLibraryExA(afpath.c_str(), NULL, LOAD_LIBRARY_SEARCH_DEFAULT_DIRS | LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR);
+        chandle = LoadLibraryExA(fpath.c_str(), NULL, LOAD_LIBRARY_SEARCH_DEFAULT_DIRS | LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR);
         if (!chandle) {
             throw Exception("dlopen(\""+fpath+"\"): Error");
         }
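
Note on the removed normalization: the left-hand commit resolves the library path to an absolute, backslash-separated form before calling LoadLibraryExA. This matters because the Win32 documentation states that LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR is only honored when the module name is a fully qualified path; with a relative fpath the flag has no effect. A minimal sketch of that normalization, factored into a hypothetical helper (toAbsoluteDllPath is not a name from this diff):

#include <algorithm>
#include <filesystem>
#include <string>

// Resolve a possibly-relative path against the current working directory
// and convert separators to backslashes, so LoadLibraryExA receives the
// fully qualified path that LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR requires.
std::string toAbsoluteDllPath(const std::string &fpath) {
    std::string afpath = std::filesystem::absolute(fpath).string();
    std::replace(afpath.begin(), afpath.end(), '/', '\\');
    return afpath;
}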

View File

@@ -298,8 +298,6 @@ LLModel::Token LLamaModel::sampleToken(PromptContext &promptCtx) const
 bool LLamaModel::evalTokens(PromptContext &ctx, const std::vector<int32_t> &tokens) const
 {
-    llama_kv_cache_seq_rm(d_ptr->ctx, 0, ctx.n_past, -1);
     llama_batch batch = llama_batch_init(tokens.size(), 0, 1);
     batch.n_tokens = tokens.size();
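
Note on the removed call: in the llama.cpp API, llama_kv_cache_seq_rm(ctx, seq_id, p0, p1) deletes cached key/value entries for the given sequence in the position range [p0, p1), with a negative p1 meaning "to the end of the sequence". The left-hand commit therefore truncates sequence 0's cache to ctx.n_past before evaluating new tokens, so entries left over from a longer previous decode cannot be mistaken for live context. An annotated restatement of the call:

// Drop every KV-cache entry for sequence 0 at positions >= ctx.n_past.
// p1 = -1 means "to the end of the sequence", truncating the cache to
// exactly the tokens the PromptContext still considers valid before the
// next llama_decode().
llama_kv_cache_seq_rm(d_ptr->ctx, /*seq_id=*/0, /*p0=*/ctx.n_past, /*p1=*/-1);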

View File

@@ -10,9 +10,14 @@ Chat::Chat(QObject *parent)
     , m_id(Network::globalInstance()->generateUniqueId())
     , m_name(tr("New Chat"))
     , m_chatModel(new ChatModel(this))
+    , m_responseInProgress(false)
     , m_responseState(Chat::ResponseStopped)
     , m_creationDate(QDateTime::currentSecsSinceEpoch())
     , m_llmodel(new ChatLLM(this))
+    , m_isServer(false)
+    , m_shouldDeleteLater(false)
+    , m_isModelLoaded(false)
+    , m_shouldLoadModelWhenInstalled(false)
     , m_collectionModel(new LocalDocsCollectionsModel(this))
 {
     connectLLM();
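
Several entries in this initializer list pass `this` as a constructor argument (m_chatModel, m_llmodel, m_collectionModel). These rely on Qt's parent-child ownership: a QObject constructed with a parent is deleted by that parent's destructor, so no explicit delete is needed. A minimal sketch of the pattern, with hypothetical class names:

#include <QObject>

class Child : public QObject {
public:
    explicit Child(QObject *parent = nullptr) : QObject(parent) {}
};

class Owner : public QObject {
public:
    // Parenting the child to `this` hands ownership to Owner; QObject's
    // destructor deletes all children, so Owner needs no manual cleanup.
    Owner() : m_child(new Child(this)) {}
private:
    Child *m_child;
};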

View File

@@ -155,15 +155,15 @@ private:
     QString m_response;
     QList<QString> m_collections;
     ChatModel *m_chatModel;
-    bool m_responseInProgress = false;
+    bool m_responseInProgress;
     ResponseState m_responseState;
     qint64 m_creationDate;
     ChatLLM *m_llmodel;
     QList<ResultInfo> m_databaseResults;
-    bool m_isServer = false;
-    bool m_shouldDeleteLater = false;
-    bool m_isModelLoaded = false;
-    bool m_shouldLoadModelWhenInstalled = false;
+    bool m_isServer;
+    bool m_shouldDeleteLater;
+    bool m_isModelLoaded;
+    bool m_shouldLoadModelWhenInstalled;
     LocalDocsCollectionsModel *m_collectionModel;
 };
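
This hunk is the header-side mirror of the chat.cpp change above: the right-hand commit drops the C++11 in-class default member initializers and supplies the same values through the constructor's member-initializer list instead. For these members the two styles are equivalent, and when both are present for the same member the constructor's initializer wins and the in-class default is ignored, which is why keeping the value in exactly one place is preferable. The ChatListModel hunks below follow the same pattern. A minimal sketch contrasting the styles:

// Left-hand style: C++11 in-class default member initializer.
struct WithDefault {
    bool flag = false;              // default lives at the declaration
};

// Right-hand style: default supplied by the constructor's init list.
struct WithCtorInit {
    WithCtorInit() : flag(false) {} // default lives in the constructor
    bool flag;
};

// If both appear, the constructor's initializer takes precedence:
struct Both {
    Both() : flag(true) {}          // flag == true; `= false` is ignored
    bool flag = false;
};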

View File

@@ -16,6 +16,9 @@ ChatListModel *ChatListModel::globalInstance()
 ChatListModel::ChatListModel()
     : QAbstractListModel(nullptr)
+    , m_newChat(nullptr)
+    , m_serverChat(nullptr)
+    , m_currentChat(nullptr)
 {
     addChat();

View File

@@ -239,9 +239,9 @@ private Q_SLOTS:
     }
 private:
-    Chat* m_newChat = nullptr;
-    Chat* m_serverChat = nullptr;
-    Chat* m_currentChat = nullptr;
+    Chat* m_newChat;
+    Chat* m_serverChat;
+    Chat* m_currentChat;
     QList<Chat*> m_chats;
 private: