Compare commits


No commits in common. "main" and "v3.5.3" have entirely different histories.
main ... v3.5.3

102 changed files with 3736 additions and 8965 deletions


@@ -1,17 +1,13 @@
version: 2.1
setup: true
orbs:
path-filtering: circleci/path-filtering@1.3.0
path-filtering: circleci/path-filtering@1.1.0
workflows:
version: 2.1
generate-config:
jobs:
- path-filtering/filter:
filters:
tags:
only:
- /.*/
base-revision: main
config-path: .circleci/continue_config.yml
mapping: |

File diff suppressed because it is too large

.gitmodules

@@ -17,9 +17,9 @@
[submodule "gpt4all-chat/deps/QXlsx"]
path = gpt4all-chat/deps/QXlsx
url = https://github.com/nomic-ai/QXlsx.git
[submodule "gpt4all-chat/deps/minja"]
path = gpt4all-chat/deps/minja
url = https://github.com/nomic-ai/minja.git
[submodule "gpt4all-chat/deps/json"]
path = gpt4all-chat/deps/json
url = https://github.com/nlohmann/json.git
[submodule "gpt4all-chat/deps/Jinja2Cpp"]
path = gpt4all-chat/deps/Jinja2Cpp
url = https://github.com/nomic-ai/jinja2cpp.git
[submodule "gpt4all-chat/deps/rapidjson"]
path = gpt4all-chat/deps/rapidjson
url = https://github.com/nomic-ai/rapidjson.git


@@ -51,6 +51,11 @@ Thiago Ramos ([@thiagojramos](https://github.com/thiagojramos))<br/>
E-mail: thiagojramos@outlook.com<br/>
- pt\_BR translation
Victor Emanuel ([@SINAPSA-IC](https://github.com/SINAPSA-IC))<br/>
E-mail: contact@sinapsaro.ro<br/>
Discord: `@sinapsa_ic_56124_99632`
- ro\_RO translation
不知火 Shiranui ([@supersonictw](https://github.com/supersonictw))<br/>
E-mail: supersonic@livemail.tw<br/>
Discord: `@supersonictw`
@@ -72,6 +77,6 @@ Discord: `@Tim453`
- Flatpak
Jack ([@wuodoo](https://github.com/wuodoo))<br/>
E-mail: 2296103047@qq.com<br/>
E-mail: 2296103047@qq.com><br/>
Discord: `@mikage`
- zh\_CN translation


@@ -1,9 +1,5 @@
<h1 align="center">GPT4All</h1>
<p align="center">
Now with support for DeepSeek R1 Distillations
</p>
<p align="center">
<a href="https://www.nomic.ai/gpt4all">Website</a> &bull; <a href="https://docs.gpt4all.io">Documentation</a> &bull; <a href="https://discord.gg/mGZE39AS3e">Discord</a> &bull; <a href="https://www.youtube.com/watch?v=gQcZDXRVJok">YouTube Tutorial</a>
</p>
@@ -27,6 +23,9 @@ https://github.com/nomic-ai/gpt4all/assets/70534565/513a0f15-4964-4109-89e4-4f9a
<p align="center">
GPT4All is made possible by our compute partner <a href="https://www.paperspace.com/">Paperspace</a>.
</p>
<p align="center">
<a href="https://www.phorm.ai/query?projectId=755eecd3-24ad-49cc-abf4-0ab84caacf63"><img src="https://img.shields.io/badge/Phorm-Ask_AI-%23F2777A.svg" alt="phorm.ai"></a>
</p>
## Download Links
@@ -35,11 +34,6 @@ GPT4All is made possible by our compute partner <a href="https://www.paperspace.
<img src="gpt4all-bindings/python/docs/assets/windows.png" style="height: 1em; width: auto" /> Windows Installer
</a> &mdash;
</p>
<p>
&mdash; <a href="https://gpt4all.io/installers/gpt4all-installer-win64-arm.exe">
<img src="gpt4all-bindings/python/docs/assets/windows.png" style="height: 1em; width: auto" /> Windows ARM Installer
</a> &mdash;
</p>
<p>
&mdash; <a href="https://gpt4all.io/installers/gpt4all-installer-darwin.dmg">
<img src="gpt4all-bindings/python/docs/assets/mac.png" style="height: 1em; width: auto" /> macOS Installer
@@ -51,16 +45,10 @@ GPT4All is made possible by our compute partner <a href="https://www.paperspace.
</a> &mdash;
</p>
<p>
The Windows and Linux builds require Intel Core i3 2nd Gen / AMD Bulldozer, or better.
Windows and Linux require Intel Core i3 2nd Gen / AMD Bulldozer, or better. x86-64 only, no ARM.
</p>
<p>
The Windows ARM build supports Qualcomm Snapdragon and Microsoft SQ1/SQ2 processors.
</p>
<p>
The Linux build is x86-64 only (no ARM).
</p>
<p>
The macOS build requires Monterey 12.6 or newer. Best results with Apple Silicon M-series processors.
macOS requires Monterey 12.6 or newer. Best results with Apple Silicon M-series processors.
</p>
See the full [System Requirements](gpt4all-chat/system_requirements.md) for more details.


@@ -11,6 +11,7 @@ function(gpt4all_add_warning_options target)
-Wextra-semi
-Wformat=2
-Wmissing-include-dirs
-Wstrict-overflow=2
-Wsuggest-override
-Wvla
# errors


@@ -69,7 +69,7 @@ if (LLMODEL_CUDA)
cmake_minimum_required(VERSION 3.18) # for CMAKE_CUDA_ARCHITECTURES
# Defaults must be set before enable_language(CUDA).
# Keep this in sync with the arch list in ggml/src/CMakeLists.txt (plus 5.0 for non-F16 branch).
# Keep this in sync with the arch list in ggml/src/CMakeLists.txt.
if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
# 52 == lowest CUDA 12 standard
# 60 == f16 CUDA intrinsics
@@ -78,7 +78,7 @@ if (LLMODEL_CUDA)
if (GGML_CUDA_F16 OR GGML_CUDA_DMMV_F16)
set(CMAKE_CUDA_ARCHITECTURES "60;61;70;75") # needed for f16 CUDA intrinsics
else()
set(CMAKE_CUDA_ARCHITECTURES "50;52;61;70;75") # lowest CUDA 12 standard + lowest for integer intrinsics
set(CMAKE_CUDA_ARCHITECTURES "52;61;70;75") # lowest CUDA 12 standard + lowest for integer intrinsics
#set(CMAKE_CUDA_ARCHITECTURES "OFF") # use this to compile much faster, but only F16 models work
endif()
endif()

@@ -1 +1 @@
Subproject commit 11f734c3b0334dbae4823b4a7467764e447fc6d6
Subproject commit 58a55efc4ae5dd3bc12887d47981faa7136027af


@@ -53,8 +53,6 @@ static const std::vector<const char *> KNOWN_ARCHES {
"gpt2",
// "gptj", -- no inference code
"gptneox",
"granite",
"granitemoe",
"mpt",
"baichuan",
"starcoder",
@@ -82,7 +80,6 @@ static const std::vector<const char *> KNOWN_ARCHES {
"command-r",
// "dbrx", -- 16x12B parameters
"olmo",
"olmoe",
"openelm",
// "arctic", -- 10B+128x3.66B parameters
"deepseek2",


@@ -140,14 +140,9 @@ const std::vector<LLModel::Implementation> &LLModel::Implementation::implementat
std::string path;
// Split the paths string by the delimiter and process each path.
while (std::getline(ss, path, ';')) {
fs::directory_iterator iter;
try {
iter = fs::directory_iterator(std::u8string(path.begin(), path.end()));
} catch (const fs::filesystem_error &) {
continue; // skip nonexistent path
}
std::u8string u8_path(path.begin(), path.end());
// Iterate over all libraries
for (const auto &f : iter) {
for (const auto &f : fs::directory_iterator(u8_path)) {
const fs::path &p = f.path();
if (p.extension() != LIB_FILE_EXT) continue;
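
The hardened version on main (shown above) matters because the fs::directory_iterator constructor throws fs::filesystem_error when a search-path entry does not exist; wrapping construction in try/catch lets the scan skip that entry instead of aborting. A self-contained sketch of the same pattern, assuming a ';'-separated search path as in the code above:

#include <filesystem>
#include <sstream>
#include <string>
#include <vector>

namespace fs = std::filesystem;

// Collect candidate libraries from a ';'-separated search path, skipping
// entries that do not exist instead of letting the iterator constructor throw.
std::vector<fs::path> scanLibraries(const std::string &searchPath, const fs::path &ext)
{
    std::vector<fs::path> found;
    std::stringstream ss(searchPath);
    std::string path;
    while (std::getline(ss, path, ';')) {
        fs::directory_iterator iter;
        try {
            iter = fs::directory_iterator(std::u8string(path.begin(), path.end()));
        } catch (const fs::filesystem_error &) {
            continue; // skip nonexistent path
        }
        for (const auto &f : iter)
            if (f.path().extension() == ext)
                found.push_back(f.path());
    }
    return found;
}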


@@ -8,10 +8,8 @@
| --- | --- | --- |
| **Theme** | Color theme for the application. Options are `Light`, `Dark`, and `LegacyDark` | `Light` |
| **Font Size** | Font size setting for text throughout the application. Options are Small, Medium, and Large | Small |
| **Language and Locale** | The language and locale you wish to use | System Locale |
| **Device** | Device that will run your models. Options are `Auto` (GPT4All chooses), `Metal` (Apple Silicon M1+), `CPU`, and `GPU` | `Auto` |
| **Default Model** | Choose your preferred LLM to load by default on startup| Auto |
| **Suggestion Mode** | Generate suggested follow up questions at the end of responses | When chatting with LocalDocs |
| **Download Path** | Select a destination on your device to save downloaded models | Windows: `C:\Users\{username}\AppData\Local\nomic.ai\GPT4All`<br><br>Mac: `/Users/{username}/Library/Application Support/nomic.ai/GPT4All/`<br><br>Linux: `/home/{username}/.local/share/nomic.ai/GPT4All` |
| **Enable Datalake** | Opt-in to sharing interactions with GPT4All community (**anonymous** and **optional**) | Off |
@@ -20,7 +18,7 @@
| Setting | Description | Default Value |
| --- | --- | --- |
| **CPU Threads** | Number of concurrently running CPU threads (more can speed up responses) | 4 |
| **Enable System Tray** | The application will minimize to the system tray / taskbar when the window is closed | Off |
| **Save Chat Context** | Save chat context to disk to pick up exactly where a model left off. | Off |
| **Enable Local Server** | Allow any application on your device to use GPT4All via an OpenAI-compatible GPT4All API | Off |
| **API Server Port** | Local HTTP port for the local API server | 4891 |
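
For context on the two server rows above: with Enable Local Server turned on, GPT4All serves an OpenAI-compatible API on the configured port, so any HTTP client can talk to it. A minimal Qt sketch, assuming the default port 4891 and an installed model named "Llama 3 8B Instruct" (both assumptions; adjust to your setup):

#include <QCoreApplication>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QNetworkAccessManager>
#include <QNetworkReply>
#include <QNetworkRequest>
#include <QTextStream>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);

    // OpenAI-style chat completion request against the local server.
    QNetworkRequest req(QUrl("http://localhost:4891/v1/chat/completions"));
    req.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");

    const QJsonObject body {
        { "model", "Llama 3 8B Instruct" },  // assumed to be installed locally
        { "messages", QJsonArray { QJsonObject {
            { "role", "user" }, { "content", "Hello!" } } } },
    };

    QNetworkAccessManager mgr;
    QNetworkReply *reply = mgr.post(req, QJsonDocument(body).toJson());
    QObject::connect(reply, &QNetworkReply::finished, [&] {
        QTextStream(stdout) << reply->readAll() << '\n';  // raw JSON response
        app.quit();
    });
    return app.exec();
}
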
@@ -31,11 +29,8 @@
| Setting | Description | Default Value |
| --- | --- | --- |
| **Name** | Unique name of this model / character| set by model uploader |
| **Model File** | Filename (.gguf) of the model | set by model uploader |
| **System Message** | General instructions for the chats this model will be used for | set by model uploader |
| **Chat Template** | Format of user <-> assistant interactions for the chats this model will be used for | set by model uploader |
| **Chat Name Prompt** | Prompt used to automatically generate chat names | Describe the above conversation in seven words or less. |
| **Suggested FollowUp Prompt** | Prompt used to automatically generate follow up questions after a chat response | Suggest three very short factual follow-up questions that have not been answered yet or cannot be found inspired by the previous conversation and excerpts. |
| **System Prompt** | General instructions for the chats this model will be used for | set by model uploader |
| **Prompt Template** | Format of user <-> assistant interactions for the chats this model will be used for | set by model uploader |
### Clone


@@ -4,90 +4,6 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
## [3.10.0] - 2025-02-24
### Added
- Whitelist Granite (non-MoE) model architecture (by [@ThiloteE](https://github.com/ThiloteE) in [#3487](https://github.com/nomic-ai/gpt4all/pull/3487))
- Add support for CUDA compute 5.0 GPUs such as the GTX 750 ([#3499](https://github.com/nomic-ai/gpt4all/pull/3499))
- Add a Remote Providers tab to the Add Model page ([#3506](https://github.com/nomic-ai/gpt4all/pull/3506))
### Changed
- Substitute prettier default templates for OLMoE 7B 0924/0125 and Granite 3.1 3B/8B (by [@ThiloteE](https://github.com/ThiloteE) in [#3471](https://github.com/nomic-ai/gpt4all/pull/3471))
- Build with LLVM Clang 19 on macOS and Ubuntu ([#3500](https://github.com/nomic-ai/gpt4all/pull/3500))
### Fixed
- Fix several potential crashes ([#3465](https://github.com/nomic-ai/gpt4all/pull/3465))
- Fix visual spacing issues with deepseek models ([#3470](https://github.com/nomic-ai/gpt4all/pull/3470))
- Add missing strings to Italian translation (by [@Harvester62](https://github.com/Harvester62) in [#3496](https://github.com/nomic-ai/gpt4all/pull/3496))
- Update Simplified Chinese translation (by [@Junior2Ran](https://github.com/Junior2Ran) in [#3467](https://github.com/nomic-ai/gpt4all/pull/3467))
## [3.9.0] - 2025-02-04
### Added
- Whitelist OLMoE and Granite MoE model architectures (no Vulkan) (by [@ThiloteE](https://github.com/ThiloteE) in [#3449](https://github.com/nomic-ai/gpt4all/pull/3449))
### Fixed
- Fix "index N is not a prompt" when using LocalDocs with reasoning ([#3451](https://github.com/nomic-ai/gpt4all/pull/3451))
- Work around rendering artifacts on Snapdragon SoCs with Windows ([#3450](https://github.com/nomic-ai/gpt4all/pull/3450))
- Prevent DeepSeek-R1 reasoning from appearing in chat names and follow-up questions ([#3458](https://github.com/nomic-ai/gpt4all/pull/3458))
- Fix LocalDocs crash on Windows ARM when reading PDFs ([#3460](https://github.com/nomic-ai/gpt4all/pull/3460))
- Fix UI freeze when chat template is `{#` ([#3446](https://github.com/nomic-ai/gpt4all/pull/3446))
## [3.8.0] - 2025-01-30
### Added
- Support DeepSeek-R1 Qwen models ([#3431](https://github.com/nomic-ai/gpt4all/pull/3431))
- Support for think tags in the GUI ([#3440](https://github.com/nomic-ai/gpt4all/pull/3440))
- Support specifying SHA256 hash in models3.json instead of MD5 ([#3437](https://github.com/nomic-ai/gpt4all/pull/3437))
### Changed
- Use minja instead of Jinja2Cpp for significantly improved template compatibility ([#3433](https://github.com/nomic-ai/gpt4all/pull/3433))
### Fixed
- Fix regression while using localdocs with server API ([#3410](https://github.com/nomic-ai/gpt4all/pull/3410))
- Don't show system messages in server chat view ([#3411](https://github.com/nomic-ai/gpt4all/pull/3411))
- Fix `codesign --verify` failure on macOS ([#3413](https://github.com/nomic-ai/gpt4all/pull/3413))
- Code Interpreter: Fix console.log not accepting a single string after v3.7.0 ([#3426](https://github.com/nomic-ai/gpt4all/pull/3426))
- Fix Phi 3.1 Mini 128K Instruct template (by [@ThiloteE](https://github.com/ThiloteE) in [#3412](https://github.com/nomic-ai/gpt4all/pull/3412))
- Don't block the gui thread for reasoning ([#3435](https://github.com/nomic-ai/gpt4all/pull/3435))
- Fix corruption of unicode in output of reasoning models ([#3443](https://github.com/nomic-ai/gpt4all/pull/3443))
## [3.7.0] - 2025-01-21
### Added
- Add support for the Windows ARM64 target platform (CPU-only) ([#3385](https://github.com/nomic-ai/gpt4all/pull/3385))
### Changed
- Update from Qt 6.5.1 to 6.8.1 ([#3386](https://github.com/nomic-ai/gpt4all/pull/3386))
### Fixed
- Fix the timeout error in code interpreter ([#3369](https://github.com/nomic-ai/gpt4all/pull/3369))
- Fix code interpreter console.log not accepting multiple arguments ([#3371](https://github.com/nomic-ai/gpt4all/pull/3371))
- Remove 'X is defined' checks from templates for better compatibility ([#3372](https://github.com/nomic-ai/gpt4all/pull/3372))
- Jinja2Cpp: Add 'if' requirement for 'else' parsing to fix crash ([#3373](https://github.com/nomic-ai/gpt4all/pull/3373))
- Save chats on quit, even if the window isn't closed first ([#3387](https://github.com/nomic-ai/gpt4all/pull/3387))
- Add chat template replacements for five new models and fix EM German Mistral ([#3393](https://github.com/nomic-ai/gpt4all/pull/3393))
- Fix crash when entering `{{ a["foo"(` as chat template ([#3394](https://github.com/nomic-ai/gpt4all/pull/3394))
- Sign the maintenance tool on macOS to prevent crash on Sequoia ([#3391](https://github.com/nomic-ai/gpt4all/pull/3391))
- Jinja2Cpp: Fix operator precedence in 'not X is defined' ([#3402](https://github.com/nomic-ai/gpt4all/pull/3402))
## [3.6.1] - 2024-12-20
### Fixed
- Fix the stop generation button no longer working in v3.6.0 ([#3336](https://github.com/nomic-ai/gpt4all/pull/3336))
- Fix the copy entire conversation button no longer working in v3.6.0 ([#3336](https://github.com/nomic-ai/gpt4all/pull/3336))
## [3.6.0] - 2024-12-19
### Added
- Automatically substitute chat templates that are not compatible with Jinja2Cpp in GGUFs ([#3327](https://github.com/nomic-ai/gpt4all/pull/3327))
- Built-in javascript code interpreter tool plus model ([#3173](https://github.com/nomic-ai/gpt4all/pull/3173))
### Fixed
- Fix remote model template to allow for XML in messages ([#3318](https://github.com/nomic-ai/gpt4all/pull/3318))
- Fix Jinja2Cpp bug that broke system message detection in chat templates ([#3325](https://github.com/nomic-ai/gpt4all/pull/3325))
- Fix LocalDocs sources displaying in unconsolidated form after v3.5.0 ([#3328](https://github.com/nomic-ai/gpt4all/pull/3328))
## [3.5.3] - 2024-12-16
### Fixed
@@ -312,12 +228,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
- Fix several Vulkan resource management issues ([#2694](https://github.com/nomic-ai/gpt4all/pull/2694))
- Fix crash/hang when some models stop generating, by showing special tokens ([#2701](https://github.com/nomic-ai/gpt4all/pull/2701))
[3.10.0]: https://github.com/nomic-ai/gpt4all/compare/v3.9.0...v3.10.0
[3.9.0]: https://github.com/nomic-ai/gpt4all/compare/v3.8.0...v3.9.0
[3.8.0]: https://github.com/nomic-ai/gpt4all/compare/v3.7.0...v3.8.0
[3.7.0]: https://github.com/nomic-ai/gpt4all/compare/v3.6.1...v3.7.0
[3.6.1]: https://github.com/nomic-ai/gpt4all/compare/v3.6.0...v3.6.1
[3.6.0]: https://github.com/nomic-ai/gpt4all/compare/v3.5.3...v3.6.0
[3.5.3]: https://github.com/nomic-ai/gpt4all/compare/v3.5.2...v3.5.3
[3.5.2]: https://github.com/nomic-ai/gpt4all/compare/v3.5.1...v3.5.2
[3.5.1]: https://github.com/nomic-ai/gpt4all/compare/v3.5.0...v3.5.1


@@ -3,17 +3,13 @@ cmake_minimum_required(VERSION 3.25) # for try_compile SOURCE_FROM_VAR
include(../common/common.cmake)
set(APP_VERSION_MAJOR 3)
set(APP_VERSION_MINOR 10)
set(APP_VERSION_PATCH 1)
set(APP_VERSION_MINOR 5)
set(APP_VERSION_PATCH 3)
set(APP_VERSION_BASE "${APP_VERSION_MAJOR}.${APP_VERSION_MINOR}.${APP_VERSION_PATCH}")
set(APP_VERSION "${APP_VERSION_BASE}-dev0")
set(APP_VERSION "${APP_VERSION_BASE}")
project(gpt4all VERSION ${APP_VERSION_BASE} LANGUAGES CXX C)
if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install CACHE PATH "..." FORCE)
endif()
if(APPLE)
option(BUILD_UNIVERSAL "Build a Universal binary on macOS" OFF)
if(BUILD_UNIVERSAL)
@@ -32,28 +28,13 @@ option(GPT4ALL_TEST "Build the tests" ${Python3_FOUND})
option(GPT4ALL_LOCALHOST "Build installer for localhost repo" OFF)
option(GPT4ALL_OFFLINE_INSTALLER "Build an offline installer" OFF)
option(GPT4ALL_SIGN_INSTALL "Sign installed binaries and installers (requires signing identities)" OFF)
option(GPT4ALL_GEN_CPACK_CONFIG "Generate the CPack config.xml in the package step and nothing else." OFF)
set(GPT4ALL_USE_QTPDF "AUTO" CACHE STRING "Whether to Use QtPDF for LocalDocs. If OFF or not available on this platform, PDFium is used.")
set_property(CACHE GPT4ALL_USE_QTPDF PROPERTY STRINGS AUTO ON OFF)
set(GPT4ALL_FORCE_D3D12 "AUTO" CACHE STRING "Whether to use Direct3D 12 as the Qt scene graph backend. Defaults to ON on Windows ARM.")
set_property(CACHE GPT4ALL_FORCE_D3D12 PROPERTY STRINGS AUTO ON OFF)
include(cmake/cpack_config.cmake)
if (GPT4ALL_GEN_CPACK_CONFIG)
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/cpack-steal-config.cmake.in"
"${CMAKE_BINARY_DIR}/cmake/cpack-steal-config.cmake" @ONLY)
set(CPACK_POST_BUILD_SCRIPTS ${CMAKE_BINARY_DIR}/cmake/cpack-steal-config.cmake)
include(CPack)
include(CPackIFW)
return()
endif()
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
set(CMAKE_CXX_STANDARD 23)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
if (MSVC)
# Enable accurate __cplusplus macro
# Enable accurate __cplusplus macro to fix errors in Jinja2Cpp
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/Zc:__cplusplus>)
endif()
@@ -92,19 +73,14 @@ include_directories("${CMAKE_CURRENT_BINARY_DIR}")
set(CMAKE_AUTOMOC ON)
set(CMAKE_AUTORCC ON)
# Generate a header file with the version number
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/cmake/config.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/config.h"
)
set(CMAKE_FIND_PACKAGE_TARGETS_GLOBAL ON)
set(GPT4ALL_QT_COMPONENTS Core HttpServer LinguistTools Quick QuickDialogs2 Sql Svg)
set(GPT4ALL_USING_QTPDF OFF)
if (CMAKE_SYSTEM_NAME MATCHES Windows AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|AARCH64|arm64|ARM64)$")
# QtPDF is not available.
if (GPT4ALL_USE_QTPDF STREQUAL "ON")
message(FATAL_ERROR "QtPDF is not available on Windows ARM64.")
endif()
elseif (GPT4ALL_USE_QTPDF MATCHES "^(ON|AUTO)$")
set(GPT4ALL_USING_QTPDF ON)
list(APPEND GPT4ALL_QT_COMPONENTS Pdf)
endif()
find_package(Qt6 6.8 COMPONENTS ${GPT4ALL_QT_COMPONENTS} REQUIRED)
find_package(Qt6 6.5 COMPONENTS Core HttpServer LinguistTools Pdf Quick QuickDialogs2 Sql Svg REQUIRED)
if (QT_KNOWN_POLICY_QTP0004)
qt_policy(SET QTP0004 NEW) # generate extra qmldir files on Qt 6.8+
@@ -126,24 +102,10 @@ message(STATUS "Qt 6 root directory: ${Qt6_ROOT_DIR}")
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
set(GPT4ALL_CONFIG_FORCE_D3D12 -1)
if (NOT CMAKE_SYSTEM_NAME MATCHES Windows OR Qt6_VERSION VERSION_LESS "6.6")
# Direct3D 12 is not available.
if (GPT4ALL_FORCE_D3D12 STREQUAL "ON")
message(FATAL_ERROR "Cannot use Direct3D 12 on this platform.")
endif()
elseif (GPT4ALL_FORCE_D3D12 MATCHES "^(ON|AUTO)$")
if (GPT4ALL_FORCE_D3D12 STREQUAL "ON" OR CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|AARCH64|arm64|ARM64)$")
set(GPT4ALL_CONFIG_FORCE_D3D12 1)
endif()
if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install CACHE PATH "..." FORCE)
endif()
# Generate a header file for configuration
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/src/config.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/config.h"
)
add_subdirectory(deps)
add_subdirectory(../gpt4all-backend llmodel)
@@ -231,14 +193,12 @@ qt_add_executable(chat
src/chatapi.cpp src/chatapi.h
src/chatlistmodel.cpp src/chatlistmodel.h
src/chatllm.cpp src/chatllm.h
src/chatmodel.h src/chatmodel.cpp
src/chatmodel.h
src/chatviewtextprocessor.cpp src/chatviewtextprocessor.h
src/codeinterpreter.cpp src/codeinterpreter.h
src/database.cpp src/database.h
src/download.cpp src/download.h
src/embllm.cpp src/embllm.h
src/jinja_helpers.cpp src/jinja_helpers.h
src/jinja_replacements.cpp src/jinja_replacements.h
src/llm.cpp src/llm.h
src/localdocs.cpp src/localdocs.h
src/localdocsmodel.cpp src/localdocsmodel.h
@@ -247,9 +207,6 @@ qt_add_executable(chat
src/mysettings.cpp src/mysettings.h
src/network.cpp src/network.h
src/server.cpp src/server.h
src/tool.cpp src/tool.h
src/toolcallparser.cpp src/toolcallparser.h
src/toolmodel.cpp src/toolmodel.h
src/xlsxtomd.cpp src/xlsxtomd.h
${CHAT_EXE_RESOURCES}
${MACOS_SOURCES}
@@ -266,13 +223,10 @@ qt_add_qml_module(chat
qml/AddModelView.qml
qml/AddGPT4AllModelView.qml
qml/AddHFModelView.qml
qml/AddRemoteModelView.qml
qml/ApplicationSettings.qml
qml/ChatDrawer.qml
qml/ChatCollapsibleItem.qml
qml/ChatItemView.qml
qml/ChatMessageButton.qml
qml/ChatTextItem.qml
qml/ChatView.qml
qml/CollectionsDrawer.qml
qml/HomeView.qml
@@ -315,7 +269,6 @@ qt_add_qml_module(chat
qml/MyTextField.qml
qml/MyToolButton.qml
qml/MyWelcomeButton.qml
qml/RemoteModelCard.qml
RESOURCES
icons/antenna_1.svg
icons/antenna_2.svg
@@ -346,7 +299,6 @@ qt_add_qml_module(chat
icons/gpt4all-48.png
icons/gpt4all.svg
icons/gpt4all_transparent.svg
icons/groq.svg
icons/home.svg
icons/image.svg
icons/info.svg
@@ -354,14 +306,12 @@ qt_add_qml_module(chat
icons/left_panel_open.svg
icons/local-docs.svg
icons/models.svg
icons/mistral.svg
icons/network.svg
icons/nomic_logo.svg
icons/notes.svg
icons/paperclip.svg
icons/plus.svg
icons/plus_circle.svg
icons/openai.svg
icons/recycle.svg
icons/regenerate.svg
icons/search.svg
@@ -447,19 +397,9 @@ target_include_directories(chat PRIVATE deps/usearch/include
deps/usearch/fp16/include)
target_link_libraries(chat
PRIVATE Qt6::Core Qt6::HttpServer Qt6::Quick Qt6::Sql Qt6::Svg)
if (GPT4ALL_USING_QTPDF)
target_compile_definitions(chat PRIVATE GPT4ALL_USE_QTPDF)
target_link_libraries(chat PRIVATE Qt6::Pdf)
else()
# Link PDFium
target_link_libraries(chat PRIVATE pdfium)
endif()
PRIVATE Qt6::Core Qt6::HttpServer Qt6::Pdf Qt6::Quick Qt6::Sql Qt6::Svg)
target_link_libraries(chat
PRIVATE llmodel SingleApplication fmt::fmt duckx::duckx QXlsx)
target_include_directories(chat PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/deps/json/include)
target_include_directories(chat PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/deps/json/include/nlohmann)
target_include_directories(chat PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/deps/minja/include)
PRIVATE llmodel SingleApplication fmt::fmt duckx::duckx QXlsx jinja2cpp)
if (APPLE)
target_link_libraries(chat PRIVATE ${COCOA_LIBRARY})
@@ -467,18 +407,14 @@ endif()
# -- install --
if (APPLE)
set(GPT4ALL_LIB_DEST bin/gpt4all.app/Contents/Frameworks)
else()
set(GPT4ALL_LIB_DEST lib)
endif()
set(COMPONENT_NAME_MAIN ${PROJECT_NAME})
install(TARGETS chat DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN})
install(
TARGETS llmodel
LIBRARY DESTINATION ${GPT4ALL_LIB_DEST} COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
RUNTIME DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN} # .dll
LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
RUNTIME DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN} # .dll
)
# We should probably iterate through the list of the cmake for backend, but these need to be installed
@@ -501,8 +437,8 @@ endif()
install(
TARGETS ${MODEL_IMPL_TARGETS}
LIBRARY DESTINATION ${GPT4ALL_LIB_DEST} COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
RUNTIME DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .dll
LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
RUNTIME DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .dll
)
if(APPLE AND GPT4ALL_SIGN_INSTALL)
@@ -531,7 +467,7 @@ if (LLMODEL_CUDA)
TARGETS llamamodel-mainline-cuda
llamamodel-mainline-cuda-avxonly
RUNTIME_DEPENDENCY_SET llama-cuda-deps
LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so
LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
RUNTIME DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .dll
)
if (WIN32)
@@ -545,38 +481,67 @@ if (LLMODEL_CUDA)
endif()
endif()
if (NOT GPT4ALL_USING_QTPDF)
# Install PDFium
if (WIN32)
install(FILES ${PDFium_LIBRARY} DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN}) # .dll
else()
install(FILES ${PDFium_LIBRARY} DESTINATION ${GPT4ALL_LIB_DEST} COMPONENT ${COMPONENT_NAME_MAIN}) # .so/.dylib
endif()
endif()
if (NOT APPLE)
install(FILES "${LOCAL_EMBEDDING_MODEL_PATH}"
DESTINATION resources
COMPONENT ${COMPONENT_NAME_MAIN})
endif()
if (CMAKE_SYSTEM_NAME MATCHES Linux)
set(CPACK_GENERATOR "IFW")
set(CPACK_VERBATIM_VARIABLES YES)
set(CPACK_IFW_VERBOSE ON)
if(${CMAKE_SYSTEM_NAME} MATCHES Linux)
find_program(LINUXDEPLOYQT linuxdeployqt HINTS "$ENV{HOME}/dev/linuxdeployqt/build/tools/linuxdeployqt" "$ENV{HOME}/project/linuxdeployqt/bin")
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/deploy-qt-linux.cmake.in"
"${CMAKE_BINARY_DIR}/cmake/deploy-qt-linux.cmake" @ONLY)
set(CPACK_PRE_BUILD_SCRIPTS ${CMAKE_BINARY_DIR}/cmake/deploy-qt-linux.cmake)
elseif (CMAKE_SYSTEM_NAME MATCHES Windows)
find_program(WINDEPLOYQT windeployqt)
set(CPACK_IFW_ROOT "~/Qt/Tools/QtInstallerFramework/4.6")
set(CPACK_PACKAGE_FILE_NAME "${COMPONENT_NAME_MAIN}-installer-linux")
set(CPACK_IFW_TARGET_DIRECTORY "@HomeDir@/${COMPONENT_NAME_MAIN}")
elseif(${CMAKE_SYSTEM_NAME} MATCHES Windows)
find_program(WINDEPLOYQT windeployqt HINTS ${_qt_bin_dir})
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/deploy-qt-windows.cmake.in"
"${CMAKE_BINARY_DIR}/cmake/deploy-qt-windows.cmake" @ONLY)
set(CPACK_PRE_BUILD_SCRIPTS ${CMAKE_BINARY_DIR}/cmake/deploy-qt-windows.cmake)
elseif (CMAKE_SYSTEM_NAME MATCHES Darwin)
find_program(MACDEPLOYQT macdeployqt)
set(CPACK_IFW_ROOT "C:/Qt/Tools/QtInstallerFramework/4.6")
set(CPACK_IFW_PACKAGE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.ico")
set(CPACK_PACKAGE_FILE_NAME "${COMPONENT_NAME_MAIN}-installer-win64")
set(CPACK_IFW_TARGET_DIRECTORY "@HomeDir@\\${COMPONENT_NAME_MAIN}")
elseif(${CMAKE_SYSTEM_NAME} MATCHES Darwin)
find_program(MACDEPLOYQT macdeployqt HINTS ${_qt_bin_dir})
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/deploy-qt-mac.cmake.in"
"${CMAKE_BINARY_DIR}/cmake/deploy-qt-mac.cmake" @ONLY)
set(CPACK_PRE_BUILD_SCRIPTS ${CMAKE_BINARY_DIR}/cmake/deploy-qt-mac.cmake)
set(CPACK_IFW_ROOT "~/Qt/Tools/QtInstallerFramework/4.6")
set(CPACK_IFW_PACKAGE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns")
set(CPACK_PACKAGE_FILE_NAME "${COMPONENT_NAME_MAIN}-installer-darwin")
set(CPACK_IFW_TARGET_DIRECTORY "@ApplicationsDir@/${COMPONENT_NAME_MAIN}")
set(CPACK_BUNDLE_NAME ${COMPONENT_NAME_MAIN})
set(CPACK_BUNDLE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns")
endif()
set(CPACK_COMPONENTS_ALL gpt4all) # exclude development components
set(CPACK_PACKAGE_INSTALL_DIRECTORY ${COMPONENT_NAME_MAIN})
set(CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_VERSION_MAJOR})
set(CPACK_PACKAGE_VERSION_MINOR ${PROJECT_VERSION_MINOR})
SET(CPACK_PACKAGE_VERSION_PATCH ${PROJECT_VERSION_PATCH})
set(CPACK_PACKAGE_HOMEPAGE_URL "https://www.nomic.ai/gpt4all")
set(CPACK_PACKAGE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png")
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_CURRENT_SOURCE_DIR}/LICENSE)
set(CPACK_RESOURCE_FILE_README ${CMAKE_CURRENT_SOURCE_DIR}/README.md)
set(CPACK_PACKAGE_EXECUTABLES "GPT4All")
set(CPACK_CREATE_DESKTOP_LINKS "GPT4All")
set(CPACK_IFW_PACKAGE_NAME "GPT4All")
set(CPACK_IFW_PACKAGE_TITLE "GPT4All Installer")
set(CPACK_IFW_PACKAGE_PUBLISHER "Nomic, Inc.")
set(CPACK_IFW_PRODUCT_URL "https://www.nomic.ai/gpt4all")
set(CPACK_IFW_PACKAGE_WIZARD_STYLE "Aero")
set(CPACK_IFW_PACKAGE_LOGO "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png")
set(CPACK_IFW_PACKAGE_WINDOW_ICON "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-32.png")
set(CPACK_IFW_PACKAGE_WIZARD_SHOW_PAGE_LIST OFF)
set(CPACK_IFW_PACKAGE_CONTROL_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/cmake/installer_control.qs")
include(InstallRequiredSystemLibraries)
include(CPack)
include(CPackIFW)
@@ -588,35 +553,20 @@ endif()
cpack_ifw_configure_component(${COMPONENT_NAME_MAIN} ESSENTIAL FORCED_INSTALLATION)
cpack_ifw_configure_component(${COMPONENT_NAME_MAIN} VERSION ${APP_VERSION})
cpack_ifw_configure_component(${COMPONENT_NAME_MAIN} LICENSES "MIT LICENSE" ${CPACK_RESOURCE_FILE_LICENSE})
cpack_ifw_configure_component(${COMPONENT_NAME_MAIN} SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/cmake/installer_gpt4all_component.qs")
cpack_ifw_configure_component(${COMPONENT_NAME_MAIN} SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/cmake/installer_component.qs")
cpack_ifw_configure_component(${COMPONENT_NAME_MAIN} REPLACES "gpt4all-chat") #Was used in very earliest prototypes
if (APPLE AND GPT4ALL_SIGN_INSTALL)
if (GPT4ALL_OFFLINE_INSTALLER)
cpack_add_component(maintenancetool HIDDEN)
else()
cpack_add_component(maintenancetool HIDDEN DOWNLOADED)
endif()
cpack_ifw_configure_component(maintenancetool ESSENTIAL FORCED_INSTALLATION)
cpack_ifw_configure_component(maintenancetool VERSION ${APP_VERSION})
cpack_ifw_configure_component(maintenancetool SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/cmake/installer_maintenancetool_component.qs")
endif()
if (GPT4ALL_LOCALHOST)
cpack_ifw_add_repository("GPT4AllRepository" URL "http://localhost/repository")
elseif (GPT4ALL_OFFLINE_INSTALLER)
add_compile_definitions(GPT4ALL_OFFLINE_INSTALLER)
elseif(GPT4ALL_OFFLINE_INSTALLER)
add_compile_definitions(GPT4ALL_OFFLINE_INSTALLER)
else()
if (CMAKE_SYSTEM_NAME MATCHES Linux)
cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/linux/repository")
elseif (CMAKE_SYSTEM_NAME MATCHES Windows)
# To sign the target on Windows we have to create a batch script and use it as a custom target and then use CPACK_IFW_EXTRA_TARGETS to set this extra target
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|AMD64|amd64)$")
cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/windows/repository")
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|AARCH64|arm64|ARM64)$")
cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/windows_arm/repository")
endif()
elseif (CMAKE_SYSTEM_NAME MATCHES Darwin)
cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/mac/repository")
endif()
if(${CMAKE_SYSTEM_NAME} MATCHES Linux)
cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/linux/repository")
elseif(${CMAKE_SYSTEM_NAME} MATCHES Windows)
#To sign the target on Windows we have to create a batch script and use it as a custom target and then use CPACK_IFW_EXTRA_TARGETS to set this extra target
cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/windows/repository")
elseif(${CMAKE_SYSTEM_NAME} MATCHES Darwin)
cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/mac/repository")
endif()
endif()

gpt4all-chat/README.md

@@ -0,0 +1,45 @@
# gpt4all-chat
Cross-platform Qt-based GUI for GPT4All versions with GPT-J as the base
model. NOTE: The model seen in the screenshot is actually a preview of a
new training run for GPT4All based on GPT-J. The GPT4All project is busy
at work getting ready to release this model, including installers for all
three major OSes. In the meantime, you can try this UI out with the
original GPT-J model by following the build instructions below.
![image](https://user-images.githubusercontent.com/50458173/231464085-da9edff6-a593-410e-8f38-7513f75c8aab.png)
## Install
One click installers for macOS, Linux, and Windows at https://www.nomic.ai/gpt4all
## Features
* Cross-platform (Linux, Windows, macOS)
* The UI is made to look and feel like what you've come to expect from a chatty GPT
* Check for updates so you can always stay fresh with the latest models
* Easy to install with precompiled binaries available for all three major desktop platforms
* Multi-model - ability to load more than one model and switch between them
* Multi-chat - a list of current and past chats and the ability to save/delete/export and switch between them
* Supports models that are supported by llama.cpp
* Model downloader in GUI featuring many popular open source models
* Settings dialog to change temp, top_p, min_p, top_k, threads, etc.
* Copy your conversation to clipboard
* RAG via the LocalDocs feature
* Check for updates to get the very latest GUI
## Building and running
* Follow the visual instructions on the [build_and_run](build_and_run.md) page
## Getting the latest
If you've already checked out the source code and/or built the program, make sure that when you do a `git fetch` to get the latest changes, you also run `git submodule update --init --recursive` to update the submodules. (If you ever run into trouble, deinitializing via `git submodule deinit -f .` and then initializing again via `git submodule update --init --recursive` fixes most issues.)
## Contributing
* Pull requests welcome. See the feature wish list for ideas :)
## License
The source code of this chat interface is currently under an MIT license.


@@ -0,0 +1,6 @@
#ifndef CONFIG_H
#define CONFIG_H
#define APP_VERSION "@APP_VERSION@"
#endif // CONFIG_H


@@ -1,2 +0,0 @@
set(OUTPUT_DIR "@CMAKE_BINARY_DIR@")
file(COPY ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/config DESTINATION ${OUTPUT_DIR}/cpack-config)


@@ -1,50 +0,0 @@
set(COMPONENT_NAME_MAIN "gpt4all")
set(CPACK_GENERATOR "IFW")
set(CPACK_VERBATIM_VARIABLES YES)
set(CPACK_IFW_VERBOSE ON)
if (CMAKE_SYSTEM_NAME MATCHES Linux)
set(CPACK_IFW_ROOT "~/Qt/Tools/QtInstallerFramework/4.6")
set(CPACK_PACKAGE_FILE_NAME "${COMPONENT_NAME_MAIN}-installer-linux")
set(CPACK_IFW_TARGET_DIRECTORY "@HomeDir@/${COMPONENT_NAME_MAIN}")
elseif (CMAKE_SYSTEM_NAME MATCHES Windows)
set(CPACK_IFW_ROOT "C:/Qt/Tools/QtInstallerFramework/4.6")
set(CPACK_IFW_PACKAGE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.ico")
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|AMD64|amd64)$")
set(CPACK_PACKAGE_FILE_NAME "${COMPONENT_NAME_MAIN}-installer-win64")
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|AARCH64|arm64|ARM64)$")
set(CPACK_PACKAGE_FILE_NAME "${COMPONENT_NAME_MAIN}-installer-win64-arm")
else()
message(FATAL_ERROR "Unrecognized processor: ${CMAKE_SYSTEM_PROCESSOR}")
endif()
set(CPACK_IFW_TARGET_DIRECTORY "@HomeDir@\\${COMPONENT_NAME_MAIN}")
elseif (CMAKE_SYSTEM_NAME MATCHES Darwin)
set(CPACK_IFW_ROOT "~/Qt/Tools/QtInstallerFramework/4.6")
set(CPACK_IFW_PACKAGE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns")
set(CPACK_PACKAGE_FILE_NAME "${COMPONENT_NAME_MAIN}-installer-darwin")
set(CPACK_IFW_TARGET_DIRECTORY "@ApplicationsDir@/${COMPONENT_NAME_MAIN}")
endif()
set(CPACK_COMPONENTS_ALL ${COMPONENT_NAME_MAIN}) # exclude development components
if (APPLE AND GPT4ALL_SIGN_INSTALL)
list(APPEND CPACK_COMPONENTS_ALL maintenancetool)
endif()
set(CPACK_PACKAGE_INSTALL_DIRECTORY ${COMPONENT_NAME_MAIN})
set(CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_VERSION_MAJOR})
set(CPACK_PACKAGE_VERSION_MINOR ${PROJECT_VERSION_MINOR})
set(CPACK_PACKAGE_VERSION_PATCH ${PROJECT_VERSION_PATCH})
set(CPACK_PACKAGE_HOMEPAGE_URL "https://www.nomic.ai/gpt4all")
set(CPACK_PACKAGE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png")
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_CURRENT_SOURCE_DIR}/LICENSE)
set(CPACK_PACKAGE_EXECUTABLES "GPT4All")
set(CPACK_CREATE_DESKTOP_LINKS "GPT4All")
set(CPACK_IFW_PACKAGE_NAME "GPT4All")
set(CPACK_IFW_PACKAGE_TITLE "GPT4All Installer")
set(CPACK_IFW_PACKAGE_PUBLISHER "Nomic, Inc.")
set(CPACK_IFW_PRODUCT_URL "https://www.nomic.ai/gpt4all")
set(CPACK_IFW_PACKAGE_WIZARD_STYLE "Aero")
set(CPACK_IFW_PACKAGE_LOGO "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png")
set(CPACK_IFW_PACKAGE_WINDOW_ICON "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-32.png")
set(CPACK_IFW_PACKAGE_WIZARD_SHOW_PAGE_LIST OFF)
set(CPACK_IFW_PACKAGE_CONTROL_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/cmake/installer_control.qs")


@@ -1,26 +1,20 @@
set(MACDEPLOYQT "@MACDEPLOYQT@")
set(COMPONENT_NAME_MAIN "@COMPONENT_NAME_MAIN@")
set(CMAKE_CURRENT_SOURCE_DIR "@CMAKE_CURRENT_SOURCE_DIR@")
set(GPT4ALL_SIGN_INSTALL "@GPT4ALL_SIGN_INSTALL@")
set(GPT4ALL_SIGNING_ID "@MAC_SIGNING_IDENTITY@")
set(CPACK_CONFIG_DIR "@CMAKE_BINARY_DIR@")
if (GPT4ALL_SIGN_INSTALL)
if (GPT4ALL_SIGNING_ID)
set(MAC_NOTARIZE -sign-for-notarization=${GPT4ALL_SIGNING_ID})
endif()
execute_process(COMMAND ${MACDEPLOYQT} ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app -qmldir=${CMAKE_CURRENT_SOURCE_DIR} -verbose=2 ${MAC_NOTARIZE})
file(GLOB MYLLAMALIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libllama*)
file(GLOB MYLLMODELLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libllmodel.*)
file(COPY ${MYLLAMALIBS}
DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks)
file(COPY ${MYLLMODELLIBS}
DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks)
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-32.png"
DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data)
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png"
DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data)
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns"
DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data)
if (GPT4ALL_SIGN_INSTALL)
# Create signed MaintenanceTool
set(MT_DATA_DIR ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/maintenancetool/data)
file(MAKE_DIRECTORY ${MT_DATA_DIR})
execute_process(
COMMAND binarycreator --config ${CPACK_CONFIG_DIR}/cpack-config/config/config.xml --create-maintenancetool --sign ${GPT4ALL_SIGNING_ID}
WORKING_DIRECTORY ${MT_DATA_DIR}
)
endif()


@@ -1,19 +0,0 @@
function Component()
{
component.ifwVersion = installer.value("FrameworkVersion");
installer.installationStarted.connect(this, Component.prototype.onInstallationStarted);
}
Component.prototype.onInstallationStarted = function()
{
if (component.updateRequested() || component.installationRequested()) {
if (installer.value("os") == "win") {
component.installerbaseBinaryPath = "@TargetDir@/installerbase.exe";
} else if (installer.value("os") == "x11") {
component.installerbaseBinaryPath = "@TargetDir@/installerbase";
} else if (installer.value("os") == "mac") {
component.installerbaseBinaryPath = "@TargetDir@/MaintenanceTool.app";
}
installer.setInstallerBaseBinary(component.installerbaseBinaryPath);
}
}


@@ -1,6 +1,3 @@
include(FetchContent)
set(BUILD_SHARED_LIBS OFF)
set(FMT_INSTALL OFF)
@@ -15,37 +12,11 @@ add_subdirectory(DuckX)
set(QT_VERSION_MAJOR 6)
add_subdirectory(QXlsx/QXlsx)
if (NOT GPT4ALL_USING_QTPDF)
# If we do not use QtPDF, we need to get PDFium.
set(GPT4ALL_PDFIUM_TAG "chromium/6996")
if (CMAKE_SYSTEM_NAME MATCHES Linux)
FetchContent_Declare(
pdfium
URL "https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-linux-x64.tgz"
URL_HASH "SHA256=68b381b87efed539f2e33ae1e280304c9a42643a878cc296c1d66a93b0cb4335"
)
elseif (CMAKE_SYSTEM_NAME MATCHES Windows)
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|AMD64|amd64)$")
FetchContent_Declare(
pdfium
URL "https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-win-x64.tgz"
URL_HASH "SHA256=83e714c302ceacccf403826d5cb57ea39b77f393d83b8d5781283012774a9378"
)
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|AARCH64|arm64|ARM64)$")
FetchContent_Declare(
pdfium
URL "https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-win-arm64.tgz"
URL_HASH "SHA256=78e77e871453a4915cbf66fb381b951c9932f88a747c6b2b33c9f27ec2371445"
)
endif()
elseif (CMAKE_SYSTEM_NAME MATCHES Darwin)
FetchContent_Declare(
pdfium
URL "https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-mac-univ.tgz"
URL_HASH "SHA256=e7577f3242ff9c1df50025f9615673a43601a201bc51ee4792975f98920793a2"
)
endif()
# forked dependency of Jinja2Cpp
set(RAPIDJSON_BUILD_DOC OFF)
set(RAPIDJSON_BUILD_EXAMPLES OFF)
set(RAPIDJSON_BUILD_TESTS OFF)
set(RAPIDJSON_ENABLE_INSTRUMENTATION_OPT OFF)
add_subdirectory(rapidjson)
FetchContent_MakeAvailable(pdfium)
find_package(PDFium REQUIRED PATHS "${pdfium_SOURCE_DIR}" NO_DEFAULT_PATH)
endif()
add_subdirectory(Jinja2Cpp)

@@ -0,0 +1 @@
Subproject commit bcf2f82ae120f0a71c114ecb64a63ab5fe1ffc79

@@ -1 +0,0 @@
Subproject commit 606b6347edf0758c531abb6c36743e09a4c48a84

@@ -1 +0,0 @@
Subproject commit e97bb2442cd6ab3d5bb5f5a3e8a1f7d6081d613b

@@ -0,0 +1 @@
Subproject commit 9b547ef4bd86210ef084abc2790bd1ddfe66b592


@@ -1,3 +0,0 @@
<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 26.3 26.3"><defs><style>.cls-1{fill:#f05237;}.cls-2{fill:#fff;}</style></defs><g id="Layer_2" data-name="Layer 2"><g id="Content"><circle class="cls-1" cx="13.15" cy="13.15" r="13.15"/><path class="cls-2" d="M13.17,6.88a4.43,4.43,0,0,0,0,8.85h1.45V14.07H13.17a2.77,2.77,0,1,1,2.77-2.76v4.07a2.74,2.74,0,0,1-4.67,2L10.1,18.51a4.37,4.37,0,0,0,3.07,1.29h.06a4.42,4.42,0,0,0,4.36-4.4V11.2a4.43,4.43,0,0,0-4.42-4.32"/></g></g></svg>



@@ -1 +0,0 @@
<svg viewBox="0 0 512 512" xmlns="http://www.w3.org/2000/svg" fill-rule="evenodd" clip-rule="evenodd" stroke-linejoin="round" stroke-miterlimit="2"><path d="M189.08 303.228H94.587l.044-94.446h94.497l-.048 94.446z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M283.528 397.674h-94.493l.044-94.446h94.496l-.047 94.446z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M283.575 303.228H189.08l.046-94.446h94.496l-.047 94.446z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M378.07 303.228h-94.495l.044-94.446h94.498l-.047 94.446zM189.128 208.779H94.633l.044-94.448h94.498l-.047 94.448zM378.115 208.779h-94.494l.045-94.448h94.496l-.047 94.448zM94.587 303.227H.093l.044-96.017h94.496l-.046 96.017z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M94.633 208.779H.138l.046-94.448H94.68l-.047 94.448z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M94.68 115.902H.185L.23 19.885h94.498l-.047 96.017zM472.657 114.331h-94.495l.044-94.446h94.497l-.046 94.446zM94.54 399.244H.046l.044-97.588h94.497l-.047 97.588z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M94.495 492.123H0l.044-94.446H94.54l-.045 94.446zM472.563 303.228H378.07l.044-94.446h94.496l-.047 94.446zM472.61 208.779h-94.495l.044-94.448h94.498l-.047 94.448z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M472.517 397.674h-94.494l.044-94.446h94.497l-.047 94.446z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M472.47 492.121h-94.493l.044-96.017h94.496l-.047 96.017z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M228.375 303.22h-96.061l.046-94.446h96.067l-.052 94.446z" fill="#ff7000" fill-rule="nonzero"/><path d="M322.827 397.666h-94.495l.044-96.018h94.498l-.047 96.018z" fill="#ff4900" fill-rule="nonzero"/><path d="M324.444 303.22h-97.636l.046-94.446h97.638l-.048 94.446z" fill="#ff7000" fill-rule="nonzero"/><path d="M418.938 303.22h-96.064l.045-94.446h96.066l-.047 94.446z" fill="#ff7000" fill-rule="nonzero"/><path d="M228.423 208.77H132.36l.045-94.445h96.066l-.05 94.446zM418.985 208.77H322.92l.044-94.445h96.069l-.048 94.446z" fill="#ffa300" fill-rule="nonzero"/><path d="M133.883 304.79H39.392l.044-96.017h94.496l-.049 96.017z" fill="#ff7000" fill-rule="nonzero"/><path d="M133.929 208.77H39.437l.044-95.445h94.496l-.048 95.445z" fill="#ffa300" fill-rule="nonzero"/><path d="M133.976 114.325H39.484l.044-94.448h94.497l-.05 94.448zM511.954 115.325h-94.493l.044-95.448h94.497l-.048 95.448z" fill="#ffce00" fill-rule="nonzero"/><path d="M133.836 399.667H39.345l.044-96.447h94.496l-.049 96.447z" fill="#ff4900" fill-rule="nonzero"/><path d="M133.79 492.117H39.3l.044-94.448h94.496l-.049 94.448z" fill="#ff0107" fill-rule="nonzero"/><path d="M511.862 303.22h-94.495l.046-94.446h94.496l-.047 94.446z" fill="#ff7000" fill-rule="nonzero"/><path d="M511.907 208.77h-94.493l.044-94.445h94.496l-.047 94.446z" fill="#ffa300" fill-rule="nonzero"/><path d="M511.815 398.666h-94.493l.044-95.447h94.496l-.047 95.447z" fill="#ff4900" fill-rule="nonzero"/><path d="M511.77 492.117h-94.496l.046-94.448h94.496l-.047 94.448z" fill="#ff0107" fill-rule="nonzero"/></svg>



@@ -1,2 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg fill="#000000" width="800px" height="800px" viewBox="0 0 24 24" role="img" xmlns="http://www.w3.org/2000/svg"><title>OpenAI icon</title><path d="M22.2819 9.8211a5.9847 5.9847 0 0 0-.5157-4.9108 6.0462 6.0462 0 0 0-6.5098-2.9A6.0651 6.0651 0 0 0 4.9807 4.1818a5.9847 5.9847 0 0 0-3.9977 2.9 6.0462 6.0462 0 0 0 .7427 7.0966 5.98 5.98 0 0 0 .511 4.9107 6.051 6.051 0 0 0 6.5146 2.9001A5.9847 5.9847 0 0 0 13.2599 24a6.0557 6.0557 0 0 0 5.7718-4.2058 5.9894 5.9894 0 0 0 3.9977-2.9001 6.0557 6.0557 0 0 0-.7475-7.0729zm-9.022 12.6081a4.4755 4.4755 0 0 1-2.8764-1.0408l.1419-.0804 4.7783-2.7582a.7948.7948 0 0 0 .3927-.6813v-6.7369l2.02 1.1686a.071.071 0 0 1 .038.052v5.5826a4.504 4.504 0 0 1-4.4945 4.4944zm-9.6607-4.1254a4.4708 4.4708 0 0 1-.5346-3.0137l.142.0852 4.783 2.7582a.7712.7712 0 0 0 .7806 0l5.8428-3.3685v2.3324a.0804.0804 0 0 1-.0332.0615L9.74 19.9502a4.4992 4.4992 0 0 1-6.1408-1.6464zM2.3408 7.8956a4.485 4.485 0 0 1 2.3655-1.9728V11.6a.7664.7664 0 0 0 .3879.6765l5.8144 3.3543-2.0201 1.1685a.0757.0757 0 0 1-.071 0l-4.8303-2.7865A4.504 4.504 0 0 1 2.3408 7.872zm16.5963 3.8558L13.1038 8.364 15.1192 7.2a.0757.0757 0 0 1 .071 0l4.8303 2.7913a4.4944 4.4944 0 0 1-.6765 8.1042v-5.6772a.79.79 0 0 0-.407-.667zm2.0107-3.0231l-.142-.0852-4.7735-2.7818a.7759.7759 0 0 0-.7854 0L9.409 9.2297V6.8974a.0662.0662 0 0 1 .0284-.0615l4.8303-2.7866a4.4992 4.4992 0 0 1 6.6802 4.66zM8.3065 12.863l-2.02-1.1638a.0804.0804 0 0 1-.038-.0567V6.0742a4.4992 4.4992 0 0 1 7.3757-3.4537l-.142.0805L8.704 5.459a.7948.7948 0 0 0-.3927.6813zm1.0976-2.3654l2.602-1.4998 2.6069 1.4998v2.9994l-2.5974 1.4997-2.6067-1.4997Z"/></svg>



@@ -54,7 +54,7 @@ Window {
systemTrayIcon.shouldClose = true;
window.shouldClose = true;
savingPopup.open();
ChatListModel.saveChatsForQuit();
ChatListModel.saveChats();
}
}
}
@@ -231,8 +231,8 @@ Window {
window.shouldClose = true;
savingPopup.open();
ChatListModel.saveChatsForQuit();
close.accepted = false;
ChatListModel.saveChats();
close.accepted = false
}
Connections {
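
The rename above (saveChats to saveChatsForQuit on main) ties into the 3.7.0 changelog entry "Save chats on quit, even if the window isn't closed first". The generic Qt pattern behind that behavior, not the actual gpt4all wiring, is to hook QCoreApplication::aboutToQuit so state is persisted even when no window-close event fires:

#include <QCoreApplication>
#include <QDebug>
#include <QTimer>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);
    // Persist state on quit even if the window was never closed.
    QObject::connect(&app, &QCoreApplication::aboutToQuit, [] {
        qInfo() << "saving chats before exit";  // stand-in for saveChatsForQuit()
    });
    QTimer::singleShot(0, &app, &QCoreApplication::quit);  // demo: quit immediately
    return app.exec();
}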


@@ -1,15 +1,20 @@
## Latest News
GPT4All v3.10.0 was released on February 24th. Changes include:
GPT4All v3.5.2 was released on December 13th. It changes the "Explore Models" page and fixes issues with the API server and cloned models.
* **Remote Models:**
* The Add Model page now has a dedicated tab for remote model providers.
* Groq, OpenAI, and Mistral remote models are now easier to configure.
* **CUDA Compatibility:** GPUs with CUDA compute capability 5.0 such as the GTX 750 are now supported by the CUDA backend.
* **New Model:** The non-MoE Granite model is now supported.
* **Translation Updates:**
* The Italian translation has been updated.
* The Simplified Chinese translation has been significantly improved.
* **Better Chat Templates:** The default chat templates for OLMoE 7B 0924/0125 and Granite 3.1 3B/8B have been improved.
* **Whitespace Fixes:** DeepSeek-R1-based models now have better whitespace behavior in their output.
* **Crash Fixes:** Several issues that could potentially cause GPT4All to crash have been fixed.
GPT4All v3.5.1 was released on December 10th and fixes several issues with the new chat templates. Additionally, it fixes a bug with the default model button as well as an issue with remote models.
---
GPT4All v3.5.0 was released on December 9th. Changes include:
* **Message Editing:**
* You can now edit any message you've sent by clicking the pencil icon below it.
* You can now redo earlier responses in the conversation.
* **Templates:** Chat templates have been completely overhauled! They now use Jinja-style syntax. You may notice warnings or errors in the UI. Read the linked docs, and if you have any questions, please ask on the Discord.
* **File Attachments:** Markdown and plain text files are now supported as file attachments.
* **System Tray:** There is now an option in Application Settings to allow GPT4All to minimize to the system tray instead of closing.
* **Local API Server:**
* The API server now supports system messages from the client and no longer uses the system message in settings.
* You can now send messages to the API server in any order supported by the model instead of just user/assistant pairs.
* **Translations:** The Italian and Romanian translations have been improved.


@@ -1,22 +1,6 @@
[
{
"order": "a",
"md5sum": "a54c08a7b90e4029a8c2ab5b5dc936aa",
"name": "Reasoner v1",
"filename": "qwen2.5-coder-7b-instruct-q4_0.gguf",
"filesize": "4431390720",
"requires": "3.6.0",
"ramrequired": "8",
"parameters": "8 billion",
"quant": "q4_0",
"type": "qwen2",
"description": "<ul><li>Based on <a href=\"https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct\">Qwen2.5-Coder 7B</a></li><li>Uses built-in javascript code interpreter</li><li>Use for complex reasoning tasks that can be aided by computation analysis</li><li>License: <a href=\"https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct/blob/main/LICENSE\">Apache License Version 2.0</a></li><li>#reasoning</li></ul>",
"url": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GGUF/resolve/main/qwen2.5-coder-7b-instruct-q4_0.gguf",
"chatTemplate": "{{- '<|im_start|>system\\n' }}\n{% if toolList|length > 0 %}You have access to the following functions:\n{% for tool in toolList %}\nUse the function '{{tool.function}}' to: '{{tool.description}}'\n{% if tool.parameters|length > 0 %}\nparameters:\n{% for info in tool.parameters %}\n {{info.name}}:\n type: {{info.type}}\n description: {{info.description}}\n required: {{info.required}}\n{% endfor %}\n{% endif %}\n# Tool Instructions\nIf you CHOOSE to call this function ONLY reply with the following format:\n'{{tool.symbolicFormat}}'\nHere is an example. If the user says, '{{tool.examplePrompt}}', then you reply\n'{{tool.exampleCall}}'\nAfter the result you might reply with, '{{tool.exampleReply}}'\n{% endfor %}\nYou MUST include both the start and end tags when you use a function.\n\nYou are a helpful AI assistant who uses the functions to break down, analyze, perform, and verify complex reasoning tasks. You SHOULD try to verify your answers using the functions where possible.\n{% endif %}\n{{- '<|im_end|>\\n' }}\n{% for message in messages %}\n{{'<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{% endfor %}\n{% if add_generation_prompt %}\n{{ '<|im_start|>assistant\\n' }}\n{% endif %}\n",
"systemPrompt": ""
},
{
"order": "aa",
"md5sum": "c87ad09e1e4c8f9c35a5fcef52b6f1c9",
"name": "Llama 3 8B Instruct",
"filename": "Meta-Llama-3-8B-Instruct.Q4_0.gguf",
@@ -29,68 +13,7 @@
"description": "<ul><li>Fast responses</li><li>Chat based model</li><li>Accepts system prompts in Llama 3 format</li><li>Trained by Meta</li><li>License: <a href=\"https://llama.meta.com/llama3/license/\">Meta Llama 3 Community License</a></li></ul>",
"url": "https://gpt4all.io/models/gguf/Meta-Llama-3-8B-Instruct.Q4_0.gguf",
"promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2<|eot_id|>",
"systemPrompt": "",
"chatTemplate": "{%- set loop_messages = messages %}\n{%- for message in loop_messages %}\n {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' %}\n {{- content }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}"
},
{
"order": "aa1",
"sha256sum": "5cd4ee65211770f1d99b4f6f4951780b9ef40e29314bd6542bb5bd0ad0bc29d1",
"name": "DeepSeek-R1-Distill-Qwen-7B",
"filename": "DeepSeek-R1-Distill-Qwen-7B-Q4_0.gguf",
"filesize": "4444121056",
"requires": "3.8.0",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "deepseek",
"description": "<p>The official Qwen2.5-Math-7B distillation of DeepSeek-R1.</p><ul><li>License: <a href=\"https://opensource.org/license/mit\">MIT</a></li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>",
"url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-7B-Q4_0.gguf",
"chatTemplate": "{%- if not add_generation_prompt is defined %}\n {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'user' %}\n {{- '<User>' + message['content'] }}\n {%- endif %}\n {%- if message['role'] == 'assistant' %}\n {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*</think>', '') %}\n {{- '<Assistant>' + content + '<end▁of▁sentence>' }}\n {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n {{- '<Assistant>' }}\n{%- endif %}"
},
{
"order": "aa2",
"sha256sum": "906b3382f2680f4ce845459b4a122e904002b075238080307586bcffcde49eef",
"name": "DeepSeek-R1-Distill-Qwen-14B",
"filename": "DeepSeek-R1-Distill-Qwen-14B-Q4_0.gguf",
"filesize": "8544267680",
"requires": "3.8.0",
"ramrequired": "16",
"parameters": "14 billion",
"quant": "q4_0",
"type": "deepseek",
"description": "<p>The official Qwen2.5-14B distillation of DeepSeek-R1.</p><ul><li>License: <a href=\"https://opensource.org/license/mit\">MIT</a></li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>",
"url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-14B-Q4_0.gguf",
"chatTemplate": "{%- if not add_generation_prompt is defined %}\n {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'user' %}\n {{- '<User>' + message['content'] }}\n {%- endif %}\n {%- if message['role'] == 'assistant' %}\n {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*</think>', '') %}\n {{- '<Assistant>' + content + '<end▁of▁sentence>' }}\n {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n {{- '<Assistant>' }}\n{%- endif %}"
},
{
"order": "aa3",
"sha256sum": "0eb93e436ac8beec18aceb958c120d282cb2cf5451b23185e7be268fe9d375cc",
"name": "DeepSeek-R1-Distill-Llama-8B",
"filename": "DeepSeek-R1-Distill-Llama-8B-Q4_0.gguf",
"filesize": "4675894112",
"requires": "3.8.0",
"ramrequired": "8",
"parameters": "8 billion",
"quant": "q4_0",
"type": "deepseek",
"description": "<p>The official Llama-3.1-8B distillation of DeepSeek-R1.</p><ul><li>License: <a href=\"https://opensource.org/license/mit\">MIT</a></li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>",
"url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Llama-8B-GGUF/resolve/main/DeepSeek-R1-Distill-Llama-8B-Q4_0.gguf",
"chatTemplate": "{%- if not add_generation_prompt is defined %}\n {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'user' %}\n {{- '<User>' + message['content'] }}\n {%- endif %}\n {%- if message['role'] == 'assistant' %}\n {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*</think>', '') %}\n {{- '<Assistant>' + content + '<end▁of▁sentence>' }}\n {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n {{- '<Assistant>' }}\n{%- endif %}"
},
{
"order": "aa4",
"sha256sum": "b3af887d0a015b39fab2395e4faf682c1a81a6a3fd09a43f0d4292f7d94bf4d0",
"name": "DeepSeek-R1-Distill-Qwen-1.5B",
"filename": "DeepSeek-R1-Distill-Qwen-1.5B-Q4_0.gguf",
"filesize": "1068807776",
"requires": "3.8.0",
"ramrequired": "3",
"parameters": "1.5 billion",
"quant": "q4_0",
"type": "deepseek",
"description": "<p>The official Qwen2.5-Math-1.5B distillation of DeepSeek-R1.</p><ul><li>License: <a href=\"https://opensource.org/license/mit\">MIT</a></li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>",
"url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-1.5B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-1.5B-Q4_0.gguf",
"chatTemplate": "{%- if not add_generation_prompt is defined %}\n {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'user' %}\n {{- '<User>' + message['content'] }}\n {%- endif %}\n {%- if message['role'] == 'assistant' %}\n {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*</think>', '') %}\n {{- '<Assistant>' + content + '<end▁of▁sentence>' }}\n {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n {{- '<Assistant>' }}\n{%- endif %}"
"systemPrompt": ""
},
{
"order": "b",
@ -107,7 +30,7 @@
"url": "https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-GGUF/resolve/main/Llama-3.2-3B-Instruct-Q4_0.gguf",
"promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2",
"systemPrompt": "<|start_header_id|>system<|end_header_id|>\nCutting Knowledge Date: December 2023\n\nYou are a helpful assistant.<|eot_id|>",
"chatTemplate": "{{- bos_token }}\n{%- set date_string = strftime_now('%d %b %Y') %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] | trim %}\n {%- set loop_start = 1 %}\n{%- else %}\n {%- set system_message = '' %}\n {%- set loop_start = 0 %}\n{%- endif %}\n\n{#- System message #}\n{{- '<|start_header_id|>system<|end_header_id|>\\n\\n' }}\n{{- 'Cutting Knowledge Date: December 2023\\n' }}\n{{- 'Today Date: ' + date_string + '\\n\\n' }}\n{{- system_message }}\n{{- '<|eot_id|>' }}\n\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' + message['content'] | trim + '<|eot_id|>' }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}"
"chatTemplate": "{{- bos_token }}\n{%- if not date_string is defined %}\n {%- if strftime_now is defined %}\n {%- set date_string = strftime_now('%d %b %Y') %}\n {%- else %}\n {%- set date_string = '26 Jul 2024' %}\n {%- endif %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] | trim %}\n {%- set loop_start = 1 %}\n{%- else %}\n {%- set system_message = '' %}\n {%- set loop_start = 0 %}\n{%- endif %}\n\n{#- System message #}\n{{- '<|start_header_id|>system<|end_header_id|>\\n\\n' }}\n{{- 'Cutting Knowledge Date: December 2023\\n' }}\n{{- 'Today Date: ' + date_string + '\\n\\n' }}\n{{- system_message }}\n{{- '<|eot_id|>' }}\n\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' + message['content'] | trim + '<|eot_id|>' }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}"
},
{
"order": "c",
@ -124,7 +47,7 @@
"url": "https://huggingface.co/bartowski/Llama-3.2-1B-Instruct-GGUF/resolve/main/Llama-3.2-1B-Instruct-Q4_0.gguf",
"promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2",
"systemPrompt": "<|start_header_id|>system<|end_header_id|>\nCutting Knowledge Date: December 2023\n\nYou are a helpful assistant.<|eot_id|>",
"chatTemplate": "{{- bos_token }}\n{%- set date_string = strftime_now('%d %b %Y') %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] | trim %}\n {%- set loop_start = 1 %}\n{%- else %}\n {%- set system_message = '' %}\n {%- set loop_start = 0 %}\n{%- endif %}\n\n{#- System message #}\n{{- '<|start_header_id|>system<|end_header_id|>\\n\\n' }}\n{{- 'Cutting Knowledge Date: December 2023\\n' }}\n{{- 'Today Date: ' + date_string + '\\n\\n' }}\n{{- system_message }}\n{{- '<|eot_id|>' }}\n\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' + message['content'] | trim + '<|eot_id|>' }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}"
"chatTemplate": "{{- bos_token }}\n{%- if not date_string is defined %}\n {%- if strftime_now is defined %}\n {%- set date_string = strftime_now('%d %b %Y') %}\n {%- else %}\n {%- set date_string = '26 Jul 2024' %}\n {%- endif %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] | trim %}\n {%- set loop_start = 1 %}\n{%- else %}\n {%- set system_message = '' %}\n {%- set loop_start = 0 %}\n{%- endif %}\n\n{#- System message #}\n{{- '<|start_header_id|>system<|end_header_id|>\\n\\n' }}\n{{- 'Cutting Knowledge Date: December 2023\\n' }}\n{{- 'Today Date: ' + date_string + '\\n\\n' }}\n{{- system_message }}\n{{- '<|eot_id|>' }}\n\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' + message['content'] | trim + '<|eot_id|>' }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}"
},
{
"order": "d",
@ -141,7 +64,7 @@
"url": "https://huggingface.co/NousResearch/Nous-Hermes-2-Mistral-7B-DPO-GGUF/resolve/main/Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf",
"promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n",
"systemPrompt": "",
"chatTemplate": "{%- for message in messages %}\n {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
"chatTemplate": "{%- for message in messages %}\n {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
},
{
"order": "e",
@ -158,7 +81,7 @@
"description": "<strong>Strong overall fast instruction following model</strong><br><ul><li>Fast responses</li><li>Trained by Mistral AI<li>Uncensored</li><li>Licensed for commercial use</li></ul>",
"url": "https://gpt4all.io/models/gguf/mistral-7b-instruct-v0.1.Q4_0.gguf",
"promptTemplate": "[INST] %1 [/INST]",
"chatTemplate": "{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_start = 1 %}\n{%- else %}\n {%- set loop_start = 0 %}\n{%- endif %}\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {%- if (message['role'] == 'user') != ((loop.index0 - loop_start) % 2 == 0) %}\n {{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif %}\n {%- if message['role'] == 'user' %}\n {%- if loop.index0 == loop_start and loop_start == 1 %}\n {{- ' [INST] ' + system_message + '\\n\\n' + message['content'] + ' [/INST]' }}\n {%- else %}\n {{- ' [INST] ' + message['content'] + ' [/INST]' }}\n {%- endif %}\n {%- elif message['role'] == 'assistant' %}\n {{- ' ' + message['content'] + eos_token }}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}"
"chatTemplate": "{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_start = 1 %}\n{%- else %}\n {%- set loop_start = 0 %}\n{%- endif %}\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {%- if (message['role'] == 'user') != ((loop.index0 - loop_start) % 2 == 0) %}\n {{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif %}\n {%- if message['role'] == 'user' %}\n {%- if loop.first and system_message is defined %}\n {{- ' [INST] ' + system_message + '\\n\\n' + message['content'] + ' [/INST]' }}\n {%- else %}\n {{- ' [INST] ' + message['content'] + ' [/INST]' }}\n {%- endif %}\n {%- elif message['role'] == 'assistant' %}\n {{- ' ' + message['content'] + eos_token }}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}"
},
{
"order": "f",
@ -174,8 +97,7 @@
"description": "<ul><li><strong>For advanced users only. Not recommended for use on Windows or Linux without selecting CUDA due to speed issues.</strong></li><li>Fast responses</li><li>Chat based model</li><li>Large context size of 128k</li><li>Accepts agentic system prompts in Llama 3.1 format</li><li>Trained by Meta</li><li>License: <a href=\"https://llama.meta.com/llama3_1/license/\">Meta Llama 3.1 Community License</a></li></ul>",
"url": "https://huggingface.co/GPT4All-Community/Meta-Llama-3.1-8B-Instruct-128k/resolve/main/Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf",
"promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2",
"systemPrompt": "<|start_header_id|>system<|end_header_id|>\nCutting Knowledge Date: December 2023\n\nYou are a helpful assistant.<|eot_id|>",
"chatTemplate": "{%- set loop_messages = messages %}\n{%- for message in loop_messages %}\n {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' %}\n {%- if loop.index0 == 0 %}\n {%- set content = bos_token + content %}\n {%- endif %}\n {{- content }}\n{%- endfor %}\n{{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}"
"systemPrompt": "<|start_header_id|>system<|end_header_id|>\nCutting Knowledge Date: December 2023\n\nYou are a helpful assistant.<|eot_id|>"
},
{
"order": "g",
@ -192,7 +114,7 @@
"url": "https://gpt4all.io/models/gguf/mistral-7b-openorca.gguf2.Q4_0.gguf",
"promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n",
"systemPrompt": "<|im_start|>system\nYou are MistralOrca, a large language model trained by Alignment Lab AI.\n<|im_end|>\n",
"chatTemplate": "{%- for message in messages %}\n {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
"chatTemplate": "{%- if not add_generation_prompt is defined %}\n {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- for message in messages %}\n {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
},
{
"order": "h",
@ -275,7 +197,6 @@
"url": "https://huggingface.co/lamhieu/ghost-7b-v0.9.1-gguf/resolve/main/ghost-7b-v0.9.1-Q4_0.gguf",
"promptTemplate": "<|user|>\n%1</s>\n<|assistant|>\n%2</s>\n",
"systemPrompt": "<|system|>\nYou are Ghost created by Lam Hieu. You are a helpful and knowledgeable assistant. You like to help and always give honest information, in its original language. In communication, you are always respectful, equal and promote positive behavior.\n</s>",
"chatTemplate": "{%- for message in messages %}\n {%- if message['role'] == 'user' %}\n {{- '<|user|>\\n' + message['content'] + eos_token }}\n {%- elif message['role'] == 'system' %}\n {{- '<|system|>\\n' + message['content'] + eos_token }}\n {%- elif message['role'] == 'assistant' %}\n {{- '<|assistant|>\\n' + message['content'] + eos_token }}\n {%- endif %}\n {%- if loop.last and add_generation_prompt %}\n {{- '<|assistant|>' }}\n {%- endif %}\n{%- endfor %}",
"systemMessage": "You are Ghost created by Lam Hieu. You are a helpful and knowledgeable assistant. You like to help and always give honest information, in its original language. In communication, you are always respectful, equal and promote positive behavior."
},
{
@ -361,8 +282,7 @@
"description": "<ul><li>Very fast responses</li><li>Chat based model</li><li>Accepts system prompts in Phi-3 format</li><li>Trained by Microsoft</li><li>License: <a href=\"https://opensource.org/license/mit\">MIT</a></li><li>No restrictions on commercial use</li></ul>",
"url": "https://gpt4all.io/models/gguf/Phi-3-mini-4k-instruct.Q4_0.gguf",
"promptTemplate": "<|user|>\n%1<|end|>\n<|assistant|>\n%2<|end|>\n",
"systemPrompt": "",
"chatTemplate": "{{- bos_token }}\n{%- for message in messages %}\n {{- '<|' + message['role'] + '|>\\n' + message['content'] + '<|end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|assistant|>\\n' }}\n{%- else %}\n {{- eos_token }}\n{%- endif %}"
"systemPrompt": ""
},
{
"order": "r",
@ -486,7 +406,7 @@
"url": "https://huggingface.co/TheBloke/em_german_mistral_v01-GGUF/resolve/main/em_german_mistral_v01.Q4_0.gguf",
"promptTemplate": "USER: %1 ASSISTANT: ",
"systemPrompt": "Du bist ein hilfreicher Assistent. ",
"chatTemplate": "{%- if messages[0]['role'] == 'system' %}\n {%- set loop_start = 1 %}\n {{- messages[0]['content'] }}\n{%- else %}\n {%- set loop_start = 0 %}\n{%- endif %}\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {%- if not loop.first %}\n {{- ' ' }}\n {%- endif %}\n {%- if message['role'] == 'user' %}\n {{- 'USER: ' + message['content'] }}\n {%- elif message['role'] == 'assistant' %}\n {{- 'ASSISTANT: ' + message['content'] }}\n {%- else %}\n {{- raise_exception('After the optional system message, conversation roles must be either user or assistant.') }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {%- if messages %}\n {{- ' ' }}\n {%- endif %}\n {{- 'ASSISTANT:' }}\n{%- endif %}",
"chatTemplate": "{%- set system_message = false %}\n{%- if messages[0]['role'] == 'system' %}\n {%- set loop_start = 1 %}\n {%- set system_message = true %}\n {{- messages[0]['content'] }}\n{%- else %}\n {%- set loop_start = 0 %}\n{%- endif %}\n{%- for message in messages %}\n {%- if loop.index0 >= loop_start %}\n {%- if (not loop.first) or (system_message is not none) %}\n {{- ' ' }}\n {%- endif %}\n {%- if message['role'] == 'user' %}\n {{- 'USER: ' + message['content'] }}\n {%- elif message['role'] == 'assistant' %}\n {{- 'ASSISTANT: ' + message['content'] }}\n {%- else %}\n {{- raise_exception('After the optional system message, conversation roles must be either user or assistant.') }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {%- if messages %}\n {{- ' ' }}\n {%- endif %}\n {{- 'ASSISTANT:' }}\n{%- endif %}",
"systemMessage": "Du bist ein hilfreicher Assistent."
},
{
@ -532,7 +452,7 @@
"filename": "qwen2-1_5b-instruct-q4_0.gguf",
"filesize": "937532800",
"requires": "3.0",
"ramrequired": "3",
"ramrequired": "4",
"parameters": "1.5 billion",
"quant": "q4_0",
"type": "qwen2",
@ -540,6 +460,6 @@
"url": "https://huggingface.co/Qwen/Qwen2-1.5B-Instruct-GGUF/resolve/main/qwen2-1_5b-instruct-q4_0.gguf",
"promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>",
"systemPrompt": "<|im_start|>system\nBelow is an instruction that describes a task. Write a response that appropriately completes the request.<|im_end|>\n",
"chatTemplate": "{%- for message in messages %}\n {%- if loop.first and messages[0]['role'] != 'system' %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
"chatTemplate": "{%- for message in messages %}\n {%- if loop.first and messages[0]['role'] != 'system' %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
}
]
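All four DeepSeek entries above ship the same chatTemplate, and its one non-obvious move is the `regex_replace` filter: everything up to the closing `</think>` tag is stripped from earlier assistant turns, so chain-of-thought reasoning never re-enters the context on the next prompt. A minimal sketch of that behavior, using stock Python Jinja2 with a hand-registered `regex_replace` filter (GPT4All's template engine provides the filter natively; stock Jinja2 does not) and a condensed form of the template:

```python
# Sketch only: a condensed form of the DeepSeek chatTemplate, rendered
# with stock Jinja2. regex_replace is not a built-in Jinja2 filter, so
# an equivalent is registered here; GPT4All's own engine ships it.
import re
from jinja2 import Environment

env = Environment()
env.filters["regex_replace"] = lambda s, pat, repl: re.sub(pat, repl, s)

template = env.from_string(
    "{%- for m in messages %}"
    "{%- if m['role'] == 'user' %}{{ '<｜User｜>' + m['content'] }}"
    "{%- elif m['role'] == 'assistant' %}"
    "{%- set c = m['content'] | regex_replace('^[\\s\\S]*</think>', '') %}"
    "{{ '<｜Assistant｜>' + c + '<｜end▁of▁sentence｜>' }}"
    "{%- endif %}{%- endfor %}"
    "{%- if add_generation_prompt %}{{ '<｜Assistant｜>' }}{%- endif %}"
)

messages = [
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant",
     "content": "<think>Two plus two is four.</think>2 + 2 = 4."},
    {"role": "user", "content": "Now double it."},
]
print(template.render(messages=messages, add_generation_prompt=True))
# The <think>...</think> span is gone from the replayed assistant turn.
```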

View File

@ -243,40 +243,5 @@
"version": "3.5.2",
"notes": "* **Model Search:** There are now separate tabs for official and third-party models.\n* **Local Server Fixes:** Several mistakes in v3.5's changes to the API server have been corrected.\n* **Cloned Model Fixes:** The chat template and system message of cloned models now manage their defaults correctly.\n* **Translation Improvements:** The Romanian and Italian translations have been updated.\n",
"contributors": "* Jared Van Bortel (Nomic AI)\n* Adam Treat (Nomic AI)\n* Riccardo Giovanetti (`@Harvester62`)\n* Victor Emanuel (`@SINAPSA-IC`)"
},
{
"version": "3.5.3",
"notes": "* **LocalDocs Fix:** A serious issue causing LocalDocs to not work properly in v3.5.2 has been fixed.\n",
"contributors": "* Jared Van Bortel (Nomic AI)\n* Adam Treat (Nomic AI)"
},
{
"version": "3.6.0",
"notes": "* **Reasoner v1:**\n * Built-in javascript code interpreter tool.\n * Custom curated model that utilizes the code interpreter to break down, analyze, perform, and verify complex reasoning tasks.\n* **Templates:** Automatically substitute chat templates that are not compatible with Jinja2Cpp in GGUFs.\n* **Fixes:**\n * Remote model template to allow for XML in messages.\n * Jinja2Cpp bug that broke system message detection in chat templates.\n * LocalDocs sources displaying in unconsolidated form after v3.5.0.\n",
"contributors": "* Adam Treat (Nomic AI)\n* Jared Van Bortel (Nomic AI)"
},
{
"version": "3.6.1",
"notes": "* **Fixes:**\n * The stop generation button no longer working in v3.6.0.\n * The copy entire conversation button no longer working in v3.6.0.\n",
"contributors": "* Adam Treat (Nomic AI)"
},
{
"version": "3.7.0",
"notes": "* **Windows ARM Support:** GPT4All now supports the Windows ARM platform, ensuring compatibility with devices powered by Qualcomm Snapdragon and Microsoft SQ-series processors.\n * **NOTE:** Support for GPU and/or NPU acceleration is not available at this time. Only the CPU will be used to run LLMs.\n * **NOTE:** You must install the new *Windows ARM* version of GPT4All from the website. The standard *Windows* version will not work due to emulation limitations.\n* **Fixed Updating on macOS:** The maintenance tool no longer crashes when attempting to update or uninstall GPT4All on Sequoia.\n * **NOTE:** If you have installed the version from the GitHub releases as a workaround for this issue, you can safely uninstall it and switch back to the version from the website.\n* **Fixed Chat Saving on macOS:** Chats now save as expected when the application is quit with Command-Q.\n* **Code Interpreter Improvements:**\n * The behavior when the code takes too long to execute and times out has been improved.\n * console.log now accepts multiple arguments for better compatibility with native JavaScript.\n* **Chat Templating Improvements:**\n * Two crashes and one compatibility issue have been fixed in the chat template parser.\n * The default chat template for EM German Mistral has been fixed.\n * Automatic replacements have been added for five new models as we continue to improve compatibility with common chat templates.\n",
"contributors": "* Jared Van Bortel (Nomic AI)\n* Adam Treat (Nomic AI)\n* Riccardo Giovanetti (`@Harvester62`)"
},
{
"version": "3.8.0",
"notes": "* **Native DeepSeek-R1-Distill Support:** GPT4All now has robust support for the DeepSeek-R1 family of distillations.\n * Several model variants are now available on the downloads page.\n * Reasoning (wrapped in \"think\" tags) is displayed similarly to the Reasoner model.\n * The DeepSeek-R1 Qwen pretokenizer is now supported, resolving the loading failure in previous versions.\n * The model is now configured with a GPT4All-compatible prompt template by default.\n* **Chat Templating Overhaul:** The template parser has been *completely* replaced with one that has much better compatibility with common models.\n* **Code Interpreter Fixes:**\n * An issue preventing the code interpreter from logging a single string in v3.7.0 has been fixed.\n * The UI no longer freezes while the code interpreter is running a computation.\n* **Local Server Fixes:**\n * An issue preventing the server from using LocalDocs after the first request since v3.5.0 has been fixed.\n * System messages are now correctly hidden from the message history.\n",
"contributors": "* Jared Van Bortel (Nomic AI)\n* Adam Treat (Nomic AI)\n* ThiloteE (`@ThiloteE`)"
},
{
"version": "3.9.0",
"notes": "* **LocalDocs Fix:** LocalDocs no longer shows an error on later messages with reasoning models.\n* **DeepSeek Fix:** DeepSeek-R1 reasoning (in 'think' tags) no longer appears in chat names and follow-up questions.\n* **Windows ARM Improvements:**\n * Graphical artifacts on some SoCs have been fixed.\n * A crash when adding a collection of PDFs to LocalDocs has been fixed.\n* **Template Parser Fixes:** Chat templates containing an unclosed comment no longer freeze GPT4All.\n* **New Models:** OLMoE and Granite MoE models are now supported.\n",
"contributors": "* Jared Van Bortel (Nomic AI)\n* Adam Treat (Nomic AI)\n* ThiloteE (`@ThiloteE`)"
},
{
"version": "3.10.0",
"notes": "* **Remote Models:**\n * The Add Model page now has a dedicated tab for remote model providers.\n * Groq, OpenAI, and Mistral remote models are now easier to configure.\n* **CUDA Compatibility:** GPUs with CUDA compute capability 5.0 such as the GTX 750 are now supported by the CUDA backend.\n* **New Model:** The non-MoE Granite model is now supported.\n* **Translation Updates:**\n * The Italian translation has been updated.\n * The Simplified Chinese translation has been significantly improved.\n* **Better Chat Templates:** The default chat templates for OLMoE 7B 0924/0125 and Granite 3.1 3B/8B have been improved.\n* **Whitespace Fixes:** DeepSeek-R1-based models now have better whitespace behavior in their output.\n* **Crash Fixes:** Several issues that could potentially cause GPT4All to crash have been fixed.\n",
"contributors": "* Jared Van Bortel (Nomic AI)\n* Adam Treat (Nomic AI)\n* ThiloteE (`@ThiloteE`)\n* Lil Bob (`@Junior2Ran`)\n* Riccardo Giovanetti (`@Harvester62`)"
}
]

View File

@ -56,52 +56,6 @@ ColumnLayout {
Accessible.description: qsTr("Displayed when the models request is ongoing")
}
RowLayout {
ButtonGroup {
id: buttonGroup
exclusive: true
}
MyButton {
text: qsTr("All")
checked: true
borderWidth: 0
backgroundColor: checked ? theme.lightButtonBackground : "transparent"
backgroundColorHovered: theme.lighterButtonBackgroundHovered
backgroundRadius: 5
padding: 15
topPadding: 8
bottomPadding: 8
textColor: theme.lighterButtonForeground
fontPixelSize: theme.fontSizeLarge
fontPixelBold: true
checkable: true
ButtonGroup.group: buttonGroup
onClicked: {
ModelList.gpt4AllDownloadableModels.filter("");
}
}
MyButton {
text: qsTr("Reasoning")
borderWidth: 0
backgroundColor: checked ? theme.lightButtonBackground : "transparent"
backgroundColorHovered: theme.lighterButtonBackgroundHovered
backgroundRadius: 5
padding: 15
topPadding: 8
bottomPadding: 8
textColor: theme.lighterButtonForeground
fontPixelSize: theme.fontSizeLarge
fontPixelBold: true
checkable: true
ButtonGroup.group: buttonGroup
onClicked: {
ModelList.gpt4AllDownloadableModels.filter("#reasoning");
}
}
Layout.bottomMargin: 10
}
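The two buttons above drive a tag filter: the DeepSeek manifest entries carry a `#reasoning` hashtag in their descriptions, `filter("#reasoning")` keeps only entries that mention it, and `filter("")` restores the full list. The exact matching ModelList performs is not visible in this diff; a plain substring check over the description reproduces the behavior seen here:

```python
# Manifest-style entries; descriptions may carry hashtags like #reasoning.
models = [
    {"name": "DeepSeek-R1-Distill-Llama-8B", "description": "... #reasoning"},
    {"name": "Llama 3.2 3B Instruct", "description": "fast chat model"},
]

def filter_models(models, tag=""):
    """Empty tag keeps everything; otherwise keep entries whose
    description mentions the tag, e.g. '#reasoning'."""
    return [m for m in models if tag in m.get("description", "")]

assert len(filter_models(models)) == 2
assert [m["name"] for m in filter_models(models, "#reasoning")] == \
    ["DeepSeek-R1-Distill-Llama-8B"]
```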
ScrollView {
id: scrollView
ScrollBar.vertical.policy: ScrollBar.AsNeeded
@ -204,7 +158,7 @@ ColumnLayout {
Layout.minimumWidth: 200
Layout.fillWidth: true
Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
visible: !installed && !calcHash && downloadError === ""
visible: !isOnline && !installed && !calcHash && downloadError === ""
Accessible.description: qsTr("Stop/restart/start the download")
onClicked: {
if (!isDownloading) {
@ -230,6 +184,52 @@ ColumnLayout {
}
}
MySettingsButton {
id: installButton
visible: !installed && isOnline
Layout.topMargin: 20
Layout.leftMargin: 20
Layout.minimumWidth: 200
Layout.fillWidth: true
Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
text: qsTr("Install")
font.pixelSize: theme.fontSizeLarge
onClicked: {
var apiKeyText = apiKey.text.trim(),
baseUrlText = baseUrl.text.trim(),
modelNameText = modelName.text.trim();
var apiKeyOk = apiKeyText !== "",
baseUrlOk = !isCompatibleApi || baseUrlText !== "",
modelNameOk = !isCompatibleApi || modelNameText !== "";
if (!apiKeyOk)
apiKey.showError();
if (!baseUrlOk)
baseUrl.showError();
if (!modelNameOk)
modelName.showError();
if (!apiKeyOk || !baseUrlOk || !modelNameOk)
return;
if (!isCompatibleApi)
Download.installModel(
filename,
apiKeyText,
);
else
Download.installCompatibleModel(
modelNameText,
apiKeyText,
baseUrlText,
);
}
Accessible.role: Accessible.Button
Accessible.name: qsTr("Install")
Accessible.description: qsTr("Install online model")
}
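The handler above validates the three fields before dispatching: the API key is always required, while the base URL and model name only matter for OpenAI-compatible ("custom") installs. A rough Python restatement of that branching; `plan_install` and the returned action names are stand-ins for the QML's Download.installModel / Download.installCompatibleModel calls:

```python
def plan_install(filename, api_key, base_url, model_name, is_compatible_api):
    """Mirror of the Install button's checks: the API key is always
    required; base URL and model name only for compatible-API installs.
    The returned action names stand in for the Download.* QML calls."""
    api_key, base_url, model_name = (
        s.strip() for s in (api_key, base_url, model_name))
    missing = []
    if api_key == "":
        missing.append("$API_KEY")
    if is_compatible_api and base_url == "":
        missing.append("$BASE_URL")
    if is_compatible_api and model_name == "":
        missing.append("$MODEL_NAME")
    if missing:
        raise ValueError("empty field(s): " + ", ".join(missing))
    if is_compatible_api:
        # OpenAI-compatible provider: endpoint + model id + key.
        return ("installCompatibleModel", model_name, api_key, base_url)
    # Curated online model: the manifest filename identifies it.
    return ("installModel", filename, api_key)
```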
ColumnLayout {
spacing: 0
Label {
@ -344,6 +344,69 @@ ColumnLayout {
Accessible.description: qsTr("Displayed when the file hash is being calculated")
}
}
MyTextField {
id: apiKey
visible: !installed && isOnline
Layout.topMargin: 20
Layout.leftMargin: 20
Layout.minimumWidth: 200
Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
wrapMode: Text.WrapAnywhere
function showError() {
messageToast.show(qsTr("ERROR: $API_KEY is empty."));
apiKey.placeholderTextColor = theme.textErrorColor;
}
onTextChanged: {
apiKey.placeholderTextColor = theme.mutedTextColor;
}
placeholderText: qsTr("enter $API_KEY")
Accessible.role: Accessible.EditableText
Accessible.name: placeholderText
Accessible.description: qsTr("Whether the file hash is being calculated")
}
MyTextField {
id: baseUrl
visible: !installed && isOnline && isCompatibleApi
Layout.topMargin: 20
Layout.leftMargin: 20
Layout.minimumWidth: 200
Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
wrapMode: Text.WrapAnywhere
function showError() {
messageToast.show(qsTr("ERROR: $BASE_URL is empty."));
baseUrl.placeholderTextColor = theme.textErrorColor;
}
onTextChanged: {
baseUrl.placeholderTextColor = theme.mutedTextColor;
}
placeholderText: qsTr("enter $BASE_URL")
Accessible.role: Accessible.EditableText
Accessible.name: placeholderText
Accessible.description: qsTr("Whether the file hash is being calculated")
}
MyTextField {
id: modelName
visible: !installed && isOnline && isCompatibleApi
Layout.topMargin: 20
Layout.leftMargin: 20
Layout.minimumWidth: 200
Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
wrapMode: Text.WrapAnywhere
function showError() {
messageToast.show(qsTr("ERROR: $MODEL_NAME is empty."))
modelName.placeholderTextColor = theme.textErrorColor;
}
onTextChanged: {
modelName.placeholderTextColor = theme.mutedTextColor;
}
placeholderText: qsTr("enter $MODEL_NAME")
Accessible.role: Accessible.EditableText
Accessible.name: placeholderText
Accessible.description: qsTr("Whether the file hash is being calculated")
}
}
}
}

View File

@ -89,13 +89,6 @@ Rectangle {
gpt4AllModelView.show();
}
}
MyTabButton {
text: qsTr("Remote Providers")
isSelected: remoteModelView.isShown()
onPressed: {
remoteModelView.show();
}
}
MyTabButton {
text: qsTr("HuggingFace")
isSelected: huggingfaceModelView.isShown()
@ -119,20 +112,7 @@ Rectangle {
stackLayout.currentIndex = 0;
}
function isShown() {
return stackLayout.currentIndex === 0;
}
}
AddRemoteModelView {
id: remoteModelView
Layout.fillWidth: true
Layout.fillHeight: true
function show() {
stackLayout.currentIndex = 1;
}
function isShown() {
return stackLayout.currentIndex === 1;
return stackLayout.currentIndex === 0
}
}
@ -146,10 +126,10 @@ Rectangle {
anchors.fill: parent
function show() {
stackLayout.currentIndex = 2;
stackLayout.currentIndex = 1;
}
function isShown() {
return stackLayout.currentIndex === 2;
return stackLayout.currentIndex === 1
}
}
}

View File

@ -1,147 +0,0 @@
import QtCore
import QtQuick
import QtQuick.Controls
import QtQuick.Controls.Basic
import QtQuick.Layouts
import QtQuick.Dialogs
import Qt.labs.folderlistmodel
import Qt5Compat.GraphicalEffects
import llm
import chatlistmodel
import download
import modellist
import network
import gpt4all
import mysettings
import localdocs
ColumnLayout {
Layout.fillWidth: true
Layout.alignment: Qt.AlignTop
spacing: 5
Label {
Layout.topMargin: 0
Layout.bottomMargin: 25
Layout.rightMargin: 150 * theme.fontScale
Layout.alignment: Qt.AlignTop
Layout.fillWidth: true
verticalAlignment: Text.AlignTop
text: qsTr("Various remote model providers that use network resources for inference.")
font.pixelSize: theme.fontSizeLarger
color: theme.textColor
wrapMode: Text.WordWrap
}
ScrollView {
id: scrollView
ScrollBar.vertical.policy: ScrollBar.AsNeeded
Layout.fillWidth: true
Layout.fillHeight: true
contentWidth: availableWidth
clip: true
Flow {
anchors.left: parent.left
anchors.right: parent.right
spacing: 20
bottomPadding: 20
property int childWidth: 330 * theme.fontScale
property int childHeight: 400 + 166 * theme.fontScale
RemoteModelCard {
width: parent.childWidth
height: parent.childHeight
providerBaseUrl: "https://api.groq.com/openai/v1/"
providerName: qsTr("Groq")
providerImage: "qrc:/gpt4all/icons/groq.svg"
providerDesc: qsTr('Groq offers a high-performance AI inference engine designed for low-latency and efficient processing. Optimized for real-time applications, Groq’s technology is ideal for users who need fast responses from open large language models and other AI workloads.<br><br>Get your API key: <a href="https://console.groq.com/keys">https://groq.com/</a>')
modelWhitelist: [
// last updated 2025-02-24
"deepseek-r1-distill-llama-70b",
"deepseek-r1-distill-qwen-32b",
"gemma2-9b-it",
"llama-3.1-8b-instant",
"llama-3.2-1b-preview",
"llama-3.2-3b-preview",
"llama-3.3-70b-specdec",
"llama-3.3-70b-versatile",
"llama3-70b-8192",
"llama3-8b-8192",
"mixtral-8x7b-32768",
"qwen-2.5-32b",
"qwen-2.5-coder-32b",
]
}
RemoteModelCard {
width: parent.childWidth
height: parent.childHeight
providerBaseUrl: "https://api.openai.com/v1/"
providerName: qsTr("OpenAI")
providerImage: "qrc:/gpt4all/icons/openai.svg"
providerDesc: qsTr('OpenAI provides access to advanced AI models, including GPT-4 supporting a wide range of applications, from conversational AI to content generation and code completion.<br><br>Get your API key: <a href="https://platform.openai.com/signup">https://openai.com/</a>')
modelWhitelist: [
// last updated 2025-02-24
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-4",
"gpt-4-32k",
"gpt-4-turbo",
"gpt-4o",
]
}
RemoteModelCard {
width: parent.childWidth
height: parent.childHeight
providerBaseUrl: "https://api.mistral.ai/v1/"
providerName: qsTr("Mistral")
providerImage: "qrc:/gpt4all/icons/mistral.svg"
providerDesc: qsTr('Mistral AI specializes in efficient, open-weight language models optimized for various natural language processing tasks. Their models are designed for flexibility and performance, making them a solid option for applications requiring scalable AI solutions.<br><br>Get your API key: <a href="https://mistral.ai/">https://mistral.ai/</a>')
modelWhitelist: [
// last updated 2025-02-24
"codestral-2405",
"codestral-2411-rc5",
"codestral-2412",
"codestral-2501",
"codestral-latest",
"codestral-mamba-2407",
"codestral-mamba-latest",
"ministral-3b-2410",
"ministral-3b-latest",
"ministral-8b-2410",
"ministral-8b-latest",
"mistral-large-2402",
"mistral-large-2407",
"mistral-large-2411",
"mistral-large-latest",
"mistral-medium-2312",
"mistral-medium-latest",
"mistral-saba-2502",
"mistral-saba-latest",
"mistral-small-2312",
"mistral-small-2402",
"mistral-small-2409",
"mistral-small-2501",
"mistral-small-latest",
"mistral-tiny-2312",
"mistral-tiny-2407",
"mistral-tiny-latest",
"open-codestral-mamba",
"open-mistral-7b",
"open-mistral-nemo",
"open-mistral-nemo-2407",
"open-mixtral-8x22b",
"open-mixtral-8x22b-2404",
"open-mixtral-8x7b",
]
}
RemoteModelCard {
width: parent.childWidth
height: parent.childHeight
providerIsCustom: true
providerName: qsTr("Custom")
providerImage: "qrc:/gpt4all/icons/antenna_3.svg"
providerDesc: qsTr("The custom provider option allows users to connect their own OpenAI-compatible AI models or third-party inference services. This is useful for organizations with proprietary models or those leveraging niche AI providers not listed here.")
}
}
}
}

View File

@ -1,166 +0,0 @@
import Qt5Compat.GraphicalEffects
import QtCore
import QtQuick
import QtQuick.Controls
import QtQuick.Controls.Basic
import QtQuick.Layouts
import gpt4all
import mysettings
import toolenums
ColumnLayout {
property alias textContent: innerTextItem.textContent
property bool isCurrent: false
property bool isError: false
property bool isThinking: false
property int thinkingTime: 0
Layout.topMargin: 10
Layout.bottomMargin: 10
Item {
Layout.preferredWidth: childrenRect.width
Layout.preferredHeight: 38
RowLayout {
anchors.left: parent.left
anchors.top: parent.top
anchors.bottom: parent.bottom
Item {
Layout.preferredWidth: myTextArea.implicitWidth
Layout.preferredHeight: myTextArea.implicitHeight
TextArea {
id: myTextArea
text: {
if (isError)
return qsTr("Analysis encountered error");
if (isCurrent)
return isThinking ? qsTr("Thinking") : qsTr("Analyzing");
return isThinking
? qsTr("Thought for %1 %2")
.arg(Math.ceil(thinkingTime / 1000.0))
.arg(Math.ceil(thinkingTime / 1000.0) === 1 ? qsTr("second") : qsTr("seconds"))
: qsTr("Analyzed");
}
padding: 0
font.pixelSize: theme.fontSizeLarger
enabled: false
focus: false
readOnly: true
color: headerMA.containsMouse ? theme.mutedDarkTextColorHovered : theme.mutedTextColor
hoverEnabled: false
}
Item {
id: textColorOverlay
anchors.fill: parent
clip: true
visible: false
Rectangle {
id: animationRec
width: myTextArea.width * 0.3
anchors.top: parent.top
anchors.bottom: parent.bottom
color: theme.textColor
SequentialAnimation {
running: isCurrent
loops: Animation.Infinite
NumberAnimation {
target: animationRec;
property: "x";
from: -animationRec.width;
to: myTextArea.width * 3;
duration: 2000
}
}
}
}
OpacityMask {
visible: isCurrent
anchors.fill: parent
maskSource: myTextArea
source: textColorOverlay
}
}
Item {
id: caret
Layout.preferredWidth: contentCaret.width
Layout.preferredHeight: contentCaret.height
Image {
id: contentCaret
anchors.centerIn: parent
visible: false
sourceSize.width: theme.fontSizeLarge
sourceSize.height: theme.fontSizeLarge
mipmap: true
source: {
if (contentLayout.state === "collapsed")
return "qrc:/gpt4all/icons/caret_right.svg";
else
return "qrc:/gpt4all/icons/caret_down.svg";
}
}
ColorOverlay {
anchors.fill: contentCaret
source: contentCaret
color: headerMA.containsMouse ? theme.mutedDarkTextColorHovered : theme.mutedTextColor
}
}
}
MouseArea {
id: headerMA
hoverEnabled: true
anchors.fill: parent
onClicked: {
if (contentLayout.state === "collapsed")
contentLayout.state = "expanded";
else
contentLayout.state = "collapsed";
}
}
}
ColumnLayout {
id: contentLayout
spacing: 0
state: "collapsed"
clip: true
states: [
State {
name: "expanded"
PropertyChanges { target: contentLayout; Layout.preferredHeight: innerContentLayout.height }
},
State {
name: "collapsed"
PropertyChanges { target: contentLayout; Layout.preferredHeight: 0 }
}
]
transitions: [
Transition {
SequentialAnimation {
PropertyAnimation {
target: contentLayout
property: "Layout.preferredHeight"
duration: 300
easing.type: Easing.InOutQuad
}
}
}
]
ColumnLayout {
id: innerContentLayout
Layout.leftMargin: 30
ChatTextItem {
id: innerTextItem
}
}
}
}

View File

@ -4,11 +4,9 @@ import QtQuick
import QtQuick.Controls
import QtQuick.Controls.Basic
import QtQuick.Layouts
import Qt.labs.qmlmodels
import gpt4all
import mysettings
import toolenums
ColumnLayout {
@ -35,8 +33,6 @@ GridLayout {
Layout.alignment: Qt.AlignVCenter | Qt.AlignRight
Layout.preferredWidth: 32
Layout.preferredHeight: 32
Layout.topMargin: model.index > 0 ? 25 : 0
Image {
id: logo
sourceSize: Qt.size(32, 32)
@ -69,8 +65,6 @@ GridLayout {
Layout.column: 1
Layout.fillWidth: true
Layout.preferredHeight: 38
Layout.topMargin: model.index > 0 ? 25 : 0
RowLayout {
spacing: 5
anchors.left: parent.left
@ -78,11 +72,7 @@ GridLayout {
anchors.bottom: parent.bottom
TextArea {
text: {
if (name === "Response: ")
return qsTr("GPT4All");
return qsTr("You");
}
text: name === "Response: " ? qsTr("GPT4All") : qsTr("You")
padding: 0
font.pixelSize: theme.fontSizeLarger
font.bold: true
@ -98,7 +88,7 @@ GridLayout {
color: theme.mutedTextColor
}
RowLayout {
visible: isCurrentResponse && (content === "" && currentChat.responseInProgress)
visible: isCurrentResponse && (value === "" && currentChat.responseInProgress)
Text {
color: theme.mutedTextColor
font.pixelSize: theme.fontSizeLarger
@ -110,7 +100,6 @@ GridLayout {
case Chat.PromptProcessing: return qsTr("processing ...")
case Chat.ResponseGeneration: return qsTr("generating response ...");
case Chat.GeneratingQuestions: return qsTr("generating questions ...");
case Chat.ToolCallGeneration: return qsTr("generating toolcall ...");
default: return ""; // handle unexpected values
}
}
@ -167,48 +156,131 @@ GridLayout {
}
}
Repeater {
model: childItems
TextArea {
id: myTextArea
Layout.fillWidth: true
padding: 0
color: {
if (!currentChat.isServer)
return theme.textColor
return theme.white
}
wrapMode: Text.WordWrap
textFormat: TextEdit.PlainText
focus: false
readOnly: true
font.pixelSize: theme.fontSizeLarge
cursorVisible: isCurrentResponse ? currentChat.responseInProgress : false
cursorPosition: text.length
TapHandler {
id: tapHandler
onTapped: function(eventPoint, button) {
var clickedPos = myTextArea.positionAt(eventPoint.position.x, eventPoint.position.y);
var success = textProcessor.tryCopyAtPosition(clickedPos);
if (success)
copyCodeMessage.open();
}
}
DelegateChooser {
id: chooser
role: "name"
DelegateChoice {
roleValue: "Text: ";
ChatTextItem {
Layout.fillWidth: true
textContent: modelData.content
MouseArea {
id: conversationMouseArea
anchors.fill: parent
acceptedButtons: Qt.RightButton
onClicked: (mouse) => {
if (mouse.button === Qt.RightButton) {
conversationContextMenu.x = conversationMouseArea.mouseX
conversationContextMenu.y = conversationMouseArea.mouseY
conversationContextMenu.open()
}
}
}
onLinkActivated: function(link) {
if (!isCurrentResponse || !currentChat.responseInProgress)
Qt.openUrlExternally(link)
}
onLinkHovered: function (link) {
if (!isCurrentResponse || !currentChat.responseInProgress)
statusBar.externalHoveredLink = link
}
MyMenu {
id: conversationContextMenu
MyMenuItem {
text: qsTr("Copy")
enabled: myTextArea.selectedText !== ""
height: enabled ? implicitHeight : 0
onTriggered: myTextArea.copy()
}
MyMenuItem {
text: qsTr("Copy Message")
enabled: myTextArea.selectedText === ""
height: enabled ? implicitHeight : 0
onTriggered: {
myTextArea.selectAll()
myTextArea.copy()
myTextArea.deselect()
}
}
DelegateChoice {
roleValue: "ToolCall: ";
ChatCollapsibleItem {
Layout.fillWidth: true
textContent: modelData.content
isCurrent: modelData.isCurrentResponse
isError: modelData.isToolCallError
}
}
DelegateChoice {
roleValue: "Think: ";
ChatCollapsibleItem {
Layout.fillWidth: true
textContent: modelData.content
isCurrent: modelData.isCurrentResponse
isError: false
isThinking: true
thinkingTime: modelData.thinkingTime
visible: modelData.content !== ""
MyMenuItem {
text: textProcessor.shouldProcessText ? qsTr("Disable markdown") : qsTr("Enable markdown")
height: enabled ? implicitHeight : 0
onTriggered: {
textProcessor.shouldProcessText = !textProcessor.shouldProcessText;
textProcessor.setValue(value);
}
}
}
delegate: chooser
}
ChatViewTextProcessor {
id: textProcessor
}
ChatTextItem {
Layout.fillWidth: true
textContent: content
function resetChatViewTextProcessor() {
textProcessor.fontPixelSize = myTextArea.font.pixelSize
textProcessor.codeColors.defaultColor = theme.codeDefaultColor
textProcessor.codeColors.keywordColor = theme.codeKeywordColor
textProcessor.codeColors.functionColor = theme.codeFunctionColor
textProcessor.codeColors.functionCallColor = theme.codeFunctionCallColor
textProcessor.codeColors.commentColor = theme.codeCommentColor
textProcessor.codeColors.stringColor = theme.codeStringColor
textProcessor.codeColors.numberColor = theme.codeNumberColor
textProcessor.codeColors.headerColor = theme.codeHeaderColor
textProcessor.codeColors.backgroundColor = theme.codeBackgroundColor
textProcessor.textDocument = textDocument
textProcessor.setValue(value);
}
property bool textProcessorReady: false
Component.onCompleted: {
resetChatViewTextProcessor();
textProcessorReady = true;
}
Connections {
target: chatModel
function onValueChanged(i, value) {
if (myTextArea.textProcessorReady && index === i)
textProcessor.setValue(value);
}
}
Connections {
target: MySettings
function onFontSizeChanged() {
myTextArea.resetChatViewTextProcessor();
}
function onChatThemeChanged() {
myTextArea.resetChatViewTextProcessor();
}
}
Accessible.role: Accessible.Paragraph
Accessible.name: text
Accessible.description: name === "Response: " ? "The response by the model" : "The prompt by the user"
}
ThumbsDownDialog {
@ -217,16 +289,16 @@ GridLayout {
y: Math.round((parent.height - height) / 2)
width: 640
height: 300
property string text: content
property string text: value
response: newResponse === undefined || newResponse === "" ? text : newResponse
onAccepted: {
var responseHasChanged = response !== text && response !== newResponse
if (thumbsDownState && !thumbsUpState && !responseHasChanged)
return
chatModel.updateNewResponse(model.index, response)
chatModel.updateThumbsUpState(model.index, false)
chatModel.updateThumbsDownState(model.index, true)
chatModel.updateNewResponse(index, response)
chatModel.updateThumbsUpState(index, false)
chatModel.updateThumbsDownState(index, true)
Network.sendConversation(currentChat.id, getConversationJson());
}
}
@ -344,7 +416,7 @@ GridLayout {
states: [
State {
name: "expanded"
PropertyChanges { target: sourcesLayout; Layout.preferredHeight: sourcesFlow.height }
PropertyChanges { target: sourcesLayout; Layout.preferredHeight: flow.height }
},
State {
name: "collapsed"
@ -366,7 +438,7 @@ GridLayout {
]
Flow {
id: sourcesFlow
id: flow
Layout.fillWidth: true
spacing: 10
visible: consolidatedSources.length !== 0
@ -545,7 +617,9 @@ GridLayout {
name: qsTr("Copy")
source: "qrc:/gpt4all/icons/copy.svg"
onClicked: {
chatModel.copyToClipboard(index);
myTextArea.selectAll();
myTextArea.copy();
myTextArea.deselect();
}
}
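Earlier in this file a `DelegateChooser` keyed on the `name` role replaces the single response TextArea: "Text: " parts render as ordinary chat text, while "ToolCall: " and "Think: " parts get collapsible items. Outside QML that routing is just a lookup table; a sketch with illustrative renderers (the `render_*` names are stand-ins, not GPT4All functions):

```python
# Role-keyed dispatch mirroring the DelegateChooser above; the render_*
# helpers are illustrative stand-ins for the QML delegates.
def render_text(item):
    return item["content"]

def render_toolcall(item):
    status = "error" if item.get("isToolCallError") else "ok"
    return f"[tool call: {status}] {item['content']}"

def render_think(item):
    # Hidden entirely when empty, like the QML 'visible' binding.
    return f"[thought] {item['content']}" if item["content"] else None

DELEGATES = {
    "Text: ": render_text,
    "ToolCall: ": render_toolcall,
    "Think: ": render_think,
}

def render_children(child_items):
    rendered = (DELEGATES[i["name"]](i) for i in child_items)
    return [r for r in rendered if r is not None]
```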

View File

@ -1,139 +0,0 @@
import Qt5Compat.GraphicalEffects
import QtCore
import QtQuick
import QtQuick.Controls
import QtQuick.Controls.Basic
import QtQuick.Layouts
import gpt4all
import mysettings
import toolenums
TextArea {
id: myTextArea
property string textContent: ""
visible: textContent != ""
Layout.fillWidth: true
padding: 0
color: {
if (!currentChat.isServer)
return theme.textColor
return theme.white
}
wrapMode: Text.WordWrap
textFormat: TextEdit.PlainText
focus: false
readOnly: true
font.pixelSize: theme.fontSizeLarge
cursorVisible: isCurrentResponse ? currentChat.responseInProgress : false
cursorPosition: text.length
TapHandler {
id: tapHandler
onTapped: function(eventPoint, button) {
var clickedPos = myTextArea.positionAt(eventPoint.position.x, eventPoint.position.y);
var success = textProcessor.tryCopyAtPosition(clickedPos);
if (success)
copyCodeMessage.open();
}
}
MouseArea {
id: conversationMouseArea
anchors.fill: parent
acceptedButtons: Qt.RightButton
onClicked: (mouse) => {
if (mouse.button === Qt.RightButton) {
conversationContextMenu.x = conversationMouseArea.mouseX
conversationContextMenu.y = conversationMouseArea.mouseY
conversationContextMenu.open()
}
}
}
onLinkActivated: function(link) {
if (!isCurrentResponse || !currentChat.responseInProgress)
Qt.openUrlExternally(link)
}
onLinkHovered: function (link) {
if (!isCurrentResponse || !currentChat.responseInProgress)
statusBar.externalHoveredLink = link
}
MyMenu {
id: conversationContextMenu
MyMenuItem {
text: qsTr("Copy")
enabled: myTextArea.selectedText !== ""
height: enabled ? implicitHeight : 0
onTriggered: myTextArea.copy()
}
MyMenuItem {
text: qsTr("Copy Message")
enabled: myTextArea.selectedText === ""
height: enabled ? implicitHeight : 0
onTriggered: {
myTextArea.selectAll()
myTextArea.copy()
myTextArea.deselect()
}
}
MyMenuItem {
text: textProcessor.shouldProcessText ? qsTr("Disable markdown") : qsTr("Enable markdown")
height: enabled ? implicitHeight : 0
onTriggered: {
textProcessor.shouldProcessText = !textProcessor.shouldProcessText;
textProcessor.setValue(textContent);
}
}
}
ChatViewTextProcessor {
id: textProcessor
}
function resetChatViewTextProcessor() {
textProcessor.fontPixelSize = myTextArea.font.pixelSize
textProcessor.codeColors.defaultColor = theme.codeDefaultColor
textProcessor.codeColors.keywordColor = theme.codeKeywordColor
textProcessor.codeColors.functionColor = theme.codeFunctionColor
textProcessor.codeColors.functionCallColor = theme.codeFunctionCallColor
textProcessor.codeColors.commentColor = theme.codeCommentColor
textProcessor.codeColors.stringColor = theme.codeStringColor
textProcessor.codeColors.numberColor = theme.codeNumberColor
textProcessor.codeColors.headerColor = theme.codeHeaderColor
textProcessor.codeColors.backgroundColor = theme.codeBackgroundColor
textProcessor.textDocument = textDocument
textProcessor.setValue(textContent);
}
property bool textProcessorReady: false
Component.onCompleted: {
resetChatViewTextProcessor();
textProcessorReady = true;
}
Connections {
target: myTextArea
function onTextContentChanged() {
if (myTextArea.textProcessorReady)
textProcessor.setValue(textContent);
}
}
Connections {
target: MySettings
function onFontSizeChanged() {
myTextArea.resetChatViewTextProcessor();
}
function onChatThemeChanged() {
myTextArea.resetChatViewTextProcessor();
}
}
Accessible.role: Accessible.Paragraph
Accessible.name: text
Accessible.description: name === "Response: " ? "The response by the model" : "The prompt by the user"
}

View File

@ -37,11 +37,10 @@ Rectangle {
Connections {
target: currentChat
// FIXME: https://github.com/nomic-ai/gpt4all/issues/3334
// function onResponseInProgressChanged() {
// if (MySettings.networkIsActive && !currentChat.responseInProgress)
// Network.sendConversation(currentChat.id, getConversationJson());
// }
function onResponseInProgressChanged() {
if (MySettings.networkIsActive && !currentChat.responseInProgress)
Network.sendConversation(currentChat.id, getConversationJson());
}
function onModelLoadingErrorChanged() {
if (currentChat.modelLoadingError !== "")
modelLoadingErrorPopup.open()
@ -117,44 +116,42 @@ Rectangle {
}
}
// FIXME: https://github.com/nomic-ai/gpt4all/issues/3334
// function getConversation() {
// var conversation = "";
// for (var i = 0; i < chatModel.count; i++) {
// var item = chatModel.get(i)
// var string = item.name;
// var isResponse = item.name === "Response: "
// string += chatModel.get(i).value
// if (isResponse && item.stopped)
// string += " <stopped>"
// string += "\n"
// conversation += string
// }
// return conversation
// }
function getConversation() {
var conversation = "";
for (var i = 0; i < chatModel.count; i++) {
var item = chatModel.get(i)
var string = item.name;
var isResponse = item.name === "Response: "
string += chatModel.get(i).value
if (isResponse && item.stopped)
string += " <stopped>"
string += "\n"
conversation += string
}
return conversation
}
// FIXME: https://github.com/nomic-ai/gpt4all/issues/3334
// function getConversationJson() {
// var str = "{\"conversation\": [";
// for (var i = 0; i < chatModel.count; i++) {
// var item = chatModel.get(i)
// var isResponse = item.name === "Response: "
// str += "{\"content\": ";
// str += JSON.stringify(item.value)
// str += ", \"role\": \"" + (isResponse ? "assistant" : "user") + "\"";
// if (isResponse && item.thumbsUpState !== item.thumbsDownState)
// str += ", \"rating\": \"" + (item.thumbsUpState ? "positive" : "negative") + "\"";
// if (isResponse && item.newResponse !== "")
// str += ", \"edited_content\": " + JSON.stringify(item.newResponse);
// if (isResponse && item.stopped)
// str += ", \"stopped\": \"true\""
// if (!isResponse)
// str += "},"
// else
// str += ((i < chatModel.count - 1) ? "}," : "}")
// }
// return str + "]}"
// }
function getConversationJson() {
var str = "{\"conversation\": [";
for (var i = 0; i < chatModel.count; i++) {
var item = chatModel.get(i)
var isResponse = item.name === "Response: "
str += "{\"content\": ";
str += JSON.stringify(item.value)
str += ", \"role\": \"" + (isResponse ? "assistant" : "user") + "\"";
if (isResponse && item.thumbsUpState !== item.thumbsDownState)
str += ", \"rating\": \"" + (item.thumbsUpState ? "positive" : "negative") + "\"";
if (isResponse && item.newResponse !== "")
str += ", \"edited_content\": " + JSON.stringify(item.newResponse);
if (isResponse && item.stopped)
str += ", \"stopped\": \"true\""
if (!isResponse)
str += "},"
else
str += ((i < chatModel.count - 1) ? "}," : "}")
}
return str + "]}"
}
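The restored `getConversationJson` assembles its payload by string concatenation, with manual trailing-comma bookkeeping that is easy to break. For reference, the same schema built with a real serializer; field names mirror the chatModel roles used above, and `stopped` stays the string "true" to match the handwritten output:

```python
import json

def conversation_json(items):
    """items: dicts with the chatModel roles used above: name, value,
    thumbsUpState, thumbsDownState, newResponse, stopped."""
    conversation = []
    for item in items:
        is_response = item["name"] == "Response: "
        msg = {"content": item["value"],
               "role": "assistant" if is_response else "user"}
        if is_response and item["thumbsUpState"] != item["thumbsDownState"]:
            msg["rating"] = "positive" if item["thumbsUpState"] else "negative"
        if is_response and item["newResponse"] != "":
            msg["edited_content"] = item["newResponse"]
        if is_response and item["stopped"]:
            msg["stopped"] = "true"  # a string, matching the QML output
        conversation.append(msg)
    return json.dumps({"conversation": conversation})
```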
ChatDrawer {
id: chatDrawer
@ -827,8 +824,6 @@ Rectangle {
textInput.forceActiveFocus();
textInput.cursorPosition = text.length;
}
height: visible ? implicitHeight : 0
visible: name !== "ToolResponse: " && name !== "System: "
}
remove: Transition {
@ -935,7 +930,10 @@ Rectangle {
visible: false
}
onClicked: {
chatModel.copyToClipboard()
var conversation = getConversation()
copyEdit.text = conversation
copyEdit.selectAll()
copyEdit.copy()
copyMessage.open()
}
ToolTip.visible: copyChatButton.hovered
@ -1354,10 +1352,9 @@ Rectangle {
ToolTip.text: Accessible.description
onClicked: {
// FIXME: This no longer sets a 'stopped' field so conversations that
// are copied to clipboard or to datalake don't indicate if the user
// has prematurely stopped the response. This has been broken since
// v3.0.0 at least.
var index = Math.max(0, chatModel.count - 1);
var listElement = chatModel.get(index);
listElement.stopped = true
currentChat.stopGenerating()
}
}

View File

@ -60,28 +60,27 @@ ComboBox {
highlighted: comboBox.highlightedIndex === index
}
popup: Popup {
// FIXME This should be made much nicer to take into account lists that are very long so
// that it is scrollable and also sized optimally taking into account the x,y and the content
// width and height as well as the window width and height
y: comboBox.height - 1
width: comboBox.width
implicitHeight: Math.min(window.height - y, contentItem.implicitHeight + 20)
implicitHeight: contentItem.implicitHeight + 20
padding: 0
contentItem: Rectangle {
implicitWidth: comboBox.width
implicitWidth: myListView.contentWidth
implicitHeight: myListView.contentHeight
color: "transparent"
radius: 10
ScrollView {
ListView {
id: myListView
anchors.fill: parent
anchors.margins: 10
clip: true
ScrollBar.vertical.policy: ScrollBar.AsNeeded
ScrollBar.horizontal.policy: ScrollBar.AlwaysOff
ListView {
id: myListView
implicitHeight: contentHeight
model: comboBox.popup.visible ? comboBox.delegateModel : null
currentIndex: comboBox.highlightedIndex
ScrollIndicator.vertical: ScrollIndicator { }
}
implicitHeight: contentHeight
model: comboBox.popup.visible ? comboBox.delegateModel : null
currentIndex: comboBox.highlightedIndex
ScrollIndicator.vertical: ScrollIndicator { }
}
}

View File

@ -1,221 +0,0 @@
import QtCore
import QtQuick
import QtQuick.Controls
import QtQuick.Controls.Basic
import QtQuick.Layouts
import QtQuick.Dialogs
import Qt.labs.folderlistmodel
import Qt5Compat.GraphicalEffects
import llm
import chatlistmodel
import download
import modellist
import network
import gpt4all
import mysettings
import localdocs
Rectangle {
property alias providerName: providerNameLabel.text
property alias providerImage: myimage.source
property alias providerDesc: providerDescLabel.text
property string providerBaseUrl: ""
property bool providerIsCustom: false
property var modelWhitelist: null
color: theme.conversationBackground
radius: 10
border.width: 1
border.color: theme.controlBorder
implicitHeight: topColumn.height + bottomColumn.height + 33 * theme.fontScale
ColumnLayout {
id: topColumn
anchors.left: parent.left
anchors.right: parent.right
anchors.top: parent.top
anchors.margins: 20
spacing: 15 * theme.fontScale
RowLayout {
Layout.alignment: Qt.AlignTop
spacing: 10
Item {
Layout.preferredWidth: 27 * theme.fontScale
Layout.preferredHeight: 27 * theme.fontScale
Layout.alignment: Qt.AlignLeft
Image {
id: myimage
anchors.centerIn: parent
sourceSize.width: parent.width
sourceSize.height: parent.height
mipmap: true
fillMode: Image.PreserveAspectFit
}
}
Label {
id: providerNameLabel
color: theme.textColor
font.pixelSize: theme.fontSizeBanner
}
}
Label {
id: providerDescLabel
Layout.fillWidth: true
wrapMode: Text.Wrap
color: theme.settingsTitleTextColor
font.pixelSize: theme.fontSizeLarge
onLinkActivated: function(link) { Qt.openUrlExternally(link); }
MouseArea {
anchors.fill: parent
acceptedButtons: Qt.NoButton // pass clicks to parent
cursorShape: parent.hoveredLink ? Qt.PointingHandCursor : Qt.ArrowCursor
}
}
}
ColumnLayout {
id: bottomColumn
anchors.left: parent.left
anchors.right: parent.right
anchors.bottom: parent.bottom
anchors.margins: 20
spacing: 30
ColumnLayout {
MySettingsLabel {
text: qsTr("API Key")
font.bold: true
font.pixelSize: theme.fontSizeLarge
color: theme.settingsTitleTextColor
}
MyTextField {
id: apiKeyField
Layout.fillWidth: true
font.pixelSize: theme.fontSizeLarge
wrapMode: Text.WrapAnywhere
function showError() {
messageToast.show(qsTr("ERROR: $API_KEY is empty."));
apiKeyField.placeholderTextColor = theme.textErrorColor;
}
onTextChanged: {
apiKeyField.placeholderTextColor = theme.mutedTextColor;
if (!providerIsCustom) {
let models = ModelList.remoteModelList(apiKeyField.text, providerBaseUrl);
if (modelWhitelist !== null)
models = models.filter(m => modelWhitelist.includes(m));
myModelList.model = models;
myModelList.currentIndex = -1;
}
}
placeholderText: qsTr("enter $API_KEY")
Accessible.role: Accessible.EditableText
Accessible.name: placeholderText
Accessible.description: qsTr("Whether the file hash is being calculated")
}
}
ColumnLayout {
visible: providerIsCustom
MySettingsLabel {
text: qsTr("Base Url")
font.bold: true
font.pixelSize: theme.fontSizeLarge
color: theme.settingsTitleTextColor
}
MyTextField {
id: baseUrlField
Layout.fillWidth: true
font.pixelSize: theme.fontSizeLarge
wrapMode: Text.WrapAnywhere
function showError() {
messageToast.show(qsTr("ERROR: $BASE_URL is empty."));
baseUrlField.placeholderTextColor = theme.textErrorColor;
}
onTextChanged: {
baseUrlField.placeholderTextColor = theme.mutedTextColor;
}
placeholderText: qsTr("enter $BASE_URL")
Accessible.role: Accessible.EditableText
Accessible.name: placeholderText
}
}
ColumnLayout {
visible: providerIsCustom
MySettingsLabel {
text: qsTr("Model Name")
font.bold: true
font.pixelSize: theme.fontSizeLarge
color: theme.settingsTitleTextColor
}
MyTextField {
id: modelNameField
Layout.fillWidth: true
font.pixelSize: theme.fontSizeLarge
wrapMode: Text.WrapAnywhere
function showError() {
messageToast.show(qsTr("ERROR: $MODEL_NAME is empty."))
modelNameField.placeholderTextColor = theme.textErrorColor;
}
onTextChanged: {
modelNameField.placeholderTextColor = theme.mutedTextColor;
}
placeholderText: qsTr("enter $MODEL_NAME")
Accessible.role: Accessible.EditableText
Accessible.name: placeholderText
}
}
ColumnLayout {
visible: myModelList.count > 0 && !providerIsCustom
MySettingsLabel {
text: qsTr("Models")
font.bold: true
font.pixelSize: theme.fontSizeLarge
color: theme.settingsTitleTextColor
}
RowLayout {
spacing: 10
MyComboBox {
Layout.fillWidth: true
id: myModelList
currentIndex: -1;
}
}
}
MySettingsButton {
id: installButton
Layout.alignment: Qt.AlignRight
text: qsTr("Install")
font.pixelSize: theme.fontSizeLarge
property string apiKeyText: apiKeyField.text.trim()
property string baseUrlText: providerIsCustom ? baseUrlField.text.trim() : providerBaseUrl.trim()
property string modelNameText: providerIsCustom ? modelNameField.text.trim() : myModelList.currentText.trim()
enabled: apiKeyText !== "" && baseUrlText !== "" && modelNameText !== ""
onClicked: {
Download.installCompatibleModel(
modelNameText,
apiKeyText,
baseUrlText,
);
myModelList.currentIndex = -1;
}
Accessible.role: Accessible.Button
Accessible.name: qsTr("Install")
Accessible.description: qsTr("Install remote model")
}
}
}
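The card's `onTextChanged` handler re-queries the provider's model list on every keystroke and intersects it with the optional whitelist. This diff does not show what `ModelList.remoteModelList` does internally; assuming the providers' standard OpenAI-compatible `GET <base_url>/models` endpoint, the equivalent fetch-and-filter looks like:

```python
import json
import urllib.request

def remote_model_list(api_key, base_url, whitelist=None):
    """Fetch model ids from an OpenAI-compatible /models endpoint and
    apply the card's optional whitelist (sketch: no error handling)."""
    req = urllib.request.Request(
        base_url.rstrip("/") + "/models",
        headers={"Authorization": "Bearer " + api_key},
    )
    with urllib.request.urlopen(req, timeout=10) as resp:
        data = json.load(resp)
    models = [m["id"] for m in data.get("data", [])]
    if whitelist is not None:
        models = [m for m in models if m in whitelist]
    return sorted(models)

# e.g.: remote_model_list(key, "https://api.groq.com/openai/v1/",
#                         ["llama3-8b-8192", "gemma2-9b-it"])
```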

View File

@ -3,30 +3,19 @@
#include "chatlistmodel.h"
#include "network.h"
#include "server.h"
#include "tool.h"
#include "toolcallparser.h"
#include "toolmodel.h"
#include <QByteArray>
#include <QBuffer>
#include <QDataStream>
#include <QDebug>
#include <QFile>
#include <QFileInfo>
#include <QIODevice>
#include <QLatin1String>
#include <QMap>
#include <QRegularExpression>
#include <QString>
#include <QVariant>
#include <Qt>
#include <QtAssert>
#include <QtLogging>
#include <optional>
#include <utility>
using namespace ToolEnums;
Chat::Chat(QObject *parent)
: QObject(parent)
, m_id(Network::globalInstance()->generateUniqueId())
@ -65,6 +54,7 @@ void Chat::connectLLM()
// Should be in different threads
connect(m_llmodel, &ChatLLM::modelLoadingPercentageChanged, this, &Chat::handleModelLoadingPercentageChanged, Qt::QueuedConnection);
connect(m_llmodel, &ChatLLM::responseChanged, this, &Chat::handleResponseChanged, Qt::QueuedConnection);
connect(m_llmodel, &ChatLLM::responseFailed, this, &Chat::handleResponseFailed, Qt::QueuedConnection);
connect(m_llmodel, &ChatLLM::promptProcessing, this, &Chat::promptProcessing, Qt::QueuedConnection);
connect(m_llmodel, &ChatLLM::generatingQuestions, this, &Chat::generatingQuestions, Qt::QueuedConnection);
connect(m_llmodel, &ChatLLM::responseStopped, this, &Chat::responseStopped, Qt::QueuedConnection);
@ -183,11 +173,6 @@ QVariant Chat::popPrompt(int index)
void Chat::stopGenerating()
{
// In the future, if we have more than one tool, we'll have to keep track of which tools are
// possibly running, but for now we only have one.
Tool *toolInstance = ToolModel::globalInstance()->get(ToolCallConstants::CodeInterpreterFunction);
Q_ASSERT(toolInstance);
toolInstance->interrupt();
m_llmodel->stopGenerating();
}
@ -196,12 +181,23 @@ Chat::ResponseState Chat::responseState() const
return m_responseState;
}
void Chat::handleResponseChanged()
void Chat::handleResponseChanged(const QString &response)
{
if (m_responseState != Chat::ResponseGeneration) {
m_responseState = Chat::ResponseGeneration;
emit responseStateChanged();
}
const int index = m_chatModel->count() - 1;
m_chatModel->updateValue(index, response);
}
void Chat::handleResponseFailed(const QString &error)
{
const int index = m_chatModel->count() - 1;
m_chatModel->updateValue(index, error);
m_chatModel->setError();
responseStopped(0);
}
void Chat::handleModelLoadingPercentageChanged(float loadingPercentage)
@ -246,74 +242,14 @@ void Chat::responseStopped(qint64 promptResponseMs)
m_responseState = Chat::ResponseStopped;
emit responseInProgressChanged();
emit responseStateChanged();
if (m_generatedName.isEmpty())
emit generateNameRequested();
const QString possibleToolcall = m_chatModel->possibleToolcall();
Network::globalInstance()->trackChatEvent("response_stopped", {
Network::globalInstance()->trackChatEvent("response_complete", {
{"first", m_firstResponse},
{"message_count", chatModel()->count()},
{"$duration", promptResponseMs / 1000.},
});
ToolCallParser parser;
parser.update(possibleToolcall.toUtf8());
if (parser.state() == ToolEnums::ParseState::Complete && parser.startTag() != ToolCallConstants::ThinkStartTag)
processToolCall(parser.toolCall());
else
responseComplete();
}
void Chat::processToolCall(const QString &toolCall)
{
m_responseState = Chat::ToolCallGeneration;
emit responseStateChanged();
// Regex to remove the formatting around the code
static const QRegularExpression regex("^\\s*```javascript\\s*|\\s*```\\s*$");
QString code = toolCall;
code.remove(regex);
code = code.trimmed();
// Right now the code interpreter is the only available tool
Tool *toolInstance = ToolModel::globalInstance()->get(ToolCallConstants::CodeInterpreterFunction);
Q_ASSERT(toolInstance);
connect(toolInstance, &Tool::runComplete, this, &Chat::toolCallComplete, Qt::SingleShotConnection);
// The param is the code
const ToolParam param = { "code", ToolEnums::ParamType::String, code };
m_responseInProgress = true;
emit responseInProgressChanged();
toolInstance->run({param});
}
void Chat::toolCallComplete(const ToolCallInfo &info)
{
// Update the current response with meta information about toolcall and re-parent
m_chatModel->updateToolCall(info);
++m_consecutiveToolCalls;
m_responseInProgress = false;
emit responseInProgressChanged();
// We limit the number of consecutive tool calls, otherwise we could get into a potentially endless loop.
if (m_consecutiveToolCalls < 3 || info.error == ToolEnums::Error::NoError) {
resetResponseState();
emit promptRequested(m_collections); // triggers a new response
return;
}
responseComplete();
}
void Chat::responseComplete()
{
if (m_generatedName.isEmpty())
emit generateNameRequested();
m_responseState = Chat::ResponseStopped;
emit responseStateChanged();
m_consecutiveToolCalls = 0;
m_firstResponse = false;
}
@ -383,8 +319,11 @@ void Chat::trySwitchContextOfLoadedModel()
void Chat::generatedNameChanged(const QString &name)
{
m_generatedName = name;
m_name = name;
// Use at most the first seven words, and remove newlines and extra spaces
m_generatedName = name.simplified();
QStringList words = m_generatedName.split(' ', Qt::SkipEmptyParts);
int wordCount = qMin(7, words.size());
m_name = words.mid(0, wordCount).join(' ');
emit nameChanged();
m_needsSave = true;
}
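A worked example (hypothetical input) of the truncation above:

QString name = QStringLiteral("A\nShort   History of Local LLM Chat Clients on the Desktop");
QStringList words = name.simplified().split(' ', Qt::SkipEmptyParts);
QString title = words.mid(0, qMin(7, words.size())).join(' ');
// title == "A Short History of Local LLM Chat"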
View File
@ -3,26 +3,21 @@
#include "chatllm.h"
#include "chatmodel.h"
#include "database.h"
#include "localdocsmodel.h"
#include "database.h" // IWYU pragma: keep
#include "localdocsmodel.h" // IWYU pragma: keep
#include "modellist.h"
#include "tool.h"
#include <QDateTime>
#include <QList>
#include <QObject>
#include <QQmlEngine> // IWYU pragma: keep
#include <QQmlEngine>
#include <QString>
#include <QStringList> // IWYU pragma: keep
#include <QUrl>
#include <QVariant>
#include <QtTypes>
#include <QStringView>
#include <QtGlobal>
// IWYU pragma: no_forward_declare LocalDocsCollectionsModel
// IWYU pragma: no_forward_declare ToolCallInfo
class QDataStream;
class Chat : public QObject
{
Q_OBJECT
@ -60,8 +55,7 @@ public:
LocalDocsProcessing,
PromptProcessing,
GeneratingQuestions,
ResponseGeneration,
ToolCallGeneration
ResponseGeneration
};
Q_ENUM(ResponseState)
@ -167,14 +161,12 @@ Q_SIGNALS:
void generatedQuestionsChanged();
private Q_SLOTS:
void handleResponseChanged();
void handleResponseChanged(const QString &response);
void handleResponseFailed(const QString &error);
void handleModelLoadingPercentageChanged(float);
void promptProcessing();
void generatingQuestions();
void responseStopped(qint64 promptResponseMs);
void processToolCall(const QString &toolCall);
void toolCallComplete(const ToolCallInfo &info);
void responseComplete();
void generatedNameChanged(const QString &name);
void generatedQuestionFinished(const QString &question);
void handleModelLoadingError(const QString &error);
@ -213,7 +205,6 @@ private:
// - The chat was freshly created during this launch.
// - The chat was changed after loading it from disk.
bool m_needsSave = true;
int m_consecutiveToolCalls = 0;
};
#endif // CHAT_H
View File
@ -2,9 +2,6 @@
#include "utils.h"
#include <fmt/format.h>
#include <QAnyStringView>
#include <QCoreApplication>
#include <QDebug>
#include <QGuiApplication>
@ -12,17 +9,15 @@
#include <QJsonDocument>
#include <QJsonObject>
#include <QJsonValue>
#include <QLatin1String>
#include <QNetworkAccessManager>
#include <QNetworkRequest>
#include <QStringView>
#include <QThread>
#include <QUrl>
#include <QUtf8StringView> // IWYU pragma: keep
#include <QUtf8StringView>
#include <QVariant>
#include <QXmlStreamReader>
#include <Qt>
#include <QtAssert>
#include <QtGlobal>
#include <QtLogging>
#include <expected>
@ -34,7 +29,6 @@ using namespace Qt::Literals::StringLiterals;
//#define DEBUG
ChatAPI::ChatAPI()
: QObject(nullptr)
, m_modelName("gpt-3.5-turbo")
View File
@ -3,11 +3,10 @@
#include <gpt4all-backend/llmodel.h>
#include <QByteArray>
#include <QByteArray> // IWYU pragma: keep
#include <QNetworkReply>
#include <QObject>
#include <QString>
#include <QtPreprocessorSupport>
#include <cstddef>
#include <cstdint>
@ -18,11 +17,9 @@
#include <unordered_map>
#include <vector>
// IWYU pragma: no_forward_declare QByteArray
class ChatAPI;
class QNetworkAccessManager;
class ChatAPI;
class ChatAPIWorker : public QObject {
Q_OBJECT
public:
View File
@ -1,27 +1,26 @@
#include "chatlistmodel.h"
#include "database.h" // IWYU pragma: keep
#include "mysettings.h"
#include <QCoreApplication>
#include <QDataStream>
#include <QDir>
#include <QElapsedTimer>
#include <QEvent>
#include <QFile>
#include <QFileInfo>
#include <QGlobalStatic>
#include <QGuiApplication>
#include <QIODevice>
#include <QSettings>
#include <QStringList> // IWYU pragma: keep
#include <QString>
#include <QStringList>
#include <Qt>
#include <QtTypes>
#include <algorithm>
#include <memory>
static constexpr quint32 CHAT_FORMAT_MAGIC = 0xF5D553CC;
static constexpr qint32 CHAT_FORMAT_VERSION = 12;
static constexpr qint32 CHAT_FORMAT_VERSION = 11;
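CHAT_FORMAT_MAGIC and CHAT_FORMAT_VERSION guard the on-disk chat format; a loader validates them before reading anything else. A sketch of that check, assuming the header is written magic-then-version as the constants suggest:

QDataStream in(&file);
quint32 magic;
qint32 version;
in >> magic >> version;
if (magic != CHAT_FORMAT_MAGIC || version > CHAT_FORMAT_VERSION) {
    qWarning() << "unsupported chat file:" << file.fileName();
    return false;
}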
class MyChatListModel: public ChatListModel { };
Q_GLOBAL_STATIC(MyChatListModel, chatListModelInstance)
@ -53,11 +52,9 @@ void ChatListModel::loadChats()
connect(thread, &ChatsRestoreThread::finished, thread, &QObject::deleteLater);
thread->start();
m_chatSaver = std::make_unique<ChatSaver>();
connect(this, &ChatListModel::requestSaveChats, m_chatSaver.get(), &ChatSaver::saveChats, Qt::QueuedConnection);
connect(m_chatSaver.get(), &ChatSaver::saveChatsFinished, this, &ChatListModel::saveChatsFinished, Qt::QueuedConnection);
// save chats on application quit
connect(QCoreApplication::instance(), &QCoreApplication::aboutToQuit, this, &ChatListModel::saveChatsSync);
ChatSaver *saver = new ChatSaver;
connect(this, &ChatListModel::requestSaveChats, saver, &ChatSaver::saveChats, Qt::QueuedConnection);
connect(saver, &ChatSaver::saveChatsFinished, this, &ChatListModel::saveChatsFinished, Qt::QueuedConnection);
connect(MySettings::globalInstance(), &MySettings::serverChatChanged, this, &ChatListModel::handleServerEnabledChanged);
}
@ -81,24 +78,16 @@ ChatSaver::ChatSaver()
m_thread.start();
}
ChatSaver::~ChatSaver()
{
m_thread.quit();
m_thread.wait();
}
QVector<Chat *> ChatListModel::getChatsToSave() const
{
QVector<Chat *> toSave;
for (auto *chat : m_chats)
if (chat != m_serverChat && !chat->isNewChat())
toSave << chat;
return toSave;
}
void ChatListModel::saveChats()
{
auto toSave = getChatsToSave();
QVector<Chat*> toSave;
for (Chat *chat : m_chats) {
if (chat == m_serverChat)
continue;
if (chat->isNewChat())
continue;
toSave.append(chat);
}
if (toSave.isEmpty()) {
emit saveChatsFinished();
return;
@ -107,24 +96,8 @@ void ChatListModel::saveChats()
emit requestSaveChats(toSave);
}
void ChatListModel::saveChatsForQuit()
{
saveChats();
m_startedFinalSave = true;
}
void ChatListModel::saveChatsSync()
{
auto toSave = getChatsToSave();
if (!m_startedFinalSave && !toSave.isEmpty())
m_chatSaver->saveChats(toSave);
}
void ChatSaver::saveChats(const QVector<Chat *> &chats)
{
// we can be called from the main thread instead of a worker thread at quit time, so take a lock
QMutexLocker locker(&m_mutex);
QElapsedTimer timer;
timer.start();
const QString savePath = MySettings::globalInstance()->modelPath();
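saveChats() normally runs on the ChatSaver worker thread via the queued connection set up in loadChats(), but at quit time saveChatsSync() calls it directly from the main thread, which is why it takes the mutex first. The pattern, reduced to a sketch with hypothetical names:

// A slot that may run queued on its worker thread or be called directly
// from the main thread at quit; one mutex serializes both paths.
void Saver::save(const QVector<Chat *> &chats)
{
    QMutexLocker locker(&m_mutex); // safe from either thread
    // ... serialize each chat to disk ...
}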
View File
@ -7,23 +7,16 @@
#include <QAbstractListModel>
#include <QByteArray>
#include <QDate>
#include <QDebug>
#include <QHash>
#include <QList>
#include <QMutex>
#include <QObject>
#include <QString>
#include <QThread>
#include <QVariant>
#include <QVector> // IWYU pragma: keep
#include <QVector>
#include <Qt>
#include <QtAssert>
#include <QtGlobal>
#include <QtLogging>
#include <QtPreprocessorSupport>
#include <memory>
class ChatsRestoreThread : public QThread
{
@ -40,7 +33,6 @@ class ChatSaver : public QObject
Q_OBJECT
public:
explicit ChatSaver();
~ChatSaver() override;
Q_SIGNALS:
void saveChatsFinished();
@ -50,7 +42,6 @@ public Q_SLOTS:
private:
QThread m_thread;
QMutex m_mutex;
};
class ChatListModel : public QAbstractListModel
@ -237,7 +228,6 @@ public:
void removeChatFile(Chat *chat) const;
Q_INVOKABLE void saveChats();
Q_INVOKABLE void saveChatsForQuit();
void restoreChat(Chat *chat);
void chatsRestoredFinished();
@ -254,9 +244,6 @@ protected:
bool eventFilter(QObject *obj, QEvent *ev) override;
private Q_SLOTS:
// Used with QCoreApplication::aboutToQuit. Does not require an event loop.
void saveChatsSync();
void newChatCountChanged()
{
Q_ASSERT(m_newChat && m_newChat->chatModel()->count());
@ -287,16 +274,11 @@ private Q_SLOTS:
}
}
private:
QVector<Chat *> getChatsToSave() const;
private:
Chat* m_newChat = nullptr;
Chat* m_serverChat = nullptr;
Chat* m_currentChat = nullptr;
QList<Chat*> m_chats;
std::unique_ptr<ChatSaver> m_chatSaver;
bool m_startedFinalSave = false;
private:
explicit ChatListModel();
View File
@ -7,48 +7,40 @@
#include "localdocs.h"
#include "mysettings.h"
#include "network.h"
#include "tool.h"
#include "toolmodel.h"
#include "toolcallparser.h"
#include <fmt/format.h>
#include <minja/minja.hpp>
#include <nlohmann/json.hpp>
#include <QChar>
#include <jinja2cpp/error_info.h>
#include <jinja2cpp/template.h>
#include <jinja2cpp/template_env.h>
#include <jinja2cpp/user_callable.h>
#include <jinja2cpp/value.h>
#include <QDataStream>
#include <QDebug>
#include <QFile>
#include <QGlobalStatic>
#include <QIODevice> // IWYU pragma: keep
#include <QIODevice>
#include <QJsonDocument>
#include <QJsonObject>
#include <QJsonValue>
#include <QMap>
#include <QMutex> // IWYU pragma: keep
#include <QMutex>
#include <QMutexLocker> // IWYU pragma: keep
#include <QRegularExpression> // IWYU pragma: keep
#include <QRegularExpressionMatch> // IWYU pragma: keep
#include <QRegularExpression>
#include <QRegularExpressionMatch>
#include <QSet>
#include <QStringView>
#include <QTextStream>
#include <QUrl>
#include <QVariant>
#include <QWaitCondition>
#include <Qt>
#include <QtAssert>
#include <QtLogging>
#include <QtTypes> // IWYU pragma: keep
#include <algorithm>
#include <chrono>
#include <cmath>
#include <concepts>
#include <cstddef>
#include <cstdint>
#include <ctime>
#include <exception>
#include <functional>
#include <iomanip>
#include <limits>
#include <optional>
@ -63,106 +55,45 @@
#include <vector>
using namespace Qt::Literals::StringLiterals;
using namespace ToolEnums;
namespace ranges = std::ranges;
using json = nlohmann::ordered_json;
//#define DEBUG
//#define DEBUG_MODEL_LOADING
// NOTE: not threadsafe
static const std::shared_ptr<minja::Context> &jinjaEnv()
static jinja2::TemplateEnv *jinjaEnv()
{
static std::shared_ptr<minja::Context> environment;
static std::optional<jinja2::TemplateEnv> environment;
if (!environment) {
environment = minja::Context::builtins();
environment->set("strftime_now", minja::simple_function(
"strftime_now", { "format" },
[](const std::shared_ptr<minja::Context> &, minja::Value &args) -> minja::Value {
auto format = args.at("format").get<std::string>();
auto &env = environment.emplace();
auto &settings = env.GetSettings();
settings.trimBlocks = true;
settings.lstripBlocks = true;
env.AddGlobal("raise_exception", jinja2::UserCallable(
/*callable*/ [](auto &params) -> jinja2::Value {
auto messageArg = params.args.find("message");
if (messageArg == params.args.end() || !messageArg->second.isString())
throw std::runtime_error("'message' argument to raise_exception() must be a string");
throw std::runtime_error(fmt::format("Jinja template error: {}", messageArg->second.asString()));
},
/*argsInfo*/ { jinja2::ArgInfo("message", /*isMandatory*/ true) }
));
env.AddGlobal("strftime_now", jinja2::UserCallable(
/*callable*/ [](auto &params) -> jinja2::Value {
using Clock = std::chrono::system_clock;
auto formatArg = params.args.find("format");
if (formatArg == params.args.end() || !formatArg->second.isString())
throw std::runtime_error("'format' argument to strftime_now() must be a string");
time_t nowUnix = Clock::to_time_t(Clock::now());
auto localDate = *std::localtime(&nowUnix);
std::ostringstream ss;
ss << std::put_time(&localDate, format.c_str());
ss << std::put_time(&localDate, formatArg->second.asString().c_str());
return ss.str();
}
));
environment->set("regex_replace", minja::simple_function(
"regex_replace", { "str", "pattern", "repl" },
[](const std::shared_ptr<minja::Context> &, minja::Value &args) -> minja::Value {
auto str = args.at("str" ).get<std::string>();
auto pattern = args.at("pattern").get<std::string>();
auto repl = args.at("repl" ).get<std::string>();
return std::regex_replace(str, std::regex(pattern), repl);
}
},
/*argsInfo*/ { jinja2::ArgInfo("format", /*isMandatory*/ true) }
));
}
return environment;
}
class BaseResponseHandler {
public:
virtual void onSplitIntoTwo (const QString &startTag, const QString &firstBuffer, const QString &secondBuffer) = 0;
virtual void onSplitIntoThree (const QString &secondBuffer, const QString &thirdBuffer) = 0;
// "old-style" responses, with all of the implementation details left in
virtual void onOldResponseChunk(const QByteArray &chunk) = 0;
// notify of a "new-style" response that has been cleaned of tool calling
virtual bool onBufferResponse (const QString &response, int bufferIdx) = 0;
// notify of a "new-style" response, no tool calling applicable
virtual bool onRegularResponse () = 0;
virtual bool getStopGenerating () const = 0;
};
static auto promptModelWithTools(
LLModel *model, const LLModel::PromptCallback &promptCallback, BaseResponseHandler &respHandler,
const LLModel::PromptContext &ctx, const QByteArray &prompt, const QStringList &toolNames
) -> std::pair<QStringList, bool>
{
ToolCallParser toolCallParser(toolNames);
auto handleResponse = [&toolCallParser, &respHandler](LLModel::Token token, std::string_view piece) -> bool {
Q_UNUSED(token)
toolCallParser.update(piece.data());
// Split the response into two if needed
if (toolCallParser.numberOfBuffers() < 2 && toolCallParser.splitIfPossible()) {
const auto parseBuffers = toolCallParser.buffers();
Q_ASSERT(parseBuffers.size() == 2);
respHandler.onSplitIntoTwo(toolCallParser.startTag(), parseBuffers.at(0), parseBuffers.at(1));
}
// Split the response into three if needed
if (toolCallParser.numberOfBuffers() < 3 && toolCallParser.startTag() == ToolCallConstants::ThinkStartTag
&& toolCallParser.splitIfPossible()) {
const auto parseBuffers = toolCallParser.buffers();
Q_ASSERT(parseBuffers.size() == 3);
respHandler.onSplitIntoThree(parseBuffers.at(1), parseBuffers.at(2));
}
respHandler.onOldResponseChunk(QByteArray::fromRawData(piece.data(), piece.size()));
bool ok;
const auto parseBuffers = toolCallParser.buffers();
if (parseBuffers.size() > 1) {
ok = respHandler.onBufferResponse(parseBuffers.last(), parseBuffers.size() - 1);
} else {
ok = respHandler.onRegularResponse();
}
if (!ok)
return false;
const bool shouldExecuteToolCall = toolCallParser.state() == ToolEnums::ParseState::Complete
&& toolCallParser.startTag() != ToolCallConstants::ThinkStartTag;
return !shouldExecuteToolCall && !respHandler.getStopGenerating();
};
model->prompt(std::string_view(prompt), promptCallback, handleResponse, ctx);
const bool shouldExecuteToolCall = toolCallParser.state() == ToolEnums::ParseState::Complete
&& toolCallParser.startTag() != ToolCallConstants::ThinkStartTag;
return { toolCallParser.buffers(), shouldExecuteToolCall };
return &*environment;
}
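A minimal sketch of rendering with the environment above; the template string is illustrative, not taken from this diff:

jinja2::Template tmpl(jinjaEnv());
if (auto loaded = tmpl.Load("Today is {{ strftime_now('%Y-%m-%d') }}."); !loaded) {
    qWarning() << "template error:" << QString::fromStdString(loaded.error().ToString());
} else if (auto rendered = tmpl.RenderAsString({})) {
    qDebug().noquote() << QString::fromStdString(*rendered); // e.g. "Today is 2025-01-01."
}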
class LLModelStore {
@ -712,16 +643,40 @@ bool isAllSpace(R &&r)
void ChatLLM::regenerateResponse(int index)
{
Q_ASSERT(m_chatModel);
if (m_chatModel->regenerateResponse(index)) {
emit responseChanged();
prompt(m_chat->collectionList());
int promptIdx;
{
auto items = m_chatModel->chatItems(); // holds lock
if (index < 1 || index >= items.size() || items[index].type() != ChatItem::Type::Response)
return;
promptIdx = m_chatModel->getPeerUnlocked(index).value_or(-1);
}
emit responseChanged({});
m_chatModel->truncate(index + 1);
m_chatModel->updateCurrentResponse(index, true );
m_chatModel->updateNewResponse (index, {} );
m_chatModel->updateStopped (index, false);
m_chatModel->updateThumbsUpState (index, false);
m_chatModel->updateThumbsDownState(index, false);
m_chatModel->setError(false);
if (promptIdx >= 0)
m_chatModel->updateSources(promptIdx, {});
prompt(m_chat->collectionList());
}
std::optional<QString> ChatLLM::popPrompt(int index)
{
Q_ASSERT(m_chatModel);
return m_chatModel->popPrompt(index);
QString content;
{
auto items = m_chatModel->chatItems(); // holds lock
if (index < 0 || index >= items.size() || items[index].type() != ChatItem::Type::Prompt)
return std::nullopt;
content = items[index].value;
}
m_chatModel->truncate(index);
return content;
}
ModelInfo ChatLLM::modelInfo() const
@ -782,29 +737,28 @@ void ChatLLM::prompt(const QStringList &enabledCollections)
promptInternalChat(enabledCollections, promptContextFromSettings(m_modelInfo));
} catch (const std::exception &e) {
// FIXME(jared): this is neither translated nor serialized
m_chatModel->setResponseValue(u"Error: %1"_s.arg(QString::fromUtf8(e.what())));
m_chatModel->setError();
emit responseFailed(u"Error: %1"_s.arg(QString::fromUtf8(e.what())));
emit responseStopped(0);
}
}
std::vector<MessageItem> ChatLLM::forkConversation(const QString &prompt) const
// FIXME(jared): We can avoid this potentially expensive copy if we use ChatItem pointers, but this is only safe if we
// hold the lock while generating. We can't do that now because Chat is actually in charge of updating the response, not
// ChatLLM.
std::vector<ChatItem> ChatLLM::forkConversation(const QString &prompt) const
{
Q_ASSERT(m_chatModel);
if (m_chatModel->hasError())
throw std::logic_error("cannot continue conversation with an error");
std::vector<MessageItem> conversation;
std::vector<ChatItem> conversation;
{
auto items = m_chatModel->messageItems();
// It is possible the main thread could have erased the conversation while the llm thread,
// is busy forking the conversatoin but it must have set stop generating first
Q_ASSERT(items.size() >= 2 || m_stopGenerating); // should be prompt/response pairs
auto items = m_chatModel->chatItems(); // holds lock
Q_ASSERT(items.size() >= 2); // should be prompt/response pairs
conversation.reserve(items.size() + 1);
conversation.assign(items.begin(), items.end());
}
qsizetype nextIndex = conversation.empty() ? 0 : conversation.back().index().value() + 1;
conversation.emplace_back(nextIndex, MessageItem::Type::Prompt, prompt.toUtf8());
conversation.emplace_back(ChatItem::prompt_tag, prompt);
return conversation;
}
@ -823,22 +777,23 @@ static uint parseJinjaTemplateVersion(QStringView tmpl)
return 0;
}
static std::shared_ptr<minja::TemplateNode> loadJinjaTemplate(const std::string &source)
static auto loadJinjaTemplate(
std::optional<jinja2::Template> &tmpl /*out*/, const std::string &source
) -> jinja2::Result<void>
{
return minja::Parser::parse(source, { .trim_blocks = true, .lstrip_blocks = true, .keep_trailing_newline = false });
tmpl.emplace(jinjaEnv());
return tmpl->Load(source);
}
std::optional<std::string> ChatLLM::checkJinjaTemplateError(const std::string &source)
{
try {
loadJinjaTemplate(source);
} catch (const std::runtime_error &e) {
return e.what();
}
std::optional<jinja2::Template> tmpl;
if (auto res = loadJinjaTemplate(tmpl, source); !res)
return res.error().ToString();
return std::nullopt;
}
std::string ChatLLM::applyJinjaTemplate(std::span<const MessageItem> items) const
std::string ChatLLM::applyJinjaTemplate(std::span<const ChatItem> items) const
{
Q_ASSERT(items.size() >= 1);
@ -865,86 +820,80 @@ std::string ChatLLM::applyJinjaTemplate(std::span<const MessageItem> items) cons
uint version = parseJinjaTemplateVersion(chatTemplate);
auto makeMap = [version](const MessageItem &item) {
return JinjaMessage(version, item).AsJson();
auto makeMap = [version](const ChatItem &item) {
return jinja2::GenericMap([msg = std::make_shared<JinjaMessage>(version, item)] { return msg.get(); });
};
std::unique_ptr<MessageItem> systemItem;
std::unique_ptr<ChatItem> systemItem;
bool useSystem = !isAllSpace(systemMessage);
json::array_t messages;
jinja2::ValuesList messages;
messages.reserve(useSystem + items.size());
if (useSystem) {
systemItem = std::make_unique<MessageItem>(MessageItem::system_tag, systemMessage.toUtf8());
systemItem = std::make_unique<ChatItem>(ChatItem::system_tag, systemMessage);
messages.emplace_back(makeMap(*systemItem));
}
for (auto &item : items)
messages.emplace_back(makeMap(item));
json::array_t toolList;
const int toolCount = ToolModel::globalInstance()->count();
for (int i = 0; i < toolCount; ++i) {
Tool *t = ToolModel::globalInstance()->get(i);
toolList.push_back(t->jinjaValue());
}
json::object_t params {
jinja2::ValuesMap params {
{ "messages", std::move(messages) },
{ "add_generation_prompt", true },
{ "toolList", toolList },
};
for (auto &[name, token] : model->specialTokens())
params.emplace(std::move(name), std::move(token));
try {
auto tmpl = loadJinjaTemplate(chatTemplate.toStdString());
auto context = minja::Context::make(minja::Value(std::move(params)), jinjaEnv());
return tmpl->render(context);
} catch (const std::runtime_error &e) {
throw std::runtime_error(fmt::format("Failed to parse chat template: {}", e.what()));
}
Q_UNREACHABLE();
std::optional<jinja2::Template> tmpl;
auto maybeRendered = loadJinjaTemplate(tmpl, chatTemplate.toStdString())
.and_then([&] { return tmpl->RenderAsString(params); });
if (!maybeRendered)
throw std::runtime_error(fmt::format("Failed to parse chat template: {}", maybeRendered.error().ToString()));
return *maybeRendered;
}
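For context, a sketch of the values a chat template ultimately receives. The "role"/"content" keys follow the usual chat-template convention and are an assumption here, since JinjaMessage's fields are not shown in this excerpt:

jinja2::ValuesList messages {
    jinja2::ValuesMap{{"role", "system"}, {"content", "You are a helpful assistant."}},
    jinja2::ValuesMap{{"role", "user"},   {"content", "Hello!"}},
};
jinja2::ValuesMap params {
    {"messages", std::move(messages)},
    {"add_generation_prompt", true},
};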
auto ChatLLM::promptInternalChat(const QStringList &enabledCollections, const LLModel::PromptContext &ctx,
qsizetype startOffset) -> ChatPromptResult
std::optional<std::pair<int, int>> subrange) -> ChatPromptResult
{
Q_ASSERT(isModelLoaded());
Q_ASSERT(m_chatModel);
// Return a vector of relevant messages for this chat.
// "startOffset" is used to select only local server messages from the current chat session.
// Return a (ChatModelAccessor, std::span) pair where the span represents the relevant messages for this chat.
// "subrange" is used to select only local server messages from the current chat session.
auto getChat = [&]() {
auto items = m_chatModel->messageItems();
if (startOffset > 0)
items.erase(items.begin(), items.begin() + startOffset);
Q_ASSERT(items.size() >= 2);
return items;
auto items = m_chatModel->chatItems(); // holds lock
std::span view(items);
if (subrange)
view = view.subspan(subrange->first, subrange->second);
Q_ASSERT(view.size() >= 2);
return std::pair(std::move(items), view);
};
QList<ResultInfo> databaseResults;
if (!enabledCollections.isEmpty()) {
std::optional<std::pair<int, QString>> query;
{
// Find the prompt that represents the query. Server chats are flexible and may not have one.
auto items = getChat();
if (auto peer = m_chatModel->getPeer(items, items.end() - 1)) // peer of response
query = { (*peer)->index().value(), (*peer)->content() };
}
if (query) {
auto &[promptIndex, queryStr] = *query;
const int retrievalSize = MySettings::globalInstance()->localDocsRetrievalSize();
emit requestRetrieveFromDB(enabledCollections, queryStr, retrievalSize, &databaseResults); // blocks
m_chatModel->updateSources(promptIndex, databaseResults);
emit databaseResultsChanged(databaseResults);
}
// copy messages for safety (since we can't hold the lock the whole time)
std::optional<std::pair<int, QString>> query;
{
// Find the prompt that represents the query. Server chats are flexible and may not have one.
auto [_, view] = getChat(); // holds lock
if (auto peer = m_chatModel->getPeer(view, view.end() - 1)) // peer of response
query = { *peer - view.begin(), (*peer)->value };
}
auto messageItems = getChat();
messageItems.pop_back(); // exclude new response
QList<ResultInfo> databaseResults;
if (query && !enabledCollections.isEmpty()) {
auto &[promptIndex, queryStr] = *query;
const int retrievalSize = MySettings::globalInstance()->localDocsRetrievalSize();
emit requestRetrieveFromDB(enabledCollections, queryStr, retrievalSize, &databaseResults); // blocks
m_chatModel->updateSources(promptIndex, databaseResults);
emit databaseResultsChanged(databaseResults);
}
auto result = promptInternal(messageItems, ctx, !databaseResults.isEmpty());
// copy messages for safety (since we can't hold the lock the whole time)
std::vector<ChatItem> chatItems;
{
auto [_, view] = getChat(); // holds lock
chatItems.assign(view.begin(), view.end() - 1); // exclude new response
}
auto result = promptInternal(chatItems, ctx, !databaseResults.isEmpty());
return {
/*PromptResult*/ {
.response = std::move(result.response),
@ -955,65 +904,8 @@ auto ChatLLM::promptInternalChat(const QStringList &enabledCollections, const LL
};
}
class ChatViewResponseHandler : public BaseResponseHandler {
public:
ChatViewResponseHandler(ChatLLM *cllm, QElapsedTimer *totalTime, ChatLLM::PromptResult *result)
: m_cllm(cllm), m_totalTime(totalTime), m_result(result) {}
void onSplitIntoTwo(const QString &startTag, const QString &firstBuffer, const QString &secondBuffer) override
{
if (startTag == ToolCallConstants::ThinkStartTag)
m_cllm->m_chatModel->splitThinking({ firstBuffer, secondBuffer });
else
m_cllm->m_chatModel->splitToolCall({ firstBuffer, secondBuffer });
}
void onSplitIntoThree(const QString &secondBuffer, const QString &thirdBuffer) override
{
m_cllm->m_chatModel->endThinking({ secondBuffer, thirdBuffer }, m_totalTime->elapsed());
}
void onOldResponseChunk(const QByteArray &chunk) override
{
m_result->responseTokens++;
m_cllm->m_timer->inc();
m_result->response.append(chunk);
}
bool onBufferResponse(const QString &response, int bufferIdx) override
{
Q_UNUSED(bufferIdx)
try {
QString r = response;
m_cllm->m_chatModel->setResponseValue(removeLeadingWhitespace(r));
} catch (const std::exception &e) {
// We have a try/catch here because the main thread might have removed the response from
// the chatmodel by erasing the conversation during the response... the main thread sets
// m_stopGenerating before doing so, but it doesn't wait after that to reset the chatmodel
Q_ASSERT(m_cllm->m_stopGenerating);
return false;
}
emit m_cllm->responseChanged();
return true;
}
bool onRegularResponse() override
{
auto respStr = QString::fromUtf8(m_result->response);
return onBufferResponse(respStr, 0);
}
bool getStopGenerating() const override
{ return m_cllm->m_stopGenerating; }
private:
ChatLLM *m_cllm;
QElapsedTimer *m_totalTime;
ChatLLM::PromptResult *m_result;
};
auto ChatLLM::promptInternal(
const std::variant<std::span<const MessageItem>, std::string_view> &prompt,
const std::variant<std::span<const ChatItem>, std::string_view> &prompt,
const LLModel::PromptContext &ctx,
bool usedLocalDocs
) -> PromptResult
@ -1023,14 +915,14 @@ auto ChatLLM::promptInternal(
auto *mySettings = MySettings::globalInstance();
// unpack prompt argument
const std::span<const MessageItem> *messageItems = nullptr;
const std::span<const ChatItem> *chatItems = nullptr;
std::string jinjaBuffer;
std::string_view conversation;
if (auto *nonChat = std::get_if<std::string_view>(&prompt)) {
conversation = *nonChat; // complete the string without a template
} else {
messageItems = &std::get<std::span<const MessageItem>>(prompt);
jinjaBuffer = applyJinjaTemplate(*messageItems);
chatItems = &std::get<std::span<const ChatItem>>(prompt);
jinjaBuffer = applyJinjaTemplate(*chatItems);
conversation = jinjaBuffer;
}
@ -1038,8 +930,8 @@ auto ChatLLM::promptInternal(
if (!dynamic_cast<const ChatAPI *>(m_llModelInfo.model.get())) {
auto nCtx = m_llModelInfo.model->contextLength();
std::string jinjaBuffer2;
auto lastMessageRendered = (messageItems && messageItems->size() > 1)
? std::string_view(jinjaBuffer2 = applyJinjaTemplate({ &messageItems->back(), 1 }))
auto lastMessageRendered = (chatItems && chatItems->size() > 1)
? std::string_view(jinjaBuffer2 = applyJinjaTemplate({ &chatItems->back(), 1 }))
: conversation;
int32_t lastMessageLength = m_llModelInfo.model->countPromptTokens(lastMessageRendered);
if (auto limit = nCtx - 4; lastMessageLength > limit) {
@ -1059,22 +951,25 @@ auto ChatLLM::promptInternal(
return !m_stopGenerating;
};
auto handleResponse = [this, &result](LLModel::Token token, std::string_view piece) -> bool {
Q_UNUSED(token)
result.responseTokens++;
m_timer->inc();
result.response.append(piece.data(), piece.size());
auto respStr = QString::fromUtf8(result.response);
emit responseChanged(removeLeadingWhitespace(respStr));
return !m_stopGenerating;
};
QElapsedTimer totalTime;
totalTime.start();
ChatViewResponseHandler respHandler(this, &totalTime, &result);
m_timer->start();
QStringList finalBuffers;
bool shouldExecuteTool;
try {
emit promptProcessing();
m_llModelInfo.model->setThreadCount(mySettings->threadCount());
m_stopGenerating = false;
std::tie(finalBuffers, shouldExecuteTool) = promptModelWithTools(
m_llModelInfo.model.get(), handlePrompt, respHandler, ctx,
QByteArray::fromRawData(conversation.data(), conversation.size()),
ToolCallConstants::AllTagNames
);
m_llModelInfo.model->prompt(conversation, handlePrompt, handleResponse, ctx);
} catch (...) {
m_timer->stop();
throw;
@ -1085,16 +980,11 @@ auto ChatLLM::promptInternal(
// trim trailing whitespace
auto respStr = QString::fromUtf8(result.response);
if (!respStr.isEmpty() && (std::as_const(respStr).back().isSpace() || finalBuffers.size() > 1)) {
if (finalBuffers.size() > 1)
m_chatModel->setResponseValue(finalBuffers.last().trimmed());
else
m_chatModel->setResponseValue(respStr.trimmed());
emit responseChanged();
}
if (!respStr.isEmpty() && std::as_const(respStr).back().isSpace())
emit responseChanged(respStr.trimmed());
bool doQuestions = false;
if (!m_isServer && messageItems && !shouldExecuteTool) {
if (!m_isServer && chatItems) {
switch (mySettings->suggestionMode()) {
case SuggestionMode::On: doQuestions = true; break;
case SuggestionMode::LocalDocsOnly: doQuestions = usedLocalDocs; break;
@ -1172,66 +1062,6 @@ void ChatLLM::reloadModel()
loadModel(m);
}
// This class discards the text within thinking tags, for use with chat names and follow-up questions.
class SimpleResponseHandler : public BaseResponseHandler {
public:
SimpleResponseHandler(ChatLLM *cllm)
: m_cllm(cllm) {}
void onSplitIntoTwo(const QString &startTag, const QString &firstBuffer, const QString &secondBuffer) override
{ /* no-op */ }
void onSplitIntoThree(const QString &secondBuffer, const QString &thirdBuffer) override
{ /* no-op */ }
void onOldResponseChunk(const QByteArray &chunk) override
{ m_response.append(chunk); }
bool onBufferResponse(const QString &response, int bufferIdx) override
{
if (bufferIdx == 1)
return true; // ignore "think" content
return onSimpleResponse(response);
}
bool onRegularResponse() override
{ return onBufferResponse(QString::fromUtf8(m_response), 0); }
bool getStopGenerating() const override
{ return m_cllm->m_stopGenerating; }
protected:
virtual bool onSimpleResponse(const QString &response) = 0;
protected:
ChatLLM *m_cllm;
QByteArray m_response;
};
class NameResponseHandler : public SimpleResponseHandler {
private:
// max length of chat names, in words
static constexpr qsizetype MAX_WORDS = 3;
public:
using SimpleResponseHandler::SimpleResponseHandler;
protected:
bool onSimpleResponse(const QString &response) override
{
QTextStream stream(const_cast<QString *>(&response), QIODeviceBase::ReadOnly);
QStringList words;
while (!stream.atEnd() && words.size() < MAX_WORDS) {
QString word;
stream >> word;
words << word;
}
emit m_cllm->generatedNameChanged(words.join(u' '));
return words.size() < MAX_WORDS || stream.atEnd();
}
};
void ChatLLM::generateName()
{
Q_ASSERT(isModelLoaded());
@ -1248,15 +1078,23 @@ void ChatLLM::generateName()
return;
}
NameResponseHandler respHandler(this);
QByteArray response; // raw UTF-8
auto handleResponse = [this, &response](LLModel::Token token, std::string_view piece) -> bool {
Q_UNUSED(token)
response.append(piece.data(), piece.size());
QStringList words = QString::fromUtf8(response).simplified().split(u' ', Qt::SkipEmptyParts);
emit generatedNameChanged(words.join(u' '));
return words.size() <= 3;
};
try {
promptModelWithTools(
m_llModelInfo.model.get(),
/*promptCallback*/ [this](auto &&...) { return !m_stopGenerating; },
respHandler, promptContextFromSettings(m_modelInfo),
applyJinjaTemplate(forkConversation(chatNamePrompt)).c_str(),
{ ToolCallConstants::ThinkTagName }
m_llModelInfo.model->prompt(
applyJinjaTemplate(forkConversation(chatNamePrompt)),
[this](auto &&...) { return !m_stopGenerating; },
handleResponse,
promptContextFromSettings(m_modelInfo)
);
} catch (const std::exception &e) {
qWarning() << "ChatLLM failed to generate name:" << e.what();
@ -1268,43 +1106,13 @@ void ChatLLM::handleChatIdChanged(const QString &id)
m_llmThread.setObjectName(id);
}
class QuestionResponseHandler : public SimpleResponseHandler {
public:
using SimpleResponseHandler::SimpleResponseHandler;
protected:
bool onSimpleResponse(const QString &response) override
{
auto responseUtf8Bytes = response.toUtf8().slice(m_offset);
auto responseUtf8 = std::string(responseUtf8Bytes.begin(), responseUtf8Bytes.end());
// extract all questions from response
ptrdiff_t lastMatchEnd = -1;
auto it = std::sregex_iterator(responseUtf8.begin(), responseUtf8.end(), s_reQuestion);
auto end = std::sregex_iterator();
for (; it != end; ++it) {
auto pos = it->position();
auto len = it->length();
lastMatchEnd = pos + len;
emit m_cllm->generatedQuestionFinished(QString::fromUtf8(&responseUtf8[pos], len));
}
// remove processed input from buffer
if (lastMatchEnd != -1)
m_offset += lastMatchEnd;
return true;
}
private:
void ChatLLM::generateQuestions(qint64 elapsed)
{
// FIXME: This only works with responses from the model in English, which is not ideal for a
// multilingual model.
// match whole question sentences
static inline const std::regex s_reQuestion { R"(\b(?:What|Where|How|Why|When|Who|Which|Whose|Whom)\b[^?]*\?)" };
static const std::regex reQuestion(R"(\b(?:What|Where|How|Why|When|Who|Which|Whose|Whom)\b[^?]*\?)");
qsizetype m_offset = 0;
};
void ChatLLM::generateQuestions(qint64 elapsed)
{
Q_ASSERT(isModelLoaded());
if (!isModelLoaded()) {
emit responseStopped(elapsed);
@ -1322,17 +1130,39 @@ void ChatLLM::generateQuestions(qint64 elapsed)
emit generatingQuestions();
QuestionResponseHandler respHandler(this);
std::string response; // raw UTF-8
auto handleResponse = [this, &response](LLModel::Token token, std::string_view piece) -> bool {
Q_UNUSED(token)
// add token to buffer
response.append(piece);
// extract all questions from response
ptrdiff_t lastMatchEnd = -1;
auto it = std::sregex_iterator(response.begin(), response.end(), reQuestion);
auto end = std::sregex_iterator();
for (; it != end; ++it) {
auto pos = it->position();
auto len = it->length();
lastMatchEnd = pos + len;
emit generatedQuestionFinished(QString::fromUtf8(&response[pos], len));
}
// remove processed input from buffer
if (lastMatchEnd != -1)
response.erase(0, lastMatchEnd);
return true;
};
QElapsedTimer totalTime;
totalTime.start();
try {
promptModelWithTools(
m_llModelInfo.model.get(),
/*promptCallback*/ [this](auto &&...) { return !m_stopGenerating; },
respHandler, promptContextFromSettings(m_modelInfo),
applyJinjaTemplate(forkConversation(suggestedFollowUpPrompt)).c_str(),
{ ToolCallConstants::ThinkTagName }
m_llModelInfo.model->prompt(
applyJinjaTemplate(forkConversation(suggestedFollowUpPrompt)),
[this](auto &&...) { return !m_stopGenerating; },
handleResponse,
promptContextFromSettings(m_modelInfo)
);
} catch (const std::exception &e) {
qWarning() << "ChatLLM failed to generate follow-up questions:" << e.what();
View File
@ -2,7 +2,7 @@
#define CHATLLM_H
#include "chatmodel.h"
#include "database.h"
#include "database.h" // IWYU pragma: keep
#include "modellist.h"
#include <gpt4all-backend/llmodel.h>
@ -10,30 +10,28 @@
#include <QByteArray>
#include <QElapsedTimer>
#include <QFileInfo>
#include <QList>
#include <QList> // IWYU pragma: keep
#include <QObject>
#include <QPointer>
#include <QString>
#include <QStringList> // IWYU pragma: keep
#include <QStringView>
#include <QThread>
#include <QVariantMap> // IWYU pragma: keep
#include <QtNumeric>
#include <QtGlobal>
#include <atomic>
#include <cstdint>
#include <memory>
#include <optional>
#include <span>
#include <string>
#include <string_view>
#include <variant>
#include <vector>
using namespace Qt::Literals::StringLiterals;
class ChatLLM;
class QDataStream;
// NOTE: values serialized to disk, do not change or reuse
enum class LLModelTypeV0 { // chat versions 2-5
MPT = 0,
@ -90,6 +88,9 @@ inline LLModelTypeV1 parseLLModelTypeV0(int v0)
}
}
class ChatLLM;
class ChatModel;
struct LLModelInfo {
std::unique_ptr<LLModel> model;
QFileInfo fileInfo;
@ -219,8 +220,8 @@ Q_SIGNALS:
void modelLoadingPercentageChanged(float);
void modelLoadingError(const QString &error);
void modelLoadingWarning(const QString &warning);
void responseChanged();
void responseFailed();
void responseChanged(const QString &response);
void responseFailed(const QString &error);
void promptProcessing();
void generatingQuestions();
void responseStopped(qint64 promptResponseMs);
@ -250,20 +251,20 @@ protected:
};
ChatPromptResult promptInternalChat(const QStringList &enabledCollections, const LLModel::PromptContext &ctx,
qsizetype startOffset = 0);
std::optional<std::pair<int, int>> subrange = {});
// passing a string_view directly skips templating and uses the raw string
PromptResult promptInternal(const std::variant<std::span<const MessageItem>, std::string_view> &prompt,
PromptResult promptInternal(const std::variant<std::span<const ChatItem>, std::string_view> &prompt,
const LLModel::PromptContext &ctx,
bool usedLocalDocs);
private:
bool loadNewModel(const ModelInfo &modelInfo, QVariantMap &modelLoadProps);
std::vector<MessageItem> forkConversation(const QString &prompt) const;
std::vector<ChatItem> forkConversation(const QString &prompt) const;
// Applies the Jinja template. Query mode returns only the last message without special tokens.
// Returns the rendered prompt as a string.
std::string applyJinjaTemplate(std::span<const MessageItem> items) const;
std::string applyJinjaTemplate(std::span<const ChatItem> items) const;
void generateQuestions(qint64 elapsed);
@ -284,8 +285,6 @@ private:
bool m_isServer;
bool m_forceMetal;
bool m_reloadingToChangeVariant;
friend class ChatViewResponseHandler;
friend class SimpleResponseHandler;
};
#endif // CHATLLM_H
View File
@ -1,368 +0,0 @@
#include "chatmodel.h"
#include <QDebug>
#include <QMap>
#include <QTextStream>
#include <QtLogging>
#include <exception>
QList<ResultInfo> ChatItem::consolidateSources(const QList<ResultInfo> &sources)
{
QMap<QString, ResultInfo> groupedData;
for (const ResultInfo &info : sources) {
if (groupedData.contains(info.file)) {
groupedData[info.file].text += "\n---\n" + info.text;
} else {
groupedData[info.file] = info;
}
}
QList<ResultInfo> consolidatedSources = groupedData.values();
return consolidatedSources;
}
void ChatItem::serializeResponse(QDataStream &stream, int version)
{
stream << value;
}
void ChatItem::serializeToolCall(QDataStream &stream, int version)
{
stream << value;
toolCallInfo.serialize(stream, version);
}
void ChatItem::serializeToolResponse(QDataStream &stream, int version)
{
stream << value;
}
void ChatItem::serializeText(QDataStream &stream, int version)
{
stream << value;
}
void ChatItem::serializeThink(QDataStream &stream, int version)
{
stream << value;
stream << thinkingTime;
}
void ChatItem::serializeSubItems(QDataStream &stream, int version)
{
stream << name;
switch (auto typ = type()) {
using enum ChatItem::Type;
case Response: { serializeResponse(stream, version); break; }
case ToolCall: { serializeToolCall(stream, version); break; }
case ToolResponse: { serializeToolResponse(stream, version); break; }
case Text: { serializeText(stream, version); break; }
case Think: { serializeThink(stream, version); break; }
case System:
case Prompt:
throw std::invalid_argument(fmt::format("cannot serialize subitem type {}", int(typ)));
}
stream << qsizetype(subItems.size());
for (ChatItem *item : subItems)
item->serializeSubItems(stream, version);
}
void ChatItem::serialize(QDataStream &stream, int version)
{
stream << name;
stream << value;
stream << newResponse;
stream << isCurrentResponse;
stream << stopped;
stream << thumbsUpState;
stream << thumbsDownState;
if (version >= 11 && type() == ChatItem::Type::Response)
stream << isError;
if (version >= 8) {
stream << sources.size();
for (const ResultInfo &info : sources) {
Q_ASSERT(!info.file.isEmpty());
stream << info.collection;
stream << info.path;
stream << info.file;
stream << info.title;
stream << info.author;
stream << info.date;
stream << info.text;
stream << info.page;
stream << info.from;
stream << info.to;
}
} else if (version >= 3) {
QList<QString> references;
QList<QString> referencesContext;
int validReferenceNumber = 1;
for (const ResultInfo &info : sources) {
if (info.file.isEmpty())
continue;
QString reference;
{
QTextStream stream(&reference);
stream << (validReferenceNumber++) << ". ";
if (!info.title.isEmpty())
stream << "\"" << info.title << "\". ";
if (!info.author.isEmpty())
stream << "By " << info.author << ". ";
if (!info.date.isEmpty())
stream << "Date: " << info.date << ". ";
stream << "In " << info.file << ". ";
if (info.page != -1)
stream << "Page " << info.page << ". ";
if (info.from != -1) {
stream << "Lines " << info.from;
if (info.to != -1)
stream << "-" << info.to;
stream << ". ";
}
stream << "[Context](context://" << validReferenceNumber - 1 << ")";
}
references.append(reference);
referencesContext.append(info.text);
}
stream << references.join("\n");
stream << referencesContext;
}
if (version >= 10) {
stream << promptAttachments.size();
for (const PromptAttachment &a : promptAttachments) {
Q_ASSERT(!a.url.isEmpty());
stream << a.url;
stream << a.content;
}
}
if (version >= 12) {
stream << qsizetype(subItems.size());
for (ChatItem *item : subItems)
item->serializeSubItems(stream, version);
}
}
bool ChatItem::deserializeToolCall(QDataStream &stream, int version)
{
stream >> value;
return toolCallInfo.deserialize(stream, version);
}
bool ChatItem::deserializeToolResponse(QDataStream &stream, int version)
{
stream >> value;
return true;
}
bool ChatItem::deserializeText(QDataStream &stream, int version)
{
stream >> value;
return true;
}
bool ChatItem::deserializeResponse(QDataStream &stream, int version)
{
stream >> value;
return true;
}
bool ChatItem::deserializeThink(QDataStream &stream, int version)
{
stream >> value;
stream >> thinkingTime;
return true;
}
bool ChatItem::deserializeSubItems(QDataStream &stream, int version)
{
stream >> name;
try {
type(); // check name
} catch (const std::exception &e) {
qWarning() << "ChatModel ERROR:" << e.what();
return false;
}
switch (auto typ = type()) {
using enum ChatItem::Type;
case Response: { deserializeResponse(stream, version); break; }
case ToolCall: { deserializeToolCall(stream, version); break; }
case ToolResponse: { deserializeToolResponse(stream, version); break; }
case Text: { deserializeText(stream, version); break; }
case Think: { deserializeThink(stream, version); break; }
case System:
case Prompt:
throw std::invalid_argument(fmt::format("cannot serialize subitem type {}", int(typ)));
}
qsizetype count;
stream >> count;
for (int i = 0; i < count; ++i) {
ChatItem *c = new ChatItem(this);
if (!c->deserializeSubItems(stream, version)) {
delete c;
return false;
}
subItems.push_back(c);
}
return true;
}
bool ChatItem::deserialize(QDataStream &stream, int version)
{
if (version < 12) {
int id;
stream >> id;
}
stream >> name;
try {
type(); // check name
} catch (const std::exception &e) {
qWarning() << "ChatModel ERROR:" << e.what();
return false;
}
stream >> value;
if (version < 10) {
// This is deprecated and no longer used
QString prompt;
stream >> prompt;
}
stream >> newResponse;
stream >> isCurrentResponse;
stream >> stopped;
stream >> thumbsUpState;
stream >> thumbsDownState;
if (version >= 11 && type() == ChatItem::Type::Response)
stream >> isError;
if (version >= 8) {
qsizetype count;
stream >> count;
for (int i = 0; i < count; ++i) {
ResultInfo info;
stream >> info.collection;
stream >> info.path;
stream >> info.file;
stream >> info.title;
stream >> info.author;
stream >> info.date;
stream >> info.text;
stream >> info.page;
stream >> info.from;
stream >> info.to;
sources.append(info);
}
consolidatedSources = ChatItem::consolidateSources(sources);
} else if (version >= 3) {
QString references;
QList<QString> referencesContext;
stream >> references;
stream >> referencesContext;
if (!references.isEmpty()) {
QList<QString> referenceList = references.split("\n");
// Ignore empty lines and those that begin with "---" which is no longer used
for (auto it = referenceList.begin(); it != referenceList.end();) {
if (it->trimmed().isEmpty() || it->trimmed().startsWith("---"))
it = referenceList.erase(it);
else
++it;
}
Q_ASSERT(referenceList.size() == referencesContext.size());
for (int j = 0; j < referenceList.size(); ++j) {
QString reference = referenceList[j];
QString context = referencesContext[j];
ResultInfo info;
QTextStream refStream(&reference);
QString dummy;
int validReferenceNumber;
refStream >> validReferenceNumber >> dummy;
// Extract title (between quotes)
if (reference.contains("\"")) {
int startIndex = reference.indexOf('"') + 1;
int endIndex = reference.indexOf('"', startIndex);
info.title = reference.mid(startIndex, endIndex - startIndex);
}
// Extract author (after "By " and before the next period)
if (reference.contains("By ")) {
int startIndex = reference.indexOf("By ") + 3;
int endIndex = reference.indexOf('.', startIndex);
info.author = reference.mid(startIndex, endIndex - startIndex).trimmed();
}
// Extract date (after "Date: " and before the next period)
if (reference.contains("Date: ")) {
int startIndex = reference.indexOf("Date: ") + 6;
int endIndex = reference.indexOf('.', startIndex);
info.date = reference.mid(startIndex, endIndex - startIndex).trimmed();
}
// Extract file name (after "In " and before the "[Context]")
if (reference.contains("In ") && reference.contains(". [Context]")) {
int startIndex = reference.indexOf("In ") + 3;
int endIndex = reference.indexOf(". [Context]", startIndex);
info.file = reference.mid(startIndex, endIndex - startIndex).trimmed();
}
// Extract page number (after "Page " and before the next space)
if (reference.contains("Page ")) {
int startIndex = reference.indexOf("Page ") + 5;
int endIndex = reference.indexOf(' ', startIndex);
if (endIndex == -1) endIndex = reference.length();
info.page = reference.mid(startIndex, endIndex - startIndex).toInt();
}
// Extract lines (after "Lines " and before the next space or hyphen)
if (reference.contains("Lines ")) {
int startIndex = reference.indexOf("Lines ") + 6;
int endIndex = reference.indexOf(' ', startIndex);
if (endIndex == -1) endIndex = reference.length();
int hyphenIndex = reference.indexOf('-', startIndex);
if (hyphenIndex != -1 && hyphenIndex < endIndex) {
info.from = reference.mid(startIndex, hyphenIndex - startIndex).toInt();
info.to = reference.mid(hyphenIndex + 1, endIndex - hyphenIndex - 1).toInt();
} else {
info.from = reference.mid(startIndex, endIndex - startIndex).toInt();
}
}
info.text = context;
sources.append(info);
}
consolidatedSources = ChatItem::consolidateSources(sources);
}
}
if (version >= 10) {
qsizetype count;
stream >> count;
QList<PromptAttachment> attachments;
for (int i = 0; i < count; ++i) {
PromptAttachment a;
stream >> a.url;
stream >> a.content;
attachments.append(a);
}
promptAttachments = attachments;
}
if (version >= 12) {
qsizetype count;
stream >> count;
for (int i = 0; i < count; ++i) {
ChatItem *c = new ChatItem(this);
if (!c->deserializeSubItems(stream, version)) {
delete c;
return false;
}
subItems.push_back(c);
}
}
return true;
}
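A round-trip sketch of the serialize/deserialize pair above (hypothetical driver; assumes ChatItem is default-constructible, which this excerpt does not show, and uses the CHAT_FORMAT_VERSION constant from chatlistmodel.cpp):

QByteArray blob;
{
    QDataStream out(&blob, QIODevice::WriteOnly);
    item.serialize(out, CHAT_FORMAT_VERSION);
}
QDataStream in(blob); // read-only stream over the bytes
ChatItem restored;
if (!restored.deserialize(in, CHAT_FORMAT_VERSION))
    qWarning() << "failed to restore chat item";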
File diff suppressed because it is too large Load Diff
View File
@ -1,32 +1,29 @@
#include "chatviewtextprocessor.h"
#include <QAbstractTextDocumentLayout>
#include <QBrush>
#include <QChar>
#include <QClipboard>
#include <QDebug>
#include <QFlag>
#include <QFont>
#include <QFontMetricsF>
#include <QGuiApplication>
#include <QList> // IWYU pragma: keep
#include <QPair>
#include <QList>
#include <QPainter>
#include <QQuickTextDocument>
#include <QRegularExpression>
#include <QStringList> // IWYU pragma: keep
#include <QTextBlock> // IWYU pragma: keep
#include <QTextCharFormat> // IWYU pragma: keep
#include <QStringList>
#include <QTextBlock>
#include <QTextCharFormat>
#include <QTextCursor>
#include <QTextDocument>
#include <QTextDocumentFragment>
#include <QTextFrame> // IWYU pragma: keep
#include <QTextFrameFormat> // IWYU pragma: keep
#include <QTextFrame>
#include <QTextFrameFormat>
#include <QTextTableCell>
#include <QtAssert>
#include <QtLogging>
#include <QVariant>
#include <Qt>
#include <QtGlobal>
#include <algorithm>
#include <utility>
enum Language {
None,
View File
@ -3,15 +3,18 @@
#include <QColor>
#include <QObject>
#include <QQmlEngine> // IWYU pragma: keep
#include <QQuickTextDocument>
#include <QQmlEngine>
#include <QQuickTextDocument> // IWYU pragma: keep
#include <QRectF>
#include <QSizeF>
#include <QString>
#include <QSyntaxHighlighter>
#include <QVector> // IWYU pragma: keep
#include <QtTypes>
// IWYU pragma: no_forward_declare QQuickTextDocument
#include <QTextObjectInterface>
#include <QVector>
class QPainter;
class QTextDocument;
class QTextFormat;
struct CodeColors {
Q_GADGET
View File
@ -1,179 +0,0 @@
#include "codeinterpreter.h"
#include <QJSEngine>
#include <QJSValue>
#include <QList>
#include <QStringList> // IWYU pragma: keep
#include <QThread>
#include <QVariant>
#include <Qt>
using namespace Qt::Literals::StringLiterals;
CodeInterpreter::CodeInterpreter()
: Tool()
, m_error(ToolEnums::Error::NoError)
{
m_worker = new CodeInterpreterWorker;
connect(this, &CodeInterpreter::request, m_worker, &CodeInterpreterWorker::request, Qt::QueuedConnection);
}
void CodeInterpreter::run(const QList<ToolParam> &params)
{
m_error = ToolEnums::Error::NoError;
m_errorString = QString();
Q_ASSERT(params.count() == 1
&& params.first().name == "code"
&& params.first().type == ToolEnums::ParamType::String);
const QString code = params.first().value.toString();
connect(m_worker, &CodeInterpreterWorker::finished, [this, params] {
m_error = m_worker->error();
m_errorString = m_worker->errorString();
emit runComplete({
ToolCallConstants::CodeInterpreterFunction,
params,
m_worker->response(),
m_error,
m_errorString
});
});
emit request(code);
}
bool CodeInterpreter::interrupt()
{
return m_worker->interrupt();
}
QList<ToolParamInfo> CodeInterpreter::parameters() const
{
return {{
"code",
ToolEnums::ParamType::String,
"javascript code to compute",
true
}};
}
QString CodeInterpreter::symbolicFormat() const
{
return "{human readable plan to complete the task}\n" + ToolCallConstants::CodeInterpreterPrefix + "{code}\n" + ToolCallConstants::CodeInterpreterSuffix;
}
QString CodeInterpreter::examplePrompt() const
{
return R"(Write code to check if a number is prime, use that to see if the number 7 is prime)";
}
QString CodeInterpreter::exampleCall() const
{
static const QString example = R"(function isPrime(n) {
if (n <= 1) {
return false;
}
for (let i = 2; i <= Math.sqrt(n); i++) {
if (n % i === 0) {
return false;
}
}
return true;
}
const number = 7;
console.log(`The number ${number} is prime: ${isPrime(number)}`);
)";
return "Certainly! Let's compute the answer to whether the number 7 is prime.\n" + ToolCallConstants::CodeInterpreterPrefix + example + ToolCallConstants::CodeInterpreterSuffix;
}
QString CodeInterpreter::exampleReply() const
{
return R"("The computed result shows that 7 is a prime number.)";
}
CodeInterpreterWorker::CodeInterpreterWorker()
: QObject(nullptr)
, m_engine(new QJSEngine(this))
{
moveToThread(&m_thread);
QJSValue consoleInternalObject = m_engine->newQObject(&m_consoleCapture);
m_engine->globalObject().setProperty("console_internal", consoleInternalObject);
// preprocess console.log args in JS since Q_INVOKE doesn't support varargs
auto consoleObject = m_engine->evaluate(uR"(
class Console {
log(...args) {
if (args.length == 0)
return;
if (args.length >= 2 && typeof args[0] === 'string')
throw new Error('console.log string formatting not supported');
let cat = '';
for (const arg of args) {
cat += String(arg);
}
console_internal.log(cat);
}
}
new Console();
)"_s);
m_engine->globalObject().setProperty("console", consoleObject);
m_thread.start();
}
void CodeInterpreterWorker::reset()
{
m_response.clear();
m_error = ToolEnums::Error::NoError;
m_errorString.clear();
m_consoleCapture.output.clear();
m_engine->setInterrupted(false);
}
void CodeInterpreterWorker::request(const QString &code)
{
reset();
const QJSValue result = m_engine->evaluate(code);
QString resultString;
if (m_engine->isInterrupted()) {
resultString = QString("Error: code execution was interrupted or timed out.");
} else if (result.isError()) {
// NOTE: We purposely do not set the m_error or m_errorString for the code interpreter since
// we *want* the model to see the response has an error so it can hopefully correct itself. The
// error member variables are intended for tools that have error conditions that cannot be corrected.
// For instance, a tool depending upon the network might set these error variables if the network
// is not available.
const QStringList lines = code.split('\n');
const int line = result.property("lineNumber").toInt();
const int index = line - 1;
const QString lineContent = (index >= 0 && index < lines.size()) ? lines.at(index) : "Line not found in code.";
resultString = QString("Uncaught exception at line %1: %2\n\t%3")
.arg(line)
.arg(result.toString())
.arg(lineContent);
m_error = ToolEnums::Error::UnknownError;
m_errorString = resultString;
} else {
resultString = result.isUndefined() ? QString() : result.toString();
}
if (resultString.isEmpty())
resultString = m_consoleCapture.output;
else if (!m_consoleCapture.output.isEmpty())
resultString += "\n" + m_consoleCapture.output;
m_response = resultString;
emit finished();
}
bool CodeInterpreterWorker::interrupt()
{
m_error = ToolEnums::Error::TimeoutError;
m_engine->setInterrupted(true);
return true;
}
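
A minimal usage sketch of the worker API above (hypothetical harness, not part of this diff; it assumes the declarations from codeinterpreter.h shown below and a Qt 6 event loop):
#include "codeinterpreter.h"
#include <QCoreApplication>
#include <QDebug>
#include <QMetaObject>
#include <QObject>
int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);
    // Leaked on purpose: the worker parks itself on its own QThread, which in
    // the real app runs for the life of the process.
    auto *worker = new CodeInterpreterWorker;
    QObject::connect(worker, &CodeInterpreterWorker::finished, &app, [&] {
        qDebug().noquote() << worker->response(); // captured console output: "42\n"
        app.quit();
    });
    // request() is a slot living on the worker thread, so invoke it queued.
    QMetaObject::invokeMethod(worker, "request", Qt::QueuedConnection,
                              Q_ARG(QString, QStringLiteral("console.log(6 * 7)")));
    return app.exec();
}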

View File

@ -1,97 +0,0 @@
#ifndef CODEINTERPRETER_H
#define CODEINTERPRETER_H
#include "tool.h"
#include "toolcallparser.h"
#include <QObject>
#include <QString>
#include <QThread>
#include <QtAssert>
class QJSEngine;
class JavaScriptConsoleCapture : public QObject
{
Q_OBJECT
public:
QString output;
Q_INVOKABLE void log(const QString &message)
{
const int maxLength = 1024;
if (output.length() >= maxLength)
return;
if (output.length() + message.length() + 1 > maxLength) {
static const QString trunc = "\noutput truncated at " + QString::number(maxLength) + " characters...";
int remainingLength = maxLength - output.length();
if (remainingLength > 0)
output.append(message.left(remainingLength));
output.append(trunc);
Q_ASSERT(output.length() > maxLength);
} else {
output.append(message + "\n");
}
}
};
class CodeInterpreterWorker : public QObject {
Q_OBJECT
public:
CodeInterpreterWorker();
virtual ~CodeInterpreterWorker() {}
void reset();
QString response() const { return m_response; }
ToolEnums::Error error() const { return m_error; }
QString errorString() const { return m_errorString; }
bool interrupt();
public Q_SLOTS:
void request(const QString &code);
Q_SIGNALS:
void finished();
private:
QString m_response;
ToolEnums::Error m_error = ToolEnums::Error::NoError;
QString m_errorString;
QThread m_thread;
JavaScriptConsoleCapture m_consoleCapture;
QJSEngine *m_engine = nullptr;
};
class CodeInterpreter : public Tool
{
Q_OBJECT
public:
explicit CodeInterpreter();
virtual ~CodeInterpreter() {}
void run(const QList<ToolParam> &params) override;
bool interrupt() override;
ToolEnums::Error error() const override { return m_error; }
QString errorString() const override { return m_errorString; }
QString name() const override { return tr("Code Interpreter"); }
QString description() const override { return tr("compute javascript code using console.log as output"); }
QString function() const override { return ToolCallConstants::CodeInterpreterFunction; }
QList<ToolParamInfo> parameters() const override;
virtual QString symbolicFormat() const override;
QString examplePrompt() const override;
QString exampleCall() const override;
QString exampleReply() const override;
Q_SIGNALS:
void request(const QString &code);
private:
ToolEnums::Error m_error = ToolEnums::Error::NoError;
QString m_errorString;
CodeInterpreterWorker *m_worker;
};
#endif // CODEINTERPRETER_H
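
A hedged worked example of the 1 KiB console cap in JavaScriptConsoleCapture above (standalone sketch; main() and the literals are illustrative):
#include "codeinterpreter.h"
#include <QDebug>
#include <QString>
int main()
{
    JavaScriptConsoleCapture cap;
    cap.log(QString(1020, u'x')); // appended whole: output is now 1021 chars (trailing '\n' included)
    cap.log(QString(100, u'y'));  // 1021 + 100 + 1 > 1024, so only 3 'y's fit before the
                                  // "output truncated at 1024 characters..." marker
    qDebug() << cap.output.size(); // a bit over 1024; any further log() calls return immediately
    return 0;
}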

View File

@ -1,7 +0,0 @@
#pragma once
#define APP_VERSION "@APP_VERSION@"
#define G4A_CONFIG(name) (1/G4A_CONFIG_##name == 1)
#define G4A_CONFIG_force_d3d12 @GPT4ALL_CONFIG_FORCE_D3D12@

View File

@ -1,11 +1,10 @@
#include "database.h"
#include "mysettings.h"
#include "utils.h" // IWYU pragma: keep
#include "utils.h"
#include <duckx/duckx.hpp>
#include <fmt/format.h>
#include <usearch/index.hpp>
#include <usearch/index_plugins.hpp>
#include <QDebug>
@ -13,9 +12,9 @@
#include <QDirIterator>
#include <QFile>
#include <QFileSystemWatcher>
#include <QFlags>
#include <QIODevice>
#include <QKeyValueIterator>
#include <QPdfDocument>
#include <QPdfSelection>
#include <QRegularExpression>
#include <QSqlError>
#include <QSqlQuery>
@ -24,24 +23,14 @@
#include <QMap>
#include <QUtf8StringView>
#include <QVariant>
#include <Qt>
#include <QtLogging>
#include <QtMinMax>
#include <QtTypes>
#include <algorithm>
#include <cmath>
#include <optional>
#include <stdexcept>
#ifdef GPT4ALL_USE_QTPDF
# include <QPdfDocument>
# include <QPdfSelection>
#else
# include <fpdfview.h>
# include <fpdf_doc.h>
# include <fpdf_text.h>
#endif
using namespace Qt::Literals::StringLiterals;
namespace ranges = std::ranges;
namespace us = unum::usearch;
@ -49,7 +38,6 @@ namespace us = unum::usearch;
//#define DEBUG
//#define DEBUG_EXAMPLE
namespace {
/* QFile that checks input for binary data. If seen, it fails the read and returns true
@ -1115,9 +1103,9 @@ class DocumentReader {
public:
struct Metadata { QString title, author, subject, keywords; };
static std::unique_ptr<DocumentReader> fromDocument(DocumentInfo info);
static std::unique_ptr<DocumentReader> fromDocument(const DocumentInfo &info);
const DocumentInfo &doc () const { return m_info; }
const DocumentInfo &doc () const { return *m_info; }
const Metadata &metadata() const { return m_metadata; }
const std::optional<QString> &word () const { return m_word; }
const std::optional<QString> &nextWord() { m_word = advance(); return m_word; }
@ -1127,8 +1115,8 @@ public:
virtual ~DocumentReader() = default;
protected:
explicit DocumentReader(DocumentInfo info)
: m_info(std::move(info)) {}
explicit DocumentReader(const DocumentInfo &info)
: m_info(&info) {}
void postInit(Metadata &&metadata = {})
{
@ -1138,18 +1126,17 @@ protected:
virtual std::optional<QString> advance() = 0;
DocumentInfo m_info;
Metadata m_metadata;
std::optional<QString> m_word;
const DocumentInfo *m_info;
Metadata m_metadata;
std::optional<QString> m_word;
};
namespace {
#ifdef GPT4ALL_USE_QTPDF
class PdfDocumentReader final : public DocumentReader {
public:
explicit PdfDocumentReader(DocumentInfo info)
: DocumentReader(std::move(info))
explicit PdfDocumentReader(const DocumentInfo &info)
: DocumentReader(info)
{
QString path = info.file.canonicalFilePath();
if (m_doc.load(path) != QPdfDocument::Error::None)
@ -1186,103 +1173,11 @@ private:
QString m_pageText;
std::optional<QTextStream> m_stream;
};
#else
class PdfDocumentReader final : public DocumentReader {
public:
explicit PdfDocumentReader(DocumentInfo info)
: DocumentReader(std::move(info))
{
QString path = info.file.canonicalFilePath();
m_doc = FPDF_LoadDocument(path.toUtf8().constData(), nullptr);
if (!m_doc)
throw std::runtime_error(fmt::format("Failed to load PDF: {}", path));
// Extract metadata
Metadata metadata {
.title = getMetadata("Title" ),
.author = getMetadata("Author" ),
.subject = getMetadata("Subject" ),
.keywords = getMetadata("Keywords"),
};
postInit(std::move(metadata));
}
~PdfDocumentReader() override
{
if (m_page)
FPDF_ClosePage(m_page);
if (m_doc)
FPDF_CloseDocument(m_doc);
}
int page() const override { return m_currentPage; }
private:
std::optional<QString> advance() override
{
QString word;
do {
while (!m_stream || m_stream->atEnd()) {
if (m_currentPage >= FPDF_GetPageCount(m_doc))
return std::nullopt;
if (m_page)
FPDF_ClosePage(std::exchange(m_page, nullptr));
m_page = FPDF_LoadPage(m_doc, m_currentPage++);
if (!m_page)
throw std::runtime_error("Failed to load page.");
m_pageText = extractTextFromPage(m_page);
m_stream.emplace(&m_pageText);
}
*m_stream >> word;
} while (word.isEmpty());
return word;
}
QString getMetadata(FPDF_BYTESTRING key)
{
// FPDF_GetMetaText includes a 2-byte null terminator
ulong nBytes = FPDF_GetMetaText(m_doc, key, nullptr, 0);
if (nBytes <= sizeof (FPDF_WCHAR))
return { "" };
QByteArray buffer(nBytes, Qt::Uninitialized);
ulong nResultBytes = FPDF_GetMetaText(m_doc, key, buffer.data(), buffer.size());
Q_ASSERT(nResultBytes % 2 == 0);
Q_ASSERT(nResultBytes <= nBytes);
return QString::fromUtf16(reinterpret_cast<const char16_t *>(buffer.data()), nResultBytes / 2 - 1);
}
QString extractTextFromPage(FPDF_PAGE page)
{
FPDF_TEXTPAGE textPage = FPDFText_LoadPage(page);
if (!textPage)
throw std::runtime_error("Failed to load text page.");
int nChars = FPDFText_CountChars(textPage);
if (!nChars)
return {};
// FPDFText_GetText includes a 2-byte null terminator
QByteArray buffer((nChars + 1) * sizeof (FPDF_WCHAR), Qt::Uninitialized);
int nResultChars = FPDFText_GetText(textPage, 0, nChars, reinterpret_cast<ushort *>(buffer.data()));
Q_ASSERT(nResultChars <= nChars + 1);
FPDFText_ClosePage(textPage);
return QString::fromUtf16(reinterpret_cast<const char16_t *>(buffer.data()), nResultChars - 1);
}
FPDF_DOCUMENT m_doc = nullptr;
FPDF_PAGE m_page = nullptr;
int m_currentPage = 0;
QString m_pageText;
std::optional<QTextStream> m_stream;
};
#endif // !defined(GPT4ALL_USE_QTPDF)
class WordDocumentReader final : public DocumentReader {
public:
explicit WordDocumentReader(DocumentInfo info)
: DocumentReader(std::move(info))
explicit WordDocumentReader(const DocumentInfo &info)
: DocumentReader(info)
, m_doc(info.file.canonicalFilePath().toStdString())
{
m_doc.open();
@ -1374,8 +1269,8 @@ protected:
class TxtDocumentReader final : public DocumentReader {
public:
explicit TxtDocumentReader(DocumentInfo info)
: DocumentReader(std::move(info))
explicit TxtDocumentReader(const DocumentInfo &info)
: DocumentReader(info)
, m_file(info.file.canonicalFilePath())
{
if (!m_file.open(QIODevice::ReadOnly))
@ -1416,13 +1311,13 @@ protected:
} // namespace
std::unique_ptr<DocumentReader> DocumentReader::fromDocument(DocumentInfo doc)
std::unique_ptr<DocumentReader> DocumentReader::fromDocument(const DocumentInfo &doc)
{
if (doc.isPdf())
return std::make_unique<PdfDocumentReader>(std::move(doc));
return std::make_unique<PdfDocumentReader>(doc);
if (doc.isDocx())
return std::make_unique<WordDocumentReader>(std::move(doc));
return std::make_unique<TxtDocumentReader>(std::move(doc));
return std::make_unique<WordDocumentReader>(doc);
return std::make_unique<TxtDocumentReader>(doc);
}
ChunkStreamer::ChunkStreamer(Database *database)
@ -1430,12 +1325,12 @@ ChunkStreamer::ChunkStreamer(Database *database)
ChunkStreamer::~ChunkStreamer() = default;
void ChunkStreamer::setDocument(DocumentInfo doc, int documentId, const QString &embeddingModel)
void ChunkStreamer::setDocument(const DocumentInfo &doc, int documentId, const QString &embeddingModel)
{
auto docKey = doc.key();
if (!m_docKey || *m_docKey != docKey) {
m_docKey = docKey;
m_reader = DocumentReader::fromDocument(std::move(doc));
m_reader = DocumentReader::fromDocument(doc);
m_documentId = documentId;
m_embeddingModel = embeddingModel;
m_chunk.clear();
@ -1445,8 +1340,7 @@ void ChunkStreamer::setDocument(DocumentInfo doc, int documentId, const QString
if (m_database->m_documentIdCache.contains(documentId)) {
QSqlQuery q(m_database->m_db);
if (!m_database->removeChunksByDocumentId(q, documentId))
handleDocumentError("ERROR: Cannot remove chunks of document",
documentId, m_reader->doc().file.canonicalPath(), q.lastError());
handleDocumentError("ERROR: Cannot remove chunks of document", documentId, doc.file.canonicalPath(), q.lastError());
}
}
}

View File

@ -1,7 +1,7 @@
#ifndef DATABASE_H
#define DATABASE_H
#include "embllm.h"
#include "embllm.h" // IWYU pragma: keep
#include <QByteArray>
#include <QChar>
@ -15,11 +15,11 @@
#include <QSet>
#include <QSqlDatabase>
#include <QString>
#include <QStringList> // IWYU pragma: keep
#include <QStringList>
#include <QThread>
#include <QUrl>
#include <QVector> // IWYU pragma: keep
#include <QtAssert>
#include <QVector>
#include <QtGlobal>
#include <atomic>
#include <cstddef>
@ -28,7 +28,7 @@
#include <memory>
#include <optional>
#include <utility>
#include <vector> // IWYU pragma: keep
#include <vector>
using namespace Qt::Literals::StringLiterals;
@ -39,7 +39,6 @@ class QSqlQuery;
class QTextStream;
class QTimer;
/* Version 0: GPT4All v2.4.3, full-text search
* Version 1: GPT4All v2.5.3, embeddings in hsnwlib
* Version 2: GPT4All v3.0.0, embeddings in sqlite
@ -172,7 +171,7 @@ public:
explicit ChunkStreamer(Database *database);
~ChunkStreamer();
void setDocument(DocumentInfo doc, int documentId, const QString &embeddingModel);
void setDocument(const DocumentInfo &doc, int documentId, const QString &embeddingModel);
std::optional<DocumentInfo::key_type> currentDocKey() const;
void reset();

View File

@ -10,37 +10,32 @@
#include <QDebug>
#include <QGlobalStatic>
#include <QGuiApplication>
#include <QIODevice> // IWYU pragma: keep
#include <QIODevice>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QJsonValue>
#include <QKeyValueIterator>
#include <QLocale>
#include <QNetworkRequest>
#include <QPair> // IWYU pragma: keep
#include <QRegularExpression>
#include <QRegularExpressionMatch>
#include <QPair>
#include <QSettings>
#include <QSslConfiguration>
#include <QSslSocket>
#include <QStringList> // IWYU pragma: keep
#include <QStringList>
#include <QTextStream>
#include <QUrl>
#include <QVariant>
#include <QVector> // IWYU pragma: keep
#include <QVector>
#include <Qt>
#include <QtAssert>
#include <QtLogging>
#include <QtMinMax>
#include <algorithm>
#include <compare>
#include <cstddef>
#include <utility>
using namespace Qt::Literals::StringLiterals;
class MyDownload: public Download { };
Q_GLOBAL_STATIC(MyDownload, downloadInstance)
Download *Download::globalInstance()

View File

@ -13,14 +13,10 @@
#include <QSslError>
#include <QString>
#include <QThread>
#include <QtTypes>
#include <QtGlobal>
// IWYU pragma: no_forward_declare QFile
// IWYU pragma: no_forward_declare QList
// IWYU pragma: no_forward_declare QSslError
class QByteArray;
struct ReleaseInfo {
Q_GADGET
Q_PROPERTY(QString version MEMBER version)

View File

@ -1,35 +1,35 @@
#include "embllm.h"
#include "modellist.h"
#include "mysettings.h"
#include <gpt4all-backend/llmodel.h>
#include <QCoreApplication>
#include <QDebug>
#include <QFile>
#include <QFileInfo>
#include <QGuiApplication>
#include <QIODevice>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QJsonValue>
#include <QList>
#include <QMutexLocker> // IWYU pragma: keep
#include <QMutexLocker>
#include <QNetworkAccessManager>
#include <QNetworkReply>
#include <QNetworkRequest>
#include <QUrl>
#include <Qt>
#include <QtAssert>
#include <QtGlobal>
#include <QtLogging>
#include <exception>
#include <string>
#include <utility>
#include <vector>
using namespace Qt::Literals::StringLiterals;
static const QString EMBEDDING_MODEL_NAME = u"nomic-embed-text-v1.5"_s;
static const QString LOCAL_EMBEDDING_MODEL = u"nomic-embed-text-v1.5.f16.gguf"_s;
@ -359,11 +359,8 @@ void EmbeddingLLMWorker::handleFinished()
if (retrievedData.isValid() && retrievedData.canConvert<QVector<EmbeddingChunk>>())
chunks = retrievedData.value<QVector<EmbeddingChunk>>();
QVariant response;
if (reply->error() != QNetworkReply::NoError) {
response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute);
Q_ASSERT(response.isValid());
}
QVariant response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute);
Q_ASSERT(response.isValid());
bool ok;
int code = response.toInt(&ok);
if (!ok || code != 200) {

View File

@ -5,10 +5,10 @@
#include <QMutex>
#include <QObject>
#include <QString>
#include <QStringList> // IWYU pragma: keep
#include <QStringList>
#include <QThread>
#include <QVariant>
#include <QVector> // IWYU pragma: keep
#include <QVector>
#include <atomic>
#include <vector>
@ -16,7 +16,6 @@
class LLModel;
class QNetworkAccessManager;
struct EmbeddingChunk {
QString model; // TODO(jared): use to select model
int folder_id;

View File

@ -1,76 +1,111 @@
#include "jinja_helpers.h"
#include "utils.h"
#include <fmt/format.h>
#include <QString>
#include <QUrl>
#include <ranges>
#include <string>
#include <utility>
#include <memory>
#include <vector>
namespace views = std::views;
using json = nlohmann::ordered_json;
using namespace std::literals::string_view_literals;
json::object_t JinjaResultInfo::AsJson() const
JinjaResultInfo::~JinjaResultInfo() = default;
const JinjaFieldMap<ResultInfo> JinjaResultInfo::s_fields = {
{ "collection", [](auto &s) { return s.collection.toStdString(); } },
{ "path", [](auto &s) { return s.path .toStdString(); } },
{ "file", [](auto &s) { return s.file .toStdString(); } },
{ "title", [](auto &s) { return s.title .toStdString(); } },
{ "author", [](auto &s) { return s.author .toStdString(); } },
{ "date", [](auto &s) { return s.date .toStdString(); } },
{ "text", [](auto &s) { return s.text .toStdString(); } },
{ "page", [](auto &s) { return s.page; } },
{ "file_uri", [](auto &s) { return s.fileUri() .toStdString(); } },
};
JinjaPromptAttachment::~JinjaPromptAttachment() = default;
const JinjaFieldMap<PromptAttachment> JinjaPromptAttachment::s_fields = {
{ "url", [](auto &s) { return s.url.toString() .toStdString(); } },
{ "file", [](auto &s) { return s.file() .toStdString(); } },
{ "processed_content", [](auto &s) { return s.processedContent().toStdString(); } },
};
std::vector<std::string> JinjaMessage::GetKeys() const
{
return {
{ "collection", m_source->collection.toStdString() },
{ "path", m_source->path .toStdString() },
{ "file", m_source->file .toStdString() },
{ "title", m_source->title .toStdString() },
{ "author", m_source->author .toStdString() },
{ "date", m_source->date .toStdString() },
{ "text", m_source->text .toStdString() },
{ "page", m_source->page },
{ "file_uri", m_source->fileUri() .toStdString() },
};
std::vector<std::string> result;
auto &keys = this->keys();
result.reserve(keys.size());
result.assign(keys.begin(), keys.end());
return result;
}
json::object_t JinjaPromptAttachment::AsJson() const
auto JinjaMessage::keys() const -> const std::unordered_set<std::string_view> &
{
return {
{ "url", m_attachment->url.toString() .toStdString() },
{ "file", m_attachment->file() .toStdString() },
{ "processed_content", m_attachment->processedContent().toStdString() },
};
static const std::unordered_set<std::string_view> baseKeys
{ "role", "content" };
static const std::unordered_set<std::string_view> userKeys
{ "role", "content", "sources", "prompt_attachments" };
switch (m_item->type()) {
using enum ChatItem::Type;
case System:
case Response:
return baseKeys;
case Prompt:
return userKeys;
}
Q_UNREACHABLE();
}
json::object_t JinjaMessage::AsJson() const
bool operator==(const JinjaMessage &a, const JinjaMessage &b)
{
json::object_t obj;
{
json::string_t role;
switch (m_item->type()) {
using enum MessageItem::Type;
case System: role = "system"; break;
case Prompt: role = "user"; break;
case Response: role = "assistant"; break;
case ToolResponse: role = "tool"; break;
}
obj.emplace_back("role", std::move(role));
if (a.m_item == b.m_item)
return true;
const auto &[ia, ib] = std::tie(*a.m_item, *b.m_item);
auto type = ia.type();
if (type != ib.type() || ia.value != ib.value)
return false;
switch (type) {
using enum ChatItem::Type;
case System:
case Response:
return true;
case Prompt:
return ia.sources == ib.sources && ia.promptAttachments == ib.promptAttachments;
}
{
QString content;
if (m_version == 0 && m_item->type() == MessageItem::Type::Prompt) {
content = m_item->bakedPrompt();
} else {
content = m_item->content();
}
obj.emplace_back("content", content.toStdString());
}
if (m_item->type() == MessageItem::Type::Prompt) {
{
auto sources = m_item->sources() | views::transform([](auto &r) {
return JinjaResultInfo(r).AsJson();
});
obj.emplace("sources", json::array_t(sources.begin(), sources.end()));
}
{
auto attachments = m_item->promptAttachments() | views::transform([](auto &pa) {
return JinjaPromptAttachment(pa).AsJson();
});
obj.emplace("prompt_attachments", json::array_t(attachments.begin(), attachments.end()));
}
}
return obj;
Q_UNREACHABLE();
}
const JinjaFieldMap<JinjaMessage> JinjaMessage::s_fields = {
{ "role", [](auto &m) {
switch (m.item().type()) {
using enum ChatItem::Type;
case System: return "system"sv;
case Prompt: return "user"sv;
case Response: return "assistant"sv;
}
Q_UNREACHABLE();
} },
{ "content", [](auto &m) {
if (m.version() == 0 && m.item().type() == ChatItem::Type::Prompt)
return m.item().bakedPrompt().toStdString();
return m.item().value.toStdString();
} },
{ "sources", [](auto &m) {
auto sources = m.item().sources | views::transform([](auto &r) {
return jinja2::GenericMap([map = std::make_shared<JinjaResultInfo>(r)] { return map.get(); });
});
return jinja2::ValuesList(sources.begin(), sources.end());
} },
{ "prompt_attachments", [](auto &m) {
auto attachments = m.item().promptAttachments | views::transform([](auto &pa) {
return jinja2::GenericMap([map = std::make_shared<JinjaPromptAttachment>(pa)] { return map.get(); });
});
return jinja2::ValuesList(attachments.begin(), attachments.end());
} },
};

View File

@ -3,21 +3,47 @@
#include "chatmodel.h"
#include "database.h"
#include <nlohmann/json.hpp>
#include <jinja2cpp/value.h>
#include <QtTypes> // IWYU pragma: keep
#include <functional>
#include <ranges>
#include <string>
#include <string_view>
#include <unordered_map>
#include <unordered_set>
// IWYU pragma: no_forward_declare MessageItem
// IWYU pragma: no_forward_declare PromptAttachment
// IWYU pragma: no_forward_declare ResultInfo
#include <QtGlobal>
using json = nlohmann::ordered_json;
namespace views = std::views;
template <typename T>
using JinjaFieldMap = std::unordered_map<std::string_view, std::function<jinja2::Value (const T &)>>;
template <typename Derived>
class JinjaHelper {
class JinjaComparable : public jinja2::IMapItemAccessor {
public:
json::object_t AsJson() const { return static_cast<const Derived *>(this)->AsJson(); }
JinjaComparable() = default;
bool IsEqual(const jinja2::IComparable &other) const override;
private:
Q_DISABLE_COPY_MOVE(JinjaComparable)
};
template <typename Derived>
class JinjaHelper : public JinjaComparable<Derived> {
public:
size_t GetSize() const override
{ return Derived::s_fields.size(); }
bool HasValue(const std::string &name) const override
{ return Derived::s_fields.contains(name); }
jinja2::Value GetValueByName(const std::string &name) const override;
std::vector<std::string> GetKeys() const override
{ auto keys = views::elements<0>(Derived::s_fields); return { keys.begin(), keys.end() }; }
};
class JinjaResultInfo : public JinjaHelper<JinjaResultInfo> {
@ -25,10 +51,18 @@ public:
explicit JinjaResultInfo(const ResultInfo &source) noexcept
: m_source(&source) {}
json::object_t AsJson() const;
~JinjaResultInfo() override;
const ResultInfo &value() const { return *m_source; }
friend bool operator==(const JinjaResultInfo &a, const JinjaResultInfo &b)
{ return a.m_source == b.m_source || *a.m_source == *b.m_source; }
private:
static const JinjaFieldMap<ResultInfo> s_fields;
const ResultInfo *m_source;
friend class JinjaHelper<JinjaResultInfo>;
};
class JinjaPromptAttachment : public JinjaHelper<JinjaPromptAttachment> {
@ -36,20 +70,47 @@ public:
explicit JinjaPromptAttachment(const PromptAttachment &attachment) noexcept
: m_attachment(&attachment) {}
json::object_t AsJson() const;
~JinjaPromptAttachment() override;
const PromptAttachment &value() const { return *m_attachment; }
friend bool operator==(const JinjaPromptAttachment &a, const JinjaPromptAttachment &b)
{ return a.m_attachment == b.m_attachment || *a.m_attachment == *b.m_attachment; }
private:
static const JinjaFieldMap<PromptAttachment> s_fields;
const PromptAttachment *m_attachment;
friend class JinjaHelper<JinjaPromptAttachment>;
};
class JinjaMessage : public JinjaHelper<JinjaMessage> {
public:
explicit JinjaMessage(uint version, const MessageItem &item) noexcept
explicit JinjaMessage(uint version, const ChatItem &item) noexcept
: m_version(version), m_item(&item) {}
json::object_t AsJson() const;
const JinjaMessage &value () const { return *this; }
uint version() const { return m_version; }
const ChatItem &item () const { return *m_item; }
size_t GetSize() const override { return keys().size(); }
bool HasValue(const std::string &name) const override { return keys().contains(name); }
jinja2::Value GetValueByName(const std::string &name) const override
{ return HasValue(name) ? JinjaHelper::GetValueByName(name) : jinja2::EmptyValue(); }
std::vector<std::string> GetKeys() const override;
private:
uint m_version;
const MessageItem *m_item;
auto keys() const -> const std::unordered_set<std::string_view> &;
private:
static const JinjaFieldMap<JinjaMessage> s_fields;
uint m_version;
const ChatItem *m_item;
friend class JinjaHelper<JinjaMessage>;
friend bool operator==(const JinjaMessage &a, const JinjaMessage &b);
};
#include "jinja_helpers.inl"

View File

@ -0,0 +1,17 @@
template <typename D>
bool JinjaComparable<D>::IsEqual(const jinja2::IComparable &other) const
{
if (auto *omsg = dynamic_cast<const D *>(&other))
return *static_cast<const D *>(this) == *omsg;
return false;
}
template <typename D>
jinja2::Value JinjaHelper<D>::GetValueByName(const std::string &name) const
{
if (auto it = D::s_fields.find(name); it != D::s_fields.end()) {
auto [_, func] = *it;
return func(static_cast<const D *>(this)->value());
}
return jinja2::EmptyValue();
}
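
To make the pattern above concrete, here is a hedged sketch of a new accessor built on JinjaHelper; ExampleThing and every name in it are invented for illustration, and the declarations from jinja_helpers.h are assumed:
struct ExampleThing { QString name; int count; };
class JinjaExampleThing : public JinjaHelper<JinjaExampleThing> {
public:
    explicit JinjaExampleThing(const ExampleThing &thing) noexcept
        : m_thing(&thing) {}
    const ExampleThing &value() const { return *m_thing; }
    friend bool operator==(const JinjaExampleThing &a, const JinjaExampleThing &b)
    { return a.m_thing == b.m_thing; }
private:
    static const JinjaFieldMap<ExampleThing> s_fields;
    const ExampleThing *m_thing;
    friend class JinjaHelper<JinjaExampleThing>;
};
// Field lookups route through JinjaHelper::GetValueByName() above via this map.
const JinjaFieldMap<ExampleThing> JinjaExampleThing::s_fields = {
    { "name",  [](auto &t) { return t.name.toStdString(); } },
    { "count", [](auto &t) { return jinja2::Value(t.count); } },
};
As with the sources and prompt_attachments fields above, an instance would be handed to the template engine wrapped as jinja2::GenericMap([m = std::make_shared<JinjaExampleThing>(thing)] { return m.get(); }).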

View File

@ -1,774 +0,0 @@
// The map in this file is automatically generated by Jared. Do not hand-edit it.
#include "jinja_replacements.h"
#include <utility>
// This is a list of prompt templates known to GPT4All and their associated replacements which are automatically used
// instead when loading the chat template from GGUF. These exist for two primary reasons:
// - HuggingFace model authors make ugly chat templates because they do not expect the end user to see them;
// - and chat templates occasionally use features we do not support. This is less true now that we use minja.
// The substitution list.
const std::unordered_map<std::string_view, std::string_view> CHAT_TEMPLATE_SUBSTITUTIONS {
// calme-2.1-phi3.5-4b.Q6_K.gguf (reported by ThilotE on Discord), Phi-3.5-mini-instruct-Q4_0.gguf (nomic-ai/gpt4all#3345)
{
// original
R"TEMPLATE({% for message in messages %}{% if message['role'] == 'system' and message['content'] %}{{'<|system|>
' + message['content'] + '<|end|>
'}}{% elif message['role'] == 'user' %}{{'<|user|>
' + message['content'] + '<|end|>
'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>
' + message['content'] + '<|end|>
'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>
' }}{% else %}{{ eos_token }}{% endif %})TEMPLATE",
// replacement
R"TEMPLATE({%- for message in messages %}
{%- if message['role'] == 'system' and message['content'] %}
{{- '<|system|>\n' + message['content'] + '<|end|>\n' }}
{%- elif message['role'] == 'user' %}
{{- '<|user|>\n' + message['content'] + '<|end|>\n' }}
{%- elif message['role'] == 'assistant' %}
{{- '<|assistant|>\n' + message['content'] + '<|end|>\n' }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|assistant|>\n' }}
{%- else %}
{{- eos_token }}
{%- endif %})TEMPLATE",
},
// DeepSeek-R1-Distill-Qwen-7B-Q4_0.gguf
{
// original
R"TEMPLATE({% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<User>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<Assistant><tool▁calls▁begin><tool▁call▁begin>' + tool['type'] + '<tool▁sep>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<tool▁call▁end>'}}{%- set ns.is_first = true -%}{%- else %}{{'\n' + '<tool▁call▁begin>' + tool['type'] + '<tool▁sep>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<tool▁call▁end>'}}{{'<tool▁calls▁end><end▁of▁sentence>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<tool▁outputs▁end>' + message['content'] + '<end▁of▁sentence>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<Assistant>' + content + '<end▁of▁sentence>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<tool▁outputs▁begin><tool▁output▁begin>' + message['content'] + '<tool▁output▁end>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\n<tool▁output▁begin>' + message['content'] + '<tool▁output▁end>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<tool▁outputs▁end>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<Assistant>'}}{% endif %})TEMPLATE",
// replacement
R"TEMPLATE({%- if not add_generation_prompt is defined %}
{%- set add_generation_prompt = false %}
{%- endif %}
{%- if messages[0]['role'] == 'system' %}
{{- messages[0]['content'] }}
{%- endif %}
{%- for message in messages %}
{%- if message['role'] == 'user' %}
{{- '<｜User｜>' + message['content'] }}
{%- endif %}
{%- if message['role'] == 'assistant' %}
{%- set content = message['content'] | regex_replace('^[\\s\\S]*</think>', '') %}
{{- '<｜Assistant｜>' + content + '<｜end▁of▁sentence｜>' }}
{%- endif %}
{%- endfor -%}
{%- if add_generation_prompt %}
{{- '<｜Assistant｜>' }}
{%- endif %})TEMPLATE",
},
// gemma-2-9b-it-Q4_0.gguf (nomic-ai/gpt4all#3282)
{
// original
R"TEMPLATE({{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '
' + message['content'] | trim + '<end_of_turn>
' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model
'}}{% endif %})TEMPLATE",
// replacement
R"TEMPLATE({{- bos_token }}
{%- if messages[0]['role'] == 'system' %}
{{- raise_exception('System role not supported') }}
{%- endif %}
{%- for message in messages %}
{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}
{{- raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}
{%- endif %}
{%- if message['role'] == 'assistant' %}
{%- set role = 'model' %}
{%- else %}
{%- set role = message['role'] %}
{%- endif %}
{{- '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<start_of_turn>model\n' }}
{%- endif %})TEMPLATE",
},
// ghost-7b-v0.9.1-Q4_0.gguf
{
// original
R"TEMPLATE({% for message in messages %}
{% if message['role'] == 'user' %}
{{ '<|user|>
' + message['content'] + eos_token }}
{% elif message['role'] == 'system' %}
{{ '<|system|>
' + message['content'] + eos_token }}
{% elif message['role'] == 'assistant' %}
{{ '<|assistant|>
' + message['content'] + eos_token }}
{% endif %}
{% if loop.last and add_generation_prompt %}
{{ '<|assistant|>' }}
{% endif %}
{% endfor %})TEMPLATE",
// replacement
R"TEMPLATE({%- for message in messages %}
{%- if message['role'] == 'user' %}
{{- '<|user|>\n' + message['content'] + eos_token }}
{%- elif message['role'] == 'system' %}
{{- '<|system|>\n' + message['content'] + eos_token }}
{%- elif message['role'] == 'assistant' %}
{{- '<|assistant|>\n' + message['content'] + eos_token }}
{%- endif %}
{%- if loop.last and add_generation_prompt %}
{{- '<|assistant|>' }}
{%- endif %}
{%- endfor %})TEMPLATE",
},
// granite-3.1-3b-a800m-instruct-Q4_0.gguf, granite-3.1-8b-instruct-Q4_0.gguf (nomic-ai/gpt4all#3471)
{
// original
R"TEMPLATE({%- if messages[0]['role'] == 'system' %}{%- set system_message = messages[0]['content'] %}{%- set loop_messages = messages[1:] %}{%- else %}{%- set system_message = "Knowledge Cutoff Date: April 2024. You are Granite, developed by IBM." %}{%- if tools and documents %}{%- set system_message = system_message + " You are a helpful AI assistant with access to the following tools. When a tool is required to answer the user's query, respond with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request. Write the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data." %}{%- elif tools %}{%- set system_message = system_message + " You are a helpful AI assistant with access to the following tools. When a tool is required to answer the user's query, respond with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request." %}{%- elif documents %}{%- set system_message = system_message + " Write the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data." %}{%- else %}{%- set system_message = system_message + " You are a helpful AI assistant." %}{%- endif %}{%- if controls and 'citations' in controls and documents %}{%- set system_message = system_message + ' In your response, use the symbols <co> and </co> to indicate when a fact comes from a document in the search result, e.g <co>0</co> for a fact from document 0. Afterwards, list all the citations with their corresponding documents in an ordered list.' %}{%- endif %}{%- if controls and 'hallucinations' in controls and documents %}{%- set system_message = system_message + ' Finally, after the response is written, include a numbered list of sentences from the response that are potentially hallucinated and not based in the documents.' %}{%- endif %}{%- set loop_messages = messages %}{%- endif %}{{- '<|start_of_role|>system<|end_of_role|>' + system_message + '<|end_of_text|> ' }}{%- if tools %}{{- '<|start_of_role|>tools<|end_of_role|>' }}{{- tools | tojson(indent=4) }}{{- '<|end_of_text|> ' }}{%- endif %}{%- if documents %}{{- '<|start_of_role|>documents<|end_of_role|>' }}{%- for document in documents %}{{- 'Document ' + loop.index0 | string + ' ' }}{{- document['text'] }}{%- if not loop.last %}{{- ' '}}{%- endif%}{%- endfor %}{{- '<|end_of_text|> ' }}{%- endif %}{%- for message in loop_messages %}{{- '<|start_of_role|>' + message['role'] + '<|end_of_role|>' + message['content'] + '<|end_of_text|> ' }}{%- if loop.last and add_generation_prompt %}{{- '<|start_of_role|>assistant' }}{%- if controls %}{{- ' ' + controls | tojson()}}{%- endif %}{{- '<|end_of_role|>' }}{%- endif %}{%- endfor %})TEMPLATE",
// replacement
R"TEMPLATE({%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content'] %}
{%- set loop_messages = messages[1:] %}
{%- else %}
{%- set system_message = "Knowledge Cutoff Date: April 2024. You are Granite, developed by IBM. You are a helpful AI assistant." %}
{%- set loop_messages = messages %}
{%- endif %}
{{- '<|start_of_role|>system<|end_of_role|>' + system_message + '<|end_of_text|> ' }}
{%- for message in loop_messages %}
{{- '<|start_of_role|>' + message['role'] + '<|end_of_role|>' + message['content'] + '<|end_of_text|> ' }}
{%- if loop.last and add_generation_prompt %}
{{- '<|start_of_role|>assistant<|end_of_role|>' }}
{%- endif %}
{%- endfor %})TEMPLATE",
},
// Hermes-3-Llama-3.2-3B.Q4_0.gguf, mistral-7b-openorca.gguf2.Q4_0.gguf
{
// original
R"TEMPLATE({% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '
' + message['content'] + '<|im_end|>' + '
'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
' }}{% endif %})TEMPLATE",
// replacement
R"TEMPLATE({%- for message in messages %}
{{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>\n' }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- endif %})TEMPLATE",
},
// Llama-3.2-1B-Instruct-Q4_0.gguf, Llama-3.2-3B-Instruct-Q4_0.gguf, SummLlama3.2-3B-Q4_0.gguf (nomic-ai/gpt4all#3309)
{
// original
R"TEMPLATE({{- bos_token }}
{%- if custom_tools is defined %}
{%- set tools = custom_tools %}
{%- endif %}
{%- if not tools_in_user_message is defined %}
{%- set tools_in_user_message = true %}
{%- endif %}
{%- if not date_string is defined %}
{%- if strftime_now is defined %}
{%- set date_string = strftime_now("%d %b %Y") %}
{%- else %}
{%- set date_string = "26 Jul 2024" %}
{%- endif %}
{%- endif %}
{%- if not tools is defined %}
{%- set tools = none %}
{%- endif %}
{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content']|trim %}
{%- set messages = messages[1:] %}
{%- else %}
{%- set system_message = "" %}
{%- endif %}
{#- System message #}
{{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
{%- if tools is not none %}
{{- "Environment: ipython\n" }}
{%- endif %}
{{- "Cutting Knowledge Date: December 2023\n" }}
{{- "Today Date: " + date_string + "\n\n" }}
{%- if tools is not none and not tools_in_user_message %}
{{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{%- endif %}
{{- system_message }}
{{- "<|eot_id|>" }}
{#- Custom tools are passed in a user message with some extra guidance #}
{%- if tools_in_user_message and not tools is none %}
{#- Extract the first user message so we can plug it in here #}
{%- if messages | length != 0 %}
{%- set first_user_message = messages[0]['content']|trim %}
{%- set messages = messages[1:] %}
{%- else %}
{{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }}
{%- endif %}
{{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}
{{- "Given the following functions, please respond with a JSON for a function call " }}
{{- "with its proper arguments that best answers the given prompt.\n\n" }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{{- first_user_message + "<|eot_id|>"}}
{%- endif %}
{%- for message in messages %}
{%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}
{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}
{%- elif 'tool_calls' in message %}
{%- if not message.tool_calls|length == 1 %}
{{- raise_exception("This model only supports single tool-calls at once!") }}
{%- endif %}
{%- set tool_call = message.tool_calls[0].function %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
{{- '{"name": "' + tool_call.name + '", ' }}
{{- '"parameters": ' }}
{{- tool_call.arguments | tojson }}
{{- "}" }}
{{- "<|eot_id|>" }}
{%- elif message.role == "tool" or message.role == "ipython" %}
{{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }}
{%- if message.content is mapping or message.content is iterable %}
{{- message.content | tojson }}
{%- else %}
{{- message.content }}
{%- endif %}
{{- "<|eot_id|>" }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %})TEMPLATE",
// replacement
R"TEMPLATE({{- bos_token }}
{%- set date_string = strftime_now('%d %b %Y') %}
{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content'] | trim %}
{%- set loop_start = 1 %}
{%- else %}
{%- set system_message = '' %}
{%- set loop_start = 0 %}
{%- endif %}
{#- System message #}
{{- '<|start_header_id|>system<|end_header_id|>\n\n' }}
{{- 'Cutting Knowledge Date: December 2023\n' }}
{{- 'Today Date: ' + date_string + '\n\n' }}
{{- system_message }}
{{- '<|eot_id|>' }}
{%- for message in messages %}
{%- if loop.index0 >= loop_start %}
{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %})TEMPLATE",
},
// Llama-3.3-70B-Instruct-Q4_0.gguf (nomic-ai/gpt4all#3305)
{
// original
R"TEMPLATE({{- bos_token }}
{%- if custom_tools is defined %}
{%- set tools = custom_tools %}
{%- endif %}
{%- if not tools_in_user_message is defined %}
{%- set tools_in_user_message = true %}
{%- endif %}
{%- if not date_string is defined %}
{%- set date_string = "26 Jul 2024" %}
{%- endif %}
{%- if not tools is defined %}
{%- set tools = none %}
{%- endif %}
{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content']|trim %}
{%- set messages = messages[1:] %}
{%- else %}
{%- set system_message = "" %}
{%- endif %}
{#- System message + builtin tools #}
{{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
{%- if builtin_tools is defined or tools is not none %}
{{- "Environment: ipython\n" }}
{%- endif %}
{%- if builtin_tools is defined %}
{{- "Tools: " + builtin_tools | reject('equalto', 'code_interpreter') | join(", ") + "\n\n"}}
{%- endif %}
{{- "Cutting Knowledge Date: December 2023\n" }}
{{- "Today Date: " + date_string + "\n\n" }}
{%- if tools is not none and not tools_in_user_message %}
{{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{%- endif %}
{{- system_message }}
{{- "<|eot_id|>" }}
{#- Custom tools are passed in a user message with some extra guidance #}
{%- if tools_in_user_message and not tools is none %}
{#- Extract the first user message so we can plug it in here #}
{%- if messages | length != 0 %}
{%- set first_user_message = messages[0]['content']|trim %}
{%- set messages = messages[1:] %}
{%- else %}
{{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }}
{%- endif %}
{{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}
{{- "Given the following functions, please respond with a JSON for a function call " }}
{{- "with its proper arguments that best answers the given prompt.\n\n" }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{{- first_user_message + "<|eot_id|>"}}
{%- endif %}
{%- for message in messages %}
{%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}
{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}
{%- elif 'tool_calls' in message %}
{%- if not message.tool_calls|length == 1 %}
{{- raise_exception("This model only supports single tool-calls at once!") }}
{%- endif %}
{%- set tool_call = message.tool_calls[0].function %}
{%- if builtin_tools is defined and tool_call.name in builtin_tools %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
{{- "<|python_tag|>" + tool_call.name + ".call(" }}
{%- for arg_name, arg_val in tool_call.arguments | items %}
{{- arg_name + '="' + arg_val + '"' }}
{%- if not loop.last %}
{{- ", " }}
{%- endif %}
{%- endfor %}
{{- ")" }}
{%- else %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
{{- '{"name": "' + tool_call.name + '", ' }}
{{- '"parameters": ' }}
{{- tool_call.arguments | tojson }}
{{- "}" }}
{%- endif %}
{%- if builtin_tools is defined %}
{#- This means we're in ipython mode #}
{{- "<|eom_id|>" }}
{%- else %}
{{- "<|eot_id|>" }}
{%- endif %}
{%- elif message.role == "tool" or message.role == "ipython" %}
{{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }}
{%- if message.content is mapping or message.content is iterable %}
{{- message.content | tojson }}
{%- else %}
{{- message.content }}
{%- endif %}
{{- "<|eot_id|>" }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %})TEMPLATE",
// replacement
R"TEMPLATE({{- bos_token }}
{%- set date_string = strftime_now('%d %b %Y') %}
{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content'] | trim %}
{%- set loop_start = 1 %}
{%- else %}
{%- set system_message = '' %}
{%- set loop_start = 0 %}
{%- endif %}
{#- System message #}
{{- '<|start_header_id|>system<|end_header_id|>\n\n' }}
{{- 'Cutting Knowledge Date: December 2023\n' }}
{{- 'Today Date: ' + date_string + '\n\n' }}
{{- system_message }}
{{- '<|eot_id|>' }}
{%- for message in messages %}
{%- if loop.index0 >= loop_start %}
{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %})TEMPLATE",
},
// Llama3-DiscoLeo-Instruct-8B-32k-v0.1-Q4_0.gguf (nomic-ai/gpt4all#3347)
{
// original
R"TEMPLATE({% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>
'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>
' }}{% endif %})TEMPLATE",
// replacement
R"TEMPLATE({%- for message in messages %}
{%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' %}
{%- if loop.index0 == 0 %}
{%- set content = bos_token + content %}
{%- endif %}
{{- content }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %})TEMPLATE",
},
// Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf
{
// original
R"TEMPLATE({% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>
'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>
' }})TEMPLATE",
// replacement
R"TEMPLATE({%- set loop_messages = messages %}
{%- for message in loop_messages %}
{%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}
{%- if loop.index0 == 0 %}
{%- set content = bos_token + content %}
{%- endif %}
{{- content }}
{%- endfor %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }})TEMPLATE",
},
// Meta-Llama-3-8B-Instruct.Q4_0.gguf
{
// original
R"TEMPLATE({% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>
'+ message['content'] | trim + '<|eot_id|>' %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>
' }}{% endif %})TEMPLATE",
// replacement
R"TEMPLATE({%- set loop_messages = messages %}
{%- for message in loop_messages %}
{%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}
{{- content }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %})TEMPLATE",
},
// Mistral-Nemo-Instruct-2407-Q4_0.gguf (nomic-ai/gpt4all#3284)
{
// original
R"TEMPLATE({%- if messages[0]["role"] == "system" %}
{%- set system_message = messages[0]["content"] %}
{%- set loop_messages = messages[1:] %}
{%- else %}
{%- set loop_messages = messages %}
{%- endif %}
{%- if not tools is defined %}
{%- set tools = none %}
{%- endif %}
{%- set user_messages = loop_messages | selectattr("role", "equalto", "user") | list %}
{#- This block checks for alternating user/assistant messages, skipping tool calling messages #}
{%- set ns = namespace() %}
{%- set ns.index = 0 %}
{%- for message in loop_messages %}
{%- if not (message.role == "tool" or message.role == "tool_results" or (message.tool_calls is defined and message.tool_calls is not none)) %}
{%- if (message["role"] == "user") != (ns.index % 2 == 0) %}
{{- raise_exception("After the optional system message, conversation roles must alternate user/assistant/user/assistant/...") }}
{%- endif %}
{%- set ns.index = ns.index + 1 %}
{%- endif %}
{%- endfor %}
{{- bos_token }}
{%- for message in loop_messages %}
{%- if message["role"] == "user" %}
{%- if tools is not none and (message == user_messages[-1]) %}
{{- "[AVAILABLE_TOOLS][" }}
{%- for tool in tools %}
{%- set tool = tool.function %}
{{- '{"type": "function", "function": {' }}
{%- for key, val in tool.items() if key != "return" %}
{%- if val is string %}
{{- '"' + key + '": "' + val + '"' }}
{%- else %}
{{- '"' + key + '": ' + val|tojson }}
{%- endif %}
{%- if not loop.last %}
{{- ", " }}
{%- endif %}
{%- endfor %}
{{- "}}" }}
{%- if not loop.last %}
{{- ", " }}
{%- else %}
{{- "]" }}
{%- endif %}
{%- endfor %}
{{- "[/AVAILABLE_TOOLS]" }}
{%- endif %}
{%- if loop.last and system_message is defined %}
{{- "[INST]" + system_message + "\n\n" + message["content"] + "[/INST]" }}
{%- else %}
{{- "[INST]" + message["content"] + "[/INST]" }}
{%- endif %}
{%- elif (message.tool_calls is defined and message.tool_calls is not none) %}
{{- "[TOOL_CALLS][" }}
{%- for tool_call in message.tool_calls %}
{%- set out = tool_call.function|tojson %}
{{- out[:-1] }}
{%- if not tool_call.id is defined or tool_call.id|length != 9 %}
{{- raise_exception("Tool call IDs should be alphanumeric strings with length 9!") }}
{%- endif %}
{{- ', "id": "' + tool_call.id + '"}' }}
{%- if not loop.last %}
{{- ", " }}
{%- else %}
{{- "]" + eos_token }}
{%- endif %}
{%- endfor %}
{%- elif message["role"] == "assistant" %}
{{- message["content"] + eos_token}}
{%- elif message["role"] == "tool_results" or message["role"] == "tool" %}
{%- if message.content is defined and message.content.content is defined %}
{%- set content = message.content.content %}
{%- else %}
{%- set content = message.content %}
{%- endif %}
{{- '[TOOL_RESULTS]{"content": ' + content|string + ", " }}
{%- if not message.tool_call_id is defined or message.tool_call_id|length != 9 %}
{{- raise_exception("Tool call IDs should be alphanumeric strings with length 9!") }}
{%- endif %}
{{- '"call_id": "' + message.tool_call_id + '"}[/TOOL_RESULTS]' }}
{%- else %}
{{- raise_exception("Only user and assistant roles are supported, with the exception of an initial optional system message!") }}
{%- endif %}
{%- endfor %})TEMPLATE",
// replacement
R"TEMPLATE({%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content'] %}
{%- set loop_start = 1 %}
{%- else %}
{%- set loop_start = 0 %}
{%- endif %}
{{- bos_token }}
{%- for message in messages %}
{#- This block checks for alternating user/assistant messages, skipping tool calling messages #}
{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}
{{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}
{%- endif %}
{%- if message['role'] == 'user' %}
{%- if loop.last and loop_start == 1 %}
{{- '[INST]' + system_message + '\n\n' + message['content'] + '[/INST]' }}
{%- else %}
{{- '[INST]' + message['content'] + '[/INST]' }}
{%- endif %}
{%- elif message['role'] == 'assistant' %}
{{- message['content'] + eos_token }}
{%- else %}
{{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}
{%- endif %}
{%- endfor %})TEMPLATE",
},
// Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf
{
// original
R"TEMPLATE({% for message in messages %}{{'<|im_start|>' + message['role'] + '
' + message['content'] + '<|im_end|>' + '
'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
' }}{% endif %})TEMPLATE",
// replacement
R"TEMPLATE({%- for message in messages %}
{{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>\n' }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- endif %})TEMPLATE",
},
// occiglot-7b-de-en-instruct.Q4_0.gguf (nomic-ai/gpt4all#3283)
{
// original
R"TEMPLATE({{'<s>'}}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful assistant. Please give a long and detailed answer.' %}{% endif %}{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{{'<|im_start|>system
' + system_message + '<|im_end|>
'}}{% endif %}{{'<|im_start|>' + message['role'] + '
' + message['content'] + '<|im_end|>' + '
'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
' }}{% endif %})TEMPLATE",
// replacement
R"TEMPLATE({{- bos_token }}
{%- if messages[0]['role'] == 'system' %}
{%- set loop_start = 1 %}
{%- set system_message = messages[0]['content'] %}
{%- else %}
{%- set loop_start = 0 %}
{%- set system_message = 'You are a helpful assistant. Please give a long and detailed answer.' %}
{%- endif %}
{{- '<|im_start|>system\n' + system_message + '<|im_end|>\n' }}
{%- for message in messages %}
{%- if loop.index0 >= loop_start %}
{{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>\n' }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- endif %})TEMPLATE",
},
// OLMoE-1B-7B-0125-Instruct-Q4_0.gguf (nomic-ai/gpt4all#3471)
{
// original
R"TEMPLATE({{ bos_token }}{% for message in messages %}{% if message['role'] == 'system' %}{{ '<|system|>
' + message['content'] + '
' }}{% elif message['role'] == 'user' %}{{ '<|user|>
' + message['content'] + '
' }}{% elif message['role'] == 'assistant' %}{% if not loop.last %}{{ '<|assistant|>
' + message['content'] + eos_token + '
' }}{% else %}{{ '<|assistant|>
' + message['content'] + eos_token }}{% endif %}{% endif %}{% if loop.last and add_generation_prompt %}{{ '<|assistant|>
' }}{% endif %}{% endfor %})TEMPLATE",
// replacement
R"TEMPLATE({{- bos_token }}
{%- for message in messages %}
{%- if message['role'] == 'system' %}
{{- '<|system|>\n' + message['content'] + '\n' }}
{%- elif message['role'] == 'user' %}
{{- '<|user|>\n' + message['content'] + '\n' }}
{%- elif message['role'] == 'assistant' %}
{%- if not loop.last %}
{{- '<|assistant|>\n' + message['content'] + eos_token + '\n' }}
{%- else %}
{{- '<|assistant|>\n' + message['content'] + eos_token }}
{%- endif %}
{%- endif %}
{%- if loop.last and add_generation_prompt %}
{{- '<|assistant|>\n' }}
{%- endif %}
{%- endfor %})TEMPLATE",
},
// OLMoE-1B-7B-0924-Instruct-Q4_0.gguf (nomic-ai/gpt4all#3471)
{
// original
R"TEMPLATE({{ bos_token }}{% for message in messages %}
{% if message['role'] == 'system' %}
{{ '<|system|>
' + message['content'] }}
{% elif message['role'] == 'user' %}
{{ '<|user|>
' + message['content'] }}
{% elif message['role'] == 'assistant' %}
{{ '<|assistant|>
' + message['content'] + eos_token }}
{% endif %}
{% if loop.last and add_generation_prompt %}
{{ '<|assistant|>' }}
{% endif %}
{% endfor %})TEMPLATE",
// replacement
R"TEMPLATE({{- bos_token }}
{%- for message in messages %}
{%- if message['role'] == 'system' %}
{{- '<|system|>\n' + message['content'] }}
{%- elif message['role'] == 'user' %}
{{- '<|user|>\n' + message['content'] }}
{%- elif message['role'] == 'assistant' %}
{{- '<|assistant|>\n' + message['content'] + eos_token }}
{%- endif %}
{%- if loop.last and add_generation_prompt %}
{{- '<|assistant|>' }}
{%- endif %}
{%- endfor %})TEMPLATE",
},
// Phi-3.1-mini-128k-instruct-Q4_0.gguf (nomic-ai/gpt4all#3346)
{
// original
R"TEMPLATE({% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>
' + message['content'] + '<|end|>
'}}{% elif message['role'] == 'user' %}{{'<|user|>
' + message['content'] + '<|end|>
'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>
' + message['content'] + '<|end|>
'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>
' }}{% else %}{{ eos_token }}{% endif %})TEMPLATE",
// replacement
R"TEMPLATE({%- for message in messages %}
{%- if message['role'] == 'system' %}
{{- '<|system|>\n' + message['content'] + '<|end|>\n' }}
{%- elif message['role'] == 'user' %}
{{- '<|user|>\n' + message['content'] + '<|end|>\n' }}
{%- elif message['role'] == 'assistant' %}
{{- '<|assistant|>\n' + message['content'] + '<|end|>\n' }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|assistant|>\n' }}
{%- else %}
{{- eos_token }}
{%- endif %})TEMPLATE",
},
// Phi-3-mini-4k-instruct.Q4_0.gguf
{
// original
R"TEMPLATE({{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '
' + message['content'] + '<|end|>
' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>
' }}{% else %}{{ eos_token }}{% endif %})TEMPLATE",
// replacement
R"TEMPLATE({{- bos_token }}
{%- for message in messages %}
{{- '<|' + message['role'] + '|>\n' + message['content'] + '<|end|>\n' }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|assistant|>\n' }}
{%- else %}
{{- eos_token }}
{%- endif %})TEMPLATE",
},
// qwen2-1_5b-instruct-q4_0.gguf (nomic-ai/gpt4all#3263), qwen2-72b-instruct-q4_0.gguf
{
// original
R"TEMPLATE({% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system
You are a helpful assistant.<|im_end|>
' }}{% endif %}{{'<|im_start|>' + message['role'] + '
' + message['content'] + '<|im_end|>' + '
'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
' }}{% endif %})TEMPLATE",
// replacement
R"TEMPLATE({%- for message in messages %}
{%- if loop.first and messages[0]['role'] != 'system' %}
{{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
{%- endif %}
{{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>\n' }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- endif %})TEMPLATE",
},
};
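Each entry above pairs a chat template exactly as shipped inside a known GGUF file with a cleaned-up replacement. A minimal sketch of how such a table can be consulted at load time, assuming only the map's extern declaration; the helper function and its name are illustrative, not part of this diff:

#include <string>
#include <string_view>
#include <unordered_map>

extern const std::unordered_map<std::string_view, std::string_view> CHAT_TEMPLATE_SUBSTITUTIONS;

// Exact-match lookup: the keys are verbatim template strings, so any
// byte-level difference falls through and the original template is kept.
static std::string normalizeChatTemplate(std::string tmpl)
{
    if (auto it = CHAT_TEMPLATE_SUBSTITUTIONS.find(tmpl); it != CHAT_TEMPLATE_SUBSTITUTIONS.end())
        return std::string(it->second);
    return tmpl;
}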

View File

@ -1,6 +0,0 @@
#pragma once
#include <string_view>
#include <unordered_map>
extern const std::unordered_map<std::string_view, std::string_view> CHAT_TEMPLATE_SUBSTITUTIONS;

View File

@ -12,9 +12,6 @@
#include <QSettings>
#include <QUrl>
#include <QtLogging>
#include <QtSystemDetection>
#include <string>
#ifdef GPT4ALL_OFFLINE_INSTALLER
# include <QDesktopServices>
@ -28,7 +25,6 @@
using namespace Qt::Literals::StringLiterals;
class MyLLM: public LLM { };
Q_GLOBAL_STATIC(MyLLM, llmInstance)
LLM *LLM::globalInstance()

View File

@ -3,8 +3,7 @@
#include <QObject>
#include <QString>
#include <QtTypes>
#include <QtGlobal>
class LLM : public QObject
{

View File

@ -5,14 +5,10 @@
#include "mysettings.h"
#include <QCoreApplication>
#include <QDebug>
#include <QGlobalStatic>
#include <QGuiApplication>
#include <QList>
#include <QUrl>
#include <Qt>
#include <QtLogging>
class MyLocalDocs: public LocalDocs { };
Q_GLOBAL_STATIC(MyLocalDocs, localDocsInstance)

View File

@ -2,14 +2,11 @@
#define LOCALDOCS_H
#include "database.h"
#include "localdocsmodel.h"
#include "localdocsmodel.h" // IWYU pragma: keep
#include <QObject>
#include <QString>
#include <QStringList> // IWYU pragma: keep
// IWYU pragma: no_forward_declare LocalDocsModel
#include <QStringList>
class LocalDocs : public QObject
{

View File

@ -5,11 +5,11 @@
#include <QDateTime>
#include <QMap>
#include <QVector> // IWYU pragma: keep
#include <QVector>
#include <QtGlobal>
#include <utility>
LocalDocsCollectionsModel::LocalDocsCollectionsModel(QObject *parent)
: QSortFilterProxyModel(parent)
{

View File

@ -4,19 +4,17 @@
#include "database.h"
#include <QAbstractListModel>
#include <QByteArray>
#include <QHash>
#include <QList>
#include <QObject> // IWYU pragma: keep
#include <QObject>
#include <QSortFilterProxyModel>
#include <QString>
#include <QVariant>
#include <Qt>
#include <functional>
class QByteArray;
class QVariant;
template <typename Key, typename T> class QHash;
class LocalDocsCollectionsModel : public QSortFilterProxyModel
{
Q_OBJECT

View File

@ -2,10 +2,8 @@
#include <QDateTime>
#include <QDebug>
#include <QFlags>
#include <QGlobalStatic>
#include <QIODevice>
#include <QMutexLocker> // IWYU pragma: keep
#include <QStandardPaths>
#include <cstdio>
@ -14,7 +12,6 @@
using namespace Qt::Literals::StringLiterals;
class MyLogger: public Logger { };
Q_GLOBAL_STATIC(MyLogger, loggerInstance)
Logger *Logger::globalInstance()
@ -65,11 +62,8 @@ void Logger::messageHandler(QtMsgType type, const QMessageLogContext &, const QS
}
// Get time and date
auto timestamp = QDateTime::currentDateTime().toString();
const std::string out = u"[%1] (%2): %3\n"_s.arg(typeString, timestamp, msg).toStdString();
// Write message
QMutexLocker locker(&logger->m_mutex);
const std::string out = u"[%1] (%2): %3\n"_s.arg(typeString, timestamp, msg).toStdString();
logger->m_file.write(out.c_str());
logger->m_file.flush();
std::cerr << out;

View File

@ -2,24 +2,19 @@
#define LOGGER_H
#include <QFile>
#include <QMutex>
#include <QString>
#include <QtLogging>
class Logger
{
QFile m_file;
class Logger {
public:
explicit Logger();
static Logger *globalInstance();
private:
static void messageHandler(QtMsgType type, const QMessageLogContext &context, const QString &msg);
private:
QFile m_file;
QMutex m_mutex;
public:
static Logger *globalInstance();
explicit Logger();
friend class MyLogger;
};

View File

@ -2,7 +2,6 @@
#include <Cocoa/Cocoa.h>
void MacOSDock::showIcon()
{
[[NSApplication sharedApplication] setActivationPolicy:NSApplicationActivationPolicyRegular];

View File

@ -7,37 +7,22 @@
#include "modellist.h"
#include "mysettings.h"
#include "network.h"
#include "toolmodel.h"
#include <gpt4all-backend/llmodel.h>
#include <singleapplication.h>
#include <QByteArray>
#include <QCoreApplication>
#include <QFont>
#include <QFontDatabase>
#include <QList>
#include <QObject>
#include <QQmlApplicationEngine>
#include <QQmlContext>
#include <QQuickWindow>
#include <QSettings>
#include <QString>
#include <QStringList>
#include <QUrl>
#include <QVariant>
#include <QWindow>
#include <Qt>
#include <QtAssert>
#include <QtSystemDetection>
#if G4A_CONFIG(force_d3d12)
# include <QSGRendererInterface>
#endif
#ifndef GPT4ALL_USE_QTPDF
# include <fpdfview.h>
#endif
#ifdef Q_OS_LINUX
# include <QIcon>
@ -72,10 +57,6 @@ static void raiseWindow(QWindow *window)
int main(int argc, char *argv[])
{
#ifndef GPT4ALL_USE_QTPDF
FPDF_InitLibrary();
#endif
QCoreApplication::setOrganizationName("nomic.ai");
QCoreApplication::setOrganizationDomain("gpt4all.io");
QCoreApplication::setApplicationName("GPT4All");
@ -93,27 +74,24 @@ int main(int argc, char *argv[])
return 0;
}
#if G4A_CONFIG(force_d3d12)
QQuickWindow::setGraphicsApi(QSGRendererInterface::Direct3D12);
#endif
#ifdef Q_OS_LINUX
app.setWindowIcon(QIcon(":/gpt4all/icons/gpt4all.svg"));
#endif
// set search path before constructing the MySettings instance, which relies on this
{
auto appDirPath = QCoreApplication::applicationDirPath();
QStringList searchPaths {
#ifdef Q_OS_DARWIN
u"%1/../Frameworks"_s.arg(appDirPath),
#else
appDirPath,
u"%1/../lib"_s.arg(appDirPath),
QString llmodelSearchPaths = QCoreApplication::applicationDirPath();
const QString libDir = QCoreApplication::applicationDirPath() + "/../lib/";
if (LLM::directoryExists(libDir))
llmodelSearchPaths += ";" + libDir;
#if defined(Q_OS_MAC)
const QString binDir = QCoreApplication::applicationDirPath() + "/../../../";
if (LLM::directoryExists(binDir))
llmodelSearchPaths += ";" + binDir;
const QString frameworksDir = QCoreApplication::applicationDirPath() + "/../Frameworks/";
if (LLM::directoryExists(frameworksDir))
llmodelSearchPaths += ";" + frameworksDir;
#endif
};
LLModel::Implementation::setImplementationsSearchPath(searchPaths.join(u';').toStdString());
}
LLModel::Implementation::setImplementationsSearchPath(llmodelSearchPaths.toStdString());
// Set the local and language translation before the qml engine has even been started. This will
// use the default system locale unless the user has explicitly set it to use a different one.
@ -138,8 +116,6 @@ int main(int argc, char *argv[])
qmlRegisterSingletonInstance("download", 1, 0, "Download", Download::globalInstance());
qmlRegisterSingletonInstance("network", 1, 0, "Network", Network::globalInstance());
qmlRegisterSingletonInstance("localdocs", 1, 0, "LocalDocs", LocalDocs::globalInstance());
qmlRegisterSingletonInstance("toollist", 1, 0, "ToolList", ToolModel::globalInstance());
qmlRegisterUncreatableMetaObject(ToolEnums::staticMetaObject, "toolenums", 1, 0, "ToolEnums", "Error: only enums");
qmlRegisterUncreatableMetaObject(MySettingsEnums::staticMetaObject, "mysettingsenums", 1, 0, "MySettingsEnums", "Error: only enums");
{
@ -187,9 +163,5 @@ int main(int argc, char *argv[])
// Otherwise, we can get a heap-use-after-free inside of llama.cpp.
ChatListModel::globalInstance()->destroyChats();
#ifndef GPT4ALL_USE_QTPDF
FPDF_DestroyLibrary();
#endif
return res;
}

View File

@ -1,7 +1,6 @@
#include "modellist.h"
#include "download.h"
#include "jinja_replacements.h"
#include "mysettings.h"
#include "network.h"
@ -9,11 +8,9 @@
#include <QChar>
#include <QCoreApplication>
#include <QCryptographicHash>
#include <QDebug>
#include <QDir>
#include <QDirIterator>
#include <QEvent>
#include <QEventLoop>
#include <QFile>
#include <QFileInfo>
@ -31,15 +28,14 @@
#include <QSslConfiguration>
#include <QSslSocket>
#include <QStandardPaths>
#include <QStringList> // IWYU pragma: keep
#include <QStringList>
#include <QTextStream>
#include <QTimer>
#include <QUrl>
#include <QtAssert>
#include <QtLogging>
#include <QtPreprocessorSupport>
#include <algorithm>
#include <cstddef>
#include <iterator>
#include <optional>
#include <string>
@ -66,15 +62,15 @@ static const QString RMODEL_CHAT_TEMPLATE = uR"(<chat>
{%- if loop.first %}
{{- '### Context:\n' }}
{%- endif %}
{{- ('Collection: ' + source.collection + '\n' +
'Path: ' + source.path + '\n' +
'Excerpt: ' + source.text + '\n\n') | escape }}
{{- 'Collection: ' + source.collection + '\n' +
'Path: ' + source.path + '\n' +
'Excerpt: ' + source.text + '\n\n' }}
{%- endfor %}
{%- endif %}
{%- for attachment in message.prompt_attachments %}
{{- (attachment.processed_content + '\n\n') | escape }}
{{- attachment.processed_content + '\n\n' }}
{%- endfor %}
{{- message.content | escape }}
{{- message.content }}
{{- '</' + message['role'] + '>' }}
{%- endfor %}
</chat>)"_s;
@ -356,18 +352,7 @@ QVariant ModelInfo::defaultChatTemplate() const
auto path = (dirpath + filename()).toUtf8();
auto res = LLModel::Implementation::chatTemplate(path.constData());
if (res) {
std::string ggufTmpl(std::move(*res));
if (ggufTmpl.size() >= 2 && ggufTmpl.end()[-2] != '\n' && ggufTmpl.end()[-1] == '\n')
ggufTmpl.erase(ggufTmpl.end() - 1); // strip trailing newline for e.g. Llama-3.2-3B-Instruct
if (
auto replacement = CHAT_TEMPLATE_SUBSTITUTIONS.find(ggufTmpl);
replacement != CHAT_TEMPLATE_SUBSTITUTIONS.end()
) {
qWarning() << "automatically substituting chat template for" << filename();
auto &[badTemplate, goodTemplate] = *replacement;
ggufTmpl = goodTemplate;
}
m_modelChatTemplate = QString::fromStdString(ggufTmpl);
m_modelChatTemplate = QString::fromStdString(*res);
} else {
qWarning().nospace() << "failed to get chat template for " << filename() << ": " << res.error().c_str();
m_modelChatTemplate = QString(); // do not retry
@ -488,25 +473,14 @@ GPT4AllDownloadableModels::GPT4AllDownloadableModels(QObject *parent)
connect(this, &GPT4AllDownloadableModels::modelReset, this, &GPT4AllDownloadableModels::countChanged);
}
void GPT4AllDownloadableModels::filter(const QVector<QString> &keywords)
{
m_keywords = keywords;
invalidateFilter();
}
bool GPT4AllDownloadableModels::filterAcceptsRow(int sourceRow,
const QModelIndex &sourceParent) const
{
QModelIndex index = sourceModel()->index(sourceRow, 0, sourceParent);
const QString description = sourceModel()->data(index, ModelList::DescriptionRole).toString();
bool hasDescription = !description.isEmpty();
bool hasDescription = !sourceModel()->data(index, ModelList::DescriptionRole).toString().isEmpty();
bool isClone = sourceModel()->data(index, ModelList::IsCloneRole).toBool();
bool isDiscovered = sourceModel()->data(index, ModelList::IsDiscoveredRole).toBool();
bool isOnline = sourceModel()->data(index, ModelList::OnlineRole).toBool();
bool satisfiesKeyword = m_keywords.isEmpty();
for (const QString &k : m_keywords)
satisfiesKeyword = satisfiesKeyword || description.contains(k);
return !isOnline && !isDiscovered && hasDescription && !isClone && satisfiesKeyword;
return !isDiscovered && hasDescription && !isClone;
}
int GPT4AllDownloadableModels::count() const
@ -1625,6 +1599,7 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
QString requiresVersion = obj["requires"].toString();
QString versionRemoved = obj["removedIn"].toString();
QString url = obj["url"].toString();
QByteArray modelHash = obj["md5sum"].toString().toLatin1();
bool isDefault = obj.contains("isDefault") && obj["isDefault"] == u"true"_s;
bool disableGUI = obj.contains("disableGUI") && obj["disableGUI"] == u"true"_s;
QString description = obj["description"].toString();
@ -1635,16 +1610,6 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
QString type = obj["type"].toString();
bool isEmbeddingModel = obj["embeddingModel"].toBool();
QByteArray modelHash;
ModelInfo::HashAlgorithm hashAlgorithm;
if (auto it = obj.find("sha256sum"_L1); it != obj.end()) {
modelHash = it->toString().toLatin1();
hashAlgorithm = ModelInfo::Sha256;
} else {
modelHash = obj["md5sum"].toString().toLatin1();
hashAlgorithm = ModelInfo::Md5;
}
// Some models aren't supported in the GUI at all
if (disableGUI)
continue;
@ -1673,7 +1638,7 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
{ ModelList::FilenameRole, modelFilename },
{ ModelList::FilesizeRole, modelFilesize },
{ ModelList::HashRole, modelHash },
{ ModelList::HashAlgorithmRole, hashAlgorithm },
{ ModelList::HashAlgorithmRole, ModelInfo::Md5 },
{ ModelList::DefaultRole, isDefault },
{ ModelList::DescriptionRole, description },
{ ModelList::RequiresVersionRole, requiresVersion },
@ -2357,56 +2322,3 @@ void ModelList::handleDiscoveryItemErrorOccurred(QNetworkReply::NetworkError cod
qWarning() << u"ERROR: Discovery item failed with error code \"%1-%2\""_s
.arg(code).arg(reply->errorString()).toStdString();
}
QStringList ModelList::remoteModelList(const QString &apiKey, const QUrl &baseUrl)
{
QStringList modelList;
// Create the request
QNetworkRequest request;
request.setUrl(baseUrl.resolved(QUrl("models")));
request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
// Add the Authorization header
const QString bearerToken = QString("Bearer %1").arg(apiKey);
request.setRawHeader("Authorization", bearerToken.toUtf8());
// Make the GET request
QNetworkReply *reply = m_networkManager.get(request);
// We use a local event loop to wait for the request to complete
QEventLoop loop;
connect(reply, &QNetworkReply::finished, &loop, &QEventLoop::quit);
loop.exec();
// Check for errors
if (reply->error() == QNetworkReply::NoError) {
// Parse the JSON response
const QByteArray responseData = reply->readAll();
const QJsonDocument jsonDoc = QJsonDocument::fromJson(responseData);
if (!jsonDoc.isNull() && jsonDoc.isObject()) {
QJsonObject rootObj = jsonDoc.object();
QJsonValue dataValue = rootObj.value("data");
if (dataValue.isArray()) {
QJsonArray dataArray = dataValue.toArray();
for (const QJsonValue &val : dataArray) {
if (val.isObject()) {
QJsonObject obj = val.toObject();
const QString modelId = obj.value("id").toString();
modelList.append(modelId);
}
}
}
}
} else {
// Handle network error (e.g. print it to qDebug)
qWarning() << "Error retrieving models:" << reply->errorString();
}
// Clean up
reply->deleteLater();
return modelList;
}
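A hypothetical call site for the function above (the key and endpoint are placeholder values, and the ModelList::globalInstance() accessor is assumed); note that the base URL must end in a slash so QUrl::resolved() appends "models" instead of replacing the last path segment:

QStringList ids = ModelList::globalInstance()->remoteModelList(
    QStringLiteral("YOUR_API_KEY"), QUrl(QStringLiteral("https://api.example.com/v1/")));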

View File

@ -5,29 +5,25 @@
#include <QByteArray>
#include <QDateTime>
#include <QHash>
#include <QLatin1StringView> // IWYU pragma: keep
#include <QLatin1StringView>
#include <QList>
#include <QMutex>
#include <QNetworkAccessManager>
#include <QNetworkReply>
#include <QObject>
#include <QPair> // IWYU pragma: keep
#include <QQmlEngine> // IWYU pragma: keep
#include <QPair>
#include <QQmlEngine>
#include <QSortFilterProxyModel>
#include <QSslError>
#include <QString>
#include <QVariant>
#include <QVector> // IWYU pragma: keep
#include <QVector>
#include <Qt>
#include <QtTypes>
#include <QtGlobal>
#include <optional>
#include <utility>
// IWYU pragma: no_forward_declare QObject
// IWYU pragma: no_forward_declare QSslError
class QUrl;
using namespace Qt::Literals::StringLiterals;
@ -273,7 +269,7 @@ private:
std::optional<QString> m_chatTemplate;
mutable std::optional<QString> m_modelChatTemplate;
QString m_systemMessage;
QString m_chatNamePrompt = "Describe the above conversation. Your entire response must be three words or less.";
QString m_chatNamePrompt = "Describe the above conversation in seven words or less.";
QString m_suggestedFollowUpPrompt = "Suggest three very short factual follow-up questions, inspired by the previous conversation and excerpts, that have not been answered yet or cannot be found in them.";
friend class MySettings;
friend class ModelList;
@ -306,16 +302,11 @@ public:
explicit GPT4AllDownloadableModels(QObject *parent);
int count() const;
Q_INVOKABLE void filter(const QVector<QString> &keywords);
Q_SIGNALS:
void countChanged();
protected:
bool filterAcceptsRow(int sourceRow, const QModelIndex &sourceParent) const override;
private:
QVector<QString> m_keywords;
};
class HuggingFaceDownloadableModels : public QSortFilterProxyModel
@ -534,8 +525,6 @@ public:
Q_INVOKABLE void discoverSearch(const QString &discover);
Q_INVOKABLE QStringList remoteModelList(const QString &apiKey, const QUrl &baseUrl);
Q_SIGNALS:
void countChanged();
void installedModelsChanged();

View File

@ -11,27 +11,22 @@
#include <QFileInfo>
#include <QGlobalStatic>
#include <QGuiApplication>
#include <QIODevice> // IWYU pragma: keep
#include <QIODevice>
#include <QMap>
#include <QMetaObject>
#include <QStandardPaths>
#include <QThread>
#include <QUrl>
#include <QVariant>
#include <QtLogging>
#include <QtAssert>
#include <algorithm>
#include <string>
#include <thread>
#include <vector>
#if !(defined(Q_OS_MAC) && defined(__aarch64__))
#include <cstring>
#endif
using namespace Qt::Literals::StringLiterals;
// used only for settings serialization, do not translate
static const QStringList suggestionModeNames { "LocalDocsOnly", "On", "Off" };
static const QStringList chatThemeNames { "Light", "Dark", "LegacyDark" };

View File

@ -4,24 +4,20 @@
#include "modellist.h" // IWYU pragma: keep
#include <QDateTime>
#include <QLatin1StringView> // IWYU pragma: keep
#include <QLatin1StringView>
#include <QList>
#include <QModelIndex>
#include <QObject>
#include <QSettings>
#include <QString>
#include <QStringList> // IWYU pragma: keep
#include <QStringList>
#include <QTranslator>
#include <QVariant>
#include <QVector>
#include <cstdint>
#include <memory>
#include <optional>
// IWYU pragma: no_forward_declare QModelIndex
class QLocale;
namespace MySettingsEnums {
Q_NAMESPACE

View File

@ -8,6 +8,7 @@
#include "localdocsmodel.h"
#include "modellist.h"
#include "mysettings.h"
#include "utils.h"
#include <gpt4all-backend/llmodel.h>
@ -28,6 +29,7 @@
#include <QSslSocket>
#include <QSysInfo>
#include <Qt>
#include <QtGlobal>
#include <QtLogging>
#include <QUrl>
#include <QUuid>
@ -47,7 +49,6 @@ using namespace Qt::Literals::StringLiterals;
#define STR_(x) #x
#define STR(x) STR_(x)
static const char MIXPANEL_TOKEN[] = "ce362e568ddaee16ed243eaffb5860a2";
#ifdef __clang__
@ -241,12 +242,6 @@ void Network::handleJsonUploadFinished()
m_activeUploads.removeAll(jsonReply);
if (jsonReply->error() != QNetworkReply::NoError) {
qWarning() << "Request to" << jsonReply->url().toString() << "failed:" << jsonReply->errorString();
jsonReply->deleteLater();
return;
}
QVariant response = jsonReply->attribute(QNetworkRequest::HttpStatusCodeAttribute);
Q_ASSERT(response.isValid());
bool ok;
@ -454,11 +449,6 @@ void Network::handleIpifyFinished()
QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());
if (!reply)
return;
if (reply->error() != QNetworkReply::NoError) {
qWarning() << "Request to" << reply->url().toString() << "failed:" << reply->errorString();
reply->deleteLater();
return;
}
QVariant response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute);
Q_ASSERT(response.isValid());
@ -483,11 +473,6 @@ void Network::handleMixpanelFinished()
QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());
if (!reply)
return;
if (reply->error() != QNetworkReply::NoError) {
qWarning() << "Request to" << reply->url().toString() << "failed:" << reply->errorString();
reply->deleteLater();
return;
}
QVariant response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute);
Q_ASSERT(response.isValid());
@ -526,11 +511,6 @@ void Network::handleHealthFinished()
QNetworkReply *healthReply = qobject_cast<QNetworkReply *>(sender());
if (!healthReply)
return;
if (healthReply->error() != QNetworkReply::NoError) {
qWarning() << "Request to" << healthReply->url().toString() << "failed:" << healthReply->errorString();
healthReply->deleteLater();
return;
}
QVariant response = healthReply->attribute(QNetworkRequest::HttpStatusCodeAttribute);
Q_ASSERT(response.isValid());

View File

@ -11,14 +11,7 @@
#include <QSslError>
#include <QString>
#include <QVariant>
#include <QVariantMap> // IWYU pragma: keep
#include <QVector> // IWYU pragma: keep
// IWYU pragma: no_forward_declare QByteArray
// IWYU pragma: no_forward_declare QNetworkReply
// IWYU pragma: no_forward_declare QSslError
class QUrl;
#include <QVector>
struct KeyValue {
QString key;

View File

@ -4,10 +4,9 @@
#include "chatmodel.h"
#include "modellist.h"
#include "mysettings.h"
#include "utils.h" // IWYU pragma: keep
#include "utils.h"
#include <fmt/format.h>
#include <gpt4all-backend/llmodel.h>
#include <QByteArray>
#include <QCborArray>
@ -16,38 +15,32 @@
#include <QDateTime>
#include <QDebug>
#include <QHostAddress>
#include <QHttpHeaders>
#include <QHttpServer>
#include <QHttpServerRequest>
#include <QHttpServerResponder>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QJsonValue>
#include <QLatin1StringView>
#include <QPair> // IWYU pragma: keep
#include <QTcpServer>
#include <QPair>
#include <QVariant>
#include <Qt>
#include <QtAssert>
#include <QtCborCommon>
#include <QtGlobal>
#include <QtLogging>
#include <QtMinMax>
#include <QtPreprocessorSupport>
#include <QtTypes>
#include <cstdint>
#include <exception>
#include <iostream>
#include <optional>
#include <span>
#include <stdexcept>
#include <string>
#include <string_view>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <variant>
#include <vector>
#if QT_VERSION >= QT_VERSION_CHECK(6, 8, 0)
# include <QTcpServer>
#endif
using namespace std::string_literals;
using namespace Qt::Literals::StringLiterals;
@ -458,17 +451,23 @@ static QJsonObject requestFromJson(const QByteArray &request)
void Server::start()
{
m_server = std::make_unique<QHttpServer>(this);
#if QT_VERSION >= QT_VERSION_CHECK(6, 8, 0)
auto *tcpServer = new QTcpServer(m_server.get());
#else
auto *tcpServer = m_server.get();
#endif
auto port = MySettings::globalInstance()->networkPort();
if (!tcpServer->listen(QHostAddress::LocalHost, port)) {
qWarning() << "Server ERROR: Failed to listen on port" << port;
return;
}
#if QT_VERSION >= QT_VERSION_CHECK(6, 8, 0)
if (!m_server->bind(tcpServer)) {
qWarning() << "Server ERROR: Failed to HTTP server to socket" << port;
return;
}
#endif
m_server->route("/v1/models", QHttpServerRequest::Method::Get,
[](const QHttpServerRequest &) {
@ -608,12 +607,19 @@ void Server::start()
}
);
#if QT_VERSION >= QT_VERSION_CHECK(6, 8, 0)
m_server->addAfterRequestHandler(this, [](const QHttpServerRequest &req, QHttpServerResponse &resp) {
Q_UNUSED(req);
auto headers = resp.headers();
headers.append("Access-Control-Allow-Origin"_L1, "*"_L1);
resp.setHeaders(std::move(headers));
});
#else
m_server->afterRequest([](QHttpServerResponse &&resp) {
resp.addHeader("Access-Control-Allow-Origin", "*");
return std::move(resp);
});
#endif
connect(this, &Server::requestResetResponseState, m_chat, &Chat::resetResponseState, Qt::BlockingQueuedConnection);
}
@ -688,8 +694,7 @@ auto Server::handleCompletionRequest(const CompletionRequest &request)
promptCtx,
/*usedLocalDocs*/ false);
} catch (const std::exception &e) {
m_chatModel->setResponseValue(e.what());
m_chatModel->setError();
emit responseChanged(e.what());
emit responseStopped(0);
return makeError(QHttpServerResponder::StatusCode::InternalServerError);
}
@ -767,16 +772,16 @@ auto Server::handleChatRequest(const ChatRequest &request)
Q_ASSERT(!request.messages.isEmpty());
// adds prompt/response items to GUI
std::vector<MessageInput> messages;
QList<ChatItem> chatItems;
for (auto &message : request.messages) {
using enum ChatRequest::Message::Role;
switch (message.role) {
case System: messages.push_back({ MessageInput::Type::System, message.content }); break;
case User: messages.push_back({ MessageInput::Type::Prompt, message.content }); break;
case Assistant: messages.push_back({ MessageInput::Type::Response, message.content }); break;
case System: chatItems.emplace_back(ChatItem::system_tag, message.content); break;
case User: chatItems.emplace_back(ChatItem::prompt_tag, message.content); break;
case Assistant: chatItems.emplace_back(ChatItem::response_tag, message.content); break;
}
}
auto startOffset = m_chatModel->appendResponseWithHistory(messages);
auto subrange = m_chatModel->appendResponseWithHistory(chatItems);
// FIXME(jared): taking parameters from the UI inhibits reproducibility of results
LLModel::PromptContext promptCtx {
@ -796,10 +801,9 @@ auto Server::handleChatRequest(const ChatRequest &request)
for (int i = 0; i < request.n; ++i) {
ChatPromptResult result;
try {
result = promptInternalChat(m_collections, promptCtx, startOffset);
result = promptInternalChat(m_collections, promptCtx, subrange);
} catch (const std::exception &e) {
m_chatModel->setResponseValue(e.what());
m_chatModel->setError();
emit responseChanged(e.what());
emit responseStopped(0);
return makeError(QHttpServerResponder::StatusCode::InternalServerError);
}

View File

@ -8,7 +8,7 @@
#include <QHttpServerResponse>
#include <QJsonObject>
#include <QList>
#include <QObject> // IWYU pragma: keep
#include <QObject>
#include <QString>
#include <memory>

View File

@ -1,76 +0,0 @@
#include "tool.h"
#include <QDataStream>
#include <QtTypes>
#include <string>
using json = nlohmann::ordered_json;
json::object_t Tool::jinjaValue() const
{
json::array_t paramList;
const QList<ToolParamInfo> p = parameters();
for (auto &info : p) {
std::string typeStr;
switch (info.type) {
using enum ToolEnums::ParamType;
case String: typeStr = "string"; break;
case Number: typeStr = "number"; break;
case Integer: typeStr = "integer"; break;
case Object: typeStr = "object"; break;
case Array: typeStr = "array"; break;
case Boolean: typeStr = "boolean"; break;
case Null: typeStr = "null"; break;
}
paramList.emplace_back(json::initializer_list_t {
{ "name", info.name.toStdString() },
{ "type", typeStr },
{ "description", info.description.toStdString() },
{ "required", info.required },
});
}
return {
{ "name", name().toStdString() },
{ "description", description().toStdString() },
{ "function", function().toStdString() },
{ "parameters", paramList },
{ "symbolicFormat", symbolicFormat().toStdString() },
{ "examplePrompt", examplePrompt().toStdString() },
{ "exampleCall", exampleCall().toStdString() },
{ "exampleReply", exampleReply().toStdString() },
};
}
void ToolCallInfo::serialize(QDataStream &stream, int version)
{
stream << name;
stream << params.size();
for (auto param : params) {
stream << param.name;
stream << param.type;
stream << param.value;
}
stream << result;
stream << error;
stream << errorString;
}
bool ToolCallInfo::deserialize(QDataStream &stream, int version)
{
stream >> name;
qsizetype count;
stream >> count;
for (int i = 0; i < count; ++i) {
ToolParam p;
stream >> p.name;
stream >> p.type;
stream >> p.value;
params.append(p); // retain the parameter; without this the deserialized list is dropped
}
stream >> result;
stream >> error;
stream >> errorString;
return true;
}
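A minimal round-trip sketch for the serialize/deserialize pair above; the field values are invented, and streaming of the enum-typed members is assumed to work as the methods themselves already rely on it:

ToolCallInfo saved;
saved.name = QStringLiteral("example_tool"); // hypothetical tool name
saved.params = { { QStringLiteral("arg"), ToolEnums::ParamType::String, QVariant(QStringLiteral("value")) } };
saved.result = QStringLiteral("ok");

QByteArray blob;
QDataStream writer(&blob, QIODevice::WriteOnly);
saved.serialize(writer, /*version*/ 0);

ToolCallInfo loaded;
QDataStream reader(&blob, QIODevice::ReadOnly);
loaded.deserialize(reader, /*version*/ 0);
// loaded == saved now holds, via ToolCallInfo::operator==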

View File

@ -1,137 +0,0 @@
#ifndef TOOL_H
#define TOOL_H
#include <nlohmann/json.hpp>
#include <QList>
#include <QObject>
#include <QString>
#include <QVariant>
#include <QtGlobal>
class QDataStream;
using json = nlohmann::ordered_json;
namespace ToolEnums
{
Q_NAMESPACE
enum class Error
{
NoError = 0,
TimeoutError = 2,
UnknownError = 499,
};
Q_ENUM_NS(Error)
enum class ParamType { String, Number, Integer, Object, Array, Boolean, Null }; // json schema types
Q_ENUM_NS(ParamType)
enum class ParseState {
None,
InTagChoice,
InStart,
Partial,
Complete,
};
Q_ENUM_NS(ParseState)
}
struct ToolParamInfo
{
QString name;
ToolEnums::ParamType type;
QString description;
bool required;
};
Q_DECLARE_METATYPE(ToolParamInfo)
struct ToolParam
{
QString name;
ToolEnums::ParamType type;
QVariant value;
bool operator==(const ToolParam& other) const
{
return name == other.name && type == other.type && value == other.value;
}
};
Q_DECLARE_METATYPE(ToolParam)
struct ToolCallInfo
{
QString name;
QList<ToolParam> params;
QString result;
ToolEnums::Error error = ToolEnums::Error::NoError;
QString errorString;
void serialize(QDataStream &stream, int version);
bool deserialize(QDataStream &stream, int version);
bool operator==(const ToolCallInfo& other) const
{
return name == other.name && result == other.result && params == other.params
&& error == other.error && errorString == other.errorString;
}
};
Q_DECLARE_METATYPE(ToolCallInfo)
class Tool : public QObject
{
Q_OBJECT
Q_PROPERTY(QString name READ name CONSTANT)
Q_PROPERTY(QString description READ description CONSTANT)
Q_PROPERTY(QString function READ function CONSTANT)
Q_PROPERTY(QList<ToolParamInfo> parameters READ parameters CONSTANT)
Q_PROPERTY(QString examplePrompt READ examplePrompt CONSTANT)
Q_PROPERTY(QString exampleCall READ exampleCall CONSTANT)
Q_PROPERTY(QString exampleReply READ exampleReply CONSTANT)
public:
Tool() : QObject(nullptr) {}
virtual ~Tool() {}
virtual void run(const QList<ToolParam> &params) = 0;
virtual bool interrupt() = 0;
// Tools should set these if they encounter errors. For instance, a tool depending upon the network
// might set these error variables if the network is not available.
virtual ToolEnums::Error error() const { return ToolEnums::Error::NoError; }
virtual QString errorString() const { return QString(); }
// [Required] Human readable name of the tool.
virtual QString name() const = 0;
// [Required] Human readable description of what the tool does. Use this tool to: {{description}}
virtual QString description() const = 0;
// [Required] Must be unique. Name of the function to invoke. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
virtual QString function() const = 0;
// [Optional] List describing the tool's parameters. An empty list specifies no parameters.
virtual QList<ToolParamInfo> parameters() const { return {}; }
// [Optional] The symbolic format of the toolcall.
virtual QString symbolicFormat() const { return QString(); }
// [Optional] A human generated example of a prompt that could result in this tool being called.
virtual QString examplePrompt() const { return QString(); }
// [Optional] An example of this tool call that pairs with the example query. It should be the
// complete string that the model must generate.
virtual QString exampleCall() const { return QString(); }
// [Optional] An example of the reply the model might generate given the result of the tool call.
virtual QString exampleReply() const { return QString(); }
bool operator==(const Tool &other) const { return function() == other.function(); }
json::object_t jinjaValue() const;
Q_SIGNALS:
void runComplete(const ToolCallInfo &info);
};
#endif // TOOL_H
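To illustrate the interface, a hypothetical minimal subclass; the tool, its function identifier, and its single parameter are invented for this sketch:

class EchoTool : public Tool
{
    Q_OBJECT
public:
    QString name() const override        { return QStringLiteral("Echo"); }
    QString description() const override { return QStringLiteral("repeat a string back to the model"); }
    QString function() const override    { return QStringLiteral("echo_text"); }
    QList<ToolParamInfo> parameters() const override
    {
        return { { QStringLiteral("text"), ToolEnums::ParamType::String,
                   QStringLiteral("The text to echo"), /*required*/ true } };
    }
    void run(const QList<ToolParam> &params) override
    {
        ToolCallInfo info;
        info.name = function();
        info.params = params;
        info.result = params.isEmpty() ? QString() : params.first().value.toString();
        emit runComplete(info); // a real tool may do this asynchronously
    }
    bool interrupt() override { return true; } // nothing long-running to cancel
};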

View File

@ -1,202 +0,0 @@
#include "toolcallparser.h"
#include "tool.h"
#include <QChar>
#include <QSet>
#include <QtAssert>
#include <QtTypes>
#include <stdexcept>
ToolCallParser::ToolCallParser()
: ToolCallParser(ToolCallConstants::AllTagNames)
{}
ToolCallParser::ToolCallParser(const QStringList &tagNames)
{
QSet<QChar> firstChars;
for (auto &name : tagNames) {
if (name.isEmpty())
throw std::invalid_argument("ToolCallParser(): tag names must not be empty");
if (firstChars.contains(name.at(0)))
throw std::invalid_argument("ToolCallParser(): tag names must not share any prefix");
firstChars << name.at(0);
m_possibleStartTags << makeStartTag(name).toUtf8();
m_possibleEndTags << makeEndTag (name).toUtf8();
}
reset();
}
void ToolCallParser::reset()
{
// Resets the search state, but not the buffer or global state
resetSearchState();
// These are global states maintained between update calls
m_buffers.clear();
m_buffers << QByteArray();
}
void ToolCallParser::resetSearchState()
{
m_expected = {'<'};
m_expectedIndex = 0;
m_state = ToolEnums::ParseState::None;
m_toolCall.clear();
m_startTagBuffer.clear();
m_endTagBuffer.clear();
m_currentTagIndex = -1;
m_startIndex = -1;
m_endIndex = -1;
}
bool ToolCallParser::isExpected(char c) const
{
return m_expected.isEmpty() || m_expected.contains(c);
}
void ToolCallParser::setExpected(const QList<QByteArray> &tags)
{
m_expected.clear();
for (const auto &tag : tags) {
Q_ASSERT(tag.size() > m_expectedIndex);
m_expected << tag.at(m_expectedIndex);
}
}
QByteArray ToolCallParser::startTag() const
{
if (m_currentTagIndex < 0)
return {};
return m_possibleStartTags.at(m_currentTagIndex);
}
QByteArray ToolCallParser::endTag() const
{
if (m_currentTagIndex < 0)
return {};
return m_possibleEndTags.at(m_currentTagIndex);
}
QByteArray &ToolCallParser::currentBuffer()
{
return m_buffers.last();
}
// This method is called with an arbitrary string and a current state. This method should take the
// current state into account and then parse through the update character by character to arrive at
// the new state.
void ToolCallParser::update(const QByteArray &update)
{
currentBuffer().append(update);
for (qsizetype i = currentBuffer().size() - update.size(); i < currentBuffer().size(); ++i) {
const char c = currentBuffer()[i];
const bool foundMatch = isExpected(c);
if (!foundMatch) {
resetSearchState();
continue;
}
switch (m_state) {
case ToolEnums::ParseState::None:
{
m_expectedIndex = 1;
setExpected(m_possibleStartTags);
m_state = ToolEnums::ParseState::InTagChoice;
m_startIndex = i;
break;
}
case ToolEnums::ParseState::InTagChoice:
{
for (int tagIdx = 0; tagIdx < m_possibleStartTags.size(); ++tagIdx) {
const auto &tag = m_possibleStartTags.at(tagIdx);
if (c == tag.at(1)) m_currentTagIndex = tagIdx;
}
if (m_currentTagIndex >= 0) {
m_expectedIndex = 2;
setExpected({m_possibleStartTags.at(m_currentTagIndex)});
m_state = ToolEnums::ParseState::InStart;
} else
resetSearchState();
break;
}
case ToolEnums::ParseState::InStart:
{
m_startTagBuffer.append(c);
const auto startTag = this->startTag();
Q_ASSERT(!startTag.isEmpty());
if (m_expectedIndex == startTag.size() - 1) {
m_expectedIndex = 0;
setExpected({});
m_state = ToolEnums::ParseState::Partial;
} else {
++m_expectedIndex;
Q_ASSERT(m_currentTagIndex >= 0);
setExpected({startTag});
}
break;
}
case ToolEnums::ParseState::Partial:
{
Q_ASSERT(m_currentTagIndex >= 0);
const auto endTag = this->endTag();
Q_ASSERT(!endTag.isEmpty());
m_toolCall.append(c);
m_endTagBuffer.append(c);
if (m_endTagBuffer.size() > endTag.size())
m_endTagBuffer.remove(0, 1);
if (m_endTagBuffer == endTag) {
m_endIndex = i + 1;
m_toolCall.chop(endTag.size());
m_state = ToolEnums::ParseState::Complete;
m_endTagBuffer.clear();
}
break;
}
case ToolEnums::ParseState::Complete:
{
// Already complete, do nothing further
break;
}
}
}
}
bool ToolCallParser::splitIfPossible()
{
// The first split happens when we're in a partial state
if (m_buffers.size() < 2 && m_state == ToolEnums::ParseState::Partial) {
Q_ASSERT(m_startIndex >= 0);
const auto beforeToolCall = currentBuffer().left(m_startIndex);
const auto toolCall = currentBuffer().mid (m_startIndex);
m_buffers = { beforeToolCall, toolCall };
return true;
}
// The second split happens when we're in the complete state
if (m_buffers.size() < 3 && m_state == ToolEnums::ParseState::Complete) {
Q_ASSERT(m_endIndex >= 0);
const auto &beforeToolCall = m_buffers.first();
const auto toolCall = currentBuffer().left(m_endIndex);
const auto afterToolCall = currentBuffer().mid (m_endIndex);
m_buffers = { beforeToolCall, toolCall, afterToolCall };
return true;
}
return false;
}
QStringList ToolCallParser::buffers() const
{
QStringList result;
result.reserve(m_buffers.size());
for (const auto &buffer : m_buffers)
result << QString::fromUtf8(buffer);
return result;
}
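A usage sketch for the parser, assuming the javascript_interpret tag from ToolCallConstants; the chunk boundaries are arbitrary, as they would be for tokens streaming from a model:

ToolCallParser parser;
const QByteArrayList chunks {
    "Let me compute that.\n",
    "<javascript_interpret>\nconsole.log(6 * 7);",
    "\n</javascript_interpret>",
};
for (const QByteArray &chunk : chunks) {
    parser.update(chunk);
    parser.splitIfPossible(); // split [before, tool call, after] as boundaries are found
}
if (parser.state() == ToolEnums::ParseState::Complete)
    qDebug() << parser.toolCall(); // "\nconsole.log(6 * 7);\n" (tags stripped)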

View File

@ -1,73 +0,0 @@
#ifndef TOOLCALLPARSER_H
#define TOOLCALLPARSER_H
#include <QByteArray>
#include <QList>
#include <QString>
#include <QStringList> // IWYU pragma: keep
namespace ToolEnums { enum class ParseState; }
using namespace Qt::Literals::StringLiterals;
class ToolCallParser
{
public:
ToolCallParser();
ToolCallParser(const QStringList &tagNames);
void reset();
void update(const QByteArray &update);
QString toolCall() const { return QString::fromUtf8(m_toolCall); }
int startIndex() const { return m_startIndex; }
ToolEnums::ParseState state() const { return m_state; }
QByteArray startTag() const;
QByteArray endTag() const;
bool splitIfPossible();
QStringList buffers() const;
int numberOfBuffers() const { return m_buffers.size(); }
static QString makeStartTag(const QString &name) { return u"<%1>"_s .arg(name); }
static QString makeEndTag (const QString &name) { return u"</%1>"_s.arg(name); }
private:
QByteArray &currentBuffer();
void resetSearchState();
bool isExpected(char c) const;
void setExpected(const QList<QByteArray> &tags);
QList<QByteArray> m_possibleStartTags;
QList<QByteArray> m_possibleEndTags;
QByteArray m_startTagBuffer;
QByteArray m_endTagBuffer;
int m_currentTagIndex;
QList<char> m_expected;
int m_expectedIndex;
ToolEnums::ParseState m_state;
QList<QByteArray> m_buffers;
QByteArray m_toolCall;
int m_startIndex;
int m_endIndex;
};
namespace ToolCallConstants
{
// NB: the parsing code assumes the first char of the various tags differ
inline const QString CodeInterpreterFunction = u"javascript_interpret"_s;
inline const QString CodeInterpreterStartTag = ToolCallParser::makeStartTag(CodeInterpreterFunction);
inline const QString CodeInterpreterEndTag = ToolCallParser::makeEndTag (CodeInterpreterFunction);
inline const QString CodeInterpreterPrefix = u"%1\n```javascript\n"_s.arg(CodeInterpreterStartTag);
inline const QString CodeInterpreterSuffix = u"```\n%1"_s .arg(CodeInterpreterEndTag );
inline const QString ThinkTagName = u"think"_s;
inline const QString ThinkStartTag = ToolCallParser::makeStartTag(ThinkTagName);
inline const QString ThinkEndTag = ToolCallParser::makeEndTag (ThinkTagName);
inline const QStringList AllTagNames { CodeInterpreterFunction, ThinkTagName };
}
#endif // TOOLCALLPARSER_H

View File

@ -1,32 +0,0 @@
#include "toolmodel.h"
#include "codeinterpreter.h"
#include <QCoreApplication>
#include <QEvent>
#include <QGlobalStatic>
class MyToolModel: public ToolModel { };
Q_GLOBAL_STATIC(MyToolModel, toolModelInstance)
ToolModel *ToolModel::globalInstance()
{
return toolModelInstance();
}
ToolModel::ToolModel()
: QAbstractListModel(nullptr)
{
QCoreApplication::instance()->installEventFilter(this);
Tool* codeInterpreter = new CodeInterpreter;
m_tools.append(codeInterpreter);
m_toolMap.insert(codeInterpreter->function(), codeInterpreter);
}
bool ToolModel::eventFilter(QObject *obj, QEvent *ev)
{
if (obj == QCoreApplication::instance() && ev->type() == QEvent::LanguageChange)
emit dataChanged(index(0, 0), index(m_tools.size() - 1, 0));
return false;
}

View File

@ -1,111 +0,0 @@
#ifndef TOOLMODEL_H
#define TOOLMODEL_H
#include "tool.h"
#include <QAbstractListModel>
#include <QByteArray>
#include <QHash>
#include <QList>
#include <QString>
#include <QVariant>
#include <QtPreprocessorSupport>
class ToolModel : public QAbstractListModel
{
Q_OBJECT
Q_PROPERTY(int count READ count NOTIFY countChanged)
public:
static ToolModel *globalInstance();
enum Roles {
NameRole = Qt::UserRole + 1,
DescriptionRole,
FunctionRole,
ParametersRole,
SymbolicFormatRole,
ExamplePromptRole,
ExampleCallRole,
ExampleReplyRole,
};
int rowCount(const QModelIndex &parent = QModelIndex()) const override
{
Q_UNUSED(parent)
return m_tools.size();
}
QVariant data(const QModelIndex &index, int role = Qt::DisplayRole) const override
{
if (!index.isValid() || index.row() < 0 || index.row() >= m_tools.size())
return QVariant();
const Tool *item = m_tools.at(index.row());
switch (role) {
case NameRole:
return item->name();
case DescriptionRole:
return item->description();
case FunctionRole:
return item->function();
case ParametersRole:
return QVariant::fromValue(item->parameters());
case SymbolicFormatRole:
return item->symbolicFormat();
case ExamplePromptRole:
return item->examplePrompt();
case ExampleCallRole:
return item->exampleCall();
case ExampleReplyRole:
return item->exampleReply();
}
return QVariant();
}
QHash<int, QByteArray> roleNames() const override
{
QHash<int, QByteArray> roles;
roles[NameRole] = "name";
roles[DescriptionRole] = "description";
roles[FunctionRole] = "function";
roles[ParametersRole] = "parameters";
roles[SymbolicFormatRole] = "symbolicFormat";
roles[ExamplePromptRole] = "examplePrompt";
roles[ExampleCallRole] = "exampleCall";
roles[ExampleReplyRole] = "exampleReply";
return roles;
}
Q_INVOKABLE Tool* get(int index) const
{
if (index < 0 || index >= m_tools.size()) return nullptr;
return m_tools.at(index);
}
Q_INVOKABLE Tool *get(const QString &id) const
{
if (!m_toolMap.contains(id)) return nullptr;
return m_toolMap.value(id);
}
int count() const { return m_tools.size(); }
Q_SIGNALS:
void countChanged();
void valueChanged(int index, const QString &value);
protected:
bool eventFilter(QObject *obj, QEvent *ev) override;
private:
explicit ToolModel();
~ToolModel() {}
friend class MyToolModel;
QList<Tool*> m_tools;
QHash<QString, Tool*> m_toolMap;
};
#endif // TOOLMODEL_H
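Usage sketch: tools are registered in the ToolModel constructor and looked up by their function identifier, e.g. the built-in code interpreter:

if (Tool *tool = ToolModel::globalInstance()->get(QStringLiteral("javascript_interpret")))
    qDebug() << tool->name() << tool->description();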

View File

@ -5,7 +5,7 @@
#include <QByteArray>
#include <QJsonValue>
#include <QLatin1StringView> // IWYU pragma: keep
#include <QLatin1StringView>
#include <QString>
#include <QStringView>
#include <QUtf8StringView>
@ -13,9 +13,8 @@
#include <initializer_list>
#include <string_view>
#include <utility> // IWYU pragma: keep
#include <utility>
// IWYU pragma: no_forward_declare QJsonValue
class QJsonObject;
@ -41,4 +40,4 @@ MAKE_FORMATTER(QVariant, value.toString().toUtf8());
// alternative to QJsonObject's initializer_list constructor that accepts Latin-1 strings
QJsonObject makeJsonObject(std::initializer_list<std::pair<QLatin1StringView, QJsonValue>> args);
#include "utils.inl" // IWYU pragma: export
#include "utils.inl"

View File

@ -1,6 +1,5 @@
#include <QJsonObject>
inline QJsonObject makeJsonObject(std::initializer_list<std::pair<QLatin1StringView, QJsonValue>> args)
{
QJsonObject obj;

View File

@ -7,16 +7,15 @@
#include <xlsxformat.h>
#include <xlsxworksheet.h>
#include <QChar>
#include <QDateTime>
#include <QDebug>
#include <QLatin1StringView>
#include <QList>
#include <QRegularExpression>
#include <QString>
#include <QStringList> // IWYU pragma: keep
#include <QStringList>
#include <QStringView>
#include <QVariant>
#include <QtGlobal>
#include <QtLogging>
#include <memory>

View File

@ -4,7 +4,6 @@
class QIODevice;
class QString;
class XLSXToMD
{
public:

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff