Mirror of https://github.com/nomic-ai/gpt4all.git
Synced 2025-06-23 00:02:10 -04:00

Compare commits (101 commits)
The author, date, and message columns of the commit table were not preserved in this view; the 101 commit SHAs, in the order listed:

b666d16db5, cd70db29ed, fb72ba1ff5, b968d45c11, 228d5379cf, dd820ef7c4, a7cbc8c3fd,
4d171835ac, 0c28ee7059, 96aeb44210, 29f29773af, d8c04cead8, b1cb46ec2a, b83d06e67f,
7aa339cf40, 1b84182030, 02e12089d3, 09f37a0ff8, 5e7e4b3f78, 22ebd42c32, 051a63f031,
26356f872e, 22b8bc546f, 52164142de, be6347389e, 8c10eccd24, 6ef0bd518e, 04dc157b98,
014bf67c63, 8c9f26e249, d4e6a6e485, a081255951, 36c852b8be, c38c7455d8, 9131f4c432,
6bfa014594, 5af31278b7, a80f023ed2, 126042fdc9, 1f2712d57c, f8f78c6677, 643c733be3,
0734694fb8, e267512db9, 34037f3101, 007a7af1c8, f914ee56c9, 8a0ec5c303, c2ee252ef2,
64dcf7682e, 22b8278ef1, adafa17c37, 343a4b6b6a, 6a8a840681, 88f5dac133, 0d974297a5,
4fbc20ced9, f4f7de51e7, c01ac7fa93, 173fdb18c2, 8790586e57, b98501c786, 49df6464a7,
6b719e99b5, d85fe40de8, 15f66570fe, a97a28fe4f, df2d124c19, 241d5ff40b, 0348189cc1,
4a8a51f946, 867b3dfceb, 58962496b4, 810615d97b, 82175b27c8, 68047d9a60, c871f9eb95,
93c5c001e1, 4812ddf1f2, cc5ed4737f, 7339d42a81, a0abc93701, e2541a24b3, 22f6a7f1bc,
ce6558ec94, 737e164352, c7d7345188, 13e694e6e8, 93b4093761, 183eb9fb43, 2afa9f2f25,
cefca34445, 6bbeac2b9f, 1c89447d63, 2efb336b8a, 3819842bcc, 5ab70da2ae, aa84e2da39,
0f27359c39, eedd0507d9, 680614779e
.circleci/config.yml
@@ -1,13 +1,17 @@
 version: 2.1
 setup: true
 orbs:
-  path-filtering: circleci/path-filtering@1.1.0
+  path-filtering: circleci/path-filtering@1.3.0

 workflows:
   version: 2.1
   generate-config:
     jobs:
       - path-filtering/filter:
+          filters:
+            tags:
+              only:
+                - /.*/
           base-revision: main
           config-path: .circleci/continue_config.yml
           mapping: |

File diff suppressed because it is too large.
.gitmodules (vendored; 12 lines changed)
@@ -17,9 +17,9 @@
 [submodule "gpt4all-chat/deps/QXlsx"]
 	path = gpt4all-chat/deps/QXlsx
 	url = https://github.com/nomic-ai/QXlsx.git
-[submodule "gpt4all-chat/deps/Jinja2Cpp"]
-	path = gpt4all-chat/deps/Jinja2Cpp
-	url = https://github.com/nomic-ai/jinja2cpp.git
+[submodule "gpt4all-chat/deps/minja"]
+	path = gpt4all-chat/deps/minja
+	url = https://github.com/nomic-ai/minja.git
-[submodule "gpt4all-chat/deps/rapidjson"]
-	path = gpt4all-chat/deps/rapidjson
-	url = https://github.com/nomic-ai/rapidjson.git
+[submodule "gpt4all-chat/deps/json"]
+	path = gpt4all-chat/deps/json
+	url = https://github.com/nlohmann/json.git
MAINTAINERS.md
@@ -51,11 +51,6 @@ Thiago Ramos ([@thiagojramos](https://github.com/thiagojramos))<br/>
 E-mail: thiagojramos@outlook.com<br/>
 - pt\_BR translation

-Victor Emanuel ([@SINAPSA-IC](https://github.com/SINAPSA-IC))<br/>
-E-mail: contact@sinapsaro.ro<br/>
-Discord: `@sinapsa_ic_56124_99632`
-- ro\_RO translation
-
 不知火 Shiranui ([@supersonictw](https://github.com/supersonictw))<br/>
 E-mail: supersonic@livemail.tw<br/>
 Discord: `@supersonictw`
@@ -77,6 +72,6 @@ Discord: `@Tim453`
 - Flatpak

 Jack ([@wuodoo](https://github.com/wuodoo))<br/>
-E-mail: 2296103047@qq.com><br/>
+E-mail: 2296103047@qq.com<br/>
 Discord: `@mikage`
 - zh\_CN translation
README.md (22 lines changed)
@@ -1,5 +1,9 @@
 <h1 align="center">GPT4All</h1>

+<p align="center">
+Now with support for DeepSeek R1 Distillations
+</p>
+
 <p align="center">
 <a href="https://www.nomic.ai/gpt4all">Website</a> &bull; <a href="https://docs.gpt4all.io">Documentation</a> &bull; <a href="https://discord.gg/mGZE39AS3e">Discord</a> &bull; <a href="https://www.youtube.com/watch?v=gQcZDXRVJok">YouTube Tutorial</a>
 </p>
@@ -23,9 +27,6 @@ https://github.com/nomic-ai/gpt4all/assets/70534565/513a0f15-4964-4109-89e4-4f9a
 <p align="center">
 GPT4All is made possible by our compute partner <a href="https://www.paperspace.com/">Paperspace</a>.
 </p>
-<p align="center">
-<a href="https://www.phorm.ai/query?projectId=755eecd3-24ad-49cc-abf4-0ab84caacf63"><img src="https://img.shields.io/badge/Phorm-Ask_AI-%23F2777A.svg" alt="phorm.ai"></a>
-</p>

 ## Download Links

@@ -34,6 +35,11 @@ GPT4All is made possible by our compute partner <a href="https://www.paperspace.
 <img src="gpt4all-bindings/python/docs/assets/windows.png" style="height: 1em; width: auto" /> Windows Installer
 </a> —
 </p>
+<p>
+— <a href="https://gpt4all.io/installers/gpt4all-installer-win64-arm.exe">
+<img src="gpt4all-bindings/python/docs/assets/windows.png" style="height: 1em; width: auto" /> Windows ARM Installer
+</a> —
+</p>
 <p>
 — <a href="https://gpt4all.io/installers/gpt4all-installer-darwin.dmg">
 <img src="gpt4all-bindings/python/docs/assets/mac.png" style="height: 1em; width: auto" /> macOS Installer
@@ -45,10 +51,16 @@ GPT4All is made possible by our compute partner <a href="https://www.paperspace.
 </a> —
 </p>
 <p>
-Windows and Linux require Intel Core i3 2nd Gen / AMD Bulldozer, or better. x86-64 only, no ARM.
+The Windows and Linux builds require Intel Core i3 2nd Gen / AMD Bulldozer, or better.
 </p>
 <p>
-macOS requires Monterey 12.6 or newer. Best results with Apple Silicon M-series processors.
+The Windows ARM build supports Qualcomm Snapdragon and Microsoft SQ1/SQ2 processors.
+</p>
+<p>
+The Linux build is x86-64 only (no ARM).
+</p>
+<p>
+The macOS build requires Monterey 12.6 or newer. Best results with Apple Silicon M-series processors.
 </p>

 See the full [System Requirements](gpt4all-chat/system_requirements.md) for more details.
common/common.cmake
@@ -11,7 +11,6 @@ function(gpt4all_add_warning_options target)
         -Wextra-semi
         -Wformat=2
         -Wmissing-include-dirs
-        -Wstrict-overflow=2
         -Wsuggest-override
         -Wvla
         # errors
gpt4all-backend/CMakeLists.txt
@@ -69,7 +69,7 @@ if (LLMODEL_CUDA)
     cmake_minimum_required(VERSION 3.18) # for CMAKE_CUDA_ARCHITECTURES

     # Defaults must be set before enable_language(CUDA).
-    # Keep this in sync with the arch list in ggml/src/CMakeLists.txt.
+    # Keep this in sync with the arch list in ggml/src/CMakeLists.txt (plus 5.0 for non-F16 branch).
     if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
         # 52 == lowest CUDA 12 standard
         # 60 == f16 CUDA intrinsics
@@ -78,7 +78,7 @@ if (LLMODEL_CUDA)
         if (GGML_CUDA_F16 OR GGML_CUDA_DMMV_F16)
             set(CMAKE_CUDA_ARCHITECTURES "60;61;70;75") # needed for f16 CUDA intrinsics
         else()
-            set(CMAKE_CUDA_ARCHITECTURES "52;61;70;75") # lowest CUDA 12 standard + lowest for integer intrinsics
+            set(CMAKE_CUDA_ARCHITECTURES "50;52;61;70;75") # lowest CUDA 12 standard + lowest for integer intrinsics
             #set(CMAKE_CUDA_ARCHITECTURES "OFF") # use this to compile much faster, but only F16 models work
         endif()
     endif()
(submodule commit update)
@@ -1 +1 @@
-Subproject commit 58a55efc4ae5dd3bc12887d47981faa7136027af
+Subproject commit 11f734c3b0334dbae4823b4a7467764e447fc6d6
@@ -53,6 +53,8 @@ static const std::vector<const char *> KNOWN_ARCHES {
     "gpt2",
     // "gptj", -- no inference code
     "gptneox",
+    "granite",
+    "granitemoe",
     "mpt",
     "baichuan",
     "starcoder",
@@ -80,6 +82,7 @@ static const std::vector<const char *> KNOWN_ARCHES {
     "command-r",
     // "dbrx", -- 16x12B parameters
     "olmo",
+    "olmoe",
     "openelm",
     // "arctic", -- 10B+128x3.66B parameters
     "deepseek2",
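For context, a hedged sketch of how a whitelist like `KNOWN_ARCHES` is typically consulted when deciding whether to load a GGUF model; the helper name, `main`, and the abridged list below are illustrative, not the project's exact code:

```cpp
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Abridged, illustrative copy of the whitelist above.
static const std::vector<const char *> KNOWN_ARCHES {
    "gpt2", "gptneox", "granite", "granitemoe", "olmo", "olmoe", "deepseek2",
};

// Accept a model only if its GGUF "general.architecture" value is whitelisted.
static bool isArchSupported(const std::string &arch) {
    return std::any_of(KNOWN_ARCHES.begin(), KNOWN_ARCHES.end(),
                       [&arch](const char *known) { return arch == known; });
}

int main() {
    std::cout << isArchSupported("granitemoe") << ' '  // 1: newly whitelisted above
              << isArchSupported("mamba") << '\n';     // 0: not in the list
}
```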
@@ -140,9 +140,14 @@ const std::vector<LLModel::Implementation> &LLModel::Implementation::implementat
         std::string path;
         // Split the paths string by the delimiter and process each path.
         while (std::getline(ss, path, ';')) {
-            std::u8string u8_path(path.begin(), path.end());
+            fs::directory_iterator iter;
+            try {
+                iter = fs::directory_iterator(std::u8string(path.begin(), path.end()));
+            } catch (const fs::filesystem_error &) {
+                continue; // skip nonexistent path
+            }
             // Iterate over all libraries
-            for (const auto &f : fs::directory_iterator(u8_path)) {
+            for (const auto &f : iter) {
                 const fs::path &p = f.path();

                 if (p.extension() != LIB_FILE_EXT) continue;
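A minimal standalone sketch of the pattern this hunk introduces — constructing the `fs::directory_iterator` inside a try block so that a missing or unreadable search path is skipped rather than throwing out of the scan loop. The semicolon-delimited path list below is hypothetical:

```cpp
#include <filesystem>
#include <iostream>
#include <sstream>
#include <string>

namespace fs = std::filesystem;

int main() {
    // Hypothetical search path list, mirroring the loop in the hunk above.
    std::istringstream ss("/usr/lib/gpt4all;/nonexistent/path");
    std::string path;
    while (std::getline(ss, path, ';')) {
        fs::directory_iterator iter;              // default-constructed == end iterator
        try {
            iter = fs::directory_iterator(path);  // throws fs::filesystem_error on bad paths
        } catch (const fs::filesystem_error &) {
            continue; // skip paths that do not exist or cannot be opened
        }
        for (const auto &entry : iter)
            std::cout << entry.path() << '\n';    // each candidate library file
    }
}
```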
@@ -8,8 +8,10 @@
 | --- | --- | --- |
 | **Theme** | Color theme for the application. Options are `Light`, `Dark`, and `LegacyDark` | `Light` |
 | **Font Size** | Font size setting for text throughout the application. Options are Small, Medium, and Large | Small |
+| **Language and Locale** | The language and locale of that language you wish to use | System Locale |
 | **Device** | Device that will run your models. Options are `Auto` (GPT4All chooses), `Metal` (Apple Silicon M1+), `CPU`, and `GPU` | `Auto` |
 | **Default Model** | Choose your preferred LLM to load by default on startup| Auto |
+| **Suggestion Mode** | Generate suggested follow up questions at the end of responses | When chatting with LocalDocs |
 | **Download Path** | Select a destination on your device to save downloaded models | Windows: `C:\Users\{username}\AppData\Local\nomic.ai\GPT4All`<br><br>Mac: `/Users/{username}/Library/Application Support/nomic.ai/GPT4All/`<br><br>Linux: `/home/{username}/.local/share/nomic.ai/GPT4All` |
 | **Enable Datalake** | Opt-in to sharing interactions with GPT4All community (**anonymous** and **optional**) | Off |

@@ -18,7 +20,7 @@
 | Setting | Description | Default Value |
 | --- | --- | --- |
 | **CPU Threads** | Number of concurrently running CPU threads (more can speed up responses) | 4 |
-| **Save Chat Context** | Save chat context to disk to pick up exactly where a model left off. | Off |
+| **Enable System Tray** | The application will minimize to the system tray / taskbar when the window is closed | Off |
 | **Enable Local Server** | Allow any application on your device to use GPT4All via an OpenAI-compatible GPT4All API | Off |
 | **API Server Port** | Local HTTP port for the local API server | 4891 |

@@ -29,8 +31,11 @@
 | Setting | Description | Default Value |
 | --- | --- | --- |
 | **Name** | Unique name of this model / character| set by model uploader |
-| **System Prompt** | General instructions for the chats this model will be used for | set by model uploader |
-| **Prompt Template** | Format of user <-> assistant interactions for the chats this model will be used for | set by model uploader |
+| **Model File** | Filename (.gguf) of the model | set by model uploader |
+| **System Message** | General instructions for the chats this model will be used for | set by model uploader |
+| **Chat Template** | Format of user <-> assistant interactions for the chats this model will be used for | set by model uploader |
+| **Chat Name Prompt** | Prompt used to automatically generate chat names | Describe the above conversation in seven words or less. |
+| **Suggested FollowUp Prompt** | Prompt used to automatically generate follow up questions after a chat response | Suggest three very short factual follow-up questions that have not been answered yet or cannot be found inspired by the previous conversation and excerpts. |

 ### Clone

gpt4all-chat/CHANGELOG.md
@@ -4,6 +4,90 @@ All notable changes to this project will be documented in this file.

 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).

+## [3.10.0] - 2025-02-24
+
+### Added
+- Whitelist Granite (non-MoE) model architecture (by [@ThiloteE](https://github.com/ThiloteE) in [#3487](https://github.com/nomic-ai/gpt4all/pull/3487))
+- Add support for CUDA compute 5.0 GPUs such as the GTX 750 ([#3499](https://github.com/nomic-ai/gpt4all/pull/3499))
+- Add a Remote Providers tab to the Add Model page ([#3506](https://github.com/nomic-ai/gpt4all/pull/3506))
+
+### Changed
+- Substitute prettier default templates for OLMoE 7B 0924/0125 and Granite 3.1 3B/8B (by [@ThiloteE](https://github.com/ThiloteE) in [#3471](https://github.com/nomic-ai/gpt4all/pull/3471))
+- Build with LLVM Clang 19 on macOS and Ubuntu ([#3500](https://github.com/nomic-ai/gpt4all/pull/3500))
+
+### Fixed
+- Fix several potential crashes ([#3465](https://github.com/nomic-ai/gpt4all/pull/3465))
+- Fix visual spacing issues with deepseek models ([#3470](https://github.com/nomic-ai/gpt4all/pull/3470))
+- Add missing strings to Italian translation (by [@Harvester62](https://github.com/Harvester62) in [#3496](https://github.com/nomic-ai/gpt4all/pull/3496))
+- Update Simplified Chinese translation (by [@Junior2Ran](https://github.com/Junior2Ran) in [#3467](https://github.com/nomic-ai/pull/3467))
+
+## [3.9.0] - 2025-02-04
+
+### Added
+- Whitelist OLMoE and Granite MoE model architectures (no Vulkan) (by [@ThiloteE](https://github.com/ThiloteE) in [#3449](https://github.com/nomic-ai/gpt4all/pull/3449))
+
+### Fixed
+- Fix "index N is not a prompt" when using LocalDocs with reasoning ([#3451](https://github.com/nomic-ai/gpt4all/pull/3451))
+- Work around rendering artifacts on Snapdragon SoCs with Windows ([#3450](https://github.com/nomic-ai/gpt4all/pull/3450))
+- Prevent DeepSeek-R1 reasoning from appearing in chat names and follow-up questions ([#3458](https://github.com/nomic-ai/gpt4all/pull/3458))
+- Fix LocalDocs crash on Windows ARM when reading PDFs ([#3460](https://github.com/nomic-ai/gpt4all/pull/3460))
+- Fix UI freeze when chat template is `{#` ([#3446](https://github.com/nomic-ai/gpt4all/pull/3446))
+
+## [3.8.0] - 2025-01-30
+
+### Added
+- Support DeepSeek-R1 Qwen models ([#3431](https://github.com/nomic-ai/gpt4all/pull/3431))
+- Support for think tags in the GUI ([#3440](https://github.com/nomic-ai/gpt4all/pull/3440))
+- Support specifying SHA256 hash in models3.json instead of MD5 ([#3437](https://github.com/nomic-ai/gpt4all/pull/3437))
+
+### Changed
+- Use minja instead of Jinja2Cpp for significantly improved template compatibility ([#3433](https://github.com/nomic-ai/gpt4all/pull/3433))
+
+### Fixed
+- Fix regression while using localdocs with server API ([#3410](https://github.com/nomic-ai/gpt4all/pull/3410))
+- Don't show system messages in server chat view ([#3411](https://github.com/nomic-ai/gpt4all/pull/3411))
+- Fix `codesign --verify` failure on macOS ([#3413](https://github.com/nomic-ai/gpt4all/pull/3413))
+- Code Interpreter: Fix console.log not accepting a single string after v3.7.0 ([#3426](https://github.com/nomic-ai/gpt4all/pull/3426))
+- Fix Phi 3.1 Mini 128K Instruct template (by [@ThiloteE](https://github.com/ThiloteE) in [#3412](https://github.com/nomic-ai/gpt4all/pull/3412))
+- Don't block the gui thread for reasoning ([#3435](https://github.com/nomic-ai/gpt4all/pull/3435))
+- Fix corruption of unicode in output of reasoning models ([#3443](https://github.com/nomic-ai/gpt4all/pull/3443))
+
+## [3.7.0] - 2025-01-21
+
+### Added
+- Add support for the Windows ARM64 target platform (CPU-only) ([#3385](https://github.com/nomic-ai/gpt4all/pull/3385))
+
+### Changed
+- Update from Qt 6.5.1 to 6.8.1 ([#3386](https://github.com/nomic-ai/gpt4all/pull/3386))
+
+### Fixed
+- Fix the timeout error in code interpreter ([#3369](https://github.com/nomic-ai/gpt4all/pull/3369))
+- Fix code interpreter console.log not accepting multiple arguments ([#3371](https://github.com/nomic-ai/gpt4all/pull/3371))
+- Remove 'X is defined' checks from templates for better compatibility ([#3372](https://github.com/nomic-ai/gpt4all/pull/3372))
+- Jinja2Cpp: Add 'if' requirement for 'else' parsing to fix crash ([#3373](https://github.com/nomic-ai/gpt4all/pull/3373))
+- Save chats on quit, even if the window isn't closed first ([#3387](https://github.com/nomic-ai/gpt4all/pull/3387))
+- Add chat template replacements for five new models and fix EM German Mistral ([#3393](https://github.com/nomic-ai/gpt4all/pull/3393))
+- Fix crash when entering `{{ a["foo"(` as chat template ([#3394](https://github.com/nomic-ai/gpt4all/pull/3394))
+- Sign the maintenance tool on macOS to prevent crash on Sequoia ([#3391](https://github.com/nomic-ai/gpt4all/pull/3391))
+- Jinja2Cpp: Fix operator precedence in 'not X is defined' ([#3402](https://github.com/nomic-ai/gpt4all/pull/3402))
+
+## [3.6.1] - 2024-12-20
+
+### Fixed
+- Fix the stop generation button no longer working in v3.6.0 ([#3336](https://github.com/nomic-ai/gpt4all/pull/3336))
+- Fix the copy entire conversation button no longer working in v3.6.0 ([#3336](https://github.com/nomic-ai/gpt4all/pull/3336))
+
+## [3.6.0] - 2024-12-19
+
+### Added
+- Automatically substitute chat templates that are not compatible with Jinja2Cpp in GGUFs ([#3327](https://github.com/nomic-ai/gpt4all/pull/3327))
+- Built-in javascript code interpreter tool plus model ([#3173](https://github.com/nomic-ai/gpt4all/pull/3173))
+
+### Fixed
+- Fix remote model template to allow for XML in messages ([#3318](https://github.com/nomic-ai/gpt4all/pull/3318))
+- Fix Jinja2Cpp bug that broke system message detection in chat templates ([#3325](https://github.com/nomic-ai/gpt4all/pull/3325))
+- Fix LocalDocs sources displaying in unconsolidated form after v3.5.0 ([#3328](https://github.com/nomic-ai/gpt4all/pull/3328))
+
 ## [3.5.3] - 2024-12-16

 ### Fixed
@@ -228,6 +312,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 - Fix several Vulkan resource management issues ([#2694](https://github.com/nomic-ai/gpt4all/pull/2694))
 - Fix crash/hang when some models stop generating, by showing special tokens ([#2701](https://github.com/nomic-ai/gpt4all/pull/2701))

+[3.10.0]: https://github.com/nomic-ai/gpt4all/compare/v3.9.0...v3.10.0
+[3.9.0]: https://github.com/nomic-ai/gpt4all/compare/v3.8.0...v3.9.0
+[3.8.0]: https://github.com/nomic-ai/gpt4all/compare/v3.7.0...v3.8.0
+[3.7.0]: https://github.com/nomic-ai/gpt4all/compare/v3.6.1...v3.7.0
+[3.6.1]: https://github.com/nomic-ai/gpt4all/compare/v3.6.0...v3.6.1
+[3.6.0]: https://github.com/nomic-ai/gpt4all/compare/v3.5.3...v3.6.0
 [3.5.3]: https://github.com/nomic-ai/gpt4all/compare/v3.5.2...v3.5.3
 [3.5.2]: https://github.com/nomic-ai/gpt4all/compare/v3.5.1...v3.5.2
 [3.5.1]: https://github.com/nomic-ai/gpt4all/compare/v3.5.0...v3.5.1
gpt4all-chat/CMakeLists.txt
@@ -3,13 +3,17 @@ cmake_minimum_required(VERSION 3.25) # for try_compile SOURCE_FROM_VAR
 include(../common/common.cmake)

 set(APP_VERSION_MAJOR 3)
-set(APP_VERSION_MINOR 5)
-set(APP_VERSION_PATCH 3)
+set(APP_VERSION_MINOR 10)
+set(APP_VERSION_PATCH 1)
 set(APP_VERSION_BASE "${APP_VERSION_MAJOR}.${APP_VERSION_MINOR}.${APP_VERSION_PATCH}")
-set(APP_VERSION "${APP_VERSION_BASE}")
+set(APP_VERSION "${APP_VERSION_BASE}-dev0")

 project(gpt4all VERSION ${APP_VERSION_BASE} LANGUAGES CXX C)

+if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
+    set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install CACHE PATH "..." FORCE)
+endif()
+
 if(APPLE)
   option(BUILD_UNIVERSAL "Build a Universal binary on macOS" OFF)
   if(BUILD_UNIVERSAL)
@@ -28,13 +32,28 @@ option(GPT4ALL_TEST "Build the tests" ${Python3_FOUND})
 option(GPT4ALL_LOCALHOST "Build installer for localhost repo" OFF)
 option(GPT4ALL_OFFLINE_INSTALLER "Build an offline installer" OFF)
 option(GPT4ALL_SIGN_INSTALL "Sign installed binaries and installers (requires signing identities)" OFF)
+option(GPT4ALL_GEN_CPACK_CONFIG "Generate the CPack config.xml in the package step and nothing else." OFF)
+set(GPT4ALL_USE_QTPDF "AUTO" CACHE STRING "Whether to Use QtPDF for LocalDocs. If OFF or not available on this platform, PDFium is used.")
+set_property(CACHE GPT4ALL_USE_QTPDF PROPERTY STRINGS AUTO ON OFF)
+set(GPT4ALL_FORCE_D3D12 "AUTO" CACHE STRING "Whether to use Direct3D 12 as the Qt scene graph backend. Defaults to ON on Windows ARM.")
+set_property(CACHE GPT4ALL_FORCE_D3D12 PROPERTY STRINGS AUTO ON OFF)
+
+include(cmake/cpack_config.cmake)
+
+if (GPT4ALL_GEN_CPACK_CONFIG)
+    configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/cpack-steal-config.cmake.in"
+                   "${CMAKE_BINARY_DIR}/cmake/cpack-steal-config.cmake" @ONLY)
+    set(CPACK_POST_BUILD_SCRIPTS ${CMAKE_BINARY_DIR}/cmake/cpack-steal-config.cmake)
+    include(CPack)
+    include(CPackIFW)
+    return()
+endif()
+
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 set(CMAKE_CXX_STANDARD 23)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
 if (MSVC)
-    # Enable accurate __cplusplus macro to fix errors in Jinja2Cpp
+    # Enable accurate __cplusplus macro
     add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/Zc:__cplusplus>)
 endif()
@@ -73,14 +92,19 @@ include_directories("${CMAKE_CURRENT_BINARY_DIR}")
 set(CMAKE_AUTOMOC ON)
 set(CMAKE_AUTORCC ON)

-# Generate a header file with the version number
-configure_file(
-    "${CMAKE_CURRENT_SOURCE_DIR}/cmake/config.h.in"
-    "${CMAKE_CURRENT_BINARY_DIR}/config.h"
-)
-
 set(CMAKE_FIND_PACKAGE_TARGETS_GLOBAL ON)
-find_package(Qt6 6.5 COMPONENTS Core HttpServer LinguistTools Pdf Quick QuickDialogs2 Sql Svg REQUIRED)
+set(GPT4ALL_QT_COMPONENTS Core HttpServer LinguistTools Quick QuickDialogs2 Sql Svg)
+set(GPT4ALL_USING_QTPDF OFF)
+if (CMAKE_SYSTEM_NAME MATCHES Windows AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|AARCH64|arm64|ARM64)$")
+    # QtPDF is not available.
+    if (GPT4ALL_USE_QTPDF STREQUAL "ON")
+        message(FATAL_ERROR "QtPDF is not available on Windows ARM64.")
+    endif()
+elseif (GPT4ALL_USE_QTPDF MATCHES "^(ON|AUTO)$")
+    set(GPT4ALL_USING_QTPDF ON)
+    list(APPEND GPT4ALL_QT_COMPONENTS Pdf)
+endif()
+find_package(Qt6 6.8 COMPONENTS ${GPT4ALL_QT_COMPONENTS} REQUIRED)

 if (QT_KNOWN_POLICY_QTP0004)
     qt_policy(SET QTP0004 NEW) # generate extra qmldir files on Qt 6.8+
@@ -102,10 +126,24 @@ message(STATUS "Qt 6 root directory: ${Qt6_ROOT_DIR}")
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)

-if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
-    set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install CACHE PATH "..." FORCE)
+set(GPT4ALL_CONFIG_FORCE_D3D12 -1)
+if (NOT CMAKE_SYSTEM_NAME MATCHES Windows OR Qt6_VERSION VERSION_LESS "6.6")
+    # Direct3D 12 is not available.
+    if (GPT4ALL_FORCE_D3D12 STREQUAL "ON")
+        message(FATAL_ERROR "Cannot use Direct3D 12 on this platform.")
+    endif()
+elseif (GPT4ALL_FORCE_D3D12 MATCHES "^(ON|AUTO)$")
+    if (GPT4ALL_FORCE_D3D12 STREQUAL "ON" OR CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|AARCH64|arm64|ARM64)$")
+        set(GPT4ALL_CONFIG_FORCE_D3D12 1)
+    endif()
 endif()

+# Generate a header file for configuration
+configure_file(
+    "${CMAKE_CURRENT_SOURCE_DIR}/src/config.h.in"
+    "${CMAKE_CURRENT_BINARY_DIR}/config.h"
+)
+
 add_subdirectory(deps)
 add_subdirectory(../gpt4all-backend llmodel)
@@ -193,12 +231,14 @@ qt_add_executable(chat
     src/chatapi.cpp src/chatapi.h
     src/chatlistmodel.cpp src/chatlistmodel.h
     src/chatllm.cpp src/chatllm.h
-    src/chatmodel.h
+    src/chatmodel.h src/chatmodel.cpp
     src/chatviewtextprocessor.cpp src/chatviewtextprocessor.h
+    src/codeinterpreter.cpp src/codeinterpreter.h
     src/database.cpp src/database.h
     src/download.cpp src/download.h
     src/embllm.cpp src/embllm.h
     src/jinja_helpers.cpp src/jinja_helpers.h
+    src/jinja_replacements.cpp src/jinja_replacements.h
     src/llm.cpp src/llm.h
     src/localdocs.cpp src/localdocs.h
     src/localdocsmodel.cpp src/localdocsmodel.h
@@ -207,6 +247,9 @@ qt_add_executable(chat
     src/mysettings.cpp src/mysettings.h
     src/network.cpp src/network.h
     src/server.cpp src/server.h
+    src/tool.cpp src/tool.h
+    src/toolcallparser.cpp src/toolcallparser.h
+    src/toolmodel.cpp src/toolmodel.h
     src/xlsxtomd.cpp src/xlsxtomd.h
     ${CHAT_EXE_RESOURCES}
     ${MACOS_SOURCES}
@@ -223,10 +266,13 @@ qt_add_qml_module(chat
     qml/AddModelView.qml
     qml/AddGPT4AllModelView.qml
     qml/AddHFModelView.qml
+    qml/AddRemoteModelView.qml
     qml/ApplicationSettings.qml
     qml/ChatDrawer.qml
+    qml/ChatCollapsibleItem.qml
     qml/ChatItemView.qml
     qml/ChatMessageButton.qml
+    qml/ChatTextItem.qml
     qml/ChatView.qml
     qml/CollectionsDrawer.qml
     qml/HomeView.qml
@@ -269,6 +315,7 @@ qt_add_qml_module(chat
     qml/MyTextField.qml
     qml/MyToolButton.qml
     qml/MyWelcomeButton.qml
+    qml/RemoteModelCard.qml
     RESOURCES
     icons/antenna_1.svg
     icons/antenna_2.svg
@@ -299,6 +346,7 @@ qt_add_qml_module(chat
     icons/gpt4all-48.png
     icons/gpt4all.svg
     icons/gpt4all_transparent.svg
+    icons/groq.svg
     icons/home.svg
     icons/image.svg
     icons/info.svg
@@ -306,12 +354,14 @@ qt_add_qml_module(chat
     icons/left_panel_open.svg
     icons/local-docs.svg
     icons/models.svg
+    icons/mistral.svg
     icons/network.svg
     icons/nomic_logo.svg
     icons/notes.svg
     icons/paperclip.svg
     icons/plus.svg
     icons/plus_circle.svg
+    icons/openai.svg
     icons/recycle.svg
     icons/regenerate.svg
     icons/search.svg
@@ -397,9 +447,19 @@ target_include_directories(chat PRIVATE deps/usearch/include
                            deps/usearch/fp16/include)

 target_link_libraries(chat
-    PRIVATE Qt6::Core Qt6::HttpServer Qt6::Pdf Qt6::Quick Qt6::Sql Qt6::Svg)
+    PRIVATE Qt6::Core Qt6::HttpServer Qt6::Quick Qt6::Sql Qt6::Svg)
+if (GPT4ALL_USING_QTPDF)
+    target_compile_definitions(chat PRIVATE GPT4ALL_USE_QTPDF)
+    target_link_libraries(chat PRIVATE Qt6::Pdf)
+else()
+    # Link PDFium
+    target_link_libraries(chat PRIVATE pdfium)
+endif()
 target_link_libraries(chat
-    PRIVATE llmodel SingleApplication fmt::fmt duckx::duckx QXlsx jinja2cpp)
+    PRIVATE llmodel SingleApplication fmt::fmt duckx::duckx QXlsx)
+target_include_directories(chat PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/deps/json/include)
+target_include_directories(chat PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/deps/json/include/nlohmann)
+target_include_directories(chat PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/deps/minja/include)

 if (APPLE)
     target_link_libraries(chat PRIVATE ${COCOA_LIBRARY})
@@ -407,13 +467,17 @@ endif()

 # -- install --

-set(COMPONENT_NAME_MAIN ${PROJECT_NAME})
+if (APPLE)
+    set(GPT4ALL_LIB_DEST bin/gpt4all.app/Contents/Frameworks)
+else()
+    set(GPT4ALL_LIB_DEST lib)
+endif()

 install(TARGETS chat DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN})

 install(
     TARGETS llmodel
-    LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
+    LIBRARY DESTINATION ${GPT4ALL_LIB_DEST} COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
     RUNTIME DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN} # .dll
 )
@@ -437,7 +501,7 @@ endif()

 install(
     TARGETS ${MODEL_IMPL_TARGETS}
-    LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
+    LIBRARY DESTINATION ${GPT4ALL_LIB_DEST} COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
     RUNTIME DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .dll
 )
@@ -467,7 +531,7 @@ if (LLMODEL_CUDA)
         TARGETS llamamodel-mainline-cuda
                 llamamodel-mainline-cuda-avxonly
         RUNTIME_DEPENDENCY_SET llama-cuda-deps
-        LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so/.dylib
+        LIBRARY DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .so
         RUNTIME DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN} # .dll
     )
     if (WIN32)
@@ -481,67 +545,38 @@
     endif()
 endif()

+if (NOT GPT4ALL_USING_QTPDF)
+    # Install PDFium
+    if (WIN32)
+        install(FILES ${PDFium_LIBRARY} DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN}) # .dll
+    else()
+        install(FILES ${PDFium_LIBRARY} DESTINATION ${GPT4ALL_LIB_DEST} COMPONENT ${COMPONENT_NAME_MAIN}) # .so/.dylib
+    endif()
+endif()
+
 if (NOT APPLE)
     install(FILES "${LOCAL_EMBEDDING_MODEL_PATH}"
             DESTINATION resources
             COMPONENT ${COMPONENT_NAME_MAIN})
 endif()

-set(CPACK_GENERATOR "IFW")
-set(CPACK_VERBATIM_VARIABLES YES)
-set(CPACK_IFW_VERBOSE ON)
-
-if(${CMAKE_SYSTEM_NAME} MATCHES Linux)
+if (CMAKE_SYSTEM_NAME MATCHES Linux)
     find_program(LINUXDEPLOYQT linuxdeployqt HINTS "$ENV{HOME}/dev/linuxdeployqt/build/tools/linuxdeployqt" "$ENV{HOME}/project/linuxdeployqt/bin")
     configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/deploy-qt-linux.cmake.in"
                    "${CMAKE_BINARY_DIR}/cmake/deploy-qt-linux.cmake" @ONLY)
     set(CPACK_PRE_BUILD_SCRIPTS ${CMAKE_BINARY_DIR}/cmake/deploy-qt-linux.cmake)
-    set(CPACK_IFW_ROOT "~/Qt/Tools/QtInstallerFramework/4.6")
-    set(CPACK_PACKAGE_FILE_NAME "${COMPONENT_NAME_MAIN}-installer-linux")
-    set(CPACK_IFW_TARGET_DIRECTORY "@HomeDir@/${COMPONENT_NAME_MAIN}")
-elseif(${CMAKE_SYSTEM_NAME} MATCHES Windows)
-    find_program(WINDEPLOYQT windeployqt HINTS ${_qt_bin_dir})
+elseif (CMAKE_SYSTEM_NAME MATCHES Windows)
+    find_program(WINDEPLOYQT windeployqt)
     configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/deploy-qt-windows.cmake.in"
                    "${CMAKE_BINARY_DIR}/cmake/deploy-qt-windows.cmake" @ONLY)
     set(CPACK_PRE_BUILD_SCRIPTS ${CMAKE_BINARY_DIR}/cmake/deploy-qt-windows.cmake)
-    set(CPACK_IFW_ROOT "C:/Qt/Tools/QtInstallerFramework/4.6")
-    set(CPACK_IFW_PACKAGE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.ico")
-    set(CPACK_PACKAGE_FILE_NAME "${COMPONENT_NAME_MAIN}-installer-win64")
-    set(CPACK_IFW_TARGET_DIRECTORY "@HomeDir@\\${COMPONENT_NAME_MAIN}")
-elseif(${CMAKE_SYSTEM_NAME} MATCHES Darwin)
-    find_program(MACDEPLOYQT macdeployqt HINTS ${_qt_bin_dir})
+elseif (CMAKE_SYSTEM_NAME MATCHES Darwin)
+    find_program(MACDEPLOYQT macdeployqt)
     configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/deploy-qt-mac.cmake.in"
                    "${CMAKE_BINARY_DIR}/cmake/deploy-qt-mac.cmake" @ONLY)
     set(CPACK_PRE_BUILD_SCRIPTS ${CMAKE_BINARY_DIR}/cmake/deploy-qt-mac.cmake)
-    set(CPACK_IFW_ROOT "~/Qt/Tools/QtInstallerFramework/4.6")
-    set(CPACK_IFW_PACKAGE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns")
-    set(CPACK_PACKAGE_FILE_NAME "${COMPONENT_NAME_MAIN}-installer-darwin")
-    set(CPACK_IFW_TARGET_DIRECTORY "@ApplicationsDir@/${COMPONENT_NAME_MAIN}")
-    set(CPACK_BUNDLE_NAME ${COMPONENT_NAME_MAIN})
-    set(CPACK_BUNDLE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns")
 endif()

-set(CPACK_COMPONENTS_ALL gpt4all) # exclude development components
-set(CPACK_PACKAGE_INSTALL_DIRECTORY ${COMPONENT_NAME_MAIN})
-set(CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_VERSION_MAJOR})
-set(CPACK_PACKAGE_VERSION_MINOR ${PROJECT_VERSION_MINOR})
-SET(CPACK_PACKAGE_VERSION_PATCH ${PROJECT_VERSION_PATCH})
-set(CPACK_PACKAGE_HOMEPAGE_URL "https://www.nomic.ai/gpt4all")
-set(CPACK_PACKAGE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png")
-set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_CURRENT_SOURCE_DIR}/LICENSE)
-set(CPACK_RESOURCE_FILE_README ${CMAKE_CURRENT_SOURCE_DIR}/README.md)
-set(CPACK_PACKAGE_EXECUTABLES "GPT4All")
-set(CPACK_CREATE_DESKTOP_LINKS "GPT4All")
-set(CPACK_IFW_PACKAGE_NAME "GPT4All")
-set(CPACK_IFW_PACKAGE_TITLE "GPT4All Installer")
-set(CPACK_IFW_PACKAGE_PUBLISHER "Nomic, Inc.")
-set(CPACK_IFW_PRODUCT_URL "https://www.nomic.ai/gpt4all")
-set(CPACK_IFW_PACKAGE_WIZARD_STYLE "Aero")
-set(CPACK_IFW_PACKAGE_LOGO "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png")
-set(CPACK_IFW_PACKAGE_WINDOW_ICON "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-32.png")
-set(CPACK_IFW_PACKAGE_WIZARD_SHOW_PAGE_LIST OFF)
-set(CPACK_IFW_PACKAGE_CONTROL_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/cmake/installer_control.qs")
-
 include(InstallRequiredSystemLibraries)
 include(CPack)
 include(CPackIFW)
@@ -553,20 +588,35 @@ endif()
 cpack_ifw_configure_component(${COMPONENT_NAME_MAIN} ESSENTIAL FORCED_INSTALLATION)
 cpack_ifw_configure_component(${COMPONENT_NAME_MAIN} VERSION ${APP_VERSION})
 cpack_ifw_configure_component(${COMPONENT_NAME_MAIN} LICENSES "MIT LICENSE" ${CPACK_RESOURCE_FILE_LICENSE})
-cpack_ifw_configure_component(${COMPONENT_NAME_MAIN} SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/cmake/installer_component.qs")
+cpack_ifw_configure_component(${COMPONENT_NAME_MAIN} SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/cmake/installer_gpt4all_component.qs")
 cpack_ifw_configure_component(${COMPONENT_NAME_MAIN} REPLACES "gpt4all-chat") #Was used in very earliest prototypes

+if (APPLE AND GPT4ALL_SIGN_INSTALL)
+    if (GPT4ALL_OFFLINE_INSTALLER)
+        cpack_add_component(maintenancetool HIDDEN)
+    else()
+        cpack_add_component(maintenancetool HIDDEN DOWNLOADED)
+    endif()
+    cpack_ifw_configure_component(maintenancetool ESSENTIAL FORCED_INSTALLATION)
+    cpack_ifw_configure_component(maintenancetool VERSION ${APP_VERSION})
+    cpack_ifw_configure_component(maintenancetool SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/cmake/installer_maintenancetool_component.qs")
+endif()
+
 if (GPT4ALL_LOCALHOST)
     cpack_ifw_add_repository("GPT4AllRepository" URL "http://localhost/repository")
-elseif(GPT4ALL_OFFLINE_INSTALLER)
+elseif (GPT4ALL_OFFLINE_INSTALLER)
     add_compile_definitions(GPT4ALL_OFFLINE_INSTALLER)
 else()
-    if(${CMAKE_SYSTEM_NAME} MATCHES Linux)
+    if (CMAKE_SYSTEM_NAME MATCHES Linux)
         cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/linux/repository")
-    elseif(${CMAKE_SYSTEM_NAME} MATCHES Windows)
-        #To sign the target on windows have to create a batch script add use it as a custom target and then use CPACK_IFW_EXTRA_TARGETS to set this extra target
+    elseif (CMAKE_SYSTEM_NAME MATCHES Windows)
+        # To sign the target on windows have to create a batch script add use it as a custom target and then use CPACK_IFW_EXTRA_TARGETS to set this extra target
+        if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|AMD64|amd64)$")
             cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/windows/repository")
-    elseif(${CMAKE_SYSTEM_NAME} MATCHES Darwin)
+        elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|AARCH64|arm64|ARM64)$")
+            cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/windows_arm/repository")
+        endif()
+    elseif (CMAKE_SYSTEM_NAME MATCHES Darwin)
         cpack_ifw_add_repository("GPT4AllRepository" URL "https://gpt4all.io/installer_repos/mac/repository")
     endif()
 endif()
gpt4all-chat/README.md (deleted)
@@ -1,45 +0,0 @@
-# gpt4all-chat
-
-Cross platform Qt based GUI for GPT4All versions with GPT-J as the base
-model. NOTE: The model seen in the screenshot is actually a preview of a
-new training run for GPT4All based on GPT-J. The GPT4All project is busy
-at work getting ready to release this model including installers for all
-three major OS's. In the meantime, you can try this UI out with the original
-GPT-J model by following build instructions below.
-
-![image](https://user-images.githubusercontent.com/50458173/231464085-da9edff6-a593-410e-8f38-7513f75c8aab.png)
-
-## Install
-
-One click installers for macOS, Linux, and Windows at https://www.nomic.ai/gpt4all
-
-## Features
-
-* Cross-platform (Linux, Windows, MacOSX)
-* The UI is made to look and feel like you've come to expect from a chatty gpt
-* Check for updates so you can always stay fresh with latest models
-* Easy to install with precompiled binaries available for all three major desktop platforms
-* Multi-modal - Ability to load more than one model and switch between them
-* Multi-chat - a list of current and past chats and the ability to save/delete/export and switch between
-* Supports models that are supported by llama.cpp
-* Model downloader in GUI featuring many popular open source models
-* Settings dialog to change temp, top_p, min_p, top_k, threads, etc
-* Copy your conversation to clipboard
-* RAG via LocalDocs feature
-* Check for updates to get the very latest GUI
-
-## Building and running
-
-* Follow the visual instructions on the [build_and_run](build_and_run.md) page
-
-## Getting the latest
-
-If you've already checked out the source code and/or built the program make sure when you do a git fetch to get the latest changes and that you also do `git submodule update --init --recursive` to update the submodules. (If you ever run into trouble, deinitializing via `git submodule deinit -f .` and then initializing again via `git submodule update --init --recursive` fixes most issues)
-
-## Contributing
-
-* Pull requests welcome. See the feature wish list for ideas :)
-
-
-## License
-The source code of this chat interface is currently under a MIT license.
gpt4all-chat/cmake/config.h.in (deleted)
@@ -1,6 +0,0 @@
-#ifndef CONFIG_H
-#define CONFIG_H
-
-#define APP_VERSION "@APP_VERSION@"
-
-#endif // CONFIG_H
gpt4all-chat/cmake/cpack-steal-config.cmake.in (new file; 2 lines)
@@ -0,0 +1,2 @@
+set(OUTPUT_DIR "@CMAKE_BINARY_DIR@")
+file(COPY ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/config DESTINATION ${OUTPUT_DIR}/cpack-config)
gpt4all-chat/cmake/cpack_config.cmake (new file; 50 lines)
@@ -0,0 +1,50 @@
+set(COMPONENT_NAME_MAIN "gpt4all")
+
+set(CPACK_GENERATOR "IFW")
+set(CPACK_VERBATIM_VARIABLES YES)
+set(CPACK_IFW_VERBOSE ON)
+
+if (CMAKE_SYSTEM_NAME MATCHES Linux)
+    set(CPACK_IFW_ROOT "~/Qt/Tools/QtInstallerFramework/4.6")
+    set(CPACK_PACKAGE_FILE_NAME "${COMPONENT_NAME_MAIN}-installer-linux")
+    set(CPACK_IFW_TARGET_DIRECTORY "@HomeDir@/${COMPONENT_NAME_MAIN}")
+elseif (CMAKE_SYSTEM_NAME MATCHES Windows)
+    set(CPACK_IFW_ROOT "C:/Qt/Tools/QtInstallerFramework/4.6")
+    set(CPACK_IFW_PACKAGE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.ico")
+    if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|AMD64|amd64)$")
+        set(CPACK_PACKAGE_FILE_NAME "${COMPONENT_NAME_MAIN}-installer-win64")
+    elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|AARCH64|arm64|ARM64)$")
+        set(CPACK_PACKAGE_FILE_NAME "${COMPONENT_NAME_MAIN}-installer-win64-arm")
+    else()
+        message(FATAL_ERROR "Unrecognized processor: ${CMAKE_SYSTEM_PROCESSOR}")
+    endif()
+    set(CPACK_IFW_TARGET_DIRECTORY "@HomeDir@\\${COMPONENT_NAME_MAIN}")
+elseif (CMAKE_SYSTEM_NAME MATCHES Darwin)
+    set(CPACK_IFW_ROOT "~/Qt/Tools/QtInstallerFramework/4.6")
+    set(CPACK_IFW_PACKAGE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns")
+    set(CPACK_PACKAGE_FILE_NAME "${COMPONENT_NAME_MAIN}-installer-darwin")
+    set(CPACK_IFW_TARGET_DIRECTORY "@ApplicationsDir@/${COMPONENT_NAME_MAIN}")
+endif()
+
+set(CPACK_COMPONENTS_ALL ${COMPONENT_NAME_MAIN}) # exclude development components
+if (APPLE AND GPT4ALL_SIGN_INSTALL)
+    list(APPEND CPACK_COMPONENTS_ALL maintenancetool)
+endif()
+set(CPACK_PACKAGE_INSTALL_DIRECTORY ${COMPONENT_NAME_MAIN})
+set(CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_VERSION_MAJOR})
+set(CPACK_PACKAGE_VERSION_MINOR ${PROJECT_VERSION_MINOR})
+set(CPACK_PACKAGE_VERSION_PATCH ${PROJECT_VERSION_PATCH})
+set(CPACK_PACKAGE_HOMEPAGE_URL "https://www.nomic.ai/gpt4all")
+set(CPACK_PACKAGE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png")
+set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_CURRENT_SOURCE_DIR}/LICENSE)
+set(CPACK_PACKAGE_EXECUTABLES "GPT4All")
+set(CPACK_CREATE_DESKTOP_LINKS "GPT4All")
+set(CPACK_IFW_PACKAGE_NAME "GPT4All")
+set(CPACK_IFW_PACKAGE_TITLE "GPT4All Installer")
+set(CPACK_IFW_PACKAGE_PUBLISHER "Nomic, Inc.")
+set(CPACK_IFW_PRODUCT_URL "https://www.nomic.ai/gpt4all")
+set(CPACK_IFW_PACKAGE_WIZARD_STYLE "Aero")
+set(CPACK_IFW_PACKAGE_LOGO "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png")
+set(CPACK_IFW_PACKAGE_WINDOW_ICON "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-32.png")
+set(CPACK_IFW_PACKAGE_WIZARD_SHOW_PAGE_LIST OFF)
+set(CPACK_IFW_PACKAGE_CONTROL_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/cmake/installer_control.qs")
gpt4all-chat/cmake/deploy-qt-mac.cmake.in
@@ -1,20 +1,26 @@
 set(MACDEPLOYQT "@MACDEPLOYQT@")
 set(COMPONENT_NAME_MAIN "@COMPONENT_NAME_MAIN@")
 set(CMAKE_CURRENT_SOURCE_DIR "@CMAKE_CURRENT_SOURCE_DIR@")
+set(GPT4ALL_SIGN_INSTALL "@GPT4ALL_SIGN_INSTALL@")
 set(GPT4ALL_SIGNING_ID "@MAC_SIGNING_IDENTITY@")
-if (GPT4ALL_SIGNING_ID)
+set(CPACK_CONFIG_DIR "@CMAKE_BINARY_DIR@")
+if (GPT4ALL_SIGN_INSTALL)
     set(MAC_NOTARIZE -sign-for-notarization=${GPT4ALL_SIGNING_ID})
 endif()
 execute_process(COMMAND ${MACDEPLOYQT} ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app -qmldir=${CMAKE_CURRENT_SOURCE_DIR} -verbose=2 ${MAC_NOTARIZE})
-file(GLOB MYLLAMALIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libllama*)
-file(GLOB MYLLMODELLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libllmodel.*)
-file(COPY ${MYLLAMALIBS}
-     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks)
-file(COPY ${MYLLMODELLIBS}
-     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks)
 file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-32.png"
     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data)
 file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png"
     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data)
 file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns"
     DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data)
+
+if (GPT4ALL_SIGN_INSTALL)
+    # Create signed MaintenanceTool
+    set(MT_DATA_DIR ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/maintenancetool/data)
+    file(MAKE_DIRECTORY ${MT_DATA_DIR})
+    execute_process(
+        COMMAND binarycreator --config ${CPACK_CONFIG_DIR}/cpack-config/config/config.xml --create-maintenancetool --sign ${GPT4ALL_SIGNING_ID}
+        WORKING_DIRECTORY ${MT_DATA_DIR}
+    )
+endif()
|
19 gpt4all-chat/cmake/installer_maintenancetool_component.qs Normal file
@@ -0,0 +1,19 @@
+function Component()
+{
+    component.ifwVersion = installer.value("FrameworkVersion");
+    installer.installationStarted.connect(this, Component.prototype.onInstallationStarted);
+}
+
+Component.prototype.onInstallationStarted = function()
+{
+    if (component.updateRequested() || component.installationRequested()) {
+        if (installer.value("os") == "win") {
+            component.installerbaseBinaryPath = "@TargetDir@/installerbase.exe";
+        } else if (installer.value("os") == "x11") {
+            component.installerbaseBinaryPath = "@TargetDir@/installerbase";
+        } else if (installer.value("os") == "mac") {
+            component.installerbaseBinaryPath = "@TargetDir@/MaintenanceTool.app";
+        }
+        installer.setInstallerBaseBinary(component.installerbaseBinaryPath);
+    }
+}
@@ -1,3 +1,6 @@
+include(FetchContent)
+
 set(BUILD_SHARED_LIBS OFF)

 set(FMT_INSTALL OFF)
@@ -12,11 +15,37 @@ add_subdirectory(DuckX)
 set(QT_VERSION_MAJOR 6)
 add_subdirectory(QXlsx/QXlsx)

-# forked dependency of Jinja2Cpp
-set(RAPIDJSON_BUILD_DOC OFF)
-set(RAPIDJSON_BUILD_EXAMPLES OFF)
-set(RAPIDJSON_BUILD_TESTS OFF)
-set(RAPIDJSON_ENABLE_INSTRUMENTATION_OPT OFF)
-add_subdirectory(rapidjson)
-
-add_subdirectory(Jinja2Cpp)
+if (NOT GPT4ALL_USING_QTPDF)
+    # If we do not use QtPDF, we need to get PDFium.
+    set(GPT4ALL_PDFIUM_TAG "chromium/6996")
+    if (CMAKE_SYSTEM_NAME MATCHES Linux)
+        FetchContent_Declare(
+            pdfium
+            URL "https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-linux-x64.tgz"
+            URL_HASH "SHA256=68b381b87efed539f2e33ae1e280304c9a42643a878cc296c1d66a93b0cb4335"
+        )
+    elseif (CMAKE_SYSTEM_NAME MATCHES Windows)
+        if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|AMD64|amd64)$")
+            FetchContent_Declare(
+                pdfium
+                URL "https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-win-x64.tgz"
+                URL_HASH "SHA256=83e714c302ceacccf403826d5cb57ea39b77f393d83b8d5781283012774a9378"
+            )
+        elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|AARCH64|arm64|ARM64)$")
+            FetchContent_Declare(
+                pdfium
+                URL "https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-win-arm64.tgz"
+                URL_HASH "SHA256=78e77e871453a4915cbf66fb381b951c9932f88a747c6b2b33c9f27ec2371445"
+            )
+        endif()
+    elseif (CMAKE_SYSTEM_NAME MATCHES Darwin)
+        FetchContent_Declare(
+            pdfium
+            URL "https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-mac-univ.tgz"
+            URL_HASH "SHA256=e7577f3242ff9c1df50025f9615673a43601a201bc51ee4792975f98920793a2"
+        )
+    endif()
+
+    FetchContent_MakeAvailable(pdfium)
+    find_package(PDFium REQUIRED PATHS "${pdfium_SOURCE_DIR}" NO_DEFAULT_PATH)
+endif()
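A note on the pinned archives above: `URL_HASH` makes CMake's FetchContent reject any download whose SHA-256 digest differs from the recorded one. The same digests can be checked out of band; below is a minimal Python sketch (the local path is hypothetical, and the digests are simply the ones pinned in this diff):

    import hashlib

    # SHA-256 digests pinned via URL_HASH in the deps/CMakeLists.txt diff above.
    PDFIUM_SHA256 = {
        "pdfium-linux-x64.tgz": "68b381b87efed539f2e33ae1e280304c9a42643a878cc296c1d66a93b0cb4335",
        "pdfium-win-x64.tgz": "83e714c302ceacccf403826d5cb57ea39b77f393d83b8d5781283012774a9378",
        "pdfium-win-arm64.tgz": "78e77e871453a4915cbf66fb381b951c9932f88a747c6b2b33c9f27ec2371445",
        "pdfium-mac-univ.tgz": "e7577f3242ff9c1df50025f9615673a43601a201bc51ee4792975f98920793a2",
    }

    def verify_archive(path, name):
        # Hash the file in 1 MiB chunks so large archives are not read into memory at once.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest() == PDFIUM_SHA256[name]

    # verify_archive("/tmp/pdfium-linux-x64.tgz", "pdfium-linux-x64.tgz")  # hypothetical path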
1 gpt4all-chat/deps/Jinja2Cpp Submodule (deleted)
@@ -1 +0,0 @@
-Subproject commit bcf2f82ae120f0a71c114ecb64a63ab5fe1ffc79
1 gpt4all-chat/deps/json Submodule
@@ -0,0 +1 @@
+Subproject commit 606b6347edf0758c531abb6c36743e09a4c48a84
1 gpt4all-chat/deps/minja Submodule
@@ -0,0 +1 @@
+Subproject commit e97bb2442cd6ab3d5bb5f5a3e8a1f7d6081d613b
1 gpt4all-chat/deps/rapidjson Submodule (deleted)
@@ -1 +0,0 @@
-Subproject commit 9b547ef4bd86210ef084abc2790bd1ddfe66b592
3 gpt4all-chat/icons/groq.svg Normal file
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 26.3 26.3"><defs><style>.cls-1{fill:#f05237;}.cls-2{fill:#fff;}</style></defs><g id="Layer_2" data-name="Layer 2"><g id="Content"><circle class="cls-1" cx="13.15" cy="13.15" r="13.15"/><path class="cls-2" d="M13.17,6.88a4.43,4.43,0,0,0,0,8.85h1.45V14.07H13.17a2.77,2.77,0,1,1,2.77-2.76v4.07a2.74,2.74,0,0,1-4.67,2L10.1,18.51a4.37,4.37,0,0,0,3.07,1.29h.06a4.42,4.42,0,0,0,4.36-4.4V11.2a4.43,4.43,0,0,0-4.42-4.32"/></g></g></svg>
1 gpt4all-chat/icons/mistral.svg Normal file
@@ -0,0 +1 @@
+<svg viewBox="0 0 512 512" xmlns="http://www.w3.org/2000/svg" fill-rule="evenodd" clip-rule="evenodd" stroke-linejoin="round" stroke-miterlimit="2"><path d="M189.08 303.228H94.587l.044-94.446h94.497l-.048 94.446z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M283.528 397.674h-94.493l.044-94.446h94.496l-.047 94.446z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M283.575 303.228H189.08l.046-94.446h94.496l-.047 94.446z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M378.07 303.228h-94.495l.044-94.446h94.498l-.047 94.446zM189.128 208.779H94.633l.044-94.448h94.498l-.047 94.448zM378.115 208.779h-94.494l.045-94.448h94.496l-.047 94.448zM94.587 303.227H.093l.044-96.017h94.496l-.046 96.017z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M94.633 208.779H.138l.046-94.448H94.68l-.047 94.448z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M94.68 115.902H.185L.23 19.885h94.498l-.047 96.017zM472.657 114.331h-94.495l.044-94.446h94.497l-.046 94.446zM94.54 399.244H.046l.044-97.588h94.497l-.047 97.588z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M94.495 492.123H0l.044-94.446H94.54l-.045 94.446zM472.563 303.228H378.07l.044-94.446h94.496l-.047 94.446zM472.61 208.779h-94.495l.044-94.448h94.498l-.047 94.448z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M472.517 397.674h-94.494l.044-94.446h94.497l-.047 94.446z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M472.47 492.121h-94.493l.044-96.017h94.496l-.047 96.017z" fill="#1c1c1b" fill-rule="nonzero"/><path d="M228.375 303.22h-96.061l.046-94.446h96.067l-.052 94.446z" fill="#ff7000" fill-rule="nonzero"/><path d="M322.827 397.666h-94.495l.044-96.018h94.498l-.047 96.018z" fill="#ff4900" fill-rule="nonzero"/><path d="M324.444 303.22h-97.636l.046-94.446h97.638l-.048 94.446z" fill="#ff7000" fill-rule="nonzero"/><path d="M418.938 303.22h-96.064l.045-94.446h96.066l-.047 94.446z" fill="#ff7000" fill-rule="nonzero"/><path d="M228.423 208.77H132.36l.045-94.445h96.066l-.05 94.446zM418.985 208.77H322.92l.044-94.445h96.069l-.048 94.446z" fill="#ffa300" fill-rule="nonzero"/><path d="M133.883 304.79H39.392l.044-96.017h94.496l-.049 96.017z" fill="#ff7000" fill-rule="nonzero"/><path d="M133.929 208.77H39.437l.044-95.445h94.496l-.048 95.445z" fill="#ffa300" fill-rule="nonzero"/><path d="M133.976 114.325H39.484l.044-94.448h94.497l-.05 94.448zM511.954 115.325h-94.493l.044-95.448h94.497l-.048 95.448z" fill="#ffce00" fill-rule="nonzero"/><path d="M133.836 399.667H39.345l.044-96.447h94.496l-.049 96.447z" fill="#ff4900" fill-rule="nonzero"/><path d="M133.79 492.117H39.3l.044-94.448h94.496l-.049 94.448z" fill="#ff0107" fill-rule="nonzero"/><path d="M511.862 303.22h-94.495l.046-94.446h94.496l-.047 94.446z" fill="#ff7000" fill-rule="nonzero"/><path d="M511.907 208.77h-94.493l.044-94.445h94.496l-.047 94.446z" fill="#ffa300" fill-rule="nonzero"/><path d="M511.815 398.666h-94.493l.044-95.447h94.496l-.047 95.447z" fill="#ff4900" fill-rule="nonzero"/><path d="M511.77 492.117h-94.496l.046-94.448h94.496l-.047 94.448z" fill="#ff0107" fill-rule="nonzero"/></svg>
2 gpt4all-chat/icons/openai.svg Normal file
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="utf-8"?><!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
+<svg fill="#000000" width="800px" height="800px" viewBox="0 0 24 24" role="img" xmlns="http://www.w3.org/2000/svg"><title>OpenAI icon</title><path d="M22.2819 9.8211a5.9847 5.9847 0 0 0-.5157-4.9108 6.0462 6.0462 0 0 0-6.5098-2.9A6.0651 6.0651 0 0 0 4.9807 4.1818a5.9847 5.9847 0 0 0-3.9977 2.9 6.0462 6.0462 0 0 0 .7427 7.0966 5.98 5.98 0 0 0 .511 4.9107 6.051 6.051 0 0 0 6.5146 2.9001A5.9847 5.9847 0 0 0 13.2599 24a6.0557 6.0557 0 0 0 5.7718-4.2058 5.9894 5.9894 0 0 0 3.9977-2.9001 6.0557 6.0557 0 0 0-.7475-7.0729zm-9.022 12.6081a4.4755 4.4755 0 0 1-2.8764-1.0408l.1419-.0804 4.7783-2.7582a.7948.7948 0 0 0 .3927-.6813v-6.7369l2.02 1.1686a.071.071 0 0 1 .038.052v5.5826a4.504 4.504 0 0 1-4.4945 4.4944zm-9.6607-4.1254a4.4708 4.4708 0 0 1-.5346-3.0137l.142.0852 4.783 2.7582a.7712.7712 0 0 0 .7806 0l5.8428-3.3685v2.3324a.0804.0804 0 0 1-.0332.0615L9.74 19.9502a4.4992 4.4992 0 0 1-6.1408-1.6464zM2.3408 7.8956a4.485 4.485 0 0 1 2.3655-1.9728V11.6a.7664.7664 0 0 0 .3879.6765l5.8144 3.3543-2.0201 1.1685a.0757.0757 0 0 1-.071 0l-4.8303-2.7865A4.504 4.504 0 0 1 2.3408 7.872zm16.5963 3.8558L13.1038 8.364 15.1192 7.2a.0757.0757 0 0 1 .071 0l4.8303 2.7913a4.4944 4.4944 0 0 1-.6765 8.1042v-5.6772a.79.79 0 0 0-.407-.667zm2.0107-3.0231l-.142-.0852-4.7735-2.7818a.7759.7759 0 0 0-.7854 0L9.409 9.2297V6.8974a.0662.0662 0 0 1 .0284-.0615l4.8303-2.7866a4.4992 4.4992 0 0 1 6.6802 4.66zM8.3065 12.863l-2.02-1.1638a.0804.0804 0 0 1-.038-.0567V6.0742a4.4992 4.4992 0 0 1 7.3757-3.4537l-.142.0805L8.704 5.459a.7948.7948 0 0 0-.3927.6813zm1.0976-2.3654l2.602-1.4998 2.6069 1.4998v2.9994l-2.5974 1.4997-2.6067-1.4997Z"/></svg>
@@ -54,7 +54,7 @@ Window {
             systemTrayIcon.shouldClose = true;
             window.shouldClose = true;
             savingPopup.open();
-            ChatListModel.saveChats();
+            ChatListModel.saveChatsForQuit();
         }
     }
 }
@@ -231,8 +231,8 @@ Window {

         window.shouldClose = true;
         savingPopup.open();
-        ChatListModel.saveChats();
-        close.accepted = false
+        ChatListModel.saveChatsForQuit();
+        close.accepted = false;
     }

     Connections {
@@ -1,20 +1,15 @@
 ## Latest News

-GPT4All v3.5.2 was released on December 13th. It changes the "Explore Models" page and fixes issues with the API server and cloned models.
-
-GPT4All v3.5.1 was released on December 10th and fixes several issues with the new chat templates. Additionally, it fixes a bug with the default model button as well as an issue with remote models.
-
----
-
-GPT4All v3.5.0 was released on December 9th. Changes include:
-
-* **Message Editing:**
-  * You can now edit any message you've sent by clicking the pencil icon below it.
-  * You can now redo earlier responses in the conversation.
-* **Templates:** Chat templates have been completely overhauled! They now use Jinja-style syntax. You may notice warnings or errors in the UI. Read the linked docs, and if you have any questions, please ask on the Discord.
-* **File Attachments:** Markdown and plain text files are now supported as file attachments.
-* **System Tray:** There is now an option in Application Settings to allow GPT4All to minimize to the system tray instead of closing.
-* **Local API Server:**
-  * The API server now supports system messages from the client and no longer uses the system message in settings.
-  * You can now send messages to the API server in any order supported by the model instead of just user/assistant pairs.
-* **Translations:** The Italian and Romanian translations have been improved.
+GPT4All v3.10.0 was released on February 24th. Changes include:
+
+* **Remote Models:**
+  * The Add Model page now has a dedicated tab for remote model providers.
+  * Groq, OpenAI, and Mistral remote models are now easier to configure.
+* **CUDA Compatibility:** GPUs with CUDA compute capability 5.0 such as the GTX 750 are now supported by the CUDA backend.
+* **New Model:** The non-MoE Granite model is now supported.
+* **Translation Updates:**
+  * The Italian translation has been updated.
+  * The Simplified Chinese translation has been significantly improved.
+* **Better Chat Templates:** The default chat templates for OLMoE 7B 0924/0125 and Granite 3.1 3B/8B have been improved.
+* **Whitespace Fixes:** DeepSeek-R1-based models now have better whitespace behavior in their output.
+* **Crash Fixes:** Several issues that could potentially cause GPT4All to crash have been fixed.
@@ -1,6 +1,22 @@
 [
     {
         "order": "a",
+        "md5sum": "a54c08a7b90e4029a8c2ab5b5dc936aa",
+        "name": "Reasoner v1",
+        "filename": "qwen2.5-coder-7b-instruct-q4_0.gguf",
+        "filesize": "4431390720",
+        "requires": "3.6.0",
+        "ramrequired": "8",
+        "parameters": "8 billion",
+        "quant": "q4_0",
+        "type": "qwen2",
+        "description": "<ul><li>Based on <a href=\"https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct\">Qwen2.5-Coder 7B</a></li><li>Uses built-in javascript code interpreter</li><li>Use for complex reasoning tasks that can be aided by computation analysis</li><li>License: <a href=\"https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct/blob/main/LICENSE\">Apache License Version 2.0</a></li><li>#reasoning</li></ul>",
+        "url": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GGUF/resolve/main/qwen2.5-coder-7b-instruct-q4_0.gguf",
+        "chatTemplate": "{{- '<|im_start|>system\\n' }}\n{% if toolList|length > 0 %}You have access to the following functions:\n{% for tool in toolList %}\nUse the function '{{tool.function}}' to: '{{tool.description}}'\n{% if tool.parameters|length > 0 %}\nparameters:\n{% for info in tool.parameters %}\n  {{info.name}}:\n    type: {{info.type}}\n    description: {{info.description}}\n    required: {{info.required}}\n{% endfor %}\n{% endif %}\n# Tool Instructions\nIf you CHOOSE to call this function ONLY reply with the following format:\n'{{tool.symbolicFormat}}'\nHere is an example. If the user says, '{{tool.examplePrompt}}', then you reply\n'{{tool.exampleCall}}'\nAfter the result you might reply with, '{{tool.exampleReply}}'\n{% endfor %}\nYou MUST include both the start and end tags when you use a function.\n\nYou are a helpful AI assistant who uses the functions to break down, analyze, perform, and verify complex reasoning tasks. You SHOULD try to verify your answers using the functions where possible.\n{% endif %}\n{{- '<|im_end|>\\n' }}\n{% for message in messages %}\n{{'<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{% endfor %}\n{% if add_generation_prompt %}\n{{ '<|im_start|>assistant\\n' }}\n{% endif %}\n",
+        "systemPrompt": ""
+    },
+    {
+        "order": "aa",
         "md5sum": "c87ad09e1e4c8f9c35a5fcef52b6f1c9",
         "name": "Llama 3 8B Instruct",
         "filename": "Meta-Llama-3-8B-Instruct.Q4_0.gguf",
@@ -13,7 +29,68 @@
         "description": "<ul><li>Fast responses</li><li>Chat based model</li><li>Accepts system prompts in Llama 3 format</li><li>Trained by Meta</li><li>License: <a href=\"https://llama.meta.com/llama3/license/\">Meta Llama 3 Community License</a></li></ul>",
         "url": "https://gpt4all.io/models/gguf/Meta-Llama-3-8B-Instruct.Q4_0.gguf",
         "promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2<|eot_id|>",
-        "systemPrompt": ""
+        "systemPrompt": "",
+        "chatTemplate": "{%- set loop_messages = messages %}\n{%- for message in loop_messages %}\n    {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' %}\n    {{- content }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}"
+    },
+    {
+        "order": "aa1",
+        "sha256sum": "5cd4ee65211770f1d99b4f6f4951780b9ef40e29314bd6542bb5bd0ad0bc29d1",
+        "name": "DeepSeek-R1-Distill-Qwen-7B",
+        "filename": "DeepSeek-R1-Distill-Qwen-7B-Q4_0.gguf",
+        "filesize": "4444121056",
+        "requires": "3.8.0",
+        "ramrequired": "8",
+        "parameters": "7 billion",
+        "quant": "q4_0",
+        "type": "deepseek",
+        "description": "<p>The official Qwen2.5-Math-7B distillation of DeepSeek-R1.</p><ul><li>License: <a href=\"https://opensource.org/license/mit\">MIT</a></li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>",
+        "url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-7B-Q4_0.gguf",
+        "chatTemplate": "{%- if not add_generation_prompt is defined %}\n    {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n    {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n    {%- if message['role'] == 'user' %}\n        {{- '<|User|>' + message['content'] }}\n    {%- endif %}\n    {%- if message['role'] == 'assistant' %}\n        {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*</think>', '') %}\n        {{- '<|Assistant|>' + content + '<|end▁of▁sentence|>' }}\n    {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n    {{- '<|Assistant|>' }}\n{%- endif %}"
+    },
+    {
+        "order": "aa2",
+        "sha256sum": "906b3382f2680f4ce845459b4a122e904002b075238080307586bcffcde49eef",
+        "name": "DeepSeek-R1-Distill-Qwen-14B",
+        "filename": "DeepSeek-R1-Distill-Qwen-14B-Q4_0.gguf",
+        "filesize": "8544267680",
+        "requires": "3.8.0",
+        "ramrequired": "16",
+        "parameters": "14 billion",
+        "quant": "q4_0",
+        "type": "deepseek",
+        "description": "<p>The official Qwen2.5-14B distillation of DeepSeek-R1.</p><ul><li>License: <a href=\"https://opensource.org/license/mit\">MIT</a></li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>",
+        "url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-14B-Q4_0.gguf",
+        "chatTemplate": "{%- if not add_generation_prompt is defined %}\n    {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n    {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n    {%- if message['role'] == 'user' %}\n        {{- '<|User|>' + message['content'] }}\n    {%- endif %}\n    {%- if message['role'] == 'assistant' %}\n        {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*</think>', '') %}\n        {{- '<|Assistant|>' + content + '<|end▁of▁sentence|>' }}\n    {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n    {{- '<|Assistant|>' }}\n{%- endif %}"
+    },
+    {
+        "order": "aa3",
+        "sha256sum": "0eb93e436ac8beec18aceb958c120d282cb2cf5451b23185e7be268fe9d375cc",
+        "name": "DeepSeek-R1-Distill-Llama-8B",
+        "filename": "DeepSeek-R1-Distill-Llama-8B-Q4_0.gguf",
+        "filesize": "4675894112",
+        "requires": "3.8.0",
+        "ramrequired": "8",
+        "parameters": "8 billion",
+        "quant": "q4_0",
+        "type": "deepseek",
+        "description": "<p>The official Llama-3.1-8B distillation of DeepSeek-R1.</p><ul><li>License: <a href=\"https://opensource.org/license/mit\">MIT</a></li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>",
+        "url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Llama-8B-GGUF/resolve/main/DeepSeek-R1-Distill-Llama-8B-Q4_0.gguf",
+        "chatTemplate": "{%- if not add_generation_prompt is defined %}\n    {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n    {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n    {%- if message['role'] == 'user' %}\n        {{- '<|User|>' + message['content'] }}\n    {%- endif %}\n    {%- if message['role'] == 'assistant' %}\n        {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*</think>', '') %}\n        {{- '<|Assistant|>' + content + '<|end▁of▁sentence|>' }}\n    {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n    {{- '<|Assistant|>' }}\n{%- endif %}"
+    },
+    {
+        "order": "aa4",
+        "sha256sum": "b3af887d0a015b39fab2395e4faf682c1a81a6a3fd09a43f0d4292f7d94bf4d0",
+        "name": "DeepSeek-R1-Distill-Qwen-1.5B",
+        "filename": "DeepSeek-R1-Distill-Qwen-1.5B-Q4_0.gguf",
+        "filesize": "1068807776",
+        "requires": "3.8.0",
+        "ramrequired": "3",
+        "parameters": "1.5 billion",
+        "quant": "q4_0",
+        "type": "deepseek",
+        "description": "<p>The official Qwen2.5-Math-1.5B distillation of DeepSeek-R1.</p><ul><li>License: <a href=\"https://opensource.org/license/mit\">MIT</a></li><li>No restrictions on commercial use</li><li>#reasoning</li></ul>",
+        "url": "https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-1.5B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-1.5B-Q4_0.gguf",
+        "chatTemplate": "{%- if not add_generation_prompt is defined %}\n    {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- if messages[0]['role'] == 'system' %}\n    {{- messages[0]['content'] }}\n{%- endif %}\n{%- for message in messages %}\n    {%- if message['role'] == 'user' %}\n        {{- '<|User|>' + message['content'] }}\n    {%- endif %}\n    {%- if message['role'] == 'assistant' %}\n        {%- set content = message['content'] | regex_replace('^[\\\\s\\\\S]*</think>', '') %}\n        {{- '<|Assistant|>' + content + '<|end▁of▁sentence|>' }}\n    {%- endif %}\n{%- endfor -%}\n{%- if add_generation_prompt %}\n    {{- '<|Assistant|>' }}\n{%- endif %}"
     },
     {
         "order": "b",
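The four DeepSeek-R1-Distill entries above share one chat template whose key trick is the `regex_replace` filter: before a past assistant turn is replayed, everything up to the closing `</think>` tag is stripped, so the model's earlier reasoning is not fed back into the prompt. A condensed sketch of that behavior, rendered with stock Python Jinja2 (GPT4All's own engine is the C++ minja library, which provides `regex_replace` natively; here the filter is registered by hand, and the messages are made up):

    import re
    from jinja2 import Environment

    # minja ships regex_replace built in; stock Jinja2 does not, so register an equivalent.
    env = Environment()
    env.filters["regex_replace"] = lambda s, pat, repl: re.sub(pat, repl, s)

    # Condensed from the DeepSeek-R1-Distill "chatTemplate" above (system handling omitted).
    template = env.from_string(
        "{%- for message in messages %}"
        "{%- if message['role'] == 'user' %}{{ '<|User|>' + message['content'] }}"
        "{%- elif message['role'] == 'assistant' %}"
        "{%- set content = message['content'] | regex_replace('^[\\s\\S]*</think>', '') %}"
        "{{ '<|Assistant|>' + content + '<|end▁of▁sentence|>' }}"
        "{%- endif %}"
        "{%- endfor %}"
        "{%- if add_generation_prompt %}{{ '<|Assistant|>' }}{%- endif %}"
    )

    messages = [
        {"role": "user", "content": "What is 2 + 2?"},
        {"role": "assistant", "content": "<think>2 and 2 make 4.</think>4."},
        {"role": "user", "content": "And doubled?"},
    ]
    print(template.render(messages=messages, add_generation_prompt=True))
    # -> <|User|>What is 2 + 2?<|Assistant|>4.<|end▁of▁sentence|><|User|>And doubled?<|Assistant|>

Note how the `<think>` block disappears from the replayed turn while the final answer is kept.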
@@ -30,7 +107,7 @@
         "url": "https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-GGUF/resolve/main/Llama-3.2-3B-Instruct-Q4_0.gguf",
         "promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2",
         "systemPrompt": "<|start_header_id|>system<|end_header_id|>\nCutting Knowledge Date: December 2023\n\nYou are a helpful assistant.<|eot_id|>",
-        "chatTemplate": "{{- bos_token }}\n{%- if not date_string is defined %}\n    {%- if strftime_now is defined %}\n        {%- set date_string = strftime_now('%d %b %Y') %}\n    {%- else %}\n        {%- set date_string = '26 Jul 2024' %}\n    {%- endif %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content'] | trim %}\n    {%- set loop_start = 1 %}\n{%- else %}\n    {%- set system_message = '' %}\n    {%- set loop_start = 0 %}\n{%- endif %}\n\n{#- System message #}\n{{- '<|start_header_id|>system<|end_header_id|>\\n\\n' }}\n{{- 'Cutting Knowledge Date: December 2023\\n' }}\n{{- 'Today Date: ' + date_string + '\\n\\n' }}\n{{- system_message }}\n{{- '<|eot_id|>' }}\n\n{%- for message in messages %}\n    {%- if loop.index0 >= loop_start %}\n        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' + message['content'] | trim + '<|eot_id|>' }}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}"
+        "chatTemplate": "{{- bos_token }}\n{%- set date_string = strftime_now('%d %b %Y') %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content'] | trim %}\n    {%- set loop_start = 1 %}\n{%- else %}\n    {%- set system_message = '' %}\n    {%- set loop_start = 0 %}\n{%- endif %}\n\n{#- System message #}\n{{- '<|start_header_id|>system<|end_header_id|>\\n\\n' }}\n{{- 'Cutting Knowledge Date: December 2023\\n' }}\n{{- 'Today Date: ' + date_string + '\\n\\n' }}\n{{- system_message }}\n{{- '<|eot_id|>' }}\n\n{%- for message in messages %}\n    {%- if loop.index0 >= loop_start %}\n        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' + message['content'] | trim + '<|eot_id|>' }}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}"
     },
     {
         "order": "c",
@@ -47,7 +124,7 @@
         "url": "https://huggingface.co/bartowski/Llama-3.2-1B-Instruct-GGUF/resolve/main/Llama-3.2-1B-Instruct-Q4_0.gguf",
         "promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2",
         "systemPrompt": "<|start_header_id|>system<|end_header_id|>\nCutting Knowledge Date: December 2023\n\nYou are a helpful assistant.<|eot_id|>",
-        "chatTemplate": "{{- bos_token }}\n{%- if not date_string is defined %}\n    {%- if strftime_now is defined %}\n        {%- set date_string = strftime_now('%d %b %Y') %}\n    {%- else %}\n        {%- set date_string = '26 Jul 2024' %}\n    {%- endif %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content'] | trim %}\n    {%- set loop_start = 1 %}\n{%- else %}\n    {%- set system_message = '' %}\n    {%- set loop_start = 0 %}\n{%- endif %}\n\n{#- System message #}\n{{- '<|start_header_id|>system<|end_header_id|>\\n\\n' }}\n{{- 'Cutting Knowledge Date: December 2023\\n' }}\n{{- 'Today Date: ' + date_string + '\\n\\n' }}\n{{- system_message }}\n{{- '<|eot_id|>' }}\n\n{%- for message in messages %}\n    {%- if loop.index0 >= loop_start %}\n        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' + message['content'] | trim + '<|eot_id|>' }}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}"
+        "chatTemplate": "{{- bos_token }}\n{%- set date_string = strftime_now('%d %b %Y') %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content'] | trim %}\n    {%- set loop_start = 1 %}\n{%- else %}\n    {%- set system_message = '' %}\n    {%- set loop_start = 0 %}\n{%- endif %}\n\n{#- System message #}\n{{- '<|start_header_id|>system<|end_header_id|>\\n\\n' }}\n{{- 'Cutting Knowledge Date: December 2023\\n' }}\n{{- 'Today Date: ' + date_string + '\\n\\n' }}\n{{- system_message }}\n{{- '<|eot_id|>' }}\n\n{%- for message in messages %}\n    {%- if loop.index0 >= loop_start %}\n        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n' + message['content'] | trim + '<|eot_id|>' }}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}"
     },
     {
         "order": "d",
@@ -64,7 +141,7 @@
         "url": "https://huggingface.co/NousResearch/Nous-Hermes-2-Mistral-7B-DPO-GGUF/resolve/main/Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf",
         "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n",
         "systemPrompt": "",
-        "chatTemplate": "{%- for message in messages %}\n    {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
+        "chatTemplate": "{%- for message in messages %}\n    {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
     },
     {
         "order": "e",
@@ -81,7 +158,7 @@
         "description": "<strong>Strong overall fast instruction following model</strong><br><ul><li>Fast responses</li><li>Trained by Mistral AI<li>Uncensored</li><li>Licensed for commercial use</li></ul>",
         "url": "https://gpt4all.io/models/gguf/mistral-7b-instruct-v0.1.Q4_0.gguf",
         "promptTemplate": "[INST] %1 [/INST]",
-        "chatTemplate": "{%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content'] %}\n    {%- set loop_start = 1 %}\n{%- else %}\n    {%- set loop_start = 0 %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if loop.index0 >= loop_start %}\n        {%- if (message['role'] == 'user') != ((loop.index0 - loop_start) % 2 == 0) %}\n            {{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}\n        {%- endif %}\n        {%- if message['role'] == 'user' %}\n            {%- if loop.first and system_message is defined %}\n                {{- ' [INST] ' + system_message + '\\n\\n' + message['content'] + ' [/INST]' }}\n            {%- else %}\n                {{- ' [INST] ' + message['content'] + ' [/INST]' }}\n            {%- endif %}\n        {%- elif message['role'] == 'assistant' %}\n            {{- ' ' + message['content'] + eos_token }}\n        {%- else %}\n            {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}"
+        "chatTemplate": "{%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content'] %}\n    {%- set loop_start = 1 %}\n{%- else %}\n    {%- set loop_start = 0 %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if loop.index0 >= loop_start %}\n        {%- if (message['role'] == 'user') != ((loop.index0 - loop_start) % 2 == 0) %}\n            {{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}\n        {%- endif %}\n        {%- if message['role'] == 'user' %}\n            {%- if loop.index0 == loop_start and loop_start == 1 %}\n                {{- ' [INST] ' + system_message + '\\n\\n' + message['content'] + ' [/INST]' }}\n            {%- else %}\n                {{- ' [INST] ' + message['content'] + ' [/INST]' }}\n            {%- endif %}\n        {%- elif message['role'] == 'assistant' %}\n            {{- ' ' + message['content'] + eos_token }}\n        {%- else %}\n            {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}"
     },
     {
         "order": "f",
@@ -97,7 +174,8 @@
         "description": "<ul><li><strong>For advanced users only. Not recommended for use on Windows or Linux without selecting CUDA due to speed issues.</strong></li><li>Fast responses</li><li>Chat based model</li><li>Large context size of 128k</li><li>Accepts agentic system prompts in Llama 3.1 format</li><li>Trained by Meta</li><li>License: <a href=\"https://llama.meta.com/llama3_1/license/\">Meta Llama 3.1 Community License</a></li></ul>",
         "url": "https://huggingface.co/GPT4All-Community/Meta-Llama-3.1-8B-Instruct-128k/resolve/main/Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf",
         "promptTemplate": "<|start_header_id|>user<|end_header_id|>\n\n%1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n%2",
-        "systemPrompt": "<|start_header_id|>system<|end_header_id|>\nCutting Knowledge Date: December 2023\n\nYou are a helpful assistant.<|eot_id|>"
+        "systemPrompt": "<|start_header_id|>system<|end_header_id|>\nCutting Knowledge Date: December 2023\n\nYou are a helpful assistant.<|eot_id|>",
+        "chatTemplate": "{%- set loop_messages = messages %}\n{%- for message in loop_messages %}\n    {%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' %}\n    {%- if loop.index0 == 0 %}\n        {%- set content = bos_token + content %}\n    {%- endif %}\n    {{- content }}\n{%- endfor %}\n{{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}"
     },
     {
         "order": "g",
@@ -114,7 +192,7 @@
         "url": "https://gpt4all.io/models/gguf/mistral-7b-openorca.gguf2.Q4_0.gguf",
         "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n",
         "systemPrompt": "<|im_start|>system\nYou are MistralOrca, a large language model trained by Alignment Lab AI.\n<|im_end|>\n",
-        "chatTemplate": "{%- if not add_generation_prompt is defined %}\n    {%- set add_generation_prompt = false %}\n{%- endif %}\n{%- for message in messages %}\n    {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
+        "chatTemplate": "{%- for message in messages %}\n    {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
     },
     {
         "order": "h",
@@ -197,6 +275,7 @@
         "url": "https://huggingface.co/lamhieu/ghost-7b-v0.9.1-gguf/resolve/main/ghost-7b-v0.9.1-Q4_0.gguf",
         "promptTemplate": "<|user|>\n%1</s>\n<|assistant|>\n%2</s>\n",
         "systemPrompt": "<|system|>\nYou are Ghost created by Lam Hieu. You are a helpful and knowledgeable assistant. You like to help and always give honest information, in its original language. In communication, you are always respectful, equal and promote positive behavior.\n</s>",
+        "chatTemplate": "{%- for message in messages %}\n    {%- if message['role'] == 'user' %}\n        {{- '<|user|>\\n' + message['content'] + eos_token }}\n    {%- elif message['role'] == 'system' %}\n        {{- '<|system|>\\n' + message['content'] + eos_token }}\n    {%- elif message['role'] == 'assistant' %}\n        {{- '<|assistant|>\\n' + message['content'] + eos_token }}\n    {%- endif %}\n    {%- if loop.last and add_generation_prompt %}\n        {{- '<|assistant|>' }}\n    {%- endif %}\n{%- endfor %}",
         "systemMessage": "You are Ghost created by Lam Hieu. You are a helpful and knowledgeable assistant. You like to help and always give honest information, in its original language. In communication, you are always respectful, equal and promote positive behavior."
     },
     {
@@ -282,7 +361,8 @@
         "description": "<ul><li>Very fast responses</li><li>Chat based model</li><li>Accepts system prompts in Phi-3 format</li><li>Trained by Microsoft</li><li>License: <a href=\"https://opensource.org/license/mit\">MIT</a></li><li>No restrictions on commercial use</li></ul>",
         "url": "https://gpt4all.io/models/gguf/Phi-3-mini-4k-instruct.Q4_0.gguf",
         "promptTemplate": "<|user|>\n%1<|end|>\n<|assistant|>\n%2<|end|>\n",
-        "systemPrompt": ""
+        "systemPrompt": "",
+        "chatTemplate": "{{- bos_token }}\n{%- for message in messages %}\n    {{- '<|' + message['role'] + '|>\\n' + message['content'] + '<|end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|assistant|>\\n' }}\n{%- else %}\n    {{- eos_token }}\n{%- endif %}"
     },
     {
         "order": "r",
@@ -406,7 +486,7 @@
         "url": "https://huggingface.co/TheBloke/em_german_mistral_v01-GGUF/resolve/main/em_german_mistral_v01.Q4_0.gguf",
         "promptTemplate": "USER: %1 ASSISTANT: ",
         "systemPrompt": "Du bist ein hilfreicher Assistent. ",
-        "chatTemplate": "{%- set system_message = false %}\n{%- if messages[0]['role'] == 'system' %}\n    {%- set loop_start = 1 %}\n    {%- set system_message = true %}\n    {{- messages[0]['content'] }}\n{%- else %}\n    {%- set loop_start = 0 %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if loop.index0 >= loop_start %}\n        {%- if (not loop.first) or (system_message is not none) %}\n            {{- ' ' }}\n        {%- endif %}\n        {%- if message['role'] == 'user' %}\n            {{- 'USER: ' + message['content'] }}\n        {%- elif message['role'] == 'assistant' %}\n            {{- 'ASSISTANT: ' + message['content'] }}\n        {%- else %}\n            {{- raise_exception('After the optional system message, conversation roles must be either user or assistant.') }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {%- if messages %}\n        {{- ' ' }}\n    {%- endif %}\n    {{- 'ASSISTANT:' }}\n{%- endif %}",
+        "chatTemplate": "{%- if messages[0]['role'] == 'system' %}\n    {%- set loop_start = 1 %}\n    {{- messages[0]['content'] }}\n{%- else %}\n    {%- set loop_start = 0 %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if loop.index0 >= loop_start %}\n        {%- if not loop.first %}\n            {{- ' ' }}\n        {%- endif %}\n        {%- if message['role'] == 'user' %}\n            {{- 'USER: ' + message['content'] }}\n        {%- elif message['role'] == 'assistant' %}\n            {{- 'ASSISTANT: ' + message['content'] }}\n        {%- else %}\n            {{- raise_exception('After the optional system message, conversation roles must be either user or assistant.') }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {%- if messages %}\n        {{- ' ' }}\n    {%- endif %}\n    {{- 'ASSISTANT:' }}\n{%- endif %}",
         "systemMessage": "Du bist ein hilfreicher Assistent."
     },
     {
@@ -452,7 +532,7 @@
         "filename": "qwen2-1_5b-instruct-q4_0.gguf",
         "filesize": "937532800",
         "requires": "3.0",
-        "ramrequired": "4",
+        "ramrequired": "3",
         "parameters": "1.5 billion",
         "quant": "q4_0",
         "type": "qwen2",
@@ -460,6 +540,6 @@
         "url": "https://huggingface.co/Qwen/Qwen2-1.5B-Instruct-GGUF/resolve/main/qwen2-1_5b-instruct-q4_0.gguf",
         "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>",
         "systemPrompt": "<|im_start|>system\nBelow is an instruction that describes a task. Write a response that appropriately completes the request.<|im_end|>\n",
-        "chatTemplate": "{%- for message in messages %}\n    {%- if loop.first and messages[0]['role'] != 'system' %}\n        {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n    {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
+        "chatTemplate": "{%- for message in messages %}\n    {%- if loop.first and messages[0]['role'] != 'system' %}\n        {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n    {{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>\\n' }}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}"
     }
 ]
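Each manifest entry above carries a `requires` field naming the minimum GPT4All version that can load it; the DeepSeek entries require 3.8.0, the release that first shipped the new template parser and the DeepSeek-R1 Qwen pretokenizer. A rough sketch of how a client could filter the manifest by version, assuming dotted-integer semantics for `requires` and a local copy of the file (the file name and the exact comparison rule are assumptions, not the app's actual logic):

    import json

    def version_tuple(v):
        # "3.8.0" -> (3, 8, 0); tuples compare element-wise, unlike raw strings.
        return tuple(int(part) for part in v.split("."))

    with open("models3.json", encoding="utf-8") as f:
        models = json.load(f)

    app_version = version_tuple("3.8.0")
    for m in models:
        # Entries without a "requires" key are assumed available to any version.
        if version_tuple(m.get("requires", "0")) <= app_version:
            print(m["name"], m.get("quant", "?"), m.get("filesize", "?"), "bytes")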
@@ -243,5 +243,40 @@
         "version": "3.5.2",
         "notes": "* **Model Search:** There are now separate tabs for official and third-party models.\n* **Local Server Fixes:** Several mistakes in v3.5's changes to the API server have been corrected.\n* **Cloned Model Fixes:** The chat template and system message of cloned models now manage their defaults correctly.\n* **Translation Improvements:** The Romanian and Italian translations have been updated.\n",
         "contributors": "* Jared Van Bortel (Nomic AI)\n* Adam Treat (Nomic AI)\n* Riccardo Giovanetti (`@Harvester62`)\n* Victor Emanuel (`@SINAPSA-IC`)"
+    },
+    {
+        "version": "3.5.3",
+        "notes": "* **LocalDocs Fix:** A serious issue causing LocalDocs to not work properly in v3.5.2 has been fixed.\n",
+        "contributors": "* Jared Van Bortel (Nomic AI)\n* Adam Treat (Nomic AI)"
+    },
+    {
+        "version": "3.6.0",
+        "notes": "* **Reasoner v1:**\n  * Built-in javascript code interpreter tool.\n  * Custom curated model that utilizes the code interpreter to break down, analyze, perform, and verify complex reasoning tasks.\n* **Templates:** Automatically substitute chat templates that are not compatible with Jinja2Cpp in GGUFs.\n* **Fixes:**\n  * Remote model template to allow for XML in messages.\n  * Jinja2Cpp bug that broke system message detection in chat templates.\n  * LocalDocs sources displaying in unconsolidated form after v3.5.0.\n",
+        "contributors": "* Adam Treat (Nomic AI)\n* Jared Van Bortel (Nomic AI)"
+    },
+    {
+        "version": "3.6.1",
+        "notes": "* **Fixes:**\n  * The stop generation button no longer working in v3.6.0.\n  * The copy entire conversation button no longer working in v3.6.0.\n",
+        "contributors": "* Adam Treat (Nomic AI)"
+    },
+    {
+        "version": "3.7.0",
+        "notes": "* **Windows ARM Support:** GPT4All now supports the Windows ARM platform, ensuring compatibility with devices powered by Qualcomm Snapdragon and Microsoft SQ-series processors.\n  * **NOTE:** Support for GPU and/or NPU acceleration is not available at this time. Only the CPU will be used to run LLMs.\n  * **NOTE:** You must install the new *Windows ARM* version of GPT4All from the website. The standard *Windows* version will not work due to emulation limitations.\n* **Fixed Updating on macOS:** The maintenance tool no longer crashes when attempting to update or uninstall GPT4All on Sequoia.\n  * **NOTE:** If you have installed the version from the GitHub releases as a workaround for this issue, you can safely uninstall it and switch back to the version from the website.\n* **Fixed Chat Saving on macOS:** Chats now save as expected when the application is quit with Command-Q.\n* **Code Interpreter Improvements:**\n  * The behavior when the code takes too long to execute and times out has been improved.\n  * console.log now accepts multiple arguments for better compatibility with native JavaScript.\n* **Chat Templating Improvements:**\n  * Two crashes and one compatibility issue have been fixed in the chat template parser.\n  * The default chat template for EM German Mistral has been fixed.\n  * Automatic replacements have been added for five new models as we continue to improve compatibility with common chat templates.\n",
+        "contributors": "* Jared Van Bortel (Nomic AI)\n* Adam Treat (Nomic AI)\n* Riccardo Giovanetti (`@Harvester62`)"
+    },
+    {
+        "version": "3.8.0",
+        "notes": "* **Native DeepSeek-R1-Distill Support:** GPT4All now has robust support for the DeepSeek-R1 family of distillations.\n  * Several model variants are now available on the downloads page.\n  * Reasoning (wrapped in \"think\" tags) is displayed similarly to the Reasoner model.\n  * The DeepSeek-R1 Qwen pretokenizer is now supported, resolving the loading failure in previous versions.\n  * The model is now configured with a GPT4All-compatible prompt template by default.\n* **Chat Templating Overhaul:** The template parser has been *completely* replaced with one that has much better compatibility with common models.\n* **Code Interpreter Fixes:**\n  * An issue preventing the code interpreter from logging a single string in v3.7.0 has been fixed.\n  * The UI no longer freezes while the code interpreter is running a computation.\n* **Local Server Fixes:**\n  * An issue preventing the server from using LocalDocs after the first request since v3.5.0 has been fixed.\n  * System messages are now correctly hidden from the message history.\n",
+        "contributors": "* Jared Van Bortel (Nomic AI)\n* Adam Treat (Nomic AI)\n* ThiloteE (`@ThiloteE`)"
+    },
+    {
+        "version": "3.9.0",
+        "notes": "* **LocalDocs Fix:** LocalDocs no longer shows an error on later messages with reasoning models.\n* **DeepSeek Fix:** DeepSeek-R1 reasoning (in 'think' tags) no longer appears in chat names and follow-up questions.\n* **Windows ARM Improvements:**\n  * Graphical artifacts on some SoCs have been fixed.\n  * A crash when adding a collection of PDFs to LocalDocs has been fixed.\n* **Template Parser Fixes:** Chat templates containing an unclosed comment no longer freeze GPT4All.\n* **New Models:** OLMoE and Granite MoE models are now supported.\n",
+        "contributors": "* Jared Van Bortel (Nomic AI)\n* Adam Treat (Nomic AI)\n* ThiloteE (`@ThiloteE`)"
+    },
+    {
+        "version": "3.10.0",
+        "notes": "* **Remote Models:**\n  * The Add Model page now has a dedicated tab for remote model providers.\n  * Groq, OpenAI, and Mistral remote models are now easier to configure.\n* **CUDA Compatibility:** GPUs with CUDA compute capability 5.0 such as the GTX 750 are now supported by the CUDA backend.\n* **New Model:** The non-MoE Granite model is now supported.\n* **Translation Updates:**\n  * The Italian translation has been updated.\n  * The Simplified Chinese translation has been significantly improved.\n* **Better Chat Templates:** The default chat templates for OLMoE 7B 0924/0125 and Granite 3.1 3B/8B have been improved.\n* **Whitespace Fixes:** DeepSeek-R1-based models now have better whitespace behavior in their output.\n* **Crash Fixes:** Several issues that could potentially cause GPT4All to crash have been fixed.\n",
+        "contributors": "* Jared Van Bortel (Nomic AI)\n* Adam Treat (Nomic AI)\n* ThiloteE (`@ThiloteE`)\n* Lil Bob (`@Junior2Ran`)\n* Riccardo Giovanetti (`@Harvester62`)"
     }
 ]
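The release feed above is a flat JSON array of `version`/`notes`/`contributors` objects, so picking out the newest entry only requires comparing versions numerically rather than lexically. A small sketch, assuming a local copy of the feed (the file name is a guess):

    import json

    with open("release_notes.json", encoding="utf-8") as f:
        releases = json.load(f)

    # "3.10.0" must sort after "3.9.0", so compare integer tuples, not strings.
    latest = max(releases, key=lambda r: tuple(int(x) for x in r["version"].split(".")))
    print("GPT4All", latest["version"])
    print(latest["notes"])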
@@ -56,6 +56,52 @@ ColumnLayout {
         Accessible.description: qsTr("Displayed when the models request is ongoing")
     }
+
+    RowLayout {
+        ButtonGroup {
+            id: buttonGroup
+            exclusive: true
+        }
+        MyButton {
+            text: qsTr("All")
+            checked: true
+            borderWidth: 0
+            backgroundColor: checked ? theme.lightButtonBackground : "transparent"
+            backgroundColorHovered: theme.lighterButtonBackgroundHovered
+            backgroundRadius: 5
+            padding: 15
+            topPadding: 8
+            bottomPadding: 8
+            textColor: theme.lighterButtonForeground
+            fontPixelSize: theme.fontSizeLarge
+            fontPixelBold: true
+            checkable: true
+            ButtonGroup.group: buttonGroup
+            onClicked: {
+                ModelList.gpt4AllDownloadableModels.filter("");
+            }
+        }
+        MyButton {
+            text: qsTr("Reasoning")
+            borderWidth: 0
+            backgroundColor: checked ? theme.lightButtonBackground : "transparent"
+            backgroundColorHovered: theme.lighterButtonBackgroundHovered
+            backgroundRadius: 5
+            padding: 15
+            topPadding: 8
+            bottomPadding: 8
+            textColor: theme.lighterButtonForeground
+            fontPixelSize: theme.fontSizeLarge
+            fontPixelBold: true
+            checkable: true
+            ButtonGroup.group: buttonGroup
+            onClicked: {
+                ModelList.gpt4AllDownloadableModels.filter("#reasoning");
+            }
+        }
+        Layout.bottomMargin: 10
+    }
+
     ScrollView {
         id: scrollView
         ScrollBar.vertical.policy: ScrollBar.AsNeeded
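The new "Reasoning" tab above narrows the downloadable-model list via `ModelList.gpt4AllDownloadableModels.filter("#reasoning")`; the tag it matches is the `#reasoning` marker embedded in the model descriptions earlier in this diff. A rough Python analogue of that filter over the manifest (the real filtering happens in the C++ ModelList; this is only an illustration):

    import json

    with open("models3.json", encoding="utf-8") as f:
        models = json.load(f)

    # Keep entries whose description carries the #reasoning tag, as the manifest
    # entries for Reasoner v1 and the DeepSeek-R1 distillations do above.
    reasoning = [m["name"] for m in models if "#reasoning" in m.get("description", "")]
    print(reasoning)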
@@ -158,7 +204,7 @@ ColumnLayout {
         Layout.minimumWidth: 200
         Layout.fillWidth: true
         Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
-        visible: !isOnline && !installed && !calcHash && downloadError === ""
+        visible: !installed && !calcHash && downloadError === ""
         Accessible.description: qsTr("Stop/restart/start the download")
         onClicked: {
             if (!isDownloading) {
@@ -184,52 +230,6 @@ ColumnLayout {
         }
     }
-
-    MySettingsButton {
-        id: installButton
-        visible: !installed && isOnline
-        Layout.topMargin: 20
-        Layout.leftMargin: 20
-        Layout.minimumWidth: 200
-        Layout.fillWidth: true
-        Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
-        text: qsTr("Install")
-        font.pixelSize: theme.fontSizeLarge
-        onClicked: {
-            var apiKeyText = apiKey.text.trim(),
-                baseUrlText = baseUrl.text.trim(),
-                modelNameText = modelName.text.trim();
-
-            var apiKeyOk = apiKeyText !== "",
-                baseUrlOk = !isCompatibleApi || baseUrlText !== "",
-                modelNameOk = !isCompatibleApi || modelNameText !== "";
-
-            if (!apiKeyOk)
-                apiKey.showError();
-            if (!baseUrlOk)
-                baseUrl.showError();
-            if (!modelNameOk)
-                modelName.showError();
-
-            if (!apiKeyOk || !baseUrlOk || !modelNameOk)
-                return;
-
-            if (!isCompatibleApi)
-                Download.installModel(
-                    filename,
-                    apiKeyText,
-                );
-            else
-                Download.installCompatibleModel(
-                    modelNameText,
-                    apiKeyText,
-                    baseUrlText,
-                );
-        }
-        Accessible.role: Accessible.Button
-        Accessible.name: qsTr("Install")
-        Accessible.description: qsTr("Install online model")
-    }
-
     ColumnLayout {
         spacing: 0
         Label {
@@ -344,69 +344,6 @@ ColumnLayout {
             Accessible.description: qsTr("Displayed when the file hash is being calculated")
         }
     }
-
-    MyTextField {
-        id: apiKey
-        visible: !installed && isOnline
-        Layout.topMargin: 20
-        Layout.leftMargin: 20
-        Layout.minimumWidth: 200
-        Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
-        wrapMode: Text.WrapAnywhere
-        function showError() {
-            messageToast.show(qsTr("ERROR: $API_KEY is empty."));
-            apiKey.placeholderTextColor = theme.textErrorColor;
-        }
-        onTextChanged: {
-            apiKey.placeholderTextColor = theme.mutedTextColor;
-        }
-        placeholderText: qsTr("enter $API_KEY")
-        Accessible.role: Accessible.EditableText
-        Accessible.name: placeholderText
-        Accessible.description: qsTr("Whether the file hash is being calculated")
-    }
-
-    MyTextField {
-        id: baseUrl
-        visible: !installed && isOnline && isCompatibleApi
-        Layout.topMargin: 20
-        Layout.leftMargin: 20
-        Layout.minimumWidth: 200
-        Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
-        wrapMode: Text.WrapAnywhere
-        function showError() {
-            messageToast.show(qsTr("ERROR: $BASE_URL is empty."));
-            baseUrl.placeholderTextColor = theme.textErrorColor;
-        }
-        onTextChanged: {
-            baseUrl.placeholderTextColor = theme.mutedTextColor;
-        }
-        placeholderText: qsTr("enter $BASE_URL")
-        Accessible.role: Accessible.EditableText
-        Accessible.name: placeholderText
-        Accessible.description: qsTr("Whether the file hash is being calculated")
-    }
-
-    MyTextField {
-        id: modelName
-        visible: !installed && isOnline && isCompatibleApi
-        Layout.topMargin: 20
-        Layout.leftMargin: 20
-        Layout.minimumWidth: 200
-        Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
-        wrapMode: Text.WrapAnywhere
-        function showError() {
-            messageToast.show(qsTr("ERROR: $MODEL_NAME is empty."))
-            modelName.placeholderTextColor = theme.textErrorColor;
-        }
-        onTextChanged: {
-            modelName.placeholderTextColor = theme.mutedTextColor;
-        }
-        placeholderText: qsTr("enter $MODEL_NAME")
-        Accessible.role: Accessible.EditableText
-        Accessible.name: placeholderText
-        Accessible.description: qsTr("Whether the file hash is being calculated")
-    }
-    }
         }
     }
 }
@@ -89,6 +89,13 @@ Rectangle {
 gpt4AllModelView.show();
 }
 }
+MyTabButton {
+text: qsTr("Remote Providers")
+isSelected: remoteModelView.isShown()
+onPressed: {
+remoteModelView.show();
+}
+}
 MyTabButton {
 text: qsTr("HuggingFace")
 isSelected: huggingfaceModelView.isShown()
@@ -112,7 +119,20 @@ Rectangle {
 stackLayout.currentIndex = 0;
 }
 function isShown() {
-return stackLayout.currentIndex === 0
+return stackLayout.currentIndex === 0;
+}
+}
+
+AddRemoteModelView {
+id: remoteModelView
+Layout.fillWidth: true
+Layout.fillHeight: true
+
+function show() {
+stackLayout.currentIndex = 1;
+}
+function isShown() {
+return stackLayout.currentIndex === 1;
 }
 }
@@ -126,10 +146,10 @@ Rectangle {
 anchors.fill: parent
 
 function show() {
-stackLayout.currentIndex = 1;
+stackLayout.currentIndex = 2;
 }
 function isShown() {
-return stackLayout.currentIndex === 1
+return stackLayout.currentIndex === 2;
 }
 }
 }
gpt4all-chat/qml/AddRemoteModelView.qml (new file, 147 lines)
@@ -0,0 +1,147 @@
+import QtCore
+import QtQuick
+import QtQuick.Controls
+import QtQuick.Controls.Basic
+import QtQuick.Layouts
+import QtQuick.Dialogs
+import Qt.labs.folderlistmodel
+import Qt5Compat.GraphicalEffects
+
+import llm
+import chatlistmodel
+import download
+import modellist
+import network
+import gpt4all
+import mysettings
+import localdocs
+
+ColumnLayout {
+Layout.fillWidth: true
+Layout.alignment: Qt.AlignTop
+spacing: 5
+
+Label {
+Layout.topMargin: 0
+Layout.bottomMargin: 25
+Layout.rightMargin: 150 * theme.fontScale
+Layout.alignment: Qt.AlignTop
+Layout.fillWidth: true
+verticalAlignment: Text.AlignTop
+text: qsTr("Various remote model providers that use network resources for inference.")
+font.pixelSize: theme.fontSizeLarger
+color: theme.textColor
+wrapMode: Text.WordWrap
+}
+
+ScrollView {
+id: scrollView
+ScrollBar.vertical.policy: ScrollBar.AsNeeded
+Layout.fillWidth: true
+Layout.fillHeight: true
+contentWidth: availableWidth
+clip: true
+Flow {
+anchors.left: parent.left
+anchors.right: parent.right
+spacing: 20
+bottomPadding: 20
+property int childWidth: 330 * theme.fontScale
+property int childHeight: 400 + 166 * theme.fontScale
+RemoteModelCard {
+width: parent.childWidth
+height: parent.childHeight
+providerBaseUrl: "https://api.groq.com/openai/v1/"
+providerName: qsTr("Groq")
+providerImage: "qrc:/gpt4all/icons/groq.svg"
+providerDesc: qsTr('Groq offers a high-performance AI inference engine designed for low-latency and efficient processing. Optimized for real-time applications, Groq’s technology is ideal for users who need fast responses from open large language models and other AI workloads.<br><br>Get your API key: <a href="https://console.groq.com/keys">https://groq.com/</a>')
+modelWhitelist: [
+// last updated 2025-02-24
+"deepseek-r1-distill-llama-70b",
+"deepseek-r1-distill-qwen-32b",
+"gemma2-9b-it",
+"llama-3.1-8b-instant",
+"llama-3.2-1b-preview",
+"llama-3.2-3b-preview",
+"llama-3.3-70b-specdec",
+"llama-3.3-70b-versatile",
+"llama3-70b-8192",
+"llama3-8b-8192",
+"mixtral-8x7b-32768",
+"qwen-2.5-32b",
+"qwen-2.5-coder-32b",
+]
+}
+RemoteModelCard {
+width: parent.childWidth
+height: parent.childHeight
+providerBaseUrl: "https://api.openai.com/v1/"
+providerName: qsTr("OpenAI")
+providerImage: "qrc:/gpt4all/icons/openai.svg"
+providerDesc: qsTr('OpenAI provides access to advanced AI models, including GPT-4 supporting a wide range of applications, from conversational AI to content generation and code completion.<br><br>Get your API key: <a href="https://platform.openai.com/signup">https://openai.com/</a>')
+modelWhitelist: [
+// last updated 2025-02-24
+"gpt-3.5-turbo",
+"gpt-3.5-turbo-16k",
+"gpt-4",
+"gpt-4-32k",
+"gpt-4-turbo",
+"gpt-4o",
+]
+}
+RemoteModelCard {
+width: parent.childWidth
+height: parent.childHeight
+providerBaseUrl: "https://api.mistral.ai/v1/"
+providerName: qsTr("Mistral")
+providerImage: "qrc:/gpt4all/icons/mistral.svg"
+providerDesc: qsTr('Mistral AI specializes in efficient, open-weight language models optimized for various natural language processing tasks. Their models are designed for flexibility and performance, making them a solid option for applications requiring scalable AI solutions.<br><br>Get your API key: <a href="https://mistral.ai/">https://mistral.ai/</a>')
+modelWhitelist: [
+// last updated 2025-02-24
+"codestral-2405",
+"codestral-2411-rc5",
+"codestral-2412",
+"codestral-2501",
+"codestral-latest",
+"codestral-mamba-2407",
+"codestral-mamba-latest",
+"ministral-3b-2410",
+"ministral-3b-latest",
+"ministral-8b-2410",
+"ministral-8b-latest",
+"mistral-large-2402",
+"mistral-large-2407",
+"mistral-large-2411",
+"mistral-large-latest",
+"mistral-medium-2312",
+"mistral-medium-latest",
+"mistral-saba-2502",
+"mistral-saba-latest",
+"mistral-small-2312",
+"mistral-small-2402",
+"mistral-small-2409",
+"mistral-small-2501",
+"mistral-small-latest",
+"mistral-tiny-2312",
+"mistral-tiny-2407",
+"mistral-tiny-latest",
+"open-codestral-mamba",
+"open-mistral-7b",
+"open-mistral-nemo",
+"open-mistral-nemo-2407",
+"open-mixtral-8x22b",
+"open-mixtral-8x22b-2404",
+"open-mixtral-8x7b",
+]
+}
+RemoteModelCard {
+width: parent.childWidth
+height: parent.childHeight
+providerIsCustom: true
+providerName: qsTr("Custom")
+providerImage: "qrc:/gpt4all/icons/antenna_3.svg"
+providerDesc: qsTr("The custom provider option allows users to connect their own OpenAI-compatible AI models or third-party inference services. This is useful for organizations with proprietary models or those leveraging niche AI providers not listed here.")
+}
+}
+}
+}
gpt4all-chat/qml/ChatCollapsibleItem.qml (new file, 166 lines)
@@ -0,0 +1,166 @@
+import Qt5Compat.GraphicalEffects
+import QtCore
+import QtQuick
+import QtQuick.Controls
+import QtQuick.Controls.Basic
+import QtQuick.Layouts
+
+import gpt4all
+import mysettings
+import toolenums
+
+ColumnLayout {
+property alias textContent: innerTextItem.textContent
+property bool isCurrent: false
+property bool isError: false
+property bool isThinking: false
+property int thinkingTime: 0
+
+Layout.topMargin: 10
+Layout.bottomMargin: 10
+
+Item {
+Layout.preferredWidth: childrenRect.width
+Layout.preferredHeight: 38
+RowLayout {
+anchors.left: parent.left
+anchors.top: parent.top
+anchors.bottom: parent.bottom
+
+Item {
+Layout.preferredWidth: myTextArea.implicitWidth
+Layout.preferredHeight: myTextArea.implicitHeight
+TextArea {
+id: myTextArea
+text: {
+if (isError)
+return qsTr("Analysis encountered error");
+if (isCurrent)
+return isThinking ? qsTr("Thinking") : qsTr("Analyzing");
+return isThinking
+? qsTr("Thought for %1 %2")
+.arg(Math.ceil(thinkingTime / 1000.0))
+.arg(Math.ceil(thinkingTime / 1000.0) === 1 ? qsTr("second") : qsTr("seconds"))
+: qsTr("Analyzed");
+}
+padding: 0
+font.pixelSize: theme.fontSizeLarger
+enabled: false
+focus: false
+readOnly: true
+color: headerMA.containsMouse ? theme.mutedDarkTextColorHovered : theme.mutedTextColor
+hoverEnabled: false
+}
+
+Item {
+id: textColorOverlay
+anchors.fill: parent
+clip: true
+visible: false
+Rectangle {
+id: animationRec
+width: myTextArea.width * 0.3
+anchors.top: parent.top
+anchors.bottom: parent.bottom
+color: theme.textColor
+
+SequentialAnimation {
+running: isCurrent
+loops: Animation.Infinite
+NumberAnimation {
+target: animationRec;
+property: "x";
+from: -animationRec.width;
+to: myTextArea.width * 3;
+duration: 2000
+}
+}
+}
+}
+OpacityMask {
+visible: isCurrent
+anchors.fill: parent
+maskSource: myTextArea
+source: textColorOverlay
+}
+}
+
+Item {
+id: caret
+Layout.preferredWidth: contentCaret.width
+Layout.preferredHeight: contentCaret.height
+Image {
+id: contentCaret
+anchors.centerIn: parent
+visible: false
+sourceSize.width: theme.fontSizeLarge
+sourceSize.height: theme.fontSizeLarge
+mipmap: true
+source: {
+if (contentLayout.state === "collapsed")
+return "qrc:/gpt4all/icons/caret_right.svg";
+else
+return "qrc:/gpt4all/icons/caret_down.svg";
+}
+}
+
+ColorOverlay {
+anchors.fill: contentCaret
+source: contentCaret
+color: headerMA.containsMouse ? theme.mutedDarkTextColorHovered : theme.mutedTextColor
+}
+}
+}
+
+MouseArea {
+id: headerMA
+hoverEnabled: true
+anchors.fill: parent
+onClicked: {
+if (contentLayout.state === "collapsed")
+contentLayout.state = "expanded";
+else
+contentLayout.state = "collapsed";
+}
+}
+}
+
+ColumnLayout {
+id: contentLayout
+spacing: 0
+state: "collapsed"
+clip: true
+
+states: [
+State {
+name: "expanded"
+PropertyChanges { target: contentLayout; Layout.preferredHeight: innerContentLayout.height }
+},
+State {
+name: "collapsed"
+PropertyChanges { target: contentLayout; Layout.preferredHeight: 0 }
+}
+]
+
+transitions: [
+Transition {
+SequentialAnimation {
+PropertyAnimation {
+target: contentLayout
+property: "Layout.preferredHeight"
+duration: 300
+easing.type: Easing.InOutQuad
+}
+}
+}
+]
+
+ColumnLayout {
+id: innerContentLayout
+Layout.leftMargin: 30
+ChatTextItem {
+id: innerTextItem
+}
+}
+}
+}
@@ -4,9 +4,11 @@ import QtQuick
 import QtQuick.Controls
 import QtQuick.Controls.Basic
 import QtQuick.Layouts
+import Qt.labs.qmlmodels
 
 import gpt4all
 import mysettings
+import toolenums
 
 ColumnLayout {
 
@@ -33,6 +35,8 @@ GridLayout {
 Layout.alignment: Qt.AlignVCenter | Qt.AlignRight
 Layout.preferredWidth: 32
 Layout.preferredHeight: 32
+Layout.topMargin: model.index > 0 ? 25 : 0
 
 Image {
 id: logo
 sourceSize: Qt.size(32, 32)
@@ -65,6 +69,8 @@ GridLayout {
 Layout.column: 1
 Layout.fillWidth: true
 Layout.preferredHeight: 38
+Layout.topMargin: model.index > 0 ? 25 : 0
 
 RowLayout {
 spacing: 5
 anchors.left: parent.left
@@ -72,7 +78,11 @@ GridLayout {
 anchors.bottom: parent.bottom
 
 TextArea {
-text: name === "Response: " ? qsTr("GPT4All") : qsTr("You")
+text: {
+if (name === "Response: ")
+return qsTr("GPT4All");
+return qsTr("You");
+}
 padding: 0
 font.pixelSize: theme.fontSizeLarger
 font.bold: true
@@ -88,7 +98,7 @@ GridLayout {
 color: theme.mutedTextColor
 }
 RowLayout {
-visible: isCurrentResponse && (value === "" && currentChat.responseInProgress)
+visible: isCurrentResponse && (content === "" && currentChat.responseInProgress)
 Text {
 color: theme.mutedTextColor
 font.pixelSize: theme.fontSizeLarger
@@ -100,6 +110,7 @@ GridLayout {
 case Chat.PromptProcessing: return qsTr("processing ...")
 case Chat.ResponseGeneration: return qsTr("generating response ...");
 case Chat.GeneratingQuestions: return qsTr("generating questions ...");
+case Chat.ToolCallGeneration: return qsTr("generating toolcall ...");
 default: return ""; // handle unexpected values
 }
 }
@@ -156,131 +167,48 @@ GridLayout {
 }
 }
 
-TextArea {
-id: myTextArea
-Layout.fillWidth: true
-padding: 0
-color: {
-if (!currentChat.isServer)
-return theme.textColor
-return theme.white
-}
-wrapMode: Text.WordWrap
-textFormat: TextEdit.PlainText
-focus: false
-readOnly: true
-font.pixelSize: theme.fontSizeLarge
-cursorVisible: isCurrentResponse ? currentChat.responseInProgress : false
-cursorPosition: text.length
-TapHandler {
-id: tapHandler
-onTapped: function(eventPoint, button) {
-var clickedPos = myTextArea.positionAt(eventPoint.position.x, eventPoint.position.y);
-var success = textProcessor.tryCopyAtPosition(clickedPos);
-if (success)
-copyCodeMessage.open();
-}
-}
-
-MouseArea {
-id: conversationMouseArea
-anchors.fill: parent
-acceptedButtons: Qt.RightButton
-
-onClicked: (mouse) => {
-if (mouse.button === Qt.RightButton) {
-conversationContextMenu.x = conversationMouseArea.mouseX
-conversationContextMenu.y = conversationMouseArea.mouseY
-conversationContextMenu.open()
-}
-}
-}
-
-onLinkActivated: function(link) {
-if (!isCurrentResponse || !currentChat.responseInProgress)
-Qt.openUrlExternally(link)
-}
-
-onLinkHovered: function (link) {
-if (!isCurrentResponse || !currentChat.responseInProgress)
-statusBar.externalHoveredLink = link
-}
-
-MyMenu {
-id: conversationContextMenu
-MyMenuItem {
-text: qsTr("Copy")
-enabled: myTextArea.selectedText !== ""
-height: enabled ? implicitHeight : 0
-onTriggered: myTextArea.copy()
-}
-MyMenuItem {
-text: qsTr("Copy Message")
-enabled: myTextArea.selectedText === ""
-height: enabled ? implicitHeight : 0
-onTriggered: {
-myTextArea.selectAll()
-myTextArea.copy()
-myTextArea.deselect()
-}
-}
-MyMenuItem {
-text: textProcessor.shouldProcessText ? qsTr("Disable markdown") : qsTr("Enable markdown")
-height: enabled ? implicitHeight : 0
-onTriggered: {
-textProcessor.shouldProcessText = !textProcessor.shouldProcessText;
-textProcessor.setValue(value);
-}
-}
-}
-
-ChatViewTextProcessor {
-id: textProcessor
-}
-
-function resetChatViewTextProcessor() {
-textProcessor.fontPixelSize = myTextArea.font.pixelSize
-textProcessor.codeColors.defaultColor = theme.codeDefaultColor
-textProcessor.codeColors.keywordColor = theme.codeKeywordColor
-textProcessor.codeColors.functionColor = theme.codeFunctionColor
-textProcessor.codeColors.functionCallColor = theme.codeFunctionCallColor
-textProcessor.codeColors.commentColor = theme.codeCommentColor
-textProcessor.codeColors.stringColor = theme.codeStringColor
-textProcessor.codeColors.numberColor = theme.codeNumberColor
-textProcessor.codeColors.headerColor = theme.codeHeaderColor
-textProcessor.codeColors.backgroundColor = theme.codeBackgroundColor
-textProcessor.textDocument = textDocument
-textProcessor.setValue(value);
-}
-
-property bool textProcessorReady: false
-
-Component.onCompleted: {
-resetChatViewTextProcessor();
-textProcessorReady = true;
-}
-
-Connections {
-target: chatModel
-function onValueChanged(i, value) {
-if (myTextArea.textProcessorReady && index === i)
-textProcessor.setValue(value);
-}
-}
-
-Connections {
-target: MySettings
-function onFontSizeChanged() {
-myTextArea.resetChatViewTextProcessor();
-}
-function onChatThemeChanged() {
-myTextArea.resetChatViewTextProcessor();
-}
-}
-
-Accessible.role: Accessible.Paragraph
-Accessible.name: text
-Accessible.description: name === "Response: " ? "The response by the model" : "The prompt by the user"
+Repeater {
+model: childItems
+
+DelegateChooser {
+id: chooser
+role: "name"
+DelegateChoice {
+roleValue: "Text: ";
+ChatTextItem {
+Layout.fillWidth: true
+textContent: modelData.content
+}
+}
+DelegateChoice {
+roleValue: "ToolCall: ";
+ChatCollapsibleItem {
+Layout.fillWidth: true
+textContent: modelData.content
+isCurrent: modelData.isCurrentResponse
+isError: modelData.isToolCallError
+}
+}
+DelegateChoice {
+roleValue: "Think: ";
+ChatCollapsibleItem {
+Layout.fillWidth: true
+textContent: modelData.content
+isCurrent: modelData.isCurrentResponse
+isError: false
+isThinking: true
+thinkingTime: modelData.thinkingTime
+visible: modelData.content !== ""
+}
+}
+}
+
+delegate: chooser
+}
+
+ChatTextItem {
+Layout.fillWidth: true
+textContent: content
 }
 
 ThumbsDownDialog {
@@ -289,16 +217,16 @@ GridLayout {
 y: Math.round((parent.height - height) / 2)
 width: 640
 height: 300
-property string text: value
+property string text: content
 response: newResponse === undefined || newResponse === "" ? text : newResponse
 onAccepted: {
 var responseHasChanged = response !== text && response !== newResponse
 if (thumbsDownState && !thumbsUpState && !responseHasChanged)
 return
 
-chatModel.updateNewResponse(index, response)
-chatModel.updateThumbsUpState(index, false)
-chatModel.updateThumbsDownState(index, true)
+chatModel.updateNewResponse(model.index, response)
+chatModel.updateThumbsUpState(model.index, false)
+chatModel.updateThumbsDownState(model.index, true)
 Network.sendConversation(currentChat.id, getConversationJson());
 }
 }
@@ -416,7 +344,7 @@ GridLayout {
 states: [
 State {
 name: "expanded"
-PropertyChanges { target: sourcesLayout; Layout.preferredHeight: flow.height }
+PropertyChanges { target: sourcesLayout; Layout.preferredHeight: sourcesFlow.height }
 },
 State {
 name: "collapsed"
@@ -438,7 +366,7 @@ GridLayout {
 ]
 
 Flow {
-id: flow
+id: sourcesFlow
 Layout.fillWidth: true
 spacing: 10
 visible: consolidatedSources.length !== 0
@@ -617,9 +545,7 @@ GridLayout {
 name: qsTr("Copy")
 source: "qrc:/gpt4all/icons/copy.svg"
 onClicked: {
-myTextArea.selectAll();
-myTextArea.copy();
-myTextArea.deselect();
+chatModel.copyToClipboard(index);
 }
 }
 
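The copy action above now delegates to a single chatModel.copyToClipboard(index) call instead of selecting and copying through the TextArea. A rough sketch of what such a model-side copy can reduce to; the copyTextToClipboard helper and the message list are hypothetical stand-ins, and only QGuiApplication/QClipboard are real Qt API here:

// Hypothetical sketch of a model-side "copy one message" slot.
#include <QGuiApplication>
#include <QClipboard>
#include <QString>
#include <QStringList>

static void copyTextToClipboard(const QString &text)
{
    // setText replaces the current clipboard contents in one call.
    QGuiApplication::clipboard()->setText(text);
}

int main(int argc, char *argv[])
{
    QGuiApplication app(argc, argv);
    // Stand-in for the message content the real model would look up by index.
    QStringList messages { "Prompt: hello", "Response: hi there" };
    copyTextToClipboard(messages.value(1));
    return 0;
}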
gpt4all-chat/qml/ChatTextItem.qml (new file, 139 lines)
@@ -0,0 +1,139 @@
+import Qt5Compat.GraphicalEffects
+import QtCore
+import QtQuick
+import QtQuick.Controls
+import QtQuick.Controls.Basic
+import QtQuick.Layouts
+
+import gpt4all
+import mysettings
+import toolenums
+
+TextArea {
+id: myTextArea
+property string textContent: ""
+visible: textContent != ""
+Layout.fillWidth: true
+padding: 0
+color: {
+if (!currentChat.isServer)
+return theme.textColor
+return theme.white
+}
+wrapMode: Text.WordWrap
+textFormat: TextEdit.PlainText
+focus: false
+readOnly: true
+font.pixelSize: theme.fontSizeLarge
+cursorVisible: isCurrentResponse ? currentChat.responseInProgress : false
+cursorPosition: text.length
+TapHandler {
+id: tapHandler
+onTapped: function(eventPoint, button) {
+var clickedPos = myTextArea.positionAt(eventPoint.position.x, eventPoint.position.y);
+var success = textProcessor.tryCopyAtPosition(clickedPos);
+if (success)
+copyCodeMessage.open();
+}
+}
+
+MouseArea {
+id: conversationMouseArea
+anchors.fill: parent
+acceptedButtons: Qt.RightButton
+
+onClicked: (mouse) => {
+if (mouse.button === Qt.RightButton) {
+conversationContextMenu.x = conversationMouseArea.mouseX
+conversationContextMenu.y = conversationMouseArea.mouseY
+conversationContextMenu.open()
+}
+}
+}
+
+onLinkActivated: function(link) {
+if (!isCurrentResponse || !currentChat.responseInProgress)
+Qt.openUrlExternally(link)
+}
+
+onLinkHovered: function (link) {
+if (!isCurrentResponse || !currentChat.responseInProgress)
+statusBar.externalHoveredLink = link
+}
+
+MyMenu {
+id: conversationContextMenu
+MyMenuItem {
+text: qsTr("Copy")
+enabled: myTextArea.selectedText !== ""
+height: enabled ? implicitHeight : 0
+onTriggered: myTextArea.copy()
+}
+MyMenuItem {
+text: qsTr("Copy Message")
+enabled: myTextArea.selectedText === ""
+height: enabled ? implicitHeight : 0
+onTriggered: {
+myTextArea.selectAll()
+myTextArea.copy()
+myTextArea.deselect()
+}
+}
+MyMenuItem {
+text: textProcessor.shouldProcessText ? qsTr("Disable markdown") : qsTr("Enable markdown")
+height: enabled ? implicitHeight : 0
+onTriggered: {
+textProcessor.shouldProcessText = !textProcessor.shouldProcessText;
+textProcessor.setValue(textContent);
+}
+}
+}
+
+ChatViewTextProcessor {
+id: textProcessor
+}
+
+function resetChatViewTextProcessor() {
+textProcessor.fontPixelSize = myTextArea.font.pixelSize
+textProcessor.codeColors.defaultColor = theme.codeDefaultColor
+textProcessor.codeColors.keywordColor = theme.codeKeywordColor
+textProcessor.codeColors.functionColor = theme.codeFunctionColor
+textProcessor.codeColors.functionCallColor = theme.codeFunctionCallColor
+textProcessor.codeColors.commentColor = theme.codeCommentColor
+textProcessor.codeColors.stringColor = theme.codeStringColor
+textProcessor.codeColors.numberColor = theme.codeNumberColor
+textProcessor.codeColors.headerColor = theme.codeHeaderColor
+textProcessor.codeColors.backgroundColor = theme.codeBackgroundColor
+textProcessor.textDocument = textDocument
+textProcessor.setValue(textContent);
+}
+
+property bool textProcessorReady: false
+
+Component.onCompleted: {
+resetChatViewTextProcessor();
+textProcessorReady = true;
+}
+
+Connections {
+target: myTextArea
+function onTextContentChanged() {
+if (myTextArea.textProcessorReady)
+textProcessor.setValue(textContent);
+}
+}
+
+Connections {
+target: MySettings
+function onFontSizeChanged() {
+myTextArea.resetChatViewTextProcessor();
+}
+function onChatThemeChanged() {
+myTextArea.resetChatViewTextProcessor();
+}
+}
+
+Accessible.role: Accessible.Paragraph
+Accessible.name: text
+Accessible.description: name === "Response: " ? "The response by the model" : "The prompt by the user"
+}
@@ -37,10 +37,11 @@ Rectangle {
 
 Connections {
 target: currentChat
-function onResponseInProgressChanged() {
-if (MySettings.networkIsActive && !currentChat.responseInProgress)
-Network.sendConversation(currentChat.id, getConversationJson());
-}
+// FIXME: https://github.com/nomic-ai/gpt4all/issues/3334
+// function onResponseInProgressChanged() {
+// if (MySettings.networkIsActive && !currentChat.responseInProgress)
+// Network.sendConversation(currentChat.id, getConversationJson());
+// }
 function onModelLoadingErrorChanged() {
 if (currentChat.modelLoadingError !== "")
 modelLoadingErrorPopup.open()
@@ -116,42 +117,44 @@ Rectangle {
 }
 }
 
-function getConversation() {
-var conversation = "";
-for (var i = 0; i < chatModel.count; i++) {
-var item = chatModel.get(i)
-var string = item.name;
-var isResponse = item.name === "Response: "
-string += chatModel.get(i).value
-if (isResponse && item.stopped)
-string += " <stopped>"
-string += "\n"
-conversation += string
-}
-return conversation
-}
-
-function getConversationJson() {
-var str = "{\"conversation\": [";
-for (var i = 0; i < chatModel.count; i++) {
-var item = chatModel.get(i)
-var isResponse = item.name === "Response: "
-str += "{\"content\": ";
-str += JSON.stringify(item.value)
-str += ", \"role\": \"" + (isResponse ? "assistant" : "user") + "\"";
-if (isResponse && item.thumbsUpState !== item.thumbsDownState)
-str += ", \"rating\": \"" + (item.thumbsUpState ? "positive" : "negative") + "\"";
-if (isResponse && item.newResponse !== "")
-str += ", \"edited_content\": " + JSON.stringify(item.newResponse);
-if (isResponse && item.stopped)
-str += ", \"stopped\": \"true\""
-if (!isResponse)
-str += "},"
-else
-str += ((i < chatModel.count - 1) ? "}," : "}")
-}
-return str + "]}"
-}
+// FIXME: https://github.com/nomic-ai/gpt4all/issues/3334
+// function getConversation() {
+// var conversation = "";
+// for (var i = 0; i < chatModel.count; i++) {
+// var item = chatModel.get(i)
+// var string = item.name;
+// var isResponse = item.name === "Response: "
+// string += chatModel.get(i).value
+// if (isResponse && item.stopped)
+// string += " <stopped>"
+// string += "\n"
+// conversation += string
+// }
+// return conversation
+// }
+
+// FIXME: https://github.com/nomic-ai/gpt4all/issues/3334
+// function getConversationJson() {
+// var str = "{\"conversation\": [";
+// for (var i = 0; i < chatModel.count; i++) {
+// var item = chatModel.get(i)
+// var isResponse = item.name === "Response: "
+// str += "{\"content\": ";
+// str += JSON.stringify(item.value)
+// str += ", \"role\": \"" + (isResponse ? "assistant" : "user") + "\"";
+// if (isResponse && item.thumbsUpState !== item.thumbsDownState)
+// str += ", \"rating\": \"" + (item.thumbsUpState ? "positive" : "negative") + "\"";
+// if (isResponse && item.newResponse !== "")
+// str += ", \"edited_content\": " + JSON.stringify(item.newResponse);
+// if (isResponse && item.stopped)
+// str += ", \"stopped\": \"true\""
+// if (!isResponse)
+// str += "},"
+// else
+// str += ((i < chatModel.count - 1) ? "}," : "}")
+// }
+// return str + "]}"
+// }
 
 ChatDrawer {
 id: chatDrawer
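The two helpers above are disabled pending the linked issue; note that they built the datalake payload by hand with string concatenation. For reference, a sketch of the same {"conversation": [...]} document built with Qt's JSON classes instead; the Message struct is a hypothetical stand-in for the chat model items, with field names taken from the QML above:

// Sketch: the conversation JSON via QJsonArray/QJsonObject rather than strings.
#include <QByteArray>
#include <QDebug>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QList>
#include <QString>

struct Message {
    QString name;           // "Prompt: " or "Response: " in the model above
    QString value;
    bool thumbsUpState = false;
    bool thumbsDownState = false;
    QString newResponse;
    bool stopped = false;
};

static QByteArray conversationJson(const QList<Message> &items)
{
    QJsonArray conversation;
    for (const Message &item : items) {
        const bool isResponse = item.name == QStringLiteral("Response: ");
        QJsonObject entry;
        entry["content"] = item.value;
        entry["role"] = isResponse ? QStringLiteral("assistant") : QStringLiteral("user");
        if (isResponse && item.thumbsUpState != item.thumbsDownState)
            entry["rating"] = item.thumbsUpState ? QStringLiteral("positive") : QStringLiteral("negative");
        if (isResponse && !item.newResponse.isEmpty())
            entry["edited_content"] = item.newResponse;
        if (isResponse && item.stopped)
            entry["stopped"] = QStringLiteral("true");
        conversation.append(entry);
    }
    return QJsonDocument(QJsonObject{{"conversation", conversation}}).toJson(QJsonDocument::Compact);
}

int main()
{
    qDebug().noquote() << conversationJson({
        { "Prompt: ", "What is 2+2?" },
        { "Response: ", "4", true, false, {}, false },
    });
    return 0;
}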
@@ -824,6 +827,8 @@ Rectangle {
 textInput.forceActiveFocus();
 textInput.cursorPosition = text.length;
 }
+height: visible ? implicitHeight : 0
+visible: name !== "ToolResponse: " && name !== "System: "
 }
 
 remove: Transition {
@@ -930,10 +935,7 @@ Rectangle {
 visible: false
 }
 onClicked: {
-var conversation = getConversation()
-copyEdit.text = conversation
-copyEdit.selectAll()
-copyEdit.copy()
+chatModel.copyToClipboard()
 copyMessage.open()
 }
 ToolTip.visible: copyChatButton.hovered
@@ -1352,9 +1354,10 @@ Rectangle {
 ToolTip.text: Accessible.description
 
 onClicked: {
-var index = Math.max(0, chatModel.count - 1);
-var listElement = chatModel.get(index);
-listElement.stopped = true
+// FIXME: This no longer sets a 'stopped' field so conversations that
+// are copied to clipboard or to datalake don't indicate if the user
+// has prematurely stopped the response. This has been broken since
+// v3.0.0 at least.
 currentChat.stopGenerating()
 }
 }
@@ -60,29 +60,30 @@ ComboBox {
 highlighted: comboBox.highlightedIndex === index
 }
 popup: Popup {
-// FIXME This should be made much nicer to take into account lists that are very long so
-// that it is scrollable and also sized optimally taking into account the x,y and the content
-// width and height as well as the window width and height
 y: comboBox.height - 1
 width: comboBox.width
-implicitHeight: contentItem.implicitHeight + 20
+implicitHeight: Math.min(window.height - y, contentItem.implicitHeight + 20)
 padding: 0
 
 contentItem: Rectangle {
-implicitWidth: myListView.contentWidth
+implicitWidth: comboBox.width
 implicitHeight: myListView.contentHeight
 color: "transparent"
-ListView {
-id: myListView
+radius: 10
+ScrollView {
 anchors.fill: parent
 anchors.margins: 10
 clip: true
+ScrollBar.vertical.policy: ScrollBar.AsNeeded
+ScrollBar.horizontal.policy: ScrollBar.AlwaysOff
+ListView {
+id: myListView
 implicitHeight: contentHeight
 model: comboBox.popup.visible ? comboBox.delegateModel : null
 currentIndex: comboBox.highlightedIndex
 ScrollIndicator.vertical: ScrollIndicator { }
 }
 }
+}
 
 background: Rectangle {
 color: theme.menuBackgroundColor//theme.controlBorder
gpt4all-chat/qml/RemoteModelCard.qml (new file, 221 lines)
@@ -0,0 +1,221 @@
+import QtCore
+import QtQuick
+import QtQuick.Controls
+import QtQuick.Controls.Basic
+import QtQuick.Layouts
+import QtQuick.Dialogs
+import Qt.labs.folderlistmodel
+import Qt5Compat.GraphicalEffects
+
+import llm
+import chatlistmodel
+import download
+import modellist
+import network
+import gpt4all
+import mysettings
+import localdocs
+
+
+Rectangle {
+property alias providerName: providerNameLabel.text
+property alias providerImage: myimage.source
+property alias providerDesc: providerDescLabel.text
+property string providerBaseUrl: ""
+property bool providerIsCustom: false
+property var modelWhitelist: null
+
+color: theme.conversationBackground
+radius: 10
+border.width: 1
+border.color: theme.controlBorder
+implicitHeight: topColumn.height + bottomColumn.height + 33 * theme.fontScale
+
+ColumnLayout {
+id: topColumn
+anchors.left: parent.left
+anchors.right: parent.right
+anchors.top: parent.top
+anchors.margins: 20
+spacing: 15 * theme.fontScale
+RowLayout {
+Layout.alignment: Qt.AlignTop
+spacing: 10
+Item {
+Layout.preferredWidth: 27 * theme.fontScale
+Layout.preferredHeight: 27 * theme.fontScale
+Layout.alignment: Qt.AlignLeft
+
+Image {
+id: myimage
+anchors.centerIn: parent
+sourceSize.width: parent.width
+sourceSize.height: parent.height
+mipmap: true
+fillMode: Image.PreserveAspectFit
+}
+}
+
+Label {
+id: providerNameLabel
+color: theme.textColor
+font.pixelSize: theme.fontSizeBanner
+}
+}
+
+Label {
+id: providerDescLabel
+Layout.fillWidth: true
+wrapMode: Text.Wrap
+color: theme.settingsTitleTextColor
+font.pixelSize: theme.fontSizeLarge
+onLinkActivated: function(link) { Qt.openUrlExternally(link); }
+
+MouseArea {
+anchors.fill: parent
+acceptedButtons: Qt.NoButton // pass clicks to parent
+cursorShape: parent.hoveredLink ? Qt.PointingHandCursor : Qt.ArrowCursor
+}
+}
+}
+
+ColumnLayout {
+id: bottomColumn
+anchors.left: parent.left
+anchors.right: parent.right
+anchors.bottom: parent.bottom
+anchors.margins: 20
+spacing: 30
+
+ColumnLayout {
+MySettingsLabel {
+text: qsTr("API Key")
+font.bold: true
+font.pixelSize: theme.fontSizeLarge
+color: theme.settingsTitleTextColor
+}
+
+MyTextField {
+id: apiKeyField
+Layout.fillWidth: true
+font.pixelSize: theme.fontSizeLarge
+wrapMode: Text.WrapAnywhere
+function showError() {
+messageToast.show(qsTr("ERROR: $API_KEY is empty."));
+apiKeyField.placeholderTextColor = theme.textErrorColor;
+}
+onTextChanged: {
+apiKeyField.placeholderTextColor = theme.mutedTextColor;
+if (!providerIsCustom) {
+let models = ModelList.remoteModelList(apiKeyField.text, providerBaseUrl);
+if (modelWhitelist !== null)
+models = models.filter(m => modelWhitelist.includes(m));
+myModelList.model = models;
+myModelList.currentIndex = -1;
+}
+}
+placeholderText: qsTr("enter $API_KEY")
+Accessible.role: Accessible.EditableText
+Accessible.name: placeholderText
+Accessible.description: qsTr("Whether the file hash is being calculated")
+}
+}
+
+ColumnLayout {
+visible: providerIsCustom
+MySettingsLabel {
+text: qsTr("Base Url")
+font.bold: true
+font.pixelSize: theme.fontSizeLarge
+color: theme.settingsTitleTextColor
+}
+MyTextField {
+id: baseUrlField
+Layout.fillWidth: true
+font.pixelSize: theme.fontSizeLarge
+wrapMode: Text.WrapAnywhere
+function showError() {
+messageToast.show(qsTr("ERROR: $BASE_URL is empty."));
+baseUrlField.placeholderTextColor = theme.textErrorColor;
+}
+onTextChanged: {
+baseUrlField.placeholderTextColor = theme.mutedTextColor;
+}
+placeholderText: qsTr("enter $BASE_URL")
+Accessible.role: Accessible.EditableText
+Accessible.name: placeholderText
+}
+}
+ColumnLayout {
+visible: providerIsCustom
+MySettingsLabel {
+text: qsTr("Model Name")
+font.bold: true
+font.pixelSize: theme.fontSizeLarge
+color: theme.settingsTitleTextColor
+}
+MyTextField {
+id: modelNameField
+Layout.fillWidth: true
+font.pixelSize: theme.fontSizeLarge
+wrapMode: Text.WrapAnywhere
+function showError() {
+messageToast.show(qsTr("ERROR: $MODEL_NAME is empty."))
+modelNameField.placeholderTextColor = theme.textErrorColor;
+}
+onTextChanged: {
+modelNameField.placeholderTextColor = theme.mutedTextColor;
+}
+placeholderText: qsTr("enter $MODEL_NAME")
+Accessible.role: Accessible.EditableText
+Accessible.name: placeholderText
+}
+}
+
+ColumnLayout {
+visible: myModelList.count > 0 && !providerIsCustom
+
+MySettingsLabel {
+text: qsTr("Models")
+font.bold: true
+font.pixelSize: theme.fontSizeLarge
+color: theme.settingsTitleTextColor
+}
+
+RowLayout {
+spacing: 10
+
+MyComboBox {
+Layout.fillWidth: true
+id: myModelList
+currentIndex: -1;
+}
+}
+}
+
+MySettingsButton {
+id: installButton
+Layout.alignment: Qt.AlignRight
+text: qsTr("Install")
+font.pixelSize: theme.fontSizeLarge
+
+property string apiKeyText: apiKeyField.text.trim()
+property string baseUrlText: providerIsCustom ? baseUrlField.text.trim() : providerBaseUrl.trim()
+property string modelNameText: providerIsCustom ? modelNameField.text.trim() : myModelList.currentText.trim()
+
+enabled: apiKeyText !== "" && baseUrlText !== "" && modelNameText !== ""
+
+onClicked: {
+Download.installCompatibleModel(
+modelNameText,
+apiKeyText,
+baseUrlText,
+);
+myModelList.currentIndex = -1;
+}
+Accessible.role: Accessible.Button
+Accessible.name: qsTr("Install")
+Accessible.description: qsTr("Install remote model")
+}
+}
+}
@@ -3,19 +3,30 @@
 #include "chatlistmodel.h"
 #include "network.h"
 #include "server.h"
+#include "tool.h"
+#include "toolcallparser.h"
+#include "toolmodel.h"
 
-#include <QBuffer>
+#include <QByteArray>
 #include <QDataStream>
 #include <QDebug>
+#include <QFile>
+#include <QFileInfo>
+#include <QIODevice>
 #include <QLatin1String>
 #include <QMap>
+#include <QRegularExpression>
 #include <QString>
-#include <QVariant>
 #include <Qt>
+#include <QtAssert>
 #include <QtLogging>
 
+#include <optional>
 #include <utility>
 
+using namespace ToolEnums;
+
 
 Chat::Chat(QObject *parent)
 : QObject(parent)
 , m_id(Network::globalInstance()->generateUniqueId())
@@ -54,7 +65,6 @@ void Chat::connectLLM()
 // Should be in different threads
 connect(m_llmodel, &ChatLLM::modelLoadingPercentageChanged, this, &Chat::handleModelLoadingPercentageChanged, Qt::QueuedConnection);
 connect(m_llmodel, &ChatLLM::responseChanged, this, &Chat::handleResponseChanged, Qt::QueuedConnection);
-connect(m_llmodel, &ChatLLM::responseFailed, this, &Chat::handleResponseFailed, Qt::QueuedConnection);
 connect(m_llmodel, &ChatLLM::promptProcessing, this, &Chat::promptProcessing, Qt::QueuedConnection);
 connect(m_llmodel, &ChatLLM::generatingQuestions, this, &Chat::generatingQuestions, Qt::QueuedConnection);
 connect(m_llmodel, &ChatLLM::responseStopped, this, &Chat::responseStopped, Qt::QueuedConnection);
@@ -173,6 +183,11 @@ QVariant Chat::popPrompt(int index)
 
 void Chat::stopGenerating()
 {
+// In future if we have more than one tool we'll have to keep track of which tools are possibly
+// running, but for now we only have one
+Tool *toolInstance = ToolModel::globalInstance()->get(ToolCallConstants::CodeInterpreterFunction);
+Q_ASSERT(toolInstance);
+toolInstance->interrupt();
 m_llmodel->stopGenerating();
 }
 
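stopGenerating() now interrupts the running tool before asking the model to stop, so a tool that is mid-run observes the request and returns instead of blocking shutdown. A minimal sketch of that flag-based interrupt pattern, with a hypothetical InterruptibleTool standing in for the project's Tool class; only std::atomic is assumed from the standard library:

// Hypothetical sketch: interrupt the tool first, then the generation loop
// observes the flag and exits, mirroring the ordering in Chat::stopGenerating().
#include <atomic>
#include <cstdio>

struct InterruptibleTool {
    std::atomic<bool> interrupted{false};
    void interrupt() { interrupted.store(true); }  // called from the UI thread
    void run() {
        for (int step = 0; step < 1000; ++step) {
            if (interrupted.load())                // checked inside the work loop
                return;
            // ... one unit of tool work ...
        }
    }
};

int main()
{
    InterruptibleTool tool;
    tool.interrupt();  // request the stop before (or while) the tool runs
    tool.run();        // the loop sees the flag on its first iteration
    std::puts("stopped");
    return 0;
}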
@@ -181,23 +196,12 @@ Chat::ResponseState Chat::responseState() const
 return m_responseState;
 }
 
-void Chat::handleResponseChanged(const QString &response)
+void Chat::handleResponseChanged()
 {
 if (m_responseState != Chat::ResponseGeneration) {
 m_responseState = Chat::ResponseGeneration;
 emit responseStateChanged();
 }
-
-const int index = m_chatModel->count() - 1;
-m_chatModel->updateValue(index, response);
-}
-
-void Chat::handleResponseFailed(const QString &error)
-{
-const int index = m_chatModel->count() - 1;
-m_chatModel->updateValue(index, error);
-m_chatModel->setError();
-responseStopped(0);
 }
 
 void Chat::handleModelLoadingPercentageChanged(float loadingPercentage)
@@ -242,14 +246,74 @@ void Chat::responseStopped(qint64 promptResponseMs)
 m_responseState = Chat::ResponseStopped;
 emit responseInProgressChanged();
 emit responseStateChanged();
-if (m_generatedName.isEmpty())
-emit generateNameRequested();
 
-Network::globalInstance()->trackChatEvent("response_complete", {
+const QString possibleToolcall = m_chatModel->possibleToolcall();
+
+Network::globalInstance()->trackChatEvent("response_stopped", {
 {"first", m_firstResponse},
 {"message_count", chatModel()->count()},
 {"$duration", promptResponseMs / 1000.},
 });
+
+ToolCallParser parser;
+parser.update(possibleToolcall.toUtf8());
+if (parser.state() == ToolEnums::ParseState::Complete && parser.startTag() != ToolCallConstants::ThinkStartTag)
+processToolCall(parser.toolCall());
+else
+responseComplete();
+}
+
+void Chat::processToolCall(const QString &toolCall)
+{
+m_responseState = Chat::ToolCallGeneration;
+emit responseStateChanged();
+// Regex to remove the formatting around the code
+static const QRegularExpression regex("^\\s*```javascript\\s*|\\s*```\\s*$");
+QString code = toolCall;
+code.remove(regex);
+code = code.trimmed();
+
+// Right now the code interpreter is the only available tool
+Tool *toolInstance = ToolModel::globalInstance()->get(ToolCallConstants::CodeInterpreterFunction);
+Q_ASSERT(toolInstance);
+connect(toolInstance, &Tool::runComplete, this, &Chat::toolCallComplete, Qt::SingleShotConnection);
+
+// The param is the code
+const ToolParam param = { "code", ToolEnums::ParamType::String, code };
+m_responseInProgress = true;
+emit responseInProgressChanged();
+toolInstance->run({param});
+}
+
+void Chat::toolCallComplete(const ToolCallInfo &info)
+{
+// Update the current response with meta information about toolcall and re-parent
+m_chatModel->updateToolCall(info);
+
+++m_consecutiveToolCalls;
+
+m_responseInProgress = false;
+emit responseInProgressChanged();
+
+// We limit the number of consecutive toolcalls otherwise we get into a potentially endless loop
+if (m_consecutiveToolCalls < 3 || info.error == ToolEnums::Error::NoError) {
+resetResponseState();
+emit promptRequested(m_collections); // triggers a new response
+return;
+}
+
+responseComplete();
+}
+
+void Chat::responseComplete()
+{
+if (m_generatedName.isEmpty())
+emit generateNameRequested();
+
+m_responseState = Chat::ResponseStopped;
+emit responseStateChanged();
+
+m_consecutiveToolCalls = 0;
 m_firstResponse = false;
 }
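processToolCall() strips the ```javascript fences the model wraps around generated code before handing it to the interpreter. A standalone check of just that step, reusing the exact QRegularExpression pattern from the hunk above (QRegularExpression and QString::remove are real Qt API):

// The leading ```javascript fence and the trailing ``` fence are removed,
// leaving only the raw code for the interpreter.
#include <QDebug>
#include <QRegularExpression>
#include <QString>

int main()
{
    static const QRegularExpression regex("^\\s*```javascript\\s*|\\s*```\\s*$");
    QString code = QStringLiteral("```javascript\nconsole.log(6 * 7);\n```");
    code.remove(regex);
    qDebug().noquote() << code.trimmed();  // prints: console.log(6 * 7);
    return 0;
}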
@@ -319,11 +383,8 @@ void Chat::trySwitchContextOfLoadedModel()
 
 void Chat::generatedNameChanged(const QString &name)
 {
-// Only use the first three words maximum and remove newlines and extra spaces
-m_generatedName = name.simplified();
-QStringList words = m_generatedName.split(' ', Qt::SkipEmptyParts);
-int wordCount = qMin(7, words.size());
-m_name = words.mid(0, wordCount).join(' ');
+m_generatedName = name;
+m_name = name;
 emit nameChanged();
 m_needsSave = true;
 }
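For reference, the truncation logic removed above as a standalone helper: it collapses whitespace with simplified() and keeps at most seven words (despite the old comment saying three). The new code stores the generated name verbatim:

// The removed behavior: normalize whitespace, then cap the name at seven words.
#include <QDebug>
#include <QString>
#include <QStringList>
#include <QtGlobal>

static QString truncatedChatName(const QString &name)
{
    const QString simplified = name.simplified();
    const QStringList words = simplified.split(' ', Qt::SkipEmptyParts);
    const int wordCount = qMin(7, words.size());
    return words.mid(0, wordCount).join(' ');
}

int main()
{
    qDebug().noquote() << truncatedChatName("A  very\nlong generated chat name that keeps going");
    // prints: A very long generated chat name that
    return 0;
}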
@@ -3,21 +3,26 @@
 
 #include "chatllm.h"
 #include "chatmodel.h"
-#include "database.h" // IWYU pragma: keep
-#include "localdocsmodel.h" // IWYU pragma: keep
+#include "database.h"
+#include "localdocsmodel.h"
 #include "modellist.h"
+#include "tool.h"
 
 #include <QDateTime>
 #include <QList>
 #include <QObject>
-#include <QQmlEngine>
+#include <QQmlEngine> // IWYU pragma: keep
 #include <QString>
 #include <QStringList> // IWYU pragma: keep
-#include <QStringView>
-#include <QtGlobal>
+#include <QUrl>
+#include <QVariant>
+#include <QtTypes>
+
+// IWYU pragma: no_forward_declare LocalDocsCollectionsModel
+// IWYU pragma: no_forward_declare ToolCallInfo
 class QDataStream;
 
 
 class Chat : public QObject
 {
 Q_OBJECT
@@ -55,7 +60,8 @@ public:
 LocalDocsProcessing,
 PromptProcessing,
 GeneratingQuestions,
-ResponseGeneration
+ResponseGeneration,
+ToolCallGeneration
 };
 Q_ENUM(ResponseState)
 
@@ -161,12 +167,14 @@ Q_SIGNALS:
 void generatedQuestionsChanged();
 
 private Q_SLOTS:
-void handleResponseChanged(const QString &response);
-void handleResponseFailed(const QString &error);
+void handleResponseChanged();
 void handleModelLoadingPercentageChanged(float);
 void promptProcessing();
 void generatingQuestions();
 void responseStopped(qint64 promptResponseMs);
+void processToolCall(const QString &toolCall);
+void toolCallComplete(const ToolCallInfo &info);
+void responseComplete();
 void generatedNameChanged(const QString &name);
 void generatedQuestionFinished(const QString &question);
 void handleModelLoadingError(const QString &error);
@ -205,6 +213,7 @@ private:
|
|||||||
// - The chat was freshly created during this launch.
|
// - The chat was freshly created during this launch.
|
||||||
// - The chat was changed after loading it from disk.
|
// - The chat was changed after loading it from disk.
|
||||||
bool m_needsSave = true;
|
bool m_needsSave = true;
|
||||||
|
int m_consecutiveToolCalls = 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif // CHAT_H
|
#endif // CHAT_H
|
||||||
|
@@ -2,6 +2,9 @@

 #include "utils.h"

+#include <fmt/format.h>
+
+#include <QAnyStringView>
 #include <QCoreApplication>
 #include <QDebug>
 #include <QGuiApplication>
@@ -9,15 +12,17 @@
 #include <QJsonDocument>
 #include <QJsonObject>
 #include <QJsonValue>
+#include <QLatin1String>
 #include <QNetworkAccessManager>
 #include <QNetworkRequest>
+#include <QStringView>
 #include <QThread>
 #include <QUrl>
-#include <QUtf8StringView>
+#include <QUtf8StringView> // IWYU pragma: keep
 #include <QVariant>
 #include <QXmlStreamReader>
 #include <Qt>
-#include <QtGlobal>
+#include <QtAssert>
 #include <QtLogging>

 #include <expected>
@@ -29,6 +34,7 @@ using namespace Qt::Literals::StringLiterals;

 //#define DEBUG

+
 ChatAPI::ChatAPI()
     : QObject(nullptr)
     , m_modelName("gpt-3.5-turbo")
@@ -3,10 +3,11 @@

 #include <gpt4all-backend/llmodel.h>

-#include <QByteArray> // IWYU pragma: keep
+#include <QByteArray>
 #include <QNetworkReply>
 #include <QObject>
 #include <QString>
+#include <QtPreprocessorSupport>

 #include <cstddef>
 #include <cstdint>
@@ -17,9 +18,11 @@
 #include <unordered_map>
 #include <vector>

+// IWYU pragma: no_forward_declare QByteArray
+class ChatAPI;
 class QNetworkAccessManager;

-class ChatAPI;
 class ChatAPIWorker : public QObject {
     Q_OBJECT
 public:
@@ -1,26 +1,27 @@
 #include "chatlistmodel.h"

-#include "database.h" // IWYU pragma: keep
 #include "mysettings.h"

+#include <QCoreApplication>
 #include <QDataStream>
 #include <QDir>
 #include <QElapsedTimer>
+#include <QEvent>
 #include <QFile>
 #include <QFileInfo>
 #include <QGlobalStatic>
 #include <QGuiApplication>
 #include <QIODevice>
 #include <QSettings>
-#include <QString>
-#include <QStringList>
+#include <QStringList> // IWYU pragma: keep
 #include <Qt>
+#include <QtTypes>

 #include <algorithm>
-#include <memory>

 static constexpr quint32 CHAT_FORMAT_MAGIC = 0xF5D553CC;
-static constexpr qint32 CHAT_FORMAT_VERSION = 11;
+static constexpr qint32 CHAT_FORMAT_VERSION = 12;

 class MyChatListModel: public ChatListModel { };
 Q_GLOBAL_STATIC(MyChatListModel, chatListModelInstance)
@@ -52,9 +53,11 @@ void ChatListModel::loadChats()
     connect(thread, &ChatsRestoreThread::finished, thread, &QObject::deleteLater);
     thread->start();

-    ChatSaver *saver = new ChatSaver;
-    connect(this, &ChatListModel::requestSaveChats, saver, &ChatSaver::saveChats, Qt::QueuedConnection);
-    connect(saver, &ChatSaver::saveChatsFinished, this, &ChatListModel::saveChatsFinished, Qt::QueuedConnection);
+    m_chatSaver = std::make_unique<ChatSaver>();
+    connect(this, &ChatListModel::requestSaveChats, m_chatSaver.get(), &ChatSaver::saveChats, Qt::QueuedConnection);
+    connect(m_chatSaver.get(), &ChatSaver::saveChatsFinished, this, &ChatListModel::saveChatsFinished, Qt::QueuedConnection);
+    // save chats on application quit
+    connect(QCoreApplication::instance(), &QCoreApplication::aboutToQuit, this, &ChatListModel::saveChatsSync);

     connect(MySettings::globalInstance(), &MySettings::serverChatChanged, this, &ChatListModel::handleServerEnabledChanged);
 }
@@ -78,16 +81,24 @@ ChatSaver::ChatSaver()
     m_thread.start();
 }

+ChatSaver::~ChatSaver()
+{
+    m_thread.quit();
+    m_thread.wait();
+}
+
+QVector<Chat *> ChatListModel::getChatsToSave() const
+{
+    QVector<Chat *> toSave;
+    for (auto *chat : m_chats)
+        if (chat != m_serverChat && !chat->isNewChat())
+            toSave << chat;
+    return toSave;
+}
+
 void ChatListModel::saveChats()
 {
-    QVector<Chat*> toSave;
-    for (Chat *chat : m_chats) {
-        if (chat == m_serverChat)
-            continue;
-        if (chat->isNewChat())
-            continue;
-        toSave.append(chat);
-    }
+    auto toSave = getChatsToSave();
     if (toSave.isEmpty()) {
         emit saveChatsFinished();
         return;
@@ -96,8 +107,24 @@ void ChatListModel::saveChats()
     emit requestSaveChats(toSave);
 }

+void ChatListModel::saveChatsForQuit()
+{
+    saveChats();
+    m_startedFinalSave = true;
+}
+
+void ChatListModel::saveChatsSync()
+{
+    auto toSave = getChatsToSave();
+    if (!m_startedFinalSave && !toSave.isEmpty())
+        m_chatSaver->saveChats(toSave);
+}
+
 void ChatSaver::saveChats(const QVector<Chat *> &chats)
 {
+    // we can be called from the main thread instead of a worker thread at quit time, so take a lock
+    QMutexLocker locker(&m_mutex);
+
     QElapsedTimer timer;
     timer.start();
     const QString savePath = MySettings::globalInstance()->modelPath();
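Editor's note: the hunk above introduces a save path that must work without an event loop. QCoreApplication::aboutToQuit fires while the loop is winding down, so saveChatsSync() calls the worker's slot directly from the main thread, while queued invocations of the same slot may still run on the worker thread; the new QMutex serializes the two. A minimal sketch of that pattern, assuming an illustrative Saver class and save() method that are not from this diff:

    #include <QCoreApplication>
    #include <QMutex>
    #include <QMutexLocker>
    #include <QObject>
    #include <QThread>

    // Worker whose method normally runs on its own thread via queued
    // connections, but which can also be called directly at shutdown.
    class Saver : public QObject {
    public:
        Saver() { moveToThread(&m_thread); m_thread.start(); }
        ~Saver() override { m_thread.quit(); m_thread.wait(); }

        void save()
        {
            // A queued invocation on m_thread and a direct quit-time call
            // from the main thread must not write the same files at once.
            QMutexLocker locker(&m_mutex);
            // ... serialize state to disk here ...
        }

    private:
        QThread m_thread;
        QMutex  m_mutex;
    };

    // Wiring, mirroring loadChats() above: queued saves during normal
    // operation, plus a synchronous last-chance save at quit.
    // connect(app, &QCoreApplication::aboutToQuit, [&] { saver.save(); });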
@@ -7,16 +7,23 @@

 #include <QAbstractListModel>
 #include <QByteArray>
+#include <QDate>
 #include <QDebug>
 #include <QHash>
 #include <QList>
+#include <QMutex>
 #include <QObject>
+#include <QString>
 #include <QThread>
 #include <QVariant>
-#include <QVector>
+#include <QVector> // IWYU pragma: keep
 #include <Qt>
-#include <QtGlobal>
+#include <QtAssert>
 #include <QtLogging>
+#include <QtPreprocessorSupport>
+
+#include <memory>


 class ChatsRestoreThread : public QThread
 {
@@ -33,6 +40,7 @@ class ChatSaver : public QObject
     Q_OBJECT
 public:
     explicit ChatSaver();
+    ~ChatSaver() override;

 Q_SIGNALS:
     void saveChatsFinished();
@@ -42,6 +50,7 @@ public Q_SLOTS:

 private:
     QThread m_thread;
+    QMutex m_mutex;
 };

 class ChatListModel : public QAbstractListModel
@@ -228,6 +237,7 @@ public:

     void removeChatFile(Chat *chat) const;
     Q_INVOKABLE void saveChats();
+    Q_INVOKABLE void saveChatsForQuit();
     void restoreChat(Chat *chat);
     void chatsRestoredFinished();

@@ -244,6 +254,9 @@ protected:
     bool eventFilter(QObject *obj, QEvent *ev) override;

 private Q_SLOTS:
+    // Used with QCoreApplication::aboutToQuit. Does not require an event loop.
+    void saveChatsSync();
+
     void newChatCountChanged()
     {
         Q_ASSERT(m_newChat && m_newChat->chatModel()->count());
@@ -274,11 +287,16 @@ private Q_SLOTS:
         }
     }

+private:
+    QVector<Chat *> getChatsToSave() const;
+
 private:
     Chat* m_newChat = nullptr;
     Chat* m_serverChat = nullptr;
     Chat* m_currentChat = nullptr;
     QList<Chat*> m_chats;
+    std::unique_ptr<ChatSaver> m_chatSaver;
+    bool m_startedFinalSave = false;

 private:
     explicit ChatListModel();
@@ -7,40 +7,48 @@
 #include "localdocs.h"
 #include "mysettings.h"
 #include "network.h"
+#include "tool.h"
+#include "toolmodel.h"
+#include "toolcallparser.h"

 #include <fmt/format.h>
+#include <minja/minja.hpp>
+#include <nlohmann/json.hpp>

-#include <jinja2cpp/error_info.h>
-#include <jinja2cpp/template.h>
-#include <jinja2cpp/template_env.h>
-#include <jinja2cpp/user_callable.h>
-#include <jinja2cpp/value.h>
-
+#include <QChar>
 #include <QDataStream>
 #include <QDebug>
 #include <QFile>
 #include <QGlobalStatic>
-#include <QIODevice>
+#include <QIODevice> // IWYU pragma: keep
 #include <QJsonDocument>
 #include <QJsonObject>
 #include <QJsonValue>
 #include <QMap>
-#include <QMutex>
+#include <QMutex> // IWYU pragma: keep
 #include <QMutexLocker> // IWYU pragma: keep
-#include <QRegularExpression>
-#include <QRegularExpressionMatch>
+#include <QRegularExpression> // IWYU pragma: keep
+#include <QRegularExpressionMatch> // IWYU pragma: keep
 #include <QSet>
+#include <QStringView>
+#include <QTextStream>
 #include <QUrl>
+#include <QVariant>
 #include <QWaitCondition>
 #include <Qt>
+#include <QtAssert>
 #include <QtLogging>
+#include <QtTypes> // IWYU pragma: keep

 #include <algorithm>
 #include <chrono>
 #include <cmath>
+#include <concepts>
 #include <cstddef>
+#include <cstdint>
 #include <ctime>
 #include <exception>
+#include <functional>
 #include <iomanip>
 #include <limits>
 #include <optional>
@@ -55,45 +63,106 @@
 #include <vector>

 using namespace Qt::Literals::StringLiterals;
+using namespace ToolEnums;
 namespace ranges = std::ranges;
+using json = nlohmann::ordered_json;

 //#define DEBUG
 //#define DEBUG_MODEL_LOADING

 // NOTE: not threadsafe
-static jinja2::TemplateEnv *jinjaEnv()
+static const std::shared_ptr<minja::Context> &jinjaEnv()
 {
-    static std::optional<jinja2::TemplateEnv> environment;
+    static std::shared_ptr<minja::Context> environment;
     if (!environment) {
-        auto &env = environment.emplace();
-        auto &settings = env.GetSettings();
-        settings.trimBlocks = true;
-        settings.lstripBlocks = true;
-        env.AddGlobal("raise_exception", jinja2::UserCallable(
-            /*callable*/ [](auto &params) -> jinja2::Value {
-                auto messageArg = params.args.find("message");
-                if (messageArg == params.args.end() || !messageArg->second.isString())
-                    throw std::runtime_error("'message' argument to raise_exception() must be a string");
-                throw std::runtime_error(fmt::format("Jinja template error: {}", messageArg->second.asString()));
-            },
-            /*argsInfo*/ { jinja2::ArgInfo("message", /*isMandatory*/ true) }
-        ));
-        env.AddGlobal("strftime_now", jinja2::UserCallable(
-            /*callable*/ [](auto &params) -> jinja2::Value {
+        environment = minja::Context::builtins();
+        environment->set("strftime_now", minja::simple_function(
+            "strftime_now", { "format" },
+            [](const std::shared_ptr<minja::Context> &, minja::Value &args) -> minja::Value {
+                auto format = args.at("format").get<std::string>();
                 using Clock = std::chrono::system_clock;
-                auto formatArg = params.args.find("format");
-                if (formatArg == params.args.end() || !formatArg->second.isString())
-                    throw std::runtime_error("'format' argument to strftime_now() must be a string");
                 time_t nowUnix = Clock::to_time_t(Clock::now());
                 auto localDate = *std::localtime(&nowUnix);
                 std::ostringstream ss;
-                ss << std::put_time(&localDate, formatArg->second.asString().c_str());
+                ss << std::put_time(&localDate, format.c_str());
                 return ss.str();
-            },
-            /*argsInfo*/ { jinja2::ArgInfo("format", /*isMandatory*/ true) }
+            }
+        ));
+        environment->set("regex_replace", minja::simple_function(
+            "regex_replace", { "str", "pattern", "repl" },
+            [](const std::shared_ptr<minja::Context> &, minja::Value &args) -> minja::Value {
+                auto str     = args.at("str"    ).get<std::string>();
+                auto pattern = args.at("pattern").get<std::string>();
+                auto repl    = args.at("repl"   ).get<std::string>();
+                return std::regex_replace(str, std::regex(pattern), repl);
+            }
         ));
     }
-    return &*environment;
+    return environment;
+}
+
+class BaseResponseHandler {
+public:
+    virtual void onSplitIntoTwo  (const QString &startTag, const QString &firstBuffer, const QString &secondBuffer) = 0;
+    virtual void onSplitIntoThree(const QString &secondBuffer, const QString &thirdBuffer) = 0;
+    // "old-style" responses, with all of the implementation details left in
+    virtual void onOldResponseChunk(const QByteArray &chunk) = 0;
+    // notify of a "new-style" response that has been cleaned of tool calling
+    virtual bool onBufferResponse  (const QString &response, int bufferIdx) = 0;
+    // notify of a "new-style" response, no tool calling applicable
+    virtual bool onRegularResponse () = 0;
+    virtual bool getStopGenerating () const = 0;
+};
+
+static auto promptModelWithTools(
+    LLModel *model, const LLModel::PromptCallback &promptCallback, BaseResponseHandler &respHandler,
+    const LLModel::PromptContext &ctx, const QByteArray &prompt, const QStringList &toolNames
+) -> std::pair<QStringList, bool>
+{
+    ToolCallParser toolCallParser(toolNames);
+    auto handleResponse = [&toolCallParser, &respHandler](LLModel::Token token, std::string_view piece) -> bool {
+        Q_UNUSED(token)
+
+        toolCallParser.update(piece.data());
+
+        // Split the response into two if needed
+        if (toolCallParser.numberOfBuffers() < 2 && toolCallParser.splitIfPossible()) {
+            const auto parseBuffers = toolCallParser.buffers();
+            Q_ASSERT(parseBuffers.size() == 2);
+            respHandler.onSplitIntoTwo(toolCallParser.startTag(), parseBuffers.at(0), parseBuffers.at(1));
+        }
+
+        // Split the response into three if needed
+        if (toolCallParser.numberOfBuffers() < 3 && toolCallParser.startTag() == ToolCallConstants::ThinkStartTag
+            && toolCallParser.splitIfPossible()) {
+            const auto parseBuffers = toolCallParser.buffers();
+            Q_ASSERT(parseBuffers.size() == 3);
+            respHandler.onSplitIntoThree(parseBuffers.at(1), parseBuffers.at(2));
+        }
+
+        respHandler.onOldResponseChunk(QByteArray::fromRawData(piece.data(), piece.size()));
+
+        bool ok;
+        const auto parseBuffers = toolCallParser.buffers();
+        if (parseBuffers.size() > 1) {
+            ok = respHandler.onBufferResponse(parseBuffers.last(), parseBuffers.size() - 1);
+        } else {
+            ok = respHandler.onRegularResponse();
+        }
+        if (!ok)
+            return false;
+
+        const bool shouldExecuteToolCall = toolCallParser.state() == ToolEnums::ParseState::Complete
+            && toolCallParser.startTag() != ToolCallConstants::ThinkStartTag;
+
+        return !shouldExecuteToolCall && !respHandler.getStopGenerating();
+    };
+    model->prompt(std::string_view(prompt), promptCallback, handleResponse, ctx);
+
+    const bool shouldExecuteToolCall = toolCallParser.state() == ToolEnums::ParseState::Complete
+        && toolCallParser.startTag() != ToolCallConstants::ThinkStartTag;
+
+    return { toolCallParser.buffers(), shouldExecuteToolCall };
 }

 class LLModelStore {
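Editor's note: for readers new to minja, the replacement engine is driven exactly as the call sites in this diff show — minja::Parser::parse compiles a template, minja::Context::builtins() plus set(...) supplies globals, and minja::Context::make layers per-render variables on top. A self-contained sketch using only those calls; the template text and values are illustrative, not from the repository, and the m['role'] subscript syntax is the usual chat-template convention rather than something this diff demonstrates:

    #include <minja/minja.hpp>
    #include <nlohmann/json.hpp>

    #include <iostream>
    #include <string>
    #include <utility>

    using json = nlohmann::ordered_json;

    int main()
    {
        // Same parse options the diff uses for chat templates.
        auto tmpl = minja::Parser::parse(
            "{% for m in messages %}{{ m['role'] }}: {{ m['content'] }}\n{% endfor %}",
            { .trim_blocks = true, .lstrip_blocks = true, .keep_trailing_newline = false });

        // Per-render variables, layered over the shared builtins context.
        json::object_t params {
            { "messages", json::array({
                json { { "role", "user" }, { "content", "hello" } },
            }) },
        };
        auto context = minja::Context::make(minja::Value(std::move(params)),
                                            minja::Context::builtins());
        std::cout << tmpl->render(context) << '\n'; // prints "user: hello"
    }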
@@ -643,40 +712,16 @@ bool isAllSpace(R &&r)
 void ChatLLM::regenerateResponse(int index)
 {
     Q_ASSERT(m_chatModel);
-    int promptIdx;
-    {
-        auto items = m_chatModel->chatItems(); // holds lock
-        if (index < 1 || index >= items.size() || items[index].type() != ChatItem::Type::Response)
-            return;
-        promptIdx = m_chatModel->getPeerUnlocked(index).value_or(-1);
-    }
-
-    emit responseChanged({});
-    m_chatModel->truncate(index + 1);
-    m_chatModel->updateCurrentResponse(index, true );
-    m_chatModel->updateNewResponse    (index, {}   );
-    m_chatModel->updateStopped        (index, false);
-    m_chatModel->updateThumbsUpState  (index, false);
-    m_chatModel->updateThumbsDownState(index, false);
-    m_chatModel->setError(false);
-    if (promptIdx >= 0)
-        m_chatModel->updateSources(promptIdx, {});
-
-    prompt(m_chat->collectionList());
+    if (m_chatModel->regenerateResponse(index)) {
+        emit responseChanged();
+        prompt(m_chat->collectionList());
+    }
 }

 std::optional<QString> ChatLLM::popPrompt(int index)
 {
     Q_ASSERT(m_chatModel);
-    QString content;
-    {
-        auto items = m_chatModel->chatItems(); // holds lock
-        if (index < 0 || index >= items.size() || items[index].type() != ChatItem::Type::Prompt)
-            return std::nullopt;
-        content = items[index].value;
-    }
-    m_chatModel->truncate(index);
-    return content;
+    return m_chatModel->popPrompt(index);
 }

 ModelInfo ChatLLM::modelInfo() const
@@ -737,28 +782,29 @@ void ChatLLM::prompt(const QStringList &enabledCollections)
         promptInternalChat(enabledCollections, promptContextFromSettings(m_modelInfo));
     } catch (const std::exception &e) {
         // FIXME(jared): this is neither translated nor serialized
-        emit responseFailed(u"Error: %1"_s.arg(QString::fromUtf8(e.what())));
+        m_chatModel->setResponseValue(u"Error: %1"_s.arg(QString::fromUtf8(e.what())));
+        m_chatModel->setError();
         emit responseStopped(0);
     }
 }

-// FIXME(jared): We can avoid this potentially expensive copy if we use ChatItem pointers, but this is only safe if we
-// hold the lock while generating. We can't do that now because Chat is actually in charge of updating the response, not
-// ChatLLM.
-std::vector<ChatItem> ChatLLM::forkConversation(const QString &prompt) const
+std::vector<MessageItem> ChatLLM::forkConversation(const QString &prompt) const
 {
     Q_ASSERT(m_chatModel);
     if (m_chatModel->hasError())
         throw std::logic_error("cannot continue conversation with an error");

-    std::vector<ChatItem> conversation;
+    std::vector<MessageItem> conversation;
     {
-        auto items = m_chatModel->chatItems(); // holds lock
-        Q_ASSERT(items.size() >= 2); // should be prompt/response pairs
+        auto items = m_chatModel->messageItems();
+        // It is possible the main thread could have erased the conversation while the llm thread
+        // is busy forking the conversation, but it must have set stop generating first
+        Q_ASSERT(items.size() >= 2 || m_stopGenerating); // should be prompt/response pairs
         conversation.reserve(items.size() + 1);
         conversation.assign(items.begin(), items.end());
     }
-    conversation.emplace_back(ChatItem::prompt_tag, prompt);
+    qsizetype nextIndex = conversation.empty() ? 0 : conversation.back().index().value() + 1;
+    conversation.emplace_back(nextIndex, MessageItem::Type::Prompt, prompt.toUtf8());
     return conversation;
 }
@@ -777,23 +823,22 @@ static uint parseJinjaTemplateVersion(QStringView tmpl)
     return 0;
 }

-static auto loadJinjaTemplate(
-    std::optional<jinja2::Template> &tmpl /*out*/, const std::string &source
-) -> jinja2::Result<void>
+static std::shared_ptr<minja::TemplateNode> loadJinjaTemplate(const std::string &source)
 {
-    tmpl.emplace(jinjaEnv());
-    return tmpl->Load(source);
+    return minja::Parser::parse(source, { .trim_blocks = true, .lstrip_blocks = true, .keep_trailing_newline = false });
 }

 std::optional<std::string> ChatLLM::checkJinjaTemplateError(const std::string &source)
 {
-    std::optional<jinja2::Template> tmpl;
-    if (auto res = loadJinjaTemplate(tmpl, source); !res)
-        return res.error().ToString();
+    try {
+        loadJinjaTemplate(source);
+    } catch (const std::runtime_error &e) {
+        return e.what();
+    }
     return std::nullopt;
 }

-std::string ChatLLM::applyJinjaTemplate(std::span<const ChatItem> items) const
+std::string ChatLLM::applyJinjaTemplate(std::span<const MessageItem> items) const
 {
     Q_ASSERT(items.size() >= 1);
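Editor's note: since errors now surface as exceptions rather than jinja2::Result values, a caller validates a template simply by parsing and catching, which is all checkJinjaTemplateError does. A hedged usage sketch — the surrounding settings plumbing is hypothetical, and it assumes the method is callable without an instance (it takes only the template source):

    // Hypothetical caller: vet a user-edited chat template before accepting it.
    QString userTemplate = /* ... text from a settings dialog ... */;
    if (auto err = ChatLLM::checkJinjaTemplateError(userTemplate.toStdString())) {
        qWarning() << "invalid chat template:" << QString::fromStdString(*err);
    } else {
        // safe to store and use the template
    }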
@@ -820,80 +865,86 @@ std::string ChatLLM::applyJinjaTemplate(std::span<const ChatItem> items) const

     uint version = parseJinjaTemplateVersion(chatTemplate);

-    auto makeMap = [version](const ChatItem &item) {
-        return jinja2::GenericMap([msg = std::make_shared<JinjaMessage>(version, item)] { return msg.get(); });
+    auto makeMap = [version](const MessageItem &item) {
+        return JinjaMessage(version, item).AsJson();
     };

-    std::unique_ptr<ChatItem> systemItem;
+    std::unique_ptr<MessageItem> systemItem;
     bool useSystem = !isAllSpace(systemMessage);

-    jinja2::ValuesList messages;
+    json::array_t messages;
     messages.reserve(useSystem + items.size());
     if (useSystem) {
-        systemItem = std::make_unique<ChatItem>(ChatItem::system_tag, systemMessage);
+        systemItem = std::make_unique<MessageItem>(MessageItem::system_tag, systemMessage.toUtf8());
         messages.emplace_back(makeMap(*systemItem));
     }
     for (auto &item : items)
         messages.emplace_back(makeMap(item));

-    jinja2::ValuesMap params {
+    json::array_t toolList;
+    const int toolCount = ToolModel::globalInstance()->count();
+    for (int i = 0; i < toolCount; ++i) {
+        Tool *t = ToolModel::globalInstance()->get(i);
+        toolList.push_back(t->jinjaValue());
+    }
+
+    json::object_t params {
         { "messages", std::move(messages) },
         { "add_generation_prompt", true },
+        { "toolList", toolList },
     };
     for (auto &[name, token] : model->specialTokens())
         params.emplace(std::move(name), std::move(token));

-    std::optional<jinja2::Template> tmpl;
-    auto maybeRendered = loadJinjaTemplate(tmpl, chatTemplate.toStdString())
-        .and_then([&] { return tmpl->RenderAsString(params); });
-    if (!maybeRendered)
-        throw std::runtime_error(fmt::format("Failed to parse chat template: {}", maybeRendered.error().ToString()));
-    return *maybeRendered;
+    try {
+        auto tmpl = loadJinjaTemplate(chatTemplate.toStdString());
+        auto context = minja::Context::make(minja::Value(std::move(params)), jinjaEnv());
+        return tmpl->render(context);
+    } catch (const std::runtime_error &e) {
+        throw std::runtime_error(fmt::format("Failed to parse chat template: {}", e.what()));
+    }
+    Q_UNREACHABLE();
 }

 auto ChatLLM::promptInternalChat(const QStringList &enabledCollections, const LLModel::PromptContext &ctx,
-    std::optional<std::pair<int, int>> subrange) -> ChatPromptResult
+    qsizetype startOffset) -> ChatPromptResult
 {
     Q_ASSERT(isModelLoaded());
     Q_ASSERT(m_chatModel);

-    // Return a (ChatModelAccessor, std::span) pair where the span represents the relevant messages for this chat.
-    // "subrange" is used to select only local server messages from the current chat session.
+    // Return a vector of relevant messages for this chat.
+    // "startOffset" is used to select only local server messages from the current chat session.
     auto getChat = [&]() {
-        auto items = m_chatModel->chatItems(); // holds lock
-        std::span view(items);
-        if (subrange)
-            view = view.subspan(subrange->first, subrange->second);
-        Q_ASSERT(view.size() >= 2);
-        return std::pair(std::move(items), view);
+        auto items = m_chatModel->messageItems();
+        if (startOffset > 0)
+            items.erase(items.begin(), items.begin() + startOffset);
+        Q_ASSERT(items.size() >= 2);
+        return items;
     };

-    // copy messages for safety (since we can't hold the lock the whole time)
-    std::optional<std::pair<int, QString>> query;
-    {
-        // Find the prompt that represents the query. Server chats are flexible and may not have one.
-        auto [_, view] = getChat(); // holds lock
-        if (auto peer = m_chatModel->getPeer(view, view.end() - 1)) // peer of response
-            query = { *peer - view.begin(), (*peer)->value };
-    }
-
     QList<ResultInfo> databaseResults;
-    if (query && !enabledCollections.isEmpty()) {
-        auto &[promptIndex, queryStr] = *query;
-        const int retrievalSize = MySettings::globalInstance()->localDocsRetrievalSize();
-        emit requestRetrieveFromDB(enabledCollections, queryStr, retrievalSize, &databaseResults); // blocks
-        m_chatModel->updateSources(promptIndex, databaseResults);
-        emit databaseResultsChanged(databaseResults);
-    }
+    if (!enabledCollections.isEmpty()) {
+        std::optional<std::pair<int, QString>> query;
+        {
+            // Find the prompt that represents the query. Server chats are flexible and may not have one.
+            auto items = getChat();
+            if (auto peer = m_chatModel->getPeer(items, items.end() - 1)) // peer of response
+                query = { (*peer)->index().value(), (*peer)->content() };
+        }

-    // copy messages for safety (since we can't hold the lock the whole time)
-    std::vector<ChatItem> chatItems;
-    {
-        auto [_, view] = getChat(); // holds lock
-        chatItems.assign(view.begin(), view.end() - 1); // exclude new response
-    }
+        if (query) {
+            auto &[promptIndex, queryStr] = *query;
+            const int retrievalSize = MySettings::globalInstance()->localDocsRetrievalSize();
+            emit requestRetrieveFromDB(enabledCollections, queryStr, retrievalSize, &databaseResults); // blocks
+            m_chatModel->updateSources(promptIndex, databaseResults);
+            emit databaseResultsChanged(databaseResults);
+        }
+    }

-    auto result = promptInternal(chatItems, ctx, !databaseResults.isEmpty());
+    auto messageItems = getChat();
+    messageItems.pop_back(); // exclude new response
+
+    auto result = promptInternal(messageItems, ctx, !databaseResults.isEmpty());
     return {
         /*PromptResult*/ {
             .response = std::move(result.response),
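Editor's note: to visualize what the template sees after this change, makeMap turns each MessageItem into JSON via JinjaMessage::AsJson, and the render parameters end up roughly like the sketch below. The exact keys each message carries come from JinjaMessage, which is outside this hunk, so the role/content fields shown here are the conventional chat-template shape, not verified from this diff:

    // Rough shape of the render parameters assembled above (values illustrative).
    json::object_t params {
        { "messages", json::array({
            json { { "role", "system" }, { "content", "You are a helpful assistant." } },
            json { { "role", "user"   }, { "content", "What is 2+2?" } },
        }) },
        { "add_generation_prompt", true },
        { "toolList", json::array() },  // one JSON object per registered Tool
        // plus one entry per model->specialTokens(), e.g. { "bos_token", "<s>" }
    };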
@@ -904,8 +955,65 @@ auto ChatLLM::promptInternalChat(const QStringList &enabledCollections, const LL
     };
 }

+class ChatViewResponseHandler : public BaseResponseHandler {
+public:
+    ChatViewResponseHandler(ChatLLM *cllm, QElapsedTimer *totalTime, ChatLLM::PromptResult *result)
+        : m_cllm(cllm), m_totalTime(totalTime), m_result(result) {}
+
+    void onSplitIntoTwo(const QString &startTag, const QString &firstBuffer, const QString &secondBuffer) override
+    {
+        if (startTag == ToolCallConstants::ThinkStartTag)
+            m_cllm->m_chatModel->splitThinking({ firstBuffer, secondBuffer });
+        else
+            m_cllm->m_chatModel->splitToolCall({ firstBuffer, secondBuffer });
+    }
+
+    void onSplitIntoThree(const QString &secondBuffer, const QString &thirdBuffer) override
+    {
+        m_cllm->m_chatModel->endThinking({ secondBuffer, thirdBuffer }, m_totalTime->elapsed());
+    }
+
+    void onOldResponseChunk(const QByteArray &chunk) override
+    {
+        m_result->responseTokens++;
+        m_cllm->m_timer->inc();
+        m_result->response.append(chunk);
+    }
+
+    bool onBufferResponse(const QString &response, int bufferIdx) override
+    {
+        Q_UNUSED(bufferIdx)
+        try {
+            QString r = response;
+            m_cllm->m_chatModel->setResponseValue(removeLeadingWhitespace(r));
+        } catch (const std::exception &e) {
+            // We have a try/catch here because the main thread might have removed the response from
+            // the chatmodel by erasing the conversation during the response... the main thread sets
+            // m_stopGenerating before doing so, but it doesn't wait after that to reset the chatmodel
+            Q_ASSERT(m_cllm->m_stopGenerating);
+            return false;
+        }
+        emit m_cllm->responseChanged();
+        return true;
+    }
+
+    bool onRegularResponse() override
+    {
+        auto respStr = QString::fromUtf8(m_result->response);
+        return onBufferResponse(respStr, 0);
+    }
+
+    bool getStopGenerating() const override
+    { return m_cllm->m_stopGenerating; }
+
+private:
+    ChatLLM *m_cllm;
+    QElapsedTimer *m_totalTime;
+    ChatLLM::PromptResult *m_result;
+};
+
 auto ChatLLM::promptInternal(
-    const std::variant<std::span<const ChatItem>, std::string_view> &prompt,
+    const std::variant<std::span<const MessageItem>, std::string_view> &prompt,
     const LLModel::PromptContext &ctx,
     bool usedLocalDocs
 ) -> PromptResult
@@ -915,14 +1023,14 @@ auto ChatLLM::promptInternal(
     auto *mySettings = MySettings::globalInstance();

     // unpack prompt argument
-    const std::span<const ChatItem> *chatItems = nullptr;
+    const std::span<const MessageItem> *messageItems = nullptr;
     std::string jinjaBuffer;
     std::string_view conversation;
     if (auto *nonChat = std::get_if<std::string_view>(&prompt)) {
         conversation = *nonChat; // complete the string without a template
     } else {
-        chatItems = &std::get<std::span<const ChatItem>>(prompt);
-        jinjaBuffer = applyJinjaTemplate(*chatItems);
+        messageItems = &std::get<std::span<const MessageItem>>(prompt);
+        jinjaBuffer = applyJinjaTemplate(*messageItems);
         conversation = jinjaBuffer;
     }
@@ -930,8 +1038,8 @@ auto ChatLLM::promptInternal(
     if (!dynamic_cast<const ChatAPI *>(m_llModelInfo.model.get())) {
         auto nCtx = m_llModelInfo.model->contextLength();
         std::string jinjaBuffer2;
-        auto lastMessageRendered = (chatItems && chatItems->size() > 1)
-            ? std::string_view(jinjaBuffer2 = applyJinjaTemplate({ &chatItems->back(), 1 }))
+        auto lastMessageRendered = (messageItems && messageItems->size() > 1)
+            ? std::string_view(jinjaBuffer2 = applyJinjaTemplate({ &messageItems->back(), 1 }))
             : conversation;
         int32_t lastMessageLength = m_llModelInfo.model->countPromptTokens(lastMessageRendered);
         if (auto limit = nCtx - 4; lastMessageLength > limit) {
@@ -951,25 +1059,22 @@ auto ChatLLM::promptInternal(
         return !m_stopGenerating;
     };

-    auto handleResponse = [this, &result](LLModel::Token token, std::string_view piece) -> bool {
-        Q_UNUSED(token)
-        result.responseTokens++;
-        m_timer->inc();
-        result.response.append(piece.data(), piece.size());
-        auto respStr = QString::fromUtf8(result.response);
-        emit responseChanged(removeLeadingWhitespace(respStr));
-        return !m_stopGenerating;
-    };
-
     QElapsedTimer totalTime;
     totalTime.start();
-    m_timer->start();
+    ChatViewResponseHandler respHandler(this, &totalTime, &result);
+
+    m_timer->start();
+    QStringList finalBuffers;
+    bool shouldExecuteTool;
     try {
         emit promptProcessing();
         m_llModelInfo.model->setThreadCount(mySettings->threadCount());
         m_stopGenerating = false;
-        m_llModelInfo.model->prompt(conversation, handlePrompt, handleResponse, ctx);
+        std::tie(finalBuffers, shouldExecuteTool) = promptModelWithTools(
+            m_llModelInfo.model.get(), handlePrompt, respHandler, ctx,
+            QByteArray::fromRawData(conversation.data(), conversation.size()),
+            ToolCallConstants::AllTagNames
+        );
     } catch (...) {
         m_timer->stop();
         throw;
@@ -980,11 +1085,16 @@ auto ChatLLM::promptInternal(

     // trim trailing whitespace
     auto respStr = QString::fromUtf8(result.response);
-    if (!respStr.isEmpty() && std::as_const(respStr).back().isSpace())
-        emit responseChanged(respStr.trimmed());
+    if (!respStr.isEmpty() && (std::as_const(respStr).back().isSpace() || finalBuffers.size() > 1)) {
+        if (finalBuffers.size() > 1)
+            m_chatModel->setResponseValue(finalBuffers.last().trimmed());
+        else
+            m_chatModel->setResponseValue(respStr.trimmed());
+        emit responseChanged();
+    }

     bool doQuestions = false;
-    if (!m_isServer && chatItems) {
+    if (!m_isServer && messageItems && !shouldExecuteTool) {
         switch (mySettings->suggestionMode()) {
             case SuggestionMode::On:            doQuestions = true;          break;
             case SuggestionMode::LocalDocsOnly: doQuestions = usedLocalDocs; break;
@@ -1062,6 +1172,66 @@ void ChatLLM::reloadModel()
     loadModel(m);
 }

+// This class discards the text within thinking tags, for use with chat names and follow-up questions.
+class SimpleResponseHandler : public BaseResponseHandler {
+public:
+    SimpleResponseHandler(ChatLLM *cllm)
+        : m_cllm(cllm) {}
+
+    void onSplitIntoTwo(const QString &startTag, const QString &firstBuffer, const QString &secondBuffer) override
+    { /* no-op */ }
+
+    void onSplitIntoThree(const QString &secondBuffer, const QString &thirdBuffer) override
+    { /* no-op */ }
+
+    void onOldResponseChunk(const QByteArray &chunk) override
+    { m_response.append(chunk); }
+
+    bool onBufferResponse(const QString &response, int bufferIdx) override
+    {
+        if (bufferIdx == 1)
+            return true; // ignore "think" content
+        return onSimpleResponse(response);
+    }
+
+    bool onRegularResponse() override
+    { return onBufferResponse(QString::fromUtf8(m_response), 0); }
+
+    bool getStopGenerating() const override
+    { return m_cllm->m_stopGenerating; }
+
+protected:
+    virtual bool onSimpleResponse(const QString &response) = 0;
+
+protected:
+    ChatLLM *m_cllm;
+    QByteArray m_response;
+};
+
+class NameResponseHandler : public SimpleResponseHandler {
+private:
+    // max length of chat names, in words
+    static constexpr qsizetype MAX_WORDS = 3;
+
+public:
+    using SimpleResponseHandler::SimpleResponseHandler;
+
+protected:
+    bool onSimpleResponse(const QString &response) override
+    {
+        QTextStream stream(const_cast<QString *>(&response), QIODeviceBase::ReadOnly);
+        QStringList words;
+        while (!stream.atEnd() && words.size() < MAX_WORDS) {
+            QString word;
+            stream >> word;
+            words << word;
+        }
+
+        emit m_cllm->generatedNameChanged(words.join(u' '));
+        return words.size() < MAX_WORDS || stream.atEnd();
+    }
+};
+
 void ChatLLM::generateName()
 {
     Q_ASSERT(isModelLoaded());
@@ -1078,23 +1248,15 @@ void ChatLLM::generateName()
         return;
     }

-    QByteArray response; // raw UTF-8
-
-    auto handleResponse = [this, &response](LLModel::Token token, std::string_view piece) -> bool {
-        Q_UNUSED(token)
-        response.append(piece.data(), piece.size());
-        QStringList words = QString::fromUtf8(response).simplified().split(u' ', Qt::SkipEmptyParts);
-        emit generatedNameChanged(words.join(u' '));
-        return words.size() <= 3;
-    };
+    NameResponseHandler respHandler(this);

     try {
-        m_llModelInfo.model->prompt(
-            applyJinjaTemplate(forkConversation(chatNamePrompt)),
-            [this](auto &&...) { return !m_stopGenerating; },
-            handleResponse,
-            promptContextFromSettings(m_modelInfo)
+        promptModelWithTools(
+            m_llModelInfo.model.get(),
+            /*promptCallback*/ [this](auto &&...) { return !m_stopGenerating; },
+            respHandler, promptContextFromSettings(m_modelInfo),
+            applyJinjaTemplate(forkConversation(chatNamePrompt)).c_str(),
+            { ToolCallConstants::ThinkTagName }
         );
     } catch (const std::exception &e) {
         qWarning() << "ChatLLM failed to generate name:" << e.what();
@@ -1106,13 +1268,43 @@ void ChatLLM::handleChatIdChanged(const QString &id)
     m_llmThread.setObjectName(id);
 }

-void ChatLLM::generateQuestions(qint64 elapsed)
-{
+class QuestionResponseHandler : public SimpleResponseHandler {
+public:
+    using SimpleResponseHandler::SimpleResponseHandler;
+
+protected:
+    bool onSimpleResponse(const QString &response) override
+    {
+        auto responseUtf8Bytes = response.toUtf8().slice(m_offset);
+        auto responseUtf8 = std::string(responseUtf8Bytes.begin(), responseUtf8Bytes.end());
+        // extract all questions from response
+        ptrdiff_t lastMatchEnd = -1;
+        auto it = std::sregex_iterator(responseUtf8.begin(), responseUtf8.end(), s_reQuestion);
+        auto end = std::sregex_iterator();
+        for (; it != end; ++it) {
+            auto pos = it->position();
+            auto len = it->length();
+            lastMatchEnd = pos + len;
+            emit m_cllm->generatedQuestionFinished(QString::fromUtf8(&responseUtf8[pos], len));
+        }
+
+        // remove processed input from buffer
+        if (lastMatchEnd != -1)
+            m_offset += lastMatchEnd;
+        return true;
+    }
+
+private:
     // FIXME: This only works with response by the model in english which is not ideal for a multi-language
     // model.
     // match whole question sentences
-    static const std::regex reQuestion(R"(\b(?:What|Where|How|Why|When|Who|Which|Whose|Whom)\b[^?]*\?)");
+    static inline const std::regex s_reQuestion { R"(\b(?:What|Where|How|Why|When|Who|Which|Whose|Whom)\b[^?]*\?)" };
+
+    qsizetype m_offset = 0;
+};
+
+void ChatLLM::generateQuestions(qint64 elapsed)
+{
     Q_ASSERT(isModelLoaded());
     if (!isModelLoaded()) {
         emit responseStopped(elapsed);
@@ -1130,39 +1322,17 @@ void ChatLLM::generateQuestions(qint64 elapsed)

     emit generatingQuestions();

-    std::string response; // raw UTF-8
-
-    auto handleResponse = [this, &response](LLModel::Token token, std::string_view piece) -> bool {
-        Q_UNUSED(token)
-
-        // add token to buffer
-        response.append(piece);
-
-        // extract all questions from response
-        ptrdiff_t lastMatchEnd = -1;
-        auto it = std::sregex_iterator(response.begin(), response.end(), reQuestion);
-        auto end = std::sregex_iterator();
-        for (; it != end; ++it) {
-            auto pos = it->position();
-            auto len = it->length();
-            lastMatchEnd = pos + len;
-            emit generatedQuestionFinished(QString::fromUtf8(&response[pos], len));
-        }
-
-        // remove processed input from buffer
-        if (lastMatchEnd != -1)
-            response.erase(0, lastMatchEnd);
-        return true;
-    };
+    QuestionResponseHandler respHandler(this);

     QElapsedTimer totalTime;
     totalTime.start();
     try {
-        m_llModelInfo.model->prompt(
-            applyJinjaTemplate(forkConversation(suggestedFollowUpPrompt)),
-            [this](auto &&...) { return !m_stopGenerating; },
-            handleResponse,
-            promptContextFromSettings(m_modelInfo)
+        promptModelWithTools(
+            m_llModelInfo.model.get(),
+            /*promptCallback*/ [this](auto &&...) { return !m_stopGenerating; },
+            respHandler, promptContextFromSettings(m_modelInfo),
+            applyJinjaTemplate(forkConversation(suggestedFollowUpPrompt)).c_str(),
+            { ToolCallConstants::ThinkTagName }
+        );
     } catch (const std::exception &e) {
         qWarning() << "ChatLLM failed to generate follow-up questions:" << e.what();
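Editor's note: the question extraction above is an incremental regex scan — each streamed chunk re-runs std::sregex_iterator over the not-yet-consumed tail of the buffer and advances an offset past the last complete match, so a question that is only partially streamed is simply retried on the next chunk. The same mechanism in isolation, as a standalone sketch rather than repository code:

    #include <iostream>
    #include <regex>
    #include <string>

    int main()
    {
        static const std::regex reQuestion(
            R"(\b(?:What|Where|How|Why|When|Who|Which|Whose|Whom)\b[^?]*\?)");

        std::string buffer; // accumulated model output
        size_t offset = 0;  // start of the unprocessed tail

        for (std::string chunk : { std::string("What is the cap"),
                                   std::string("ital of France? Why is it "),
                                   std::string("famous?") }) {
            buffer += chunk;

            std::string tail = buffer.substr(offset);
            ptrdiff_t lastMatchEnd = -1;
            for (auto it = std::sregex_iterator(tail.begin(), tail.end(), reQuestion);
                 it != std::sregex_iterator(); ++it) {
                lastMatchEnd = it->position() + it->length();
                std::cout << "question: " << it->str() << '\n';
            }
            if (lastMatchEnd != -1)
                offset += lastMatchEnd; // don't re-emit completed questions
        }
    }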
@@ -2,7 +2,7 @@
 #define CHATLLM_H

 #include "chatmodel.h"
-#include "database.h" // IWYU pragma: keep
+#include "database.h"
 #include "modellist.h"

 #include <gpt4all-backend/llmodel.h>
@@ -10,28 +10,30 @@
 #include <QByteArray>
 #include <QElapsedTimer>
 #include <QFileInfo>
-#include <QList> // IWYU pragma: keep
+#include <QList>
 #include <QObject>
 #include <QPointer>
 #include <QString>
 #include <QStringList> // IWYU pragma: keep
-#include <QStringView>
 #include <QThread>
 #include <QVariantMap> // IWYU pragma: keep
-#include <QtGlobal>
+#include <QtNumeric>

 #include <atomic>
-#include <cstdint>
 #include <memory>
 #include <optional>
 #include <span>
 #include <string>
+#include <string_view>
 #include <variant>
+#include <vector>

 using namespace Qt::Literals::StringLiterals;

+class ChatLLM;
 class QDataStream;


 // NOTE: values serialized to disk, do not change or reuse
 enum class LLModelTypeV0 { // chat versions 2-5
     MPT = 0,
@@ -88,9 +90,6 @@ inline LLModelTypeV1 parseLLModelTypeV0(int v0)
     }
 }

-class ChatLLM;
-class ChatModel;
-
 struct LLModelInfo {
     std::unique_ptr<LLModel> model;
     QFileInfo fileInfo;
@@ -220,8 +219,8 @@ Q_SIGNALS:
     void modelLoadingPercentageChanged(float);
     void modelLoadingError(const QString &error);
     void modelLoadingWarning(const QString &warning);
-    void responseChanged(const QString &response);
-    void responseFailed(const QString &error);
+    void responseChanged();
+    void responseFailed();
     void promptProcessing();
     void generatingQuestions();
     void responseStopped(qint64 promptResponseMs);
@@ -251,20 +250,20 @@ protected:
     };

     ChatPromptResult promptInternalChat(const QStringList &enabledCollections, const LLModel::PromptContext &ctx,
-        std::optional<std::pair<int, int>> subrange = {});
+        qsizetype startOffset = 0);
     // passing a string_view directly skips templating and uses the raw string
-    PromptResult promptInternal(const std::variant<std::span<const ChatItem>, std::string_view> &prompt,
+    PromptResult promptInternal(const std::variant<std::span<const MessageItem>, std::string_view> &prompt,
         const LLModel::PromptContext &ctx,
         bool usedLocalDocs);

 private:
     bool loadNewModel(const ModelInfo &modelInfo, QVariantMap &modelLoadProps);

-    std::vector<ChatItem> forkConversation(const QString &prompt) const;
+    std::vector<MessageItem> forkConversation(const QString &prompt) const;

     // Applies the Jinja template. Query mode returns only the last message without special tokens.
     // Returns a (# of messages, rendered prompt) pair.
-    std::string applyJinjaTemplate(std::span<const ChatItem> items) const;
+    std::string applyJinjaTemplate(std::span<const MessageItem> items) const;

     void generateQuestions(qint64 elapsed);

@@ -285,6 +284,8 @@ private:
     bool m_isServer;
     bool m_forceMetal;
     bool m_reloadingToChangeVariant;
+    friend class ChatViewResponseHandler;
+    friend class SimpleResponseHandler;
 };

 #endif // CHATLLM_H
gpt4all-chat/src/chatmodel.cpp (new file, 368 lines)
@@ -0,0 +1,368 @@
#include "chatmodel.h"
|
||||||
|
|
||||||
|
#include <QDebug>
|
||||||
|
#include <QMap>
|
||||||
|
#include <QTextStream>
|
||||||
|
#include <QtLogging>
|
||||||
|
|
||||||
|
#include <exception>
|
||||||
|
|
||||||
|
|
||||||
|
QList<ResultInfo> ChatItem::consolidateSources(const QList<ResultInfo> &sources)
|
||||||
|
{
|
||||||
|
QMap<QString, ResultInfo> groupedData;
|
||||||
|
for (const ResultInfo &info : sources) {
|
||||||
|
if (groupedData.contains(info.file)) {
|
||||||
|
groupedData[info.file].text += "\n---\n" + info.text;
|
||||||
|
} else {
|
||||||
|
groupedData[info.file] = info;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
QList<ResultInfo> consolidatedSources = groupedData.values();
|
||||||
|
return consolidatedSources;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ChatItem::serializeResponse(QDataStream &stream, int version)
|
||||||
|
{
|
||||||
|
stream << value;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ChatItem::serializeToolCall(QDataStream &stream, int version)
|
||||||
|
{
|
||||||
|
stream << value;
|
||||||
|
toolCallInfo.serialize(stream, version);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ChatItem::serializeToolResponse(QDataStream &stream, int version)
|
||||||
|
{
|
||||||
|
stream << value;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ChatItem::serializeText(QDataStream &stream, int version)
|
||||||
|
{
|
||||||
|
stream << value;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ChatItem::serializeThink(QDataStream &stream, int version)
|
||||||
|
{
|
||||||
|
stream << value;
|
||||||
|
stream << thinkingTime;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ChatItem::serializeSubItems(QDataStream &stream, int version)
|
||||||
|
{
|
||||||
|
stream << name;
|
||||||
|
switch (auto typ = type()) {
|
||||||
|
using enum ChatItem::Type;
|
||||||
|
case Response: { serializeResponse(stream, version); break; }
|
||||||
|
case ToolCall: { serializeToolCall(stream, version); break; }
|
||||||
|
case ToolResponse: { serializeToolResponse(stream, version); break; }
|
||||||
|
case Text: { serializeText(stream, version); break; }
|
||||||
|
case Think: { serializeThink(stream, version); break; }
|
||||||
|
case System:
|
||||||
|
case Prompt:
|
||||||
|
throw std::invalid_argument(fmt::format("cannot serialize subitem type {}", int(typ)));
|
||||||
|
}
|
||||||
|
|
||||||
|
stream << qsizetype(subItems.size());
|
||||||
|
for (ChatItem *item :subItems)
|
||||||
|
item->serializeSubItems(stream, version);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ChatItem::serialize(QDataStream &stream, int version)
|
||||||
|
{
|
||||||
|
stream << name;
|
||||||
|
stream << value;
|
||||||
|
stream << newResponse;
|
||||||
|
stream << isCurrentResponse;
|
||||||
|
stream << stopped;
|
||||||
|
stream << thumbsUpState;
|
||||||
|
stream << thumbsDownState;
|
||||||
|
if (version >= 11 && type() == ChatItem::Type::Response)
|
||||||
|
stream << isError;
|
||||||
|
if (version >= 8) {
|
||||||
|
stream << sources.size();
|
||||||
|
for (const ResultInfo &info : sources) {
|
||||||
|
Q_ASSERT(!info.file.isEmpty());
|
||||||
|
stream << info.collection;
|
||||||
|
stream << info.path;
|
||||||
|
stream << info.file;
|
||||||
|
stream << info.title;
|
||||||
|
stream << info.author;
|
||||||
|
stream << info.date;
|
||||||
|
stream << info.text;
|
||||||
|
stream << info.page;
|
||||||
|
stream << info.from;
|
||||||
|
stream << info.to;
|
||||||
|
}
|
||||||
|
} else if (version >= 3) {
|
||||||
|
QList<QString> references;
|
||||||
|
QList<QString> referencesContext;
|
||||||
|
int validReferenceNumber = 1;
|
||||||
|
for (const ResultInfo &info : sources) {
|
||||||
|
if (info.file.isEmpty())
|
||||||
|
continue;
|
||||||
|
|
||||||
|
QString reference;
|
||||||
|
{
|
||||||
|
QTextStream stream(&reference);
|
||||||
|
stream << (validReferenceNumber++) << ". ";
|
||||||
|
if (!info.title.isEmpty())
|
||||||
|
stream << "\"" << info.title << "\". ";
|
||||||
|
if (!info.author.isEmpty())
|
||||||
|
stream << "By " << info.author << ". ";
|
||||||
|
if (!info.date.isEmpty())
|
||||||
|
stream << "Date: " << info.date << ". ";
|
||||||
|
stream << "In " << info.file << ". ";
|
||||||
|
if (info.page != -1)
|
||||||
|
stream << "Page " << info.page << ". ";
|
||||||
|
if (info.from != -1) {
|
||||||
|
stream << "Lines " << info.from;
|
||||||
|
if (info.to != -1)
|
||||||
|
stream << "-" << info.to;
|
||||||
|
stream << ". ";
|
||||||
|
}
|
||||||
|
stream << "[Context](context://" << validReferenceNumber - 1 << ")";
|
||||||
|
}
|
||||||
|
references.append(reference);
|
||||||
|
referencesContext.append(info.text);
|
||||||
|
}
|
||||||
|
|
||||||
|
stream << references.join("\n");
|
||||||
|
stream << referencesContext;
|
||||||
|
}
|
||||||
|
if (version >= 10) {
|
||||||
|
stream << promptAttachments.size();
|
||||||
|
for (const PromptAttachment &a : promptAttachments) {
|
||||||
|
Q_ASSERT(!a.url.isEmpty());
|
||||||
|
stream << a.url;
|
||||||
|
stream << a.content;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (version >= 12) {
|
||||||
|
stream << qsizetype(subItems.size());
|
||||||
|
for (ChatItem *item :subItems)
|
||||||
|
item->serializeSubItems(stream, version);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool ChatItem::deserializeToolCall(QDataStream &stream, int version)
|
||||||
|
{
|
||||||
|
stream >> value;
|
||||||
|
return toolCallInfo.deserialize(stream, version);;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool ChatItem::deserializeToolResponse(QDataStream &stream, int version)
|
||||||
|
{
|
||||||
|
stream >> value;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool ChatItem::deserializeText(QDataStream &stream, int version)
|
||||||
|
{
|
||||||
|
stream >> value;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool ChatItem::deserializeResponse(QDataStream &stream, int version)
|
||||||
|
{
|
||||||
|
stream >> value;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool ChatItem::deserializeThink(QDataStream &stream, int version)
|
||||||
|
{
|
||||||
|
stream >> value;
|
||||||
|
stream >> thinkingTime;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool ChatItem::deserializeSubItems(QDataStream &stream, int version)
|
||||||
|
{
|
||||||
|
stream >> name;
|
||||||
|
try {
|
||||||
|
type(); // check name
|
||||||
|
} catch (const std::exception &e) {
|
||||||
|
qWarning() << "ChatModel ERROR:" << e.what();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
switch (auto typ = type()) {
|
||||||
|
using enum ChatItem::Type;
|
||||||
|
case Response: { deserializeResponse(stream, version); break; }
|
||||||
|
case ToolCall: { deserializeToolCall(stream, version); break; }
|
||||||
|
case ToolResponse: { deserializeToolResponse(stream, version); break; }
|
||||||
|
case Text: { deserializeText(stream, version); break; }
|
||||||
|
case Think: { deserializeThink(stream, version); break; }
|
||||||
|
case System:
|
||||||
|
case Prompt:
|
||||||
|
throw std::invalid_argument(fmt::format("cannot serialize subitem type {}", int(typ)));
|
||||||
|
}
|
||||||
|
|
||||||
|
qsizetype count;
|
||||||
|
stream >> count;
|
||||||
|
for (int i = 0; i < count; ++i) {
|
||||||
|
ChatItem *c = new ChatItem(this);
|
||||||
|
if (!c->deserializeSubItems(stream, version)) {
|
||||||
|
delete c;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
subItems.push_back(c);
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool ChatItem::deserialize(QDataStream &stream, int version)
|
||||||
|
{
|
||||||
|
if (version < 12) {
|
||||||
|
int id;
|
||||||
|
stream >> id;
|
||||||
|
}
|
||||||
|
stream >> name;
|
||||||
|
try {
|
||||||
|
type(); // check name
|
||||||
|
} catch (const std::exception &e) {
|
||||||
|
qWarning() << "ChatModel ERROR:" << e.what();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
stream >> value;
|
||||||
|
if (version < 10) {
|
||||||
|
// This is deprecated and no longer used
|
||||||
|
QString prompt;
|
||||||
|
stream >> prompt;
|
||||||
|
}
|
||||||
|
stream >> newResponse;
|
||||||
|
stream >> isCurrentResponse;
|
||||||
|
stream >> stopped;
|
||||||
|
stream >> thumbsUpState;
|
||||||
|
stream >> thumbsDownState;
|
||||||
|
if (version >= 11 && type() == ChatItem::Type::Response)
|
||||||
|
stream >> isError;
|
||||||
|
if (version >= 8) {
|
||||||
|
qsizetype count;
|
||||||
|
stream >> count;
|
||||||
|
for (int i = 0; i < count; ++i) {
|
||||||
|
ResultInfo info;
|
||||||
|
stream >> info.collection;
|
||||||
|
stream >> info.path;
|
||||||
|
stream >> info.file;
|
||||||
|
stream >> info.title;
|
||||||
|
stream >> info.author;
|
||||||
|
stream >> info.date;
|
||||||
|
stream >> info.text;
|
||||||
|
stream >> info.page;
|
||||||
|
stream >> info.from;
|
||||||
|
stream >> info.to;
|
||||||
|
sources.append(info);
|
||||||
|
}
|
||||||
|
consolidatedSources = ChatItem::consolidateSources(sources);
|
||||||
|
} else if (version >= 3) {
|
||||||
|
QString references;
|
||||||
|
QList<QString> referencesContext;
|
||||||
|
stream >> references;
|
||||||
|
stream >> referencesContext;
|
||||||
|
|
||||||
|
if (!references.isEmpty()) {
|
||||||
|
QList<QString> referenceList = references.split("\n");
|
||||||
|
|
||||||
|
// Ignore empty lines and those that begin with "---" which is no longer used
|
||||||
|
for (auto it = referenceList.begin(); it != referenceList.end();) {
|
||||||
|
if (it->trimmed().isEmpty() || it->trimmed().startsWith("---"))
|
||||||
|
it = referenceList.erase(it);
|
||||||
|
else
|
||||||
|
++it;
|
||||||
|
}
|
||||||
|
|
||||||
|
Q_ASSERT(referenceList.size() == referencesContext.size());
|
||||||
|
for (int j = 0; j < referenceList.size(); ++j) {
|
||||||
|
QString reference = referenceList[j];
|
||||||
|
QString context = referencesContext[j];
|
||||||
|
ResultInfo info;
|
||||||
|
QTextStream refStream(&reference);
|
||||||
|
QString dummy;
|
||||||
|
int validReferenceNumber;
|
||||||
|
refStream >> validReferenceNumber >> dummy;
|
||||||
|
// Extract title (between quotes)
|
||||||
|
if (reference.contains("\"")) {
|
||||||
|
int startIndex = reference.indexOf('"') + 1;
|
||||||
|
int endIndex = reference.indexOf('"', startIndex);
|
||||||
|
info.title = reference.mid(startIndex, endIndex - startIndex);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract author (after "By " and before the next period)
|
||||||
|
if (reference.contains("By ")) {
|
||||||
|
int startIndex = reference.indexOf("By ") + 3;
|
||||||
|
int endIndex = reference.indexOf('.', startIndex);
|
||||||
|
info.author = reference.mid(startIndex, endIndex - startIndex).trimmed();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract date (after "Date: " and before the next period)
|
||||||
|
if (reference.contains("Date: ")) {
|
||||||
|
int startIndex = reference.indexOf("Date: ") + 6;
|
||||||
|
int endIndex = reference.indexOf('.', startIndex);
|
||||||
|
info.date = reference.mid(startIndex, endIndex - startIndex).trimmed();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract file name (after "In " and before the "[Context]")
|
||||||
|
if (reference.contains("In ") && reference.contains(". [Context]")) {
|
||||||
|
int startIndex = reference.indexOf("In ") + 3;
|
||||||
|
int endIndex = reference.indexOf(". [Context]", startIndex);
|
||||||
|
info.file = reference.mid(startIndex, endIndex - startIndex).trimmed();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract page number (after "Page " and before the next space)
|
||||||
|
if (reference.contains("Page ")) {
|
||||||
|
int startIndex = reference.indexOf("Page ") + 5;
|
||||||
|
int endIndex = reference.indexOf(' ', startIndex);
|
||||||
|
if (endIndex == -1) endIndex = reference.length();
|
||||||
|
info.page = reference.mid(startIndex, endIndex - startIndex).toInt();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract lines (after "Lines " and before the next space or hyphen)
|
||||||
|
if (reference.contains("Lines ")) {
|
||||||
|
int startIndex = reference.indexOf("Lines ") + 6;
|
||||||
|
int endIndex = reference.indexOf(' ', startIndex);
|
||||||
|
if (endIndex == -1) endIndex = reference.length();
|
||||||
|
int hyphenIndex = reference.indexOf('-', startIndex);
|
||||||
|
if (hyphenIndex != -1 && hyphenIndex < endIndex) {
|
||||||
|
info.from = reference.mid(startIndex, hyphenIndex - startIndex).toInt();
|
||||||
|
info.to = reference.mid(hyphenIndex + 1, endIndex - hyphenIndex - 1).toInt();
|
||||||
|
} else {
|
||||||
|
info.from = reference.mid(startIndex, endIndex - startIndex).toInt();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
info.text = context;
|
||||||
|
sources.append(info);
|
||||||
|
}
|
||||||
|
|
||||||
|
consolidatedSources = ChatItem::consolidateSources(sources);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (version >= 10) {
|
||||||
|
qsizetype count;
|
||||||
|
stream >> count;
|
||||||
|
QList<PromptAttachment> attachments;
|
||||||
|
for (int i = 0; i < count; ++i) {
|
||||||
|
PromptAttachment a;
|
||||||
|
stream >> a.url;
|
||||||
|
stream >> a.content;
|
||||||
|
attachments.append(a);
|
||||||
|
}
|
||||||
|
promptAttachments = attachments;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (version >= 12) {
|
||||||
|
qsizetype count;
|
||||||
|
stream >> count;
|
||||||
|
for (int i = 0; i < count; ++i) {
|
||||||
|
ChatItem *c = new ChatItem(this);
|
||||||
|
if (!c->deserializeSubItems(stream, version)) {
|
||||||
|
delete c;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
subItems.push_back(c);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
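ChatItem::serialize and deserialize above gate every field on the stream version, which is how old chats keep loading after the format grows. A condensed sketch of the same gating, using a hypothetical Item type rather than the real ChatItem:

#include <QBuffer>
#include <QDataStream>
#include <QDebug>
#include <QString>

// Hypothetical item with one field that only exists in streams of
// version >= 11, mirroring the gating in ChatItem::serialize above.
struct Item {
    QString name;
    bool isError = false;

    void serialize(QDataStream &s, int version) const
    {
        s << name;
        if (version >= 11)
            s << isError;
    }

    bool deserialize(QDataStream &s, int version)
    {
        s >> name;
        if (version >= 11)
            s >> isError;
        return s.status() == QDataStream::Ok;
    }
};

int main()
{
    QBuffer buf;
    buf.open(QIODevice::ReadWrite);

    QDataStream out(&buf);
    Item a{QStringLiteral("response"), true};
    a.serialize(out, 11); // writer and reader must agree on the version

    buf.seek(0);
    QDataStream in(&buf);
    Item b;
    bool ok = b.deserialize(in, 11);
    qDebug() << ok << b.name << b.isError; // true "response" true
    return 0;
}

A stream written at version 10 simply never contains the extra field, so the reader must branch on the stored version, not on what the current code would write.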
(File diff suppressed because it is too large.)
@@ -1,29 +1,32 @@
 #include "chatviewtextprocessor.h"

+#include <QAbstractTextDocumentLayout>
 #include <QBrush>
 #include <QChar>
 #include <QClipboard>
+#include <QDebug>
+#include <QFlag>
 #include <QFont>
-#include <QFontMetricsF>
 #include <QGuiApplication>
-#include <QList>
+#include <QList> // IWYU pragma: keep
-#include <QPainter>
+#include <QPair>
 #include <QQuickTextDocument>
 #include <QRegularExpression>
-#include <QStringList>
+#include <QStringList> // IWYU pragma: keep
-#include <QTextBlock>
+#include <QTextBlock> // IWYU pragma: keep
-#include <QTextCharFormat>
+#include <QTextCharFormat> // IWYU pragma: keep
 #include <QTextCursor>
 #include <QTextDocument>
 #include <QTextDocumentFragment>
-#include <QTextFrame>
+#include <QTextFrame> // IWYU pragma: keep
-#include <QTextFrameFormat>
+#include <QTextFrameFormat> // IWYU pragma: keep
 #include <QTextTableCell>
-#include <QVariant>
+#include <QtAssert>
-#include <Qt>
+#include <QtLogging>
-#include <QtGlobal>

 #include <algorithm>
+#include <utility>


 enum Language {
     None,
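Most of the include churn above (and in database.h, download.cpp, and embllm.cpp below) is driven by the include-what-you-use tool. The pragma comments are directives to that tool, not to the compiler; a small sketch of the two forms this commit uses, with a hypothetical function for illustration:

#include <QString>
#include <QStringList> // IWYU pragma: keep
// "keep" stops IWYU from suggesting that this include be removed when it
// cannot see a direct use of the header's own symbols (here, QStringList
// is only visible through the return type's alias, QList<QString>).

// IWYU pragma: no_forward_declare QQuickTextDocument
// "no_forward_declare" stops IWYU from suggesting that an include of the
// full class definition be replaced with a forward declaration.

QList<QString> splitWords(const QString &s) // hypothetical example function
{
    return s.split(u' ');
}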
@@ -3,18 +3,15 @@

 #include <QColor>
 #include <QObject>
-#include <QQmlEngine>
+#include <QQmlEngine> // IWYU pragma: keep
-#include <QQuickTextDocument> // IWYU pragma: keep
+#include <QQuickTextDocument>
-#include <QRectF>
-#include <QSizeF>
 #include <QString>
 #include <QSyntaxHighlighter>
-#include <QTextObjectInterface>
+#include <QVector> // IWYU pragma: keep
-#include <QVector>
+#include <QtTypes>

+// IWYU pragma: no_forward_declare QQuickTextDocument

-class QPainter;
-class QTextDocument;
-class QTextFormat;

 struct CodeColors {
     Q_GADGET
gpt4all-chat/src/codeinterpreter.cpp (new file, 179 lines)
@@ -0,0 +1,179 @@
#include "codeinterpreter.h"

#include <QJSEngine>
#include <QJSValue>
#include <QList>
#include <QStringList> // IWYU pragma: keep
#include <QThread>
#include <QVariant>
#include <Qt>

using namespace Qt::Literals::StringLiterals;


CodeInterpreter::CodeInterpreter()
    : Tool()
    , m_error(ToolEnums::Error::NoError)
{
    m_worker = new CodeInterpreterWorker;
    connect(this, &CodeInterpreter::request, m_worker, &CodeInterpreterWorker::request, Qt::QueuedConnection);
}

void CodeInterpreter::run(const QList<ToolParam> &params)
{
    m_error = ToolEnums::Error::NoError;
    m_errorString = QString();

    Q_ASSERT(params.count() == 1
        && params.first().name == "code"
        && params.first().type == ToolEnums::ParamType::String);

    const QString code = params.first().value.toString();
    connect(m_worker, &CodeInterpreterWorker::finished, [this, params] {
        m_error = m_worker->error();
        m_errorString = m_worker->errorString();
        emit runComplete({
            ToolCallConstants::CodeInterpreterFunction,
            params,
            m_worker->response(),
            m_error,
            m_errorString
        });
    });

    emit request(code);
}

bool CodeInterpreter::interrupt()
{
    return m_worker->interrupt();
}

QList<ToolParamInfo> CodeInterpreter::parameters() const
{
    return {{
        "code",
        ToolEnums::ParamType::String,
        "javascript code to compute",
        true
    }};
}

QString CodeInterpreter::symbolicFormat() const
{
    return "{human readable plan to complete the task}\n" + ToolCallConstants::CodeInterpreterPrefix + "{code}\n" + ToolCallConstants::CodeInterpreterSuffix;
}

QString CodeInterpreter::examplePrompt() const
{
    return R"(Write code to check if a number is prime, use that to see if the number 7 is prime)";
}

QString CodeInterpreter::exampleCall() const
{
    static const QString example = R"(function isPrime(n) {
    if (n <= 1) {
        return false;
    }
    for (let i = 2; i <= Math.sqrt(n); i++) {
        if (n % i === 0) {
            return false;
        }
    }
    return true;
}

const number = 7;
console.log(`The number ${number} is prime: ${isPrime(number)}`);
)";

    return "Certainly! Let's compute the answer to whether the number 7 is prime.\n" + ToolCallConstants::CodeInterpreterPrefix + example + ToolCallConstants::CodeInterpreterSuffix;
}

QString CodeInterpreter::exampleReply() const
{
    return R"(The computed result shows that 7 is a prime number.)";
}

CodeInterpreterWorker::CodeInterpreterWorker()
    : QObject(nullptr)
    , m_engine(new QJSEngine(this))
{
    moveToThread(&m_thread);

    QJSValue consoleInternalObject = m_engine->newQObject(&m_consoleCapture);
    m_engine->globalObject().setProperty("console_internal", consoleInternalObject);

    // preprocess console.log args in JS since Q_INVOKE doesn't support varargs
    auto consoleObject = m_engine->evaluate(uR"(
        class Console {
            log(...args) {
                if (args.length == 0)
                    return;
                if (args.length >= 2 && typeof args[0] === 'string')
                    throw new Error('console.log string formatting not supported');
                let cat = '';
                for (const arg of args) {
                    cat += String(arg);
                }
                console_internal.log(cat);
            }
        }

        new Console();
    )"_s);
    m_engine->globalObject().setProperty("console", consoleObject);
    m_thread.start();
}

void CodeInterpreterWorker::reset()
{
    m_response.clear();
    m_error = ToolEnums::Error::NoError;
    m_errorString.clear();
    m_consoleCapture.output.clear();
    m_engine->setInterrupted(false);
}

void CodeInterpreterWorker::request(const QString &code)
{
    reset();
    const QJSValue result = m_engine->evaluate(code);
    QString resultString;

    if (m_engine->isInterrupted()) {
        resultString = QString("Error: code execution was interrupted or timed out.");
    } else if (result.isError()) {
        // NOTE: We purposely do not set the m_error or m_errorString for the code interpreter since
        // we *want* the model to see the response has an error so it can hopefully correct itself. The
        // error member variables are intended for tools that have error conditions that cannot be corrected.
        // For instance, a tool depending upon the network might set these error variables if the network
        // is not available.
        const QStringList lines = code.split('\n');
        const int line = result.property("lineNumber").toInt();
        const int index = line - 1;
        const QString lineContent = (index >= 0 && index < lines.size()) ? lines.at(index) : "Line not found in code.";
        resultString = QString("Uncaught exception at line %1: %2\n\t%3")
            .arg(line)
            .arg(result.toString())
            .arg(lineContent);
        m_error = ToolEnums::Error::UnknownError;
        m_errorString = resultString;
    } else {
        resultString = result.isUndefined() ? QString() : result.toString();
    }

    if (resultString.isEmpty())
        resultString = m_consoleCapture.output;
    else if (!m_consoleCapture.output.isEmpty())
        resultString += "\n" + m_consoleCapture.output;
    m_response = resultString;
    emit finished();
}

bool CodeInterpreterWorker::interrupt()
{
    m_error = ToolEnums::Error::TimeoutError;
    m_engine->setInterrupted(true);
    return true;
}
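The Console shim above exists because a Q_INVOKABLE slot cannot receive JavaScript varargs, so the arguments are stringified and joined on the JS side before crossing into C++. A minimal standalone sketch of the same capture technique (class and property names here are illustrative, not the gpt4all API):

#include <QCoreApplication>
#include <QDebug>
#include <QJSEngine>
#include <QObject>
#include <QString>

// QObject whose invokable slot collects everything the script prints.
class Capture : public QObject {
    Q_OBJECT
public:
    explicit Capture(QObject *parent = nullptr) : QObject(parent) {}
    QString output;
    Q_INVOKABLE void log(const QString &line) { output += line + u'\n'; }
};

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);
    QJSEngine engine;
    auto *capture = new Capture(&app); // parented, so the engine won't take ownership

    // Expose the collector, then shim console.log in JS so that varargs are
    // stringified and joined before reaching the single-argument C++ slot.
    engine.globalObject().setProperty("capture", engine.newQObject(capture));
    engine.evaluate("console = { log: (...a) => capture.log(a.map(String).join('')) };");
    engine.evaluate("console.log('2 + 2 = ', 2 + 2);");

    qDebug().noquote() << capture->output; // prints: 2 + 2 = 4
    return 0;
}

#include "main.moc" // Q_OBJECT in a .cpp needs the moc output included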
gpt4all-chat/src/codeinterpreter.h (new file, 97 lines)
@@ -0,0 +1,97 @@
#ifndef CODEINTERPRETER_H
#define CODEINTERPRETER_H

#include "tool.h"
#include "toolcallparser.h"

#include <QObject>
#include <QString>
#include <QThread>
#include <QtAssert>

class QJSEngine;


class JavaScriptConsoleCapture : public QObject
{
    Q_OBJECT
public:
    QString output;
    Q_INVOKABLE void log(const QString &message)
    {
        const int maxLength = 1024;
        if (output.length() >= maxLength)
            return;

        if (output.length() + message.length() + 1 > maxLength) {
            static const QString trunc = "\noutput truncated at " + QString::number(maxLength) + " characters...";
            int remainingLength = maxLength - output.length();
            if (remainingLength > 0)
                output.append(message.left(remainingLength));
            output.append(trunc);
            Q_ASSERT(output.length() > maxLength);
        } else {
            output.append(message + "\n");
        }
    }
};

class CodeInterpreterWorker : public QObject {
    Q_OBJECT
public:
    CodeInterpreterWorker();
    virtual ~CodeInterpreterWorker() {}

    void reset();
    QString response() const { return m_response; }
    ToolEnums::Error error() const { return m_error; }
    QString errorString() const { return m_errorString; }
    bool interrupt();

public Q_SLOTS:
    void request(const QString &code);

Q_SIGNALS:
    void finished();

private:
    QString m_response;
    ToolEnums::Error m_error = ToolEnums::Error::NoError;
    QString m_errorString;
    QThread m_thread;
    JavaScriptConsoleCapture m_consoleCapture;
    QJSEngine *m_engine = nullptr;
};

class CodeInterpreter : public Tool
{
    Q_OBJECT
public:
    explicit CodeInterpreter();
    virtual ~CodeInterpreter() {}

    void run(const QList<ToolParam> &params) override;
    bool interrupt() override;

    ToolEnums::Error error() const override { return m_error; }
    QString errorString() const override { return m_errorString; }

    QString name() const override { return tr("Code Interpreter"); }
    QString description() const override { return tr("compute javascript code using console.log as output"); }
    QString function() const override { return ToolCallConstants::CodeInterpreterFunction; }
    QList<ToolParamInfo> parameters() const override;
    virtual QString symbolicFormat() const override;
    QString examplePrompt() const override;
    QString exampleCall() const override;
    QString exampleReply() const override;

Q_SIGNALS:
    void request(const QString &code);

private:
    ToolEnums::Error m_error = ToolEnums::Error::NoError;
    QString m_errorString;
    CodeInterpreterWorker *m_worker;
};

#endif // CODEINTERPRETER_H
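CodeInterpreterWorker moves itself onto its own QThread in its constructor and is driven purely through queued signal/slot hops, which keeps script evaluation off the caller's thread. A minimal sketch of that worker pattern, independent of the Tool interfaces declared above (names are illustrative):

#include <QCoreApplication>
#include <QDebug>
#include <QObject>
#include <QThread>

// Worker that lives on its own thread, like CodeInterpreterWorker.
class Worker : public QObject {
    Q_OBJECT
public:
    Worker() { moveToThread(&m_thread); m_thread.start(); }
    ~Worker() override { m_thread.quit(); m_thread.wait(); } // stop the thread before teardown

public slots:
    void request(int n) { emit finished(n * n); } // runs on m_thread, not the caller's thread

signals:
    void finished(int result);

private:
    QThread m_thread; // the QThread object itself stays with the creating thread
};

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);
    Worker w;
    QObject::connect(&w, &Worker::finished, &app, [&](int r) {
        qDebug() << "result:" << r; // delivered back on the main thread
        app.quit();
    });
    // A queued invocation hops over to the worker's thread, like the
    // CodeInterpreter::request signal above.
    QMetaObject::invokeMethod(&w, "request", Qt::QueuedConnection, Q_ARG(int, 7));
    return app.exec();
}

#include "main.moc" // Q_OBJECT in a .cpp needs the moc output included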
gpt4all-chat/src/config.h.in (new file, 7 lines)
@@ -0,0 +1,7 @@
#pragma once

#define APP_VERSION "@APP_VERSION@"

#define G4A_CONFIG(name) (1/G4A_CONFIG_##name == 1)

#define G4A_CONFIG_force_d3d12 @GPT4ALL_CONFIG_FORCE_D3D12@
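The G4A_CONFIG macro is a guard, not a plain truth test: if the flag macro was never generated, the bare identifier evaluates to 0 inside #if, and the resulting 1/0 is a hard preprocessor error on mainstream compilers instead of the condition silently reading as false. A small sketch; the value shown for "on" is an assumed CMake substitution, and the -1 mentioned for "off" is likewise an assumption, not taken from this diff:

#define G4A_CONFIG(name) (1/G4A_CONFIG_##name == 1)

#define G4A_CONFIG_force_d3d12 1    // assumed substitution when the option is on

#if G4A_CONFIG(force_d3d12)         // expands to (1/1 == 1): true
static const bool useD3d12 = true;
#else
static const bool useD3d12 = false; // reached if the flag expands to a nonzero "off" value such as -1 (assumed)
#endif

// #if G4A_CONFIG(froce_d3d12)      // typo: G4A_CONFIG_froce_d3d12 is undefined,
//                                  // so the test becomes "1/0" and the build fails loudly

int main() { return useD3d12 ? 0 : 1; }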
@@ -1,10 +1,11 @@
 #include "database.h"

 #include "mysettings.h"
-#include "utils.h"
+#include "utils.h" // IWYU pragma: keep

 #include <duckx/duckx.hpp>
 #include <fmt/format.h>
+#include <usearch/index.hpp>
 #include <usearch/index_plugins.hpp>

 #include <QDebug>
@@ -12,9 +13,9 @@
 #include <QDirIterator>
 #include <QFile>
 #include <QFileSystemWatcher>
+#include <QFlags>
 #include <QIODevice>
-#include <QPdfDocument>
-#include <QPdfSelection>
+#include <QKeyValueIterator>
 #include <QRegularExpression>
 #include <QSqlError>
 #include <QSqlQuery>
@@ -23,14 +24,24 @@
 #include <QMap>
 #include <QUtf8StringView>
 #include <QVariant>
-#include <Qt>
 #include <QtLogging>
+#include <QtMinMax>
+#include <QtTypes>

 #include <algorithm>
 #include <cmath>
 #include <optional>
 #include <stdexcept>

+#ifdef GPT4ALL_USE_QTPDF
+#  include <QPdfDocument>
+#  include <QPdfSelection>
+#else
+#  include <fpdfview.h>
+#  include <fpdf_doc.h>
+#  include <fpdf_text.h>
+#endif
+
 using namespace Qt::Literals::StringLiterals;
 namespace ranges = std::ranges;
 namespace us = unum::usearch;
@@ -38,6 +49,7 @@ namespace us = unum::usearch;
 //#define DEBUG
 //#define DEBUG_EXAMPLE

+
 namespace {

 /* QFile that checks input for binary data. If seen, it fails the read and returns true
@@ -1103,9 +1115,9 @@ class DocumentReader {
 public:
     struct Metadata { QString title, author, subject, keywords; };

-    static std::unique_ptr<DocumentReader> fromDocument(const DocumentInfo &info);
+    static std::unique_ptr<DocumentReader> fromDocument(DocumentInfo info);

-    const DocumentInfo &doc     () const { return *m_info; }
+    const DocumentInfo &doc     () const { return m_info; }
     const Metadata     &metadata() const { return m_metadata; }
     const std::optional<QString> &word    () const { return m_word; }
     const std::optional<QString> &nextWord()       { m_word = advance(); return m_word; }
@@ -1115,8 +1127,8 @@ public:
     virtual ~DocumentReader() = default;

 protected:
-    explicit DocumentReader(const DocumentInfo &info)
-        : m_info(&info) {}
+    explicit DocumentReader(DocumentInfo info)
+        : m_info(std::move(info)) {}

     void postInit(Metadata &&metadata = {})
     {
@@ -1126,17 +1138,18 @@ protected:

     virtual std::optional<QString> advance() = 0;

-    const DocumentInfo *m_info;
+    DocumentInfo m_info;
     Metadata m_metadata;
     std::optional<QString> m_word;
 };

 namespace {

+#ifdef GPT4ALL_USE_QTPDF
 class PdfDocumentReader final : public DocumentReader {
 public:
-    explicit PdfDocumentReader(const DocumentInfo &info)
-        : DocumentReader(info)
+    explicit PdfDocumentReader(DocumentInfo info)
+        : DocumentReader(std::move(info))
     {
         QString path = info.file.canonicalFilePath();
         if (m_doc.load(path) != QPdfDocument::Error::None)
@@ -1173,11 +1186,103 @@ private:
     QString m_pageText;
     std::optional<QTextStream> m_stream;
 };
+#else
+class PdfDocumentReader final : public DocumentReader {
+public:
+    explicit PdfDocumentReader(DocumentInfo info)
+        : DocumentReader(std::move(info))
+    {
+        QString path = info.file.canonicalFilePath();
+        m_doc = FPDF_LoadDocument(path.toUtf8().constData(), nullptr);
+        if (!m_doc)
+            throw std::runtime_error(fmt::format("Failed to load PDF: {}", path));
+
+        // Extract metadata
+        Metadata metadata {
+            .title    = getMetadata("Title"   ),
+            .author   = getMetadata("Author"  ),
+            .subject  = getMetadata("Subject" ),
+            .keywords = getMetadata("Keywords"),
+        };
+        postInit(std::move(metadata));
+    }
+
+    ~PdfDocumentReader() override
+    {
+        if (m_page)
+            FPDF_ClosePage(m_page);
+        if (m_doc)
+            FPDF_CloseDocument(m_doc);
+    }
+
+    int page() const override { return m_currentPage; }
+
+private:
+    std::optional<QString> advance() override
+    {
+        QString word;
+        do {
+            while (!m_stream || m_stream->atEnd()) {
+                if (m_currentPage >= FPDF_GetPageCount(m_doc))
+                    return std::nullopt;
+
+                if (m_page)
+                    FPDF_ClosePage(std::exchange(m_page, nullptr));
+                m_page = FPDF_LoadPage(m_doc, m_currentPage++);
+                if (!m_page)
+                    throw std::runtime_error("Failed to load page.");
+
+                m_pageText = extractTextFromPage(m_page);
+                m_stream.emplace(&m_pageText);
+            }
+            *m_stream >> word;
+        } while (word.isEmpty());
+        return word;
+    }
+
+    QString getMetadata(FPDF_BYTESTRING key)
+    {
+        // FPDF_GetMetaText includes a 2-byte null terminator
+        ulong nBytes = FPDF_GetMetaText(m_doc, key, nullptr, 0);
+        if (nBytes <= sizeof (FPDF_WCHAR))
+            return { "" };
+        QByteArray buffer(nBytes, Qt::Uninitialized);
+        ulong nResultBytes = FPDF_GetMetaText(m_doc, key, buffer.data(), buffer.size());
+        Q_ASSERT(nResultBytes % 2 == 0);
+        Q_ASSERT(nResultBytes <= nBytes);
+        return QString::fromUtf16(reinterpret_cast<const char16_t *>(buffer.data()), nResultBytes / 2 - 1);
+    }
+
+    QString extractTextFromPage(FPDF_PAGE page)
+    {
+        FPDF_TEXTPAGE textPage = FPDFText_LoadPage(page);
+        if (!textPage)
+            throw std::runtime_error("Failed to load text page.");
+
+        int nChars = FPDFText_CountChars(textPage);
+        if (!nChars)
+            return {};
+        // FPDFText_GetText includes a 2-byte null terminator
+        QByteArray buffer((nChars + 1) * sizeof (FPDF_WCHAR), Qt::Uninitialized);
+        int nResultChars = FPDFText_GetText(textPage, 0, nChars, reinterpret_cast<ushort *>(buffer.data()));
+        Q_ASSERT(nResultChars <= nChars + 1);
+
+        FPDFText_ClosePage(textPage);
+        return QString::fromUtf16(reinterpret_cast<const char16_t *>(buffer.data()), nResultChars - 1);
+    }
+
+    FPDF_DOCUMENT m_doc = nullptr;
+    FPDF_PAGE m_page = nullptr;
+    int m_currentPage = 0;
+    QString m_pageText;
+    std::optional<QTextStream> m_stream;
+};
+#endif // !defined(GPT4ALL_USE_QTPDF)

 class WordDocumentReader final : public DocumentReader {
 public:
-    explicit WordDocumentReader(const DocumentInfo &info)
-        : DocumentReader(info)
+    explicit WordDocumentReader(DocumentInfo info)
+        : DocumentReader(std::move(info))
         , m_doc(info.file.canonicalFilePath().toStdString())
     {
         m_doc.open();
@@ -1269,8 +1374,8 @@ protected:

 class TxtDocumentReader final : public DocumentReader {
 public:
-    explicit TxtDocumentReader(const DocumentInfo &info)
-        : DocumentReader(info)
+    explicit TxtDocumentReader(DocumentInfo info)
+        : DocumentReader(std::move(info))
         , m_file(info.file.canonicalFilePath())
     {
         if (!m_file.open(QIODevice::ReadOnly))
@@ -1311,13 +1416,13 @@ protected:

 } // namespace

-std::unique_ptr<DocumentReader> DocumentReader::fromDocument(const DocumentInfo &doc)
+std::unique_ptr<DocumentReader> DocumentReader::fromDocument(DocumentInfo doc)
 {
     if (doc.isPdf())
-        return std::make_unique<PdfDocumentReader>(doc);
+        return std::make_unique<PdfDocumentReader>(std::move(doc));
     if (doc.isDocx())
-        return std::make_unique<WordDocumentReader>(doc);
+        return std::make_unique<WordDocumentReader>(std::move(doc));
-    return std::make_unique<TxtDocumentReader>(doc);
+    return std::make_unique<TxtDocumentReader>(std::move(doc));
 }

 ChunkStreamer::ChunkStreamer(Database *database)
@@ -1325,12 +1430,12 @@ ChunkStreamer::ChunkStreamer(Database *database)

 ChunkStreamer::~ChunkStreamer() = default;

-void ChunkStreamer::setDocument(const DocumentInfo &doc, int documentId, const QString &embeddingModel)
+void ChunkStreamer::setDocument(DocumentInfo doc, int documentId, const QString &embeddingModel)
 {
     auto docKey = doc.key();
     if (!m_docKey || *m_docKey != docKey) {
         m_docKey = docKey;
-        m_reader = DocumentReader::fromDocument(doc);
+        m_reader = DocumentReader::fromDocument(std::move(doc));
         m_documentId = documentId;
         m_embeddingModel = embeddingModel;
         m_chunk.clear();
@@ -1340,7 +1445,8 @@ void ChunkStreamer::setDocument(DocumentInfo doc, int documentId, const QString
         if (m_database->m_documentIdCache.contains(documentId)) {
             QSqlQuery q(m_database->m_db);
             if (!m_database->removeChunksByDocumentId(q, documentId))
-                handleDocumentError("ERROR: Cannot remove chunks of document", documentId, doc.file.canonicalPath(), q.lastError());
+                handleDocumentError("ERROR: Cannot remove chunks of document",
+                                    documentId, m_reader->doc().file.canonicalPath(), q.lastError());
         }
     }
 }
@@ -1,7 +1,7 @@
 #ifndef DATABASE_H
 #define DATABASE_H

-#include "embllm.h" // IWYU pragma: keep
+#include "embllm.h"

 #include <QByteArray>
 #include <QChar>
@@ -15,11 +15,11 @@
 #include <QSet>
 #include <QSqlDatabase>
 #include <QString>
-#include <QStringList>
+#include <QStringList> // IWYU pragma: keep
 #include <QThread>
 #include <QUrl>
-#include <QVector>
+#include <QVector> // IWYU pragma: keep
-#include <QtGlobal>
+#include <QtAssert>

 #include <atomic>
 #include <cstddef>
@@ -28,7 +28,7 @@
 #include <memory>
 #include <optional>
 #include <utility>
-#include <vector>
+#include <vector> // IWYU pragma: keep

 using namespace Qt::Literals::StringLiterals;

@@ -39,6 +39,7 @@ class QSqlQuery;
 class QTextStream;
 class QTimer;

+
 /* Version 0: GPT4All v2.4.3, full-text search
  * Version 1: GPT4All v2.5.3, embeddings in hsnwlib
  * Version 2: GPT4All v3.0.0, embeddings in sqlite
@@ -171,7 +172,7 @@ public:
     explicit ChunkStreamer(Database *database);
     ~ChunkStreamer();

-    void setDocument(const DocumentInfo &doc, int documentId, const QString &embeddingModel);
+    void setDocument(DocumentInfo doc, int documentId, const QString &embeddingModel);
     std::optional<DocumentInfo::key_type> currentDocKey() const;
     void reset();

@@ -10,32 +10,37 @@
 #include <QDebug>
 #include <QGlobalStatic>
 #include <QGuiApplication>
-#include <QIODevice>
+#include <QIODevice> // IWYU pragma: keep
 #include <QJsonArray>
 #include <QJsonDocument>
 #include <QJsonObject>
 #include <QJsonValue>
+#include <QKeyValueIterator>
 #include <QLocale>
 #include <QNetworkRequest>
-#include <QPair>
+#include <QPair> // IWYU pragma: keep
+#include <QRegularExpression>
+#include <QRegularExpressionMatch>
 #include <QSettings>
 #include <QSslConfiguration>
 #include <QSslSocket>
-#include <QStringList>
+#include <QStringList> // IWYU pragma: keep
 #include <QTextStream>
 #include <QUrl>
 #include <QVariant>
-#include <QVector>
+#include <QVector> // IWYU pragma: keep
 #include <Qt>
+#include <QtAssert>
 #include <QtLogging>
+#include <QtMinMax>

-#include <algorithm>
 #include <compare>
 #include <cstddef>
 #include <utility>

 using namespace Qt::Literals::StringLiterals;


 class MyDownload: public Download { };
 Q_GLOBAL_STATIC(MyDownload, downloadInstance)
 Download *Download::globalInstance()
@@ -13,10 +13,14 @@
 #include <QSslError>
 #include <QString>
 #include <QThread>
-#include <QtGlobal>
+#include <QtTypes>

+// IWYU pragma: no_forward_declare QFile
+// IWYU pragma: no_forward_declare QList
+// IWYU pragma: no_forward_declare QSslError
 class QByteArray;


 struct ReleaseInfo {
     Q_GADGET
     Q_PROPERTY(QString version MEMBER version)
@@ -1,35 +1,35 @@
 #include "embllm.h"

-#include "modellist.h"
 #include "mysettings.h"

 #include <gpt4all-backend/llmodel.h>

 #include <QCoreApplication>
 #include <QDebug>
-#include <QFile>
 #include <QFileInfo>
 #include <QGuiApplication>
-#include <QIODevice>
 #include <QJsonArray>
 #include <QJsonDocument>
 #include <QJsonObject>
+#include <QJsonValue>
 #include <QList>
-#include <QMutexLocker>
+#include <QMutexLocker> // IWYU pragma: keep
 #include <QNetworkAccessManager>
 #include <QNetworkReply>
 #include <QNetworkRequest>
 #include <QUrl>
 #include <Qt>
-#include <QtGlobal>
+#include <QtAssert>
 #include <QtLogging>

 #include <exception>
+#include <string>
 #include <utility>
 #include <vector>

 using namespace Qt::Literals::StringLiterals;


 static const QString EMBEDDING_MODEL_NAME = u"nomic-embed-text-v1.5"_s;
 static const QString LOCAL_EMBEDDING_MODEL = u"nomic-embed-text-v1.5.f16.gguf"_s;
@@ -359,8 +359,11 @@ void EmbeddingLLMWorker::handleFinished()
     if (retrievedData.isValid() && retrievedData.canConvert<QVector<EmbeddingChunk>>())
         chunks = retrievedData.value<QVector<EmbeddingChunk>>();

-    QVariant response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute);
-    Q_ASSERT(response.isValid());
+    QVariant response;
+    if (reply->error() != QNetworkReply::NoError) {
+        response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute);
+        Q_ASSERT(response.isValid());
+    }
     bool ok;
     int code = response.toInt(&ok);
     if (!ok || code != 200) {
@@ -5,10 +5,10 @@
 #include <QMutex>
 #include <QObject>
 #include <QString>
-#include <QStringList>
+#include <QStringList> // IWYU pragma: keep
 #include <QThread>
 #include <QVariant>
-#include <QVector>
+#include <QVector> // IWYU pragma: keep

 #include <atomic>
 #include <vector>
@@ -16,6 +16,7 @@
 class LLModel;
 class QNetworkAccessManager;

+
 struct EmbeddingChunk {
     QString model; // TODO(jared): use to select model
     int folder_id;
@@ -1,111 +1,76 @@
 #include "jinja_helpers.h"

-#include "utils.h"
-
-#include <fmt/format.h>
-
 #include <QString>
 #include <QUrl>

-#include <memory>
-#include <vector>
-
-using namespace std::literals::string_view_literals;
+#include <ranges>
+#include <string>
+#include <utility>
+
+namespace views = std::views;
+using json = nlohmann::ordered_json;


-JinjaResultInfo::~JinjaResultInfo() = default;
-
-const JinjaFieldMap<ResultInfo> JinjaResultInfo::s_fields = {
-    { "collection", [](auto &s) { return s.collection.toStdString(); } },
-    { "path",       [](auto &s) { return s.path      .toStdString(); } },
-    { "file",       [](auto &s) { return s.file      .toStdString(); } },
-    { "title",      [](auto &s) { return s.title     .toStdString(); } },
-    { "author",     [](auto &s) { return s.author    .toStdString(); } },
-    { "date",       [](auto &s) { return s.date      .toStdString(); } },
-    { "text",       [](auto &s) { return s.text      .toStdString(); } },
-    { "page",       [](auto &s) { return s.page; } },
-    { "file_uri",   [](auto &s) { return s.fileUri() .toStdString(); } },
-};
-
-JinjaPromptAttachment::~JinjaPromptAttachment() = default;
-
-const JinjaFieldMap<PromptAttachment> JinjaPromptAttachment::s_fields = {
-    { "url",               [](auto &s) { return s.url.toString()    .toStdString(); } },
-    { "file",              [](auto &s) { return s.file()            .toStdString(); } },
-    { "processed_content", [](auto &s) { return s.processedContent().toStdString(); } },
-};
-
-std::vector<std::string> JinjaMessage::GetKeys() const
-{
-    std::vector<std::string> result;
-    auto &keys = this->keys();
-    result.reserve(keys.size());
-    result.assign(keys.begin(), keys.end());
-    return result;
-}
-
-auto JinjaMessage::keys() const -> const std::unordered_set<std::string_view> &
-{
-    static const std::unordered_set<std::string_view> baseKeys
-        { "role", "content" };
-    static const std::unordered_set<std::string_view> userKeys
-        { "role", "content", "sources", "prompt_attachments" };
-    switch (m_item->type()) {
-        using enum ChatItem::Type;
-        case System:
-        case Response:
-            return baseKeys;
-        case Prompt:
-            return userKeys;
-    }
-    Q_UNREACHABLE();
-}
-
-bool operator==(const JinjaMessage &a, const JinjaMessage &b)
-{
-    if (a.m_item == b.m_item)
-        return true;
-    const auto &[ia, ib] = std::tie(*a.m_item, *b.m_item);
-    auto type = ia.type();
-    if (type != ib.type() || ia.value != ib.value)
-        return false;
-
-    switch (type) {
-        using enum ChatItem::Type;
-        case System:
-        case Response:
-            return true;
-        case Prompt:
-            return ia.sources == ib.sources && ia.promptAttachments == ib.promptAttachments;
-    }
-    Q_UNREACHABLE();
-}
-
-const JinjaFieldMap<JinjaMessage> JinjaMessage::s_fields = {
-    { "role", [](auto &m) {
-        switch (m.item().type()) {
-            using enum ChatItem::Type;
-            case System: return "system"sv;
-            case Prompt: return "user"sv;
-            case Response: return "assistant"sv;
-        }
-        Q_UNREACHABLE();
-    } },
-    { "content", [](auto &m) {
-        if (m.version() == 0 && m.item().type() == ChatItem::Type::Prompt)
-            return m.item().bakedPrompt().toStdString();
-        return m.item().value.toStdString();
-    } },
-    { "sources", [](auto &m) {
-        auto sources = m.item().sources | views::transform([](auto &r) {
-            return jinja2::GenericMap([map = std::make_shared<JinjaResultInfo>(r)] { return map.get(); });
-        });
-        return jinja2::ValuesList(sources.begin(), sources.end());
-    } },
-    { "prompt_attachments", [](auto &m) {
-        auto attachments = m.item().promptAttachments | views::transform([](auto &pa) {
-            return jinja2::GenericMap([map = std::make_shared<JinjaPromptAttachment>(pa)] { return map.get(); });
-        });
-        return jinja2::ValuesList(attachments.begin(), attachments.end());
-    } },
-};
+json::object_t JinjaResultInfo::AsJson() const
+{
+    return {
+        { "collection", m_source->collection.toStdString() },
+        { "path",       m_source->path      .toStdString() },
+        { "file",       m_source->file      .toStdString() },
+        { "title",      m_source->title     .toStdString() },
+        { "author",     m_source->author    .toStdString() },
+        { "date",       m_source->date      .toStdString() },
+        { "text",       m_source->text      .toStdString() },
+        { "page",       m_source->page                     },
+        { "file_uri",   m_source->fileUri() .toStdString() },
+    };
+}
+
+json::object_t JinjaPromptAttachment::AsJson() const
+{
+    return {
+        { "url",               m_attachment->url.toString()    .toStdString() },
+        { "file",              m_attachment->file()            .toStdString() },
+        { "processed_content", m_attachment->processedContent().toStdString() },
+    };
+}
+
+json::object_t JinjaMessage::AsJson() const
+{
+    json::object_t obj;
+    {
+        json::string_t role;
+        switch (m_item->type()) {
+            using enum MessageItem::Type;
+            case System:       role = "system";    break;
+            case Prompt:       role = "user";      break;
+            case Response:     role = "assistant"; break;
+            case ToolResponse: role = "tool";      break;
+        }
+        obj.emplace_back("role", std::move(role));
+    }
+    {
+        QString content;
+        if (m_version == 0 && m_item->type() == MessageItem::Type::Prompt) {
+            content = m_item->bakedPrompt();
+        } else {
+            content = m_item->content();
+        }
+        obj.emplace_back("content", content.toStdString());
+    }
+    if (m_item->type() == MessageItem::Type::Prompt) {
+        {
+            auto sources = m_item->sources() | views::transform([](auto &r) {
+                return JinjaResultInfo(r).AsJson();
+            });
+            obj.emplace("sources", json::array_t(sources.begin(), sources.end()));
+        }
+        {
+            auto attachments = m_item->promptAttachments() | views::transform([](auto &pa) {
+                return JinjaPromptAttachment(pa).AsJson();
+            });
+            obj.emplace("prompt_attachments", json::array_t(attachments.begin(), attachments.end()));
+        }
+    }
+    return obj;
+}
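The rewrite drops the jinja2cpp accessor interfaces in favor of plain nlohmann::ordered_json values. A standalone sketch of building one chat message the same way JinjaMessage::AsJson does, with hand-written literals standing in for the MessageItem accessors:

#include <nlohmann/json.hpp>
#include <iostream>
#include <utility>

using json = nlohmann::ordered_json; // ordered_json preserves insertion order,
                                     // so "role" serializes before "content"

int main()
{
    json::object_t msg;
    msg.emplace_back("role", "user");
    msg.emplace_back("content", "What is in my documents?");

    // Prompt messages additionally carry arrays, built from ranges in
    // JinjaMessage::AsJson above; here the source list is hand-written.
    json::array_t sources;
    sources.push_back({{"file", "notes.txt"}, {"page", 3}});
    msg.emplace("sources", std::move(sources));

    std::cout << json(msg).dump(2) << '\n';
    return 0;
}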
@@ -3,47 +3,21 @@
 #include "chatmodel.h"
 #include "database.h"

-#include <jinja2cpp/value.h>
+#include <nlohmann/json.hpp>

-#include <functional>
-#include <ranges>
-#include <string>
-#include <string_view>
-#include <unordered_map>
-#include <unordered_set>
-
-#include <QtGlobal>
+#include <QtTypes> // IWYU pragma: keep

+// IWYU pragma: no_forward_declare MessageItem
+// IWYU pragma: no_forward_declare PromptAttachment
+// IWYU pragma: no_forward_declare ResultInfo

-namespace views = std::views;
+using json = nlohmann::ordered_json;


-template <typename T>
-using JinjaFieldMap = std::unordered_map<std::string_view, std::function<jinja2::Value (const T &)>>;
-
 template <typename Derived>
-class JinjaComparable : public jinja2::IMapItemAccessor {
+class JinjaHelper {
 public:
-    JinjaComparable() = default;
-
-    bool IsEqual(const jinja2::IComparable &other) const override;
-
-private:
-    Q_DISABLE_COPY_MOVE(JinjaComparable)
-};
-
-template <typename Derived>
-class JinjaHelper : public JinjaComparable<Derived> {
-public:
-    size_t GetSize() const override
-    { return Derived::s_fields.size(); }
-
-    bool HasValue(const std::string &name) const override
-    { return Derived::s_fields.contains(name); }
-
-    jinja2::Value GetValueByName(const std::string &name) const override;
-
-    std::vector<std::string> GetKeys() const override
-    { auto keys = views::elements<0>(Derived::s_fields); return { keys.begin(), keys.end() }; }
+    json::object_t AsJson() const { return static_cast<const Derived *>(this)->AsJson(); }
 };

 class JinjaResultInfo : public JinjaHelper<JinjaResultInfo> {
@@ -51,18 +25,10 @@ public:
     explicit JinjaResultInfo(const ResultInfo &source) noexcept
         : m_source(&source) {}

-    ~JinjaResultInfo() override;
-
-    const ResultInfo &value() const { return *m_source; }
-
-    friend bool operator==(const JinjaResultInfo &a, const JinjaResultInfo &b)
-    { return a.m_source == b.m_source || *a.m_source == *b.m_source; }
+    json::object_t AsJson() const;

 private:
-    static const JinjaFieldMap<ResultInfo> s_fields;
     const ResultInfo *m_source;
-
-    friend class JinjaHelper<JinjaResultInfo>;
 };

 class JinjaPromptAttachment : public JinjaHelper<JinjaPromptAttachment> {
@@ -70,47 +36,20 @@ public:
     explicit JinjaPromptAttachment(const PromptAttachment &attachment) noexcept
         : m_attachment(&attachment) {}

-    ~JinjaPromptAttachment() override;
-
-    const PromptAttachment &value() const { return *m_attachment; }
-
-    friend bool operator==(const JinjaPromptAttachment &a, const JinjaPromptAttachment &b)
-    { return a.m_attachment == b.m_attachment || *a.m_attachment == *b.m_attachment; }
+    json::object_t AsJson() const;

 private:
-    static const JinjaFieldMap<PromptAttachment> s_fields;
     const PromptAttachment *m_attachment;
-
-    friend class JinjaHelper<JinjaPromptAttachment>;
 };

 class JinjaMessage : public JinjaHelper<JinjaMessage> {
 public:
-    explicit JinjaMessage(uint version, const ChatItem &item) noexcept
+    explicit JinjaMessage(uint version, const MessageItem &item) noexcept
         : m_version(version), m_item(&item) {}

-    const JinjaMessage &value  () const { return *this; }
-    uint            version() const { return m_version; }
-    const ChatItem &item   () const { return *m_item; }
-
-    size_t GetSize() const override { return keys().size(); }
-    bool HasValue(const std::string &name) const override { return keys().contains(name); }
-
-    jinja2::Value GetValueByName(const std::string &name) const override
-    { return HasValue(name) ? JinjaHelper::GetValueByName(name) : jinja2::EmptyValue(); }
-
-    std::vector<std::string> GetKeys() const override;
-
-private:
-    auto keys() const -> const std::unordered_set<std::string_view> &;
+    json::object_t AsJson() const;

 private:
-    static const JinjaFieldMap<JinjaMessage> s_fields;
     uint m_version;
-    const ChatItem *m_item;
-
-    friend class JinjaHelper<JinjaMessage>;
-    friend bool operator==(const JinjaMessage &a, const JinjaMessage &b);
+    const MessageItem *m_item;
 };
-
-#include "jinja_helpers.inl"
@ -1,17 +0,0 @@
|
|||||||
template <typename D>
|
|
||||||
bool JinjaComparable<D>::IsEqual(const jinja2::IComparable &other) const
|
|
||||||
{
|
|
||||||
if (auto *omsg = dynamic_cast<const D *>(&other))
|
|
||||||
return *static_cast<const D *>(this) == *omsg;
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename D>
|
|
||||||
jinja2::Value JinjaHelper<D>::GetValueByName(const std::string &name) const
|
|
||||||
{
|
|
||||||
if (auto it = D::s_fields.find(name); it != D::s_fields.end()) {
|
|
||||||
auto [_, func] = *it;
|
|
||||||
return func(static_cast<const D *>(this)->value());
|
|
||||||
}
|
|
||||||
return jinja2::EmptyValue();
|
|
||||||
}
|
|
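For readers skimming the header change above: the new JinjaHelper is a CRTP base, so AsJson() dispatches statically to the derived class instead of going through jinja2cpp's virtual accessor interface. A minimal self-contained sketch of the pattern follows; the Attachment type here is illustrative, not GPT4All's actual class.

    #include <iostream>
    #include <string>

    #include <nlohmann/json.hpp>

    using json = nlohmann::ordered_json;

    // CRTP base: AsJson() forwards to the derived class at compile time, so
    // every helper exposes a uniform JSON conversion without virtual dispatch.
    template <typename Derived>
    class Helper {
    public:
        json::object_t AsJson() const { return static_cast<const Derived *>(this)->AsJson(); }
    };

    class Attachment : public Helper<Attachment> {
    public:
        explicit Attachment(std::string url) : m_url(std::move(url)) {}
        json::object_t AsJson() const { return {{"url", m_url}}; }
    private:
        std::string m_url;
    };

    int main()
    {
        Attachment a("file:///tmp/notes.txt");
        std::cout << json(a.AsJson()).dump() << '\n'; // {"url":"file:///tmp/notes.txt"}
    }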
gpt4all-chat/src/jinja_replacements.cpp (new file, 774 lines)
@@ -0,0 +1,774 @@
// The map in this file is automatically generated by Jared. Do not hand-edit it.

#include "jinja_replacements.h"

#include <utility>


// This is a list of prompt templates known to GPT4All and their associated replacements which are automatically used
// instead when loading the chat template from GGUF. These exist for two primary reasons:
// - HuggingFace model authors make ugly chat templates because they do not expect the end user to see them;
// - and chat templates occasionally use features we do not support. This is less true now that we use minja.

// The substitution list.

const std::unordered_map<std::string_view, std::string_view> CHAT_TEMPLATE_SUBSTITUTIONS {
    // calme-2.1-phi3.5-4b.Q6_K.gguf (reported by ThilotE on Discord), Phi-3.5-mini-instruct-Q4_0.gguf (nomic-ai/gpt4all#3345)
    {
        // original
        R"TEMPLATE({% for message in messages %}{% if message['role'] == 'system' and message['content'] %}{{'<|system|>
' + message['content'] + '<|end|>
'}}{% elif message['role'] == 'user' %}{{'<|user|>
' + message['content'] + '<|end|>
'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>
' + message['content'] + '<|end|>
'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>
' }}{% else %}{{ eos_token }}{% endif %})TEMPLATE",
        // replacement
        R"TEMPLATE({%- for message in messages %}
{%- if message['role'] == 'system' and message['content'] %}
{{- '<|system|>\n' + message['content'] + '<|end|>\n' }}
{%- elif message['role'] == 'user' %}
{{- '<|user|>\n' + message['content'] + '<|end|>\n' }}
{%- elif message['role'] == 'assistant' %}
{{- '<|assistant|>\n' + message['content'] + '<|end|>\n' }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|assistant|>\n' }}
{%- else %}
{{- eos_token }}
{%- endif %})TEMPLATE",
    },
    // DeepSeek-R1-Distill-Qwen-7B-Q4_0.gguf
    {
        // original
        R"TEMPLATE({% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>'}}{% endif %})TEMPLATE",
        // replacement
        R"TEMPLATE({%- if not add_generation_prompt is defined %}
{%- set add_generation_prompt = false %}
{%- endif %}
{%- if messages[0]['role'] == 'system' %}
{{- messages[0]['content'] }}
{%- endif %}
{%- for message in messages %}
{%- if message['role'] == 'user' %}
{{- '<|User|>' + message['content'] }}
{%- endif %}
{%- if message['role'] == 'assistant' %}
{%- set content = message['content'] | regex_replace('^[\\s\\S]*</think>', '') %}
{{- '<|Assistant|>' + content + '<|end▁of▁sentence|>' }}
{%- endif %}
{%- endfor -%}
{%- if add_generation_prompt %}
{{- '<|Assistant|>' }}
{%- endif %})TEMPLATE",
    },
    // gemma-2-9b-it-Q4_0.gguf (nomic-ai/gpt4all#3282)
    {
        // original
        R"TEMPLATE({{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '
' + message['content'] | trim + '<end_of_turn>
' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model
'}}{% endif %})TEMPLATE",
        // replacement
        R"TEMPLATE({{- bos_token }}
{%- if messages[0]['role'] == 'system' %}
{{- raise_exception('System role not supported') }}
{%- endif %}
{%- for message in messages %}
{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}
{{- raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}
{%- endif %}
{%- if message['role'] == 'assistant' %}
{%- set role = 'model' %}
{%- else %}
{%- set role = message['role'] %}
{%- endif %}
{{- '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<start_of_turn>model\n' }}
{%- endif %})TEMPLATE",
    },
    // ghost-7b-v0.9.1-Q4_0.gguf
    {
        // original
        R"TEMPLATE({% for message in messages %}
{% if message['role'] == 'user' %}
{{ '<|user|>
' + message['content'] + eos_token }}
{% elif message['role'] == 'system' %}
{{ '<|system|>
' + message['content'] + eos_token }}
{% elif message['role'] == 'assistant' %}
{{ '<|assistant|>
' + message['content'] + eos_token }}
{% endif %}
{% if loop.last and add_generation_prompt %}
{{ '<|assistant|>' }}
{% endif %}
{% endfor %})TEMPLATE",
        // replacement
        R"TEMPLATE({%- for message in messages %}
{%- if message['role'] == 'user' %}
{{- '<|user|>\n' + message['content'] + eos_token }}
{%- elif message['role'] == 'system' %}
{{- '<|system|>\n' + message['content'] + eos_token }}
{%- elif message['role'] == 'assistant' %}
{{- '<|assistant|>\n' + message['content'] + eos_token }}
{%- endif %}
{%- if loop.last and add_generation_prompt %}
{{- '<|assistant|>' }}
{%- endif %}
{%- endfor %})TEMPLATE",
    },
    // granite-3.1-3b-a800m-instruct-Q4_0.gguf, granite-3.1-8b-instruct-Q4_0.gguf (nomic-ai/gpt4all#3471)
    {
        // original
        R"TEMPLATE({%- if messages[0]['role'] == 'system' %}{%- set system_message = messages[0]['content'] %}{%- set loop_messages = messages[1:] %}{%- else %}{%- set system_message = "Knowledge Cutoff Date: April 2024. You are Granite, developed by IBM." %}{%- if tools and documents %}{%- set system_message = system_message + " You are a helpful AI assistant with access to the following tools. When a tool is required to answer the user's query, respond with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request. Write the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data." %}{%- elif tools %}{%- set system_message = system_message + " You are a helpful AI assistant with access to the following tools. When a tool is required to answer the user's query, respond with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request." %}{%- elif documents %}{%- set system_message = system_message + " Write the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data." %}{%- else %}{%- set system_message = system_message + " You are a helpful AI assistant." %}{%- endif %}{%- if controls and 'citations' in controls and documents %}{%- set system_message = system_message + ' In your response, use the symbols <co> and </co> to indicate when a fact comes from a document in the search result, e.g <co>0</co> for a fact from document 0. Afterwards, list all the citations with their corresponding documents in an ordered list.' %}{%- endif %}{%- if controls and 'hallucinations' in controls and documents %}{%- set system_message = system_message + ' Finally, after the response is written, include a numbered list of sentences from the response that are potentially hallucinated and not based in the documents.' %}{%- endif %}{%- set loop_messages = messages %}{%- endif %}{{- '<|start_of_role|>system<|end_of_role|>' + system_message + '<|end_of_text|> ' }}{%- if tools %}{{- '<|start_of_role|>tools<|end_of_role|>' }}{{- tools | tojson(indent=4) }}{{- '<|end_of_text|> ' }}{%- endif %}{%- if documents %}{{- '<|start_of_role|>documents<|end_of_role|>' }}{%- for document in documents %}{{- 'Document ' + loop.index0 | string + ' ' }}{{- document['text'] }}{%- if not loop.last %}{{- ' '}}{%- endif%}{%- endfor %}{{- '<|end_of_text|> ' }}{%- endif %}{%- for message in loop_messages %}{{- '<|start_of_role|>' + message['role'] + '<|end_of_role|>' + message['content'] + '<|end_of_text|> ' }}{%- if loop.last and add_generation_prompt %}{{- '<|start_of_role|>assistant' }}{%- if controls %}{{- ' ' + controls | tojson()}}{%- endif %}{{- '<|end_of_role|>' }}{%- endif %}{%- endfor %})TEMPLATE",
        // replacement
        R"TEMPLATE({%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content'] %}
{%- set loop_messages = messages[1:] %}
{%- else %}
{%- set system_message = "Knowledge Cutoff Date: April 2024. You are Granite, developed by IBM. You are a helpful AI assistant." %}
{%- set loop_messages = messages %}
{%- endif %}
{{- '<|start_of_role|>system<|end_of_role|>' + system_message + '<|end_of_text|> ' }}
{%- for message in loop_messages %}
{{- '<|start_of_role|>' + message['role'] + '<|end_of_role|>' + message['content'] + '<|end_of_text|> ' }}
{%- if loop.last and add_generation_prompt %}
{{- '<|start_of_role|>assistant<|end_of_role|>' }}
{%- endif %}
{%- endfor %})TEMPLATE",
    },
    // Hermes-3-Llama-3.2-3B.Q4_0.gguf, mistral-7b-openorca.gguf2.Q4_0.gguf
    {
        // original
        R"TEMPLATE({% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '
' + message['content'] + '<|im_end|>' + '
'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
' }}{% endif %})TEMPLATE",
        // replacement
        R"TEMPLATE({%- for message in messages %}
{{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>\n' }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- endif %})TEMPLATE",
    },
    // Llama-3.2-1B-Instruct-Q4_0.gguf, Llama-3.2-3B-Instruct-Q4_0.gguf, SummLlama3.2-3B-Q4_0.gguf (nomic-ai/gpt4all#3309)
    {
        // original
        R"TEMPLATE({{- bos_token }}
{%- if custom_tools is defined %}
{%- set tools = custom_tools %}
{%- endif %}
{%- if not tools_in_user_message is defined %}
{%- set tools_in_user_message = true %}
{%- endif %}
{%- if not date_string is defined %}
{%- if strftime_now is defined %}
{%- set date_string = strftime_now("%d %b %Y") %}
{%- else %}
{%- set date_string = "26 Jul 2024" %}
{%- endif %}
{%- endif %}
{%- if not tools is defined %}
{%- set tools = none %}
{%- endif %}

{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content']|trim %}
{%- set messages = messages[1:] %}
{%- else %}
{%- set system_message = "" %}
{%- endif %}

{#- System message #}
{{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
{%- if tools is not none %}
{{- "Environment: ipython\n" }}
{%- endif %}
{{- "Cutting Knowledge Date: December 2023\n" }}
{{- "Today Date: " + date_string + "\n\n" }}
{%- if tools is not none and not tools_in_user_message %}
{{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{%- endif %}
{{- system_message }}
{{- "<|eot_id|>" }}

{#- Custom tools are passed in a user message with some extra guidance #}
{%- if tools_in_user_message and not tools is none %}
{#- Extract the first user message so we can plug it in here #}
{%- if messages | length != 0 %}
{%- set first_user_message = messages[0]['content']|trim %}
{%- set messages = messages[1:] %}
{%- else %}
{{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }}
{%- endif %}
{{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}
{{- "Given the following functions, please respond with a JSON for a function call " }}
{{- "with its proper arguments that best answers the given prompt.\n\n" }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{{- first_user_message + "<|eot_id|>"}}
{%- endif %}

{%- for message in messages %}
{%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}
{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}
{%- elif 'tool_calls' in message %}
{%- if not message.tool_calls|length == 1 %}
{{- raise_exception("This model only supports single tool-calls at once!") }}
{%- endif %}
{%- set tool_call = message.tool_calls[0].function %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
{{- '{"name": "' + tool_call.name + '", ' }}
{{- '"parameters": ' }}
{{- tool_call.arguments | tojson }}
{{- "}" }}
{{- "<|eot_id|>" }}
{%- elif message.role == "tool" or message.role == "ipython" %}
{{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }}
{%- if message.content is mapping or message.content is iterable %}
{{- message.content | tojson }}
{%- else %}
{{- message.content }}
{%- endif %}
{{- "<|eot_id|>" }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %})TEMPLATE",
        // replacement
        R"TEMPLATE({{- bos_token }}
{%- set date_string = strftime_now('%d %b %Y') %}

{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content'] | trim %}
{%- set loop_start = 1 %}
{%- else %}
{%- set system_message = '' %}
{%- set loop_start = 0 %}
{%- endif %}

{#- System message #}
{{- '<|start_header_id|>system<|end_header_id|>\n\n' }}
{{- 'Cutting Knowledge Date: December 2023\n' }}
{{- 'Today Date: ' + date_string + '\n\n' }}
{{- system_message }}
{{- '<|eot_id|>' }}

{%- for message in messages %}
{%- if loop.index0 >= loop_start %}
{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %})TEMPLATE",
    },
    // Llama-3.3-70B-Instruct-Q4_0.gguf (nomic-ai/gpt4all#3305)
    {
        // original
        R"TEMPLATE({{- bos_token }}
{%- if custom_tools is defined %}
{%- set tools = custom_tools %}
{%- endif %}
{%- if not tools_in_user_message is defined %}
{%- set tools_in_user_message = true %}
{%- endif %}
{%- if not date_string is defined %}
{%- set date_string = "26 Jul 2024" %}
{%- endif %}
{%- if not tools is defined %}
{%- set tools = none %}
{%- endif %}

{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content']|trim %}
{%- set messages = messages[1:] %}
{%- else %}
{%- set system_message = "" %}
{%- endif %}

{#- System message + builtin tools #}
{{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
{%- if builtin_tools is defined or tools is not none %}
{{- "Environment: ipython\n" }}
{%- endif %}
{%- if builtin_tools is defined %}
{{- "Tools: " + builtin_tools | reject('equalto', 'code_interpreter') | join(", ") + "\n\n"}}
{%- endif %}
{{- "Cutting Knowledge Date: December 2023\n" }}
{{- "Today Date: " + date_string + "\n\n" }}
{%- if tools is not none and not tools_in_user_message %}
{{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{%- endif %}
{{- system_message }}
{{- "<|eot_id|>" }}

{#- Custom tools are passed in a user message with some extra guidance #}
{%- if tools_in_user_message and not tools is none %}
{#- Extract the first user message so we can plug it in here #}
{%- if messages | length != 0 %}
{%- set first_user_message = messages[0]['content']|trim %}
{%- set messages = messages[1:] %}
{%- else %}
{{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }}
{%- endif %}
{{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}
{{- "Given the following functions, please respond with a JSON for a function call " }}
{{- "with its proper arguments that best answers the given prompt.\n\n" }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{{- first_user_message + "<|eot_id|>"}}
{%- endif %}

{%- for message in messages %}
{%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}
{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}
{%- elif 'tool_calls' in message %}
{%- if not message.tool_calls|length == 1 %}
{{- raise_exception("This model only supports single tool-calls at once!") }}
{%- endif %}
{%- set tool_call = message.tool_calls[0].function %}
{%- if builtin_tools is defined and tool_call.name in builtin_tools %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
{{- "<|python_tag|>" + tool_call.name + ".call(" }}
{%- for arg_name, arg_val in tool_call.arguments | items %}
{{- arg_name + '="' + arg_val + '"' }}
{%- if not loop.last %}
{{- ", " }}
{%- endif %}
{%- endfor %}
{{- ")" }}
{%- else %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
{{- '{"name": "' + tool_call.name + '", ' }}
{{- '"parameters": ' }}
{{- tool_call.arguments | tojson }}
{{- "}" }}
{%- endif %}
{%- if builtin_tools is defined %}
{#- This means we're in ipython mode #}
{{- "<|eom_id|>" }}
{%- else %}
{{- "<|eot_id|>" }}
{%- endif %}
{%- elif message.role == "tool" or message.role == "ipython" %}
{{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }}
{%- if message.content is mapping or message.content is iterable %}
{{- message.content | tojson }}
{%- else %}
{{- message.content }}
{%- endif %}
{{- "<|eot_id|>" }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %})TEMPLATE",
        // replacement
        R"TEMPLATE({{- bos_token }}
{%- set date_string = strftime_now('%d %b %Y') %}

{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content'] | trim %}
{%- set loop_start = 1 %}
{%- else %}
{%- set system_message = '' %}
{%- set loop_start = 0 %}
{%- endif %}

{#- System message #}
{{- '<|start_header_id|>system<|end_header_id|>\n\n' }}
{{- 'Cutting Knowledge Date: December 2023\n' }}
{{- 'Today Date: ' + date_string + '\n\n' }}
{{- system_message }}
{{- '<|eot_id|>' }}

{%- for message in messages %}
{%- if loop.index0 >= loop_start %}
{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %})TEMPLATE",
    },
    // Llama3-DiscoLeo-Instruct-8B-32k-v0.1-Q4_0.gguf (nomic-ai/gpt4all#3347)
    {
        // original
        R"TEMPLATE({% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>

'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>

' }}{% endif %})TEMPLATE",
        // replacement
        R"TEMPLATE({%- for message in messages %}
{%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' %}
{%- if loop.index0 == 0 %}
{%- set content = bos_token + content %}
{%- endif %}
{{- content }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %})TEMPLATE",
    },
    // Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf
    {
        // original
        R"TEMPLATE({% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>

'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>

' }})TEMPLATE",
        // replacement
        R"TEMPLATE({%- set loop_messages = messages %}
{%- for message in loop_messages %}
{%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}
{%- if loop.index0 == 0 %}
{%- set content = bos_token + content %}
{%- endif %}
{{- content }}
{%- endfor %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }})TEMPLATE",
    },
    // Meta-Llama-3-8B-Instruct.Q4_0.gguf
    {
        // original
        R"TEMPLATE({% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>

'+ message['content'] | trim + '<|eot_id|>' %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>

' }}{% endif %})TEMPLATE",
        // replacement
        R"TEMPLATE({%- set loop_messages = messages %}
{%- for message in loop_messages %}
{%- set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}
{{- content }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %})TEMPLATE",
    },
    // Mistral-Nemo-Instruct-2407-Q4_0.gguf (nomic-ai/gpt4all#3284)
    {
        // original
        R"TEMPLATE({%- if messages[0]["role"] == "system" %}
{%- set system_message = messages[0]["content"] %}
{%- set loop_messages = messages[1:] %}
{%- else %}
{%- set loop_messages = messages %}
{%- endif %}
{%- if not tools is defined %}
{%- set tools = none %}
{%- endif %}
{%- set user_messages = loop_messages | selectattr("role", "equalto", "user") | list %}

{#- This block checks for alternating user/assistant messages, skipping tool calling messages #}
{%- set ns = namespace() %}
{%- set ns.index = 0 %}
{%- for message in loop_messages %}
{%- if not (message.role == "tool" or message.role == "tool_results" or (message.tool_calls is defined and message.tool_calls is not none)) %}
{%- if (message["role"] == "user") != (ns.index % 2 == 0) %}
{{- raise_exception("After the optional system message, conversation roles must alternate user/assistant/user/assistant/...") }}
{%- endif %}
{%- set ns.index = ns.index + 1 %}
{%- endif %}
{%- endfor %}

{{- bos_token }}
{%- for message in loop_messages %}
{%- if message["role"] == "user" %}
{%- if tools is not none and (message == user_messages[-1]) %}
{{- "[AVAILABLE_TOOLS][" }}
{%- for tool in tools %}
{%- set tool = tool.function %}
{{- '{"type": "function", "function": {' }}
{%- for key, val in tool.items() if key != "return" %}
{%- if val is string %}
{{- '"' + key + '": "' + val + '"' }}
{%- else %}
{{- '"' + key + '": ' + val|tojson }}
{%- endif %}
{%- if not loop.last %}
{{- ", " }}
{%- endif %}
{%- endfor %}
{{- "}}" }}
{%- if not loop.last %}
{{- ", " }}
{%- else %}
{{- "]" }}
{%- endif %}
{%- endfor %}
{{- "[/AVAILABLE_TOOLS]" }}
{%- endif %}
{%- if loop.last and system_message is defined %}
{{- "[INST]" + system_message + "\n\n" + message["content"] + "[/INST]" }}
{%- else %}
{{- "[INST]" + message["content"] + "[/INST]" }}
{%- endif %}
{%- elif (message.tool_calls is defined and message.tool_calls is not none) %}
{{- "[TOOL_CALLS][" }}
{%- for tool_call in message.tool_calls %}
{%- set out = tool_call.function|tojson %}
{{- out[:-1] }}
{%- if not tool_call.id is defined or tool_call.id|length != 9 %}
{{- raise_exception("Tool call IDs should be alphanumeric strings with length 9!") }}
{%- endif %}
{{- ', "id": "' + tool_call.id + '"}' }}
{%- if not loop.last %}
{{- ", " }}
{%- else %}
{{- "]" + eos_token }}
{%- endif %}
{%- endfor %}
{%- elif message["role"] == "assistant" %}
{{- message["content"] + eos_token}}
{%- elif message["role"] == "tool_results" or message["role"] == "tool" %}
{%- if message.content is defined and message.content.content is defined %}
{%- set content = message.content.content %}
{%- else %}
{%- set content = message.content %}
{%- endif %}
{{- '[TOOL_RESULTS]{"content": ' + content|string + ", " }}
{%- if not message.tool_call_id is defined or message.tool_call_id|length != 9 %}
{{- raise_exception("Tool call IDs should be alphanumeric strings with length 9!") }}
{%- endif %}
{{- '"call_id": "' + message.tool_call_id + '"}[/TOOL_RESULTS]' }}
{%- else %}
{{- raise_exception("Only user and assistant roles are supported, with the exception of an initial optional system message!") }}
{%- endif %}
{%- endfor %})TEMPLATE",
        // replacement
        R"TEMPLATE({%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content'] %}
{%- set loop_start = 1 %}
{%- else %}
{%- set loop_start = 0 %}
{%- endif %}

{{- bos_token }}
{%- for message in messages %}
{#- This block checks for alternating user/assistant messages, skipping tool calling messages #}
{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}
{{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}
{%- endif %}

{%- if message['role'] == 'user' %}
{%- if loop.last and loop_start == 1 %}
{{- '[INST]' + system_message + '\n\n' + message['content'] + '[/INST]' }}
{%- else %}
{{- '[INST]' + message['content'] + '[/INST]' }}
{%- endif %}
{%- elif message['role'] == 'assistant' %}
{{- message['content'] + eos_token }}
{%- else %}
{{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}
{%- endif %}
{%- endfor %})TEMPLATE",
    },
    // Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf
    {
        // original
        R"TEMPLATE({% for message in messages %}{{'<|im_start|>' + message['role'] + '
' + message['content'] + '<|im_end|>' + '
'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
' }}{% endif %})TEMPLATE",
        // replacement
        R"TEMPLATE({%- for message in messages %}
{{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>\n' }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- endif %})TEMPLATE",
    },
    // occiglot-7b-de-en-instruct.Q4_0.gguf (nomic-ai/gpt4all#3283)
    {
        // original
        R"TEMPLATE({{'<s>'}}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful assistant. Please give a long and detailed answer.' %}{% endif %}{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{{'<|im_start|>system
' + system_message + '<|im_end|>
'}}{% endif %}{{'<|im_start|>' + message['role'] + '
' + message['content'] + '<|im_end|>' + '
'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
' }}{% endif %})TEMPLATE",
        // replacement
        R"TEMPLATE({{- bos_token }}
{%- if messages[0]['role'] == 'system' %}
{%- set loop_start = 1 %}
{%- set system_message = messages[0]['content'] %}
{%- else %}
{%- set loop_start = 0 %}
{%- set system_message = 'You are a helpful assistant. Please give a long and detailed answer.' %}
{%- endif %}
{{- '<|im_start|>system\n' + system_message + '<|im_end|>\n' }}
{%- for message in messages %}
{%- if loop.index0 >= loop_start %}
{{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>\n' }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- endif %})TEMPLATE",
    },
    // OLMoE-1B-7B-0125-Instruct-Q4_0.gguf (nomic-ai/gpt4all#3471)
    {
        // original
        R"TEMPLATE({{ bos_token }}{% for message in messages %}{% if message['role'] == 'system' %}{{ '<|system|>
' + message['content'] + '
' }}{% elif message['role'] == 'user' %}{{ '<|user|>
' + message['content'] + '
' }}{% elif message['role'] == 'assistant' %}{% if not loop.last %}{{ '<|assistant|>
' + message['content'] + eos_token + '
' }}{% else %}{{ '<|assistant|>
' + message['content'] + eos_token }}{% endif %}{% endif %}{% if loop.last and add_generation_prompt %}{{ '<|assistant|>
' }}{% endif %}{% endfor %})TEMPLATE",
        // replacement
        R"TEMPLATE({{- bos_token }}
{%- for message in messages %}
{%- if message['role'] == 'system' %}
{{- '<|system|>\n' + message['content'] + '\n' }}
{%- elif message['role'] == 'user' %}
{{- '<|user|>\n' + message['content'] + '\n' }}
{%- elif message['role'] == 'assistant' %}
{%- if not loop.last %}
{{- '<|assistant|>\n' + message['content'] + eos_token + '\n' }}
{%- else %}
{{- '<|assistant|>\n' + message['content'] + eos_token }}
{%- endif %}
{%- endif %}
{%- if loop.last and add_generation_prompt %}
{{- '<|assistant|>\n' }}
{%- endif %}
{%- endfor %})TEMPLATE",
    },
    // OLMoE-1B-7B-0924-Instruct-Q4_0.gguf (nomic-ai/gpt4all#3471)
    {
        // original
        R"TEMPLATE({{ bos_token }}{% for message in messages %}
{% if message['role'] == 'system' %}
{{ '<|system|>
' + message['content'] }}
{% elif message['role'] == 'user' %}
{{ '<|user|>
' + message['content'] }}
{% elif message['role'] == 'assistant' %}
{{ '<|assistant|>
' + message['content'] + eos_token }}
{% endif %}
{% if loop.last and add_generation_prompt %}
{{ '<|assistant|>' }}
{% endif %}
{% endfor %})TEMPLATE",
        // replacement
        R"TEMPLATE({{- bos_token }}
{%- for message in messages %}
{%- if message['role'] == 'system' %}
{{- '<|system|>\n' + message['content'] }}
{%- elif message['role'] == 'user' %}
{{- '<|user|>\n' + message['content'] }}
{%- elif message['role'] == 'assistant' %}
{{- '<|assistant|>\n' + message['content'] + eos_token }}
{%- endif %}
{%- if loop.last and add_generation_prompt %}
{{- '<|assistant|>' }}
{%- endif %}
{%- endfor %})TEMPLATE",
    },
    // Phi-3.1-mini-128k-instruct-Q4_0.gguf (nomic-ai/gpt4all#3346)
    {
        // original
        R"TEMPLATE({% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>
' + message['content'] + '<|end|>
'}}{% elif message['role'] == 'user' %}{{'<|user|>
' + message['content'] + '<|end|>
'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>
' + message['content'] + '<|end|>
'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>
' }}{% else %}{{ eos_token }}{% endif %})TEMPLATE",
        // replacement
        R"TEMPLATE({%- for message in messages %}
{%- if message['role'] == 'system' %}
{{- '<|system|>\n' + message['content'] + '<|end|>\n' }}
{%- elif message['role'] == 'user' %}
{{- '<|user|>\n' + message['content'] + '<|end|>\n' }}
{%- elif message['role'] == 'assistant' %}
{{- '<|assistant|>\n' + message['content'] + '<|end|>\n' }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|assistant|>\n' }}
{%- else %}
{{- eos_token }}
{%- endif %})TEMPLATE",
    },
    // Phi-3-mini-4k-instruct.Q4_0.gguf
    {
        // original
        R"TEMPLATE({{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '
' + message['content'] + '<|end|>
' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>
' }}{% else %}{{ eos_token }}{% endif %})TEMPLATE",
        // replacement
        R"TEMPLATE({{- bos_token }}
{%- for message in messages %}
{{- '<|' + message['role'] + '|>\n' + message['content'] + '<|end|>\n' }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|assistant|>\n' }}
{%- else %}
{{- eos_token }}
{%- endif %})TEMPLATE",
    },
    // qwen2-1_5b-instruct-q4_0.gguf (nomic-ai/gpt4all#3263), qwen2-72b-instruct-q4_0.gguf
    {
        // original
        R"TEMPLATE({% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system
You are a helpful assistant.<|im_end|>
' }}{% endif %}{{'<|im_start|>' + message['role'] + '
' + message['content'] + '<|im_end|>' + '
'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
' }}{% endif %})TEMPLATE",
        // replacement
        R"TEMPLATE({%- for message in messages %}
{%- if loop.first and messages[0]['role'] != 'system' %}
{{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
{%- endif %}
{{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>\n' }}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- endif %})TEMPLATE",
    },
};
gpt4all-chat/src/jinja_replacements.h (new file, 6 lines)
@@ -0,0 +1,6 @@
#pragma once

#include <string_view>
#include <unordered_map>

extern const std::unordered_map<std::string_view, std::string_view> CHAT_TEMPLATE_SUBSTITUTIONS;
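Taken together, the two new files act as an exact-match lookup table: when the chat template read from a GGUF matches a known "original", the curated "replacement" is used instead. A minimal sketch of that lookup against the header's declaration follows; substituteChatTemplate is a hypothetical helper written for illustration, not GPT4All's actual API.

    #include "jinja_replacements.h"

    #include <string>

    // Hypothetical helper: return the curated replacement for a chat template
    // read from GGUF metadata, or the original text when no substitution is
    // known. The lookup is an exact string match, so the map keys must be
    // byte-for-byte identical to the templates shipped inside the models.
    static std::string substituteChatTemplate(const std::string &ggufTemplate)
    {
        auto it = CHAT_TEMPLATE_SUBSTITUTIONS.find(ggufTemplate);
        return it == CHAT_TEMPLATE_SUBSTITUTIONS.end() ? ggufTemplate
                                                       : std::string(it->second);
    }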
gpt4all-chat/src/llm.cpp
@@ -12,6 +12,9 @@
 #include <QSettings>
 #include <QUrl>
 #include <QtLogging>
+#include <QtSystemDetection>
+
+#include <string>
 
 #ifdef GPT4ALL_OFFLINE_INSTALLER
 #   include <QDesktopServices>
@@ -25,6 +28,7 @@
 
 using namespace Qt::Literals::StringLiterals;
 
+
 class MyLLM: public LLM { };
 Q_GLOBAL_STATIC(MyLLM, llmInstance)
 LLM *LLM::globalInstance()
gpt4all-chat/src/llm.h
@@ -3,7 +3,8 @@
 
 #include <QObject>
 #include <QString>
-#include <QtGlobal>
+#include <QtTypes>
 
+
 class LLM : public QObject
 {
gpt4all-chat/src/localdocs.cpp
@@ -5,10 +5,14 @@
 #include "mysettings.h"
 
 #include <QCoreApplication>
+#include <QDebug>
 #include <QGlobalStatic>
 #include <QGuiApplication>
+#include <QList>
 #include <QUrl>
 #include <Qt>
+#include <QtLogging>
 
+
 class MyLocalDocs: public LocalDocs { };
 Q_GLOBAL_STATIC(MyLocalDocs, localDocsInstance)
gpt4all-chat/src/localdocs.h
@@ -2,11 +2,14 @@
 #define LOCALDOCS_H
 
 #include "database.h"
-#include "localdocsmodel.h" // IWYU pragma: keep
+#include "localdocsmodel.h"
 
 #include <QObject>
 #include <QString>
-#include <QStringList>
+#include <QStringList> // IWYU pragma: keep
 
+// IWYU pragma: no_forward_declare LocalDocsModel
+
 
 class LocalDocs : public QObject
 {
gpt4all-chat/src/localdocsmodel.cpp
@@ -5,11 +5,11 @@
 
 #include <QDateTime>
 #include <QMap>
-#include <QVector>
-#include <QtGlobal>
+#include <QVector> // IWYU pragma: keep
 
 #include <utility>
 
 
 LocalDocsCollectionsModel::LocalDocsCollectionsModel(QObject *parent)
     : QSortFilterProxyModel(parent)
 {
gpt4all-chat/src/localdocsmodel.h
@@ -4,17 +4,19 @@
 #include "database.h"
 
 #include <QAbstractListModel>
-#include <QByteArray>
-#include <QHash>
 #include <QList>
-#include <QObject>
+#include <QObject> // IWYU pragma: keep
 #include <QSortFilterProxyModel>
 #include <QString>
-#include <QVariant>
 #include <Qt>
 
 #include <functional>
 
+class QByteArray;
+class QVariant;
+template <typename Key, typename T> class QHash;
+
 
 class LocalDocsCollectionsModel : public QSortFilterProxyModel
 {
     Q_OBJECT
gpt4all-chat/src/logger.cpp
@@ -2,8 +2,10 @@
 
 #include <QDateTime>
 #include <QDebug>
+#include <QFlags>
 #include <QGlobalStatic>
 #include <QIODevice>
+#include <QMutexLocker> // IWYU pragma: keep
 #include <QStandardPaths>
 
 #include <cstdio>
@@ -12,6 +14,7 @@
 
 using namespace Qt::Literals::StringLiterals;
 
+
 class MyLogger: public Logger { };
 Q_GLOBAL_STATIC(MyLogger, loggerInstance)
 Logger *Logger::globalInstance()
@@ -62,8 +65,11 @@ void Logger::messageHandler(QtMsgType type, const QMessageLogContext &, const QString &msg)
     }
     // Get time and date
     auto timestamp = QDateTime::currentDateTime().toString();
-    // Write message
     const std::string out = u"[%1] (%2): %3\n"_s.arg(typeString, timestamp, msg).toStdString();
+
+    // Write message
+    QMutexLocker locker(&logger->m_mutex);
     logger->m_file.write(out.c_str());
     logger->m_file.flush();
     std::cerr << out;
gpt4all-chat/src/logger.h
@@ -2,19 +2,24 @@
 #define LOGGER_H
 
 #include <QFile>
+#include <QMutex>
 #include <QString>
 #include <QtLogging>
 
-class Logger
-{
-    QFile m_file;
-
-    static void messageHandler(QtMsgType type, const QMessageLogContext &context, const QString &msg);
-
+class Logger {
 public:
+    explicit Logger();
+
     static Logger *globalInstance();
 
-    explicit Logger();
+private:
+    static void messageHandler(QtMsgType type, const QMessageLogContext &context, const QString &msg);
+
+private:
+    QFile m_file;
+    QMutex m_mutex;
 
     friend class MyLogger;
 };
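A note on the locking added in the two logger hunks above: Qt installs messageHandler process-wide, so any thread that logs through qDebug()/qWarning() ends up inside it concurrently, and without the lock two threads could interleave their write/flush pairs in the log file. A standalone sketch of the same pattern, not GPT4All's actual class:

    #include <QFile>
    #include <QMutex>
    #include <QMutexLocker>

    #include <string>

    // Serialize concurrent writers: the lock makes each write-and-flush pair
    // atomic with respect to other threads calling the message handler.
    void writeLogLine(QFile &file, QMutex &mutex, const std::string &out)
    {
        QMutexLocker locker(&mutex); // released automatically at scope exit
        file.write(out.c_str());
        file.flush();
    }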
gpt4all-chat/src/macosdock.mm
@@ -2,6 +2,7 @@
 
 #include <Cocoa/Cocoa.h>
 
+
 void MacOSDock::showIcon()
 {
     [[NSApplication sharedApplication] setActivationPolicy:NSApplicationActivationPolicyRegular];
@ -7,22 +7,37 @@
|
|||||||
#include "modellist.h"
|
#include "modellist.h"
|
||||||
#include "mysettings.h"
|
#include "mysettings.h"
|
||||||
#include "network.h"
|
#include "network.h"
|
||||||
|
#include "toolmodel.h"
|
||||||
|
|
||||||
#include <gpt4all-backend/llmodel.h>
|
#include <gpt4all-backend/llmodel.h>
|
||||||
#include <singleapplication.h>
|
#include <singleapplication.h>
|
||||||
|
|
||||||
|
#include <QByteArray>
|
||||||
#include <QCoreApplication>
|
#include <QCoreApplication>
|
||||||
#include <QFont>
|
#include <QFont>
|
||||||
#include <QFontDatabase>
|
#include <QFontDatabase>
|
||||||
|
#include <QList>
|
||||||
#include <QObject>
|
#include <QObject>
|
||||||
#include <QQmlApplicationEngine>
|
#include <QQmlApplicationEngine>
|
||||||
#include <QQmlContext>
|
#include <QQmlContext>
|
||||||
#include <QQuickWindow>
|
#include <QQuickWindow>
|
||||||
#include <QSettings>
|
#include <QSettings>
|
||||||
#include <QString>
|
#include <QString>
|
||||||
|
#include <QStringList>
|
||||||
#include <QUrl>
|
#include <QUrl>
|
||||||
#include <QVariant>
|
#include <QVariant>
|
||||||
|
#include <QWindow>
|
||||||
#include <Qt>
|
#include <Qt>
|
||||||
|
#include <QtAssert>
|
||||||
|
#include <QtSystemDetection>
|
||||||
|
|
||||||
|
#if G4A_CONFIG(force_d3d12)
|
||||||
|
# include <QSGRendererInterface>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef GPT4ALL_USE_QTPDF
|
||||||
|
# include <fpdfview.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
#ifdef Q_OS_LINUX
|
#ifdef Q_OS_LINUX
|
||||||
# include <QIcon>
|
# include <QIcon>
|
||||||
@ -57,6 +72,10 @@ static void raiseWindow(QWindow *window)
|
|||||||
|
|
||||||
int main(int argc, char *argv[])
|
int main(int argc, char *argv[])
|
||||||
{
|
{
|
||||||
|
#ifndef GPT4ALL_USE_QTPDF
|
||||||
|
FPDF_InitLibrary();
|
||||||
|
#endif
|
||||||
|
|
||||||
QCoreApplication::setOrganizationName("nomic.ai");
|
QCoreApplication::setOrganizationName("nomic.ai");
|
||||||
QCoreApplication::setOrganizationDomain("gpt4all.io");
|
QCoreApplication::setOrganizationDomain("gpt4all.io");
|
||||||
QCoreApplication::setApplicationName("GPT4All");
|
QCoreApplication::setApplicationName("GPT4All");
|
||||||
@@ -74,24 +93,27 @@ int main(int argc, char *argv[])
         return 0;
     }

+#if G4A_CONFIG(force_d3d12)
+    QQuickWindow::setGraphicsApi(QSGRendererInterface::Direct3D12);
+#endif
+
 #ifdef Q_OS_LINUX
     app.setWindowIcon(QIcon(":/gpt4all/icons/gpt4all.svg"));
 #endif

     // set search path before constructing the MySettings instance, which relies on this
-    QString llmodelSearchPaths = QCoreApplication::applicationDirPath();
-    const QString libDir = QCoreApplication::applicationDirPath() + "/../lib/";
-    if (LLM::directoryExists(libDir))
-        llmodelSearchPaths += ";" + libDir;
-#if defined(Q_OS_MAC)
-    const QString binDir = QCoreApplication::applicationDirPath() + "/../../../";
-    if (LLM::directoryExists(binDir))
-        llmodelSearchPaths += ";" + binDir;
-    const QString frameworksDir = QCoreApplication::applicationDirPath() + "/../Frameworks/";
-    if (LLM::directoryExists(frameworksDir))
-        llmodelSearchPaths += ";" + frameworksDir;
+    {
+        auto appDirPath = QCoreApplication::applicationDirPath();
+        QStringList searchPaths {
+#ifdef Q_OS_DARWIN
+            u"%1/../Frameworks"_s.arg(appDirPath),
+#else
+            appDirPath,
+            u"%1/../lib"_s.arg(appDirPath),
 #endif
-    LLModel::Implementation::setImplementationsSearchPath(llmodelSearchPaths.toStdString());
+        };
+        LLModel::Implementation::setImplementationsSearchPath(searchPaths.join(u';').toStdString());
+    }

     // Set the local and language translation before the qml engine has even been started. This will
     // use the default system locale unless the user has explicitly set it to use a different one.
@@ -116,6 +138,8 @@ int main(int argc, char *argv[])
     qmlRegisterSingletonInstance("download", 1, 0, "Download", Download::globalInstance());
     qmlRegisterSingletonInstance("network", 1, 0, "Network", Network::globalInstance());
     qmlRegisterSingletonInstance("localdocs", 1, 0, "LocalDocs", LocalDocs::globalInstance());
+    qmlRegisterSingletonInstance("toollist", 1, 0, "ToolList", ToolModel::globalInstance());
+    qmlRegisterUncreatableMetaObject(ToolEnums::staticMetaObject, "toolenums", 1, 0, "ToolEnums", "Error: only enums");
     qmlRegisterUncreatableMetaObject(MySettingsEnums::staticMetaObject, "mysettingsenums", 1, 0, "MySettingsEnums", "Error: only enums");

     {
@@ -163,5 +187,9 @@ int main(int argc, char *argv[])
     // Otherwise, we can get a heap-use-after-free inside of llama.cpp.
     ChatListModel::globalInstance()->destroyChats();

+#ifndef GPT4ALL_USE_QTPDF
+    FPDF_DestroyLibrary();
+#endif
+
     return res;
 }
@@ -1,6 +1,7 @@
 #include "modellist.h"

 #include "download.h"
+#include "jinja_replacements.h"
 #include "mysettings.h"
 #include "network.h"

@@ -8,9 +9,11 @@

 #include <QChar>
 #include <QCoreApplication>
+#include <QCryptographicHash>
 #include <QDebug>
 #include <QDir>
 #include <QDirIterator>
+#include <QEvent>
 #include <QEventLoop>
 #include <QFile>
 #include <QFileInfo>
@@ -28,14 +31,15 @@
 #include <QSslConfiguration>
 #include <QSslSocket>
 #include <QStandardPaths>
-#include <QStringList>
+#include <QStringList> // IWYU pragma: keep
 #include <QTextStream>
 #include <QTimer>
 #include <QUrl>
+#include <QtAssert>
 #include <QtLogging>
+#include <QtPreprocessorSupport>

 #include <algorithm>
-#include <cstddef>
 #include <iterator>
 #include <optional>
 #include <string>
@@ -62,15 +66,15 @@ static const QString RMODEL_CHAT_TEMPLATE = uR"(<chat>
 {%- if loop.first %}
 {{- '### Context:\n' }}
 {%- endif %}
-{{- 'Collection: ' + source.collection + '\n' +
+{{- ('Collection: ' + source.collection + '\n' +
     'Path: ' + source.path + '\n' +
-    'Excerpt: ' + source.text + '\n\n' }}
+    'Excerpt: ' + source.text + '\n\n') | escape }}
 {%- endfor %}
 {%- endif %}
 {%- for attachment in message.prompt_attachments %}
-{{- attachment.processed_content + '\n\n' }}
+{{- (attachment.processed_content + '\n\n') | escape }}
 {%- endfor %}
-{{- message.content }}
+{{- message.content | escape }}
 {{- '</' + message['role'] + '>' }}
 {%- endfor %}
 </chat>)"_s;
@@ -352,7 +356,18 @@ QVariant ModelInfo::defaultChatTemplate() const
         auto path = (dirpath + filename()).toUtf8();
         auto res = LLModel::Implementation::chatTemplate(path.constData());
         if (res) {
-            m_modelChatTemplate = QString::fromStdString(*res);
+            std::string ggufTmpl(std::move(*res));
+            if (ggufTmpl.size() >= 2 && ggufTmpl.end()[-2] != '\n' && ggufTmpl.end()[-1] == '\n')
+                ggufTmpl.erase(ggufTmpl.end() - 1); // strip trailing newline for e.g. Llama-3.2-3B-Instruct
+            if (
+                auto replacement = CHAT_TEMPLATE_SUBSTITUTIONS.find(ggufTmpl);
+                replacement != CHAT_TEMPLATE_SUBSTITUTIONS.end()
+            ) {
+                qWarning() << "automatically substituting chat template for" << filename();
+                auto &[badTemplate, goodTemplate] = *replacement;
+                ggufTmpl = goodTemplate;
+            }
+            m_modelChatTemplate = QString::fromStdString(ggufTmpl);
         } else {
             qWarning().nospace() << "failed to get chat template for " << filename() << ": " << res.error().c_str();
             m_modelChatTemplate = QString(); // do not retry
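The CHAT_TEMPLATE_SUBSTITUTIONS map consulted here comes from the new jinja_replacements.h included at the top of this file. A minimal sketch of the mechanism follows; the map entry below is invented, since the real header maps complete known-bad GGUF templates to corrected ones:

#include <string>
#include <unordered_map>

// Illustrative only: the real CHAT_TEMPLATE_SUBSTITUTIONS lives in
// jinja_replacements.h, and its keys/values are full chat templates,
// not these placeholders.
static const std::unordered_map<std::string, std::string> CHAT_TEMPLATE_SUBSTITUTIONS {
    { "<broken template as shipped in a GGUF>", "<corrected template>" },
};

std::string fixupChatTemplate(std::string tmpl)
{
    // Strip a single trailing newline so lookup keys match exactly.
    if (tmpl.size() >= 2 && tmpl.end()[-2] != '\n' && tmpl.end()[-1] == '\n')
        tmpl.erase(tmpl.end() - 1);
    if (auto it = CHAT_TEMPLATE_SUBSTITUTIONS.find(tmpl); it != CHAT_TEMPLATE_SUBSTITUTIONS.end())
        return it->second;  // substitute the known-good template
    return tmpl;
}

Matching on the full template string keeps the substitution conservative: only byte-for-byte known-bad templates are replaced, and everything else passes through untouched.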
@@ -473,14 +488,25 @@ GPT4AllDownloadableModels::GPT4AllDownloadableModels(QObject *parent)
     connect(this, &GPT4AllDownloadableModels::modelReset, this, &GPT4AllDownloadableModels::countChanged);
 }

+void GPT4AllDownloadableModels::filter(const QVector<QString> &keywords)
+{
+    m_keywords = keywords;
+    invalidateFilter();
+}
+
 bool GPT4AllDownloadableModels::filterAcceptsRow(int sourceRow,
                                                  const QModelIndex &sourceParent) const
 {
     QModelIndex index = sourceModel()->index(sourceRow, 0, sourceParent);
-    bool hasDescription = !sourceModel()->data(index, ModelList::DescriptionRole).toString().isEmpty();
+    const QString description = sourceModel()->data(index, ModelList::DescriptionRole).toString();
+    bool hasDescription = !description.isEmpty();
     bool isClone = sourceModel()->data(index, ModelList::IsCloneRole).toBool();
     bool isDiscovered = sourceModel()->data(index, ModelList::IsDiscoveredRole).toBool();
-    return !isDiscovered && hasDescription && !isClone;
+    bool isOnline = sourceModel()->data(index, ModelList::OnlineRole).toBool();
+    bool satisfiesKeyword = m_keywords.isEmpty();
+    for (const QString &k : m_keywords)
+        satisfiesKeyword = description.contains(k) ? true : satisfiesKeyword;
+    return !isOnline && !isDiscovered && hasDescription && !isClone && satisfiesKeyword;
 }

 int GPT4AllDownloadableModels::count() const
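Note that the keyword loop is an OR, not an AND: a single matching keyword is enough, and an empty list accepts every row. A standalone restatement of that predicate, assuming nothing beyond QtCore:

#include <QString>
#include <QVector>

// Same semantics as the keyword test in filterAcceptsRow(): an empty keyword
// list accepts everything; otherwise any single keyword found in the
// description is sufficient.
static bool satisfiesKeywords(const QString &description, const QVector<QString> &keywords)
{
    bool ok = keywords.isEmpty();
    for (const QString &k : keywords)
        ok = description.contains(k) ? true : ok;
    return ok;
}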
@@ -1599,7 +1625,6 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
         QString requiresVersion = obj["requires"].toString();
         QString versionRemoved = obj["removedIn"].toString();
         QString url = obj["url"].toString();
-        QByteArray modelHash = obj["md5sum"].toString().toLatin1();
         bool isDefault = obj.contains("isDefault") && obj["isDefault"] == u"true"_s;
         bool disableGUI = obj.contains("disableGUI") && obj["disableGUI"] == u"true"_s;
         QString description = obj["description"].toString();
@@ -1610,6 +1635,16 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
         QString type = obj["type"].toString();
         bool isEmbeddingModel = obj["embeddingModel"].toBool();

+        QByteArray modelHash;
+        ModelInfo::HashAlgorithm hashAlgorithm;
+        if (auto it = obj.find("sha256sum"_L1); it != obj.end()) {
+            modelHash = it->toString().toLatin1();
+            hashAlgorithm = ModelInfo::Sha256;
+        } else {
+            modelHash = obj["md5sum"].toString().toLatin1();
+            hashAlgorithm = ModelInfo::Md5;
+        }
+
         // Some models aren't supported in the GUI at all
         if (disableGUI)
             continue;
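For illustration, here is a hedged sketch of the two metadata shapes this now accepts. The md5sum/sha256sum keys and their precedence are taken from the code above, while the entries themselves are invented:

#include <QDebug>
#include <QJsonDocument>
#include <QJsonObject>

int main()
{
    // Two invented models3.json-style entries; only the hash fields matter here.
    const char *withSha256 = R"({ "filename": "a.gguf", "sha256sum": "9f86d081884c7d65..." })";
    const char *withMd5    = R"({ "filename": "b.gguf", "md5sum": "d41d8cd98f00b204e9800998ecf8427e" })";

    for (const char *raw : { withSha256, withMd5 }) {
        const QJsonObject obj = QJsonDocument::fromJson(raw).object();
        // Same precedence as parseModelsJsonFile(): prefer sha256sum, fall back to md5sum.
        if (auto it = obj.find("sha256sum"); it != obj.end())
            qDebug() << obj["filename"].toString() << "-> SHA-256" << it->toString();
        else
            qDebug() << obj["filename"].toString() << "-> MD5" << obj["md5sum"].toString();
    }
}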
@@ -1638,7 +1673,7 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
             { ModelList::FilenameRole, modelFilename },
             { ModelList::FilesizeRole, modelFilesize },
             { ModelList::HashRole, modelHash },
-            { ModelList::HashAlgorithmRole, ModelInfo::Md5 },
+            { ModelList::HashAlgorithmRole, hashAlgorithm },
             { ModelList::DefaultRole, isDefault },
             { ModelList::DescriptionRole, description },
             { ModelList::RequiresVersionRole, requiresVersion },
@@ -2322,3 +2357,56 @@ void ModelList::handleDiscoveryItemErrorOccurred(QNetworkReply::NetworkError code, QNetworkReply *reply)
     qWarning() << u"ERROR: Discovery item failed with error code \"%1-%2\""_s
                       .arg(code).arg(reply->errorString()).toStdString();
 }
+
+QStringList ModelList::remoteModelList(const QString &apiKey, const QUrl &baseUrl)
+{
+    QStringList modelList;
+
+    // Create the request
+    QNetworkRequest request;
+    request.setUrl(baseUrl.resolved(QUrl("models")));
+    request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
+
+    // Add the Authorization header
+    const QString bearerToken = QString("Bearer %1").arg(apiKey);
+    request.setRawHeader("Authorization", bearerToken.toUtf8());
+
+    // Make the GET request
+    QNetworkReply *reply = m_networkManager.get(request);
+
+    // We use a local event loop to wait for the request to complete
+    QEventLoop loop;
+    connect(reply, &QNetworkReply::finished, &loop, &QEventLoop::quit);
+    loop.exec();
+
+    // Check for errors
+    if (reply->error() == QNetworkReply::NoError) {
+        // Parse the JSON response
+        const QByteArray responseData = reply->readAll();
+        const QJsonDocument jsonDoc = QJsonDocument::fromJson(responseData);
+
+        if (!jsonDoc.isNull() && jsonDoc.isObject()) {
+            QJsonObject rootObj = jsonDoc.object();
+            QJsonValue dataValue = rootObj.value("data");
+
+            if (dataValue.isArray()) {
+                QJsonArray dataArray = dataValue.toArray();
+                for (const QJsonValue &val : dataArray) {
+                    if (val.isObject()) {
+                        QJsonObject obj = val.toObject();
+                        const QString modelId = obj.value("id").toString();
+                        modelList.append(modelId);
+                    }
+                }
+            }
+        }
+    } else {
+        // Handle network error (e.g. print it to qDebug)
+        qWarning() << "Error retrieving models:" << reply->errorString();
+    }
+
+    // Clean up
+    reply->deleteLater();
+
+    return modelList;
+}
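The endpoint shape used above (GET <base>/models returning a JSON object whose "data" array holds { "id": ... } entries) follows the OpenAI-compatible convention. A hedged usage sketch; the key and base URL are placeholders, and ModelList::globalInstance() is the accessor this codebase uses elsewhere:

#include "modellist.h"

#include <QDebug>
#include <QUrl>

// Hypothetical caller. Note remoteModelList() blocks on a local event loop,
// which keeps the call synchronous for QML at the cost of re-entrancy.
void listRemoteModels()
{
    const QStringList ids = ModelList::globalInstance()->remoteModelList(
        u"sk-..."_s,                             // placeholder API key
        QUrl(u"https://api.openai.com/v1/"_s));  // trailing slash matters: resolved("models") -> .../v1/models
    for (const QString &id : ids)
        qDebug() << "remote model:" << id;
}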
@@ -5,25 +5,29 @@
 #include <QByteArray>
 #include <QDateTime>
 #include <QHash>
-#include <QLatin1StringView>
+#include <QLatin1StringView> // IWYU pragma: keep
 #include <QList>
 #include <QMutex>
 #include <QNetworkAccessManager>
 #include <QNetworkReply>
 #include <QObject>
-#include <QPair>
-#include <QQmlEngine>
+#include <QPair> // IWYU pragma: keep
+#include <QQmlEngine> // IWYU pragma: keep
 #include <QSortFilterProxyModel>
 #include <QSslError>
 #include <QString>
 #include <QVariant>
-#include <QVector>
+#include <QVector> // IWYU pragma: keep
 #include <Qt>
-#include <QtGlobal>
+#include <QtTypes>

 #include <optional>
 #include <utility>

+// IWYU pragma: no_forward_declare QObject
+// IWYU pragma: no_forward_declare QSslError
+class QUrl;
+
 using namespace Qt::Literals::StringLiterals;

@@ -269,7 +273,7 @@ private:
     std::optional<QString> m_chatTemplate;
     mutable std::optional<QString> m_modelChatTemplate;
     QString m_systemMessage;
-    QString m_chatNamePrompt = "Describe the above conversation in seven words or less.";
+    QString m_chatNamePrompt = "Describe the above conversation. Your entire response must be three words or less.";
     QString m_suggestedFollowUpPrompt = "Suggest three very short factual follow-up questions that have not been answered yet or cannot be found inspired by the previous conversation and excerpts.";
     friend class MySettings;
     friend class ModelList;
@@ -302,11 +306,16 @@ public:
     explicit GPT4AllDownloadableModels(QObject *parent);
     int count() const;

+    Q_INVOKABLE void filter(const QVector<QString> &keywords);
+
 Q_SIGNALS:
     void countChanged();

 protected:
     bool filterAcceptsRow(int sourceRow, const QModelIndex &sourceParent) const override;

+private:
+    QVector<QString> m_keywords;
 };

 class HuggingFaceDownloadableModels : public QSortFilterProxyModel
@@ -525,6 +534,8 @@ public:

     Q_INVOKABLE void discoverSearch(const QString &discover);

+    Q_INVOKABLE QStringList remoteModelList(const QString &apiKey, const QUrl &baseUrl);
+
 Q_SIGNALS:
     void countChanged();
     void installedModelsChanged();
@@ -11,22 +11,27 @@
 #include <QFileInfo>
 #include <QGlobalStatic>
 #include <QGuiApplication>
-#include <QIODevice>
+#include <QIODevice> // IWYU pragma: keep
 #include <QMap>
 #include <QMetaObject>
 #include <QStandardPaths>
 #include <QThread>
 #include <QUrl>
-#include <QVariant>
 #include <QtLogging>
+#include <QtAssert>

 #include <algorithm>
 #include <string>
 #include <thread>
 #include <vector>
+
+#if !(defined(Q_OS_MAC) && defined(__aarch64__))
+#include <cstring>
+#endif

 using namespace Qt::Literals::StringLiterals;


 // used only for settings serialization, do not translate
 static const QStringList suggestionModeNames { "LocalDocsOnly", "On", "Off" };
 static const QStringList chatThemeNames { "Light", "Dark", "LegacyDark" };
@@ -4,20 +4,24 @@
 #include "modellist.h" // IWYU pragma: keep

 #include <QDateTime>
-#include <QLatin1StringView>
+#include <QLatin1StringView> // IWYU pragma: keep
 #include <QList>
 #include <QModelIndex>
 #include <QObject>
 #include <QSettings>
 #include <QString>
-#include <QStringList>
+#include <QStringList> // IWYU pragma: keep
 #include <QTranslator>
-#include <QVector>
+#include <QVariant>

 #include <cstdint>
 #include <memory>
 #include <optional>

+// IWYU pragma: no_forward_declare QModelIndex
+class QLocale;
+

 namespace MySettingsEnums {
     Q_NAMESPACE

@@ -8,7 +8,6 @@
 #include "localdocsmodel.h"
 #include "modellist.h"
 #include "mysettings.h"
-#include "utils.h"

 #include <gpt4all-backend/llmodel.h>

@@ -29,7 +28,6 @@
 #include <QSslSocket>
 #include <QSysInfo>
 #include <Qt>
-#include <QtGlobal>
 #include <QtLogging>
 #include <QUrl>
 #include <QUuid>
@@ -49,6 +47,7 @@ using namespace Qt::Literals::StringLiterals;
 #define STR_(x) #x
 #define STR(x) STR_(x)

+
 static const char MIXPANEL_TOKEN[] = "ce362e568ddaee16ed243eaffb5860a2";

 #ifdef __clang__
@@ -242,6 +241,12 @@ void Network::handleJsonUploadFinished()

     m_activeUploads.removeAll(jsonReply);

+    if (jsonReply->error() != QNetworkReply::NoError) {
+        qWarning() << "Request to" << jsonReply->url().toString() << "failed:" << jsonReply->errorString();
+        jsonReply->deleteLater();
+        return;
+    }
+
     QVariant response = jsonReply->attribute(QNetworkRequest::HttpStatusCodeAttribute);
     Q_ASSERT(response.isValid());
     bool ok;
@@ -449,6 +454,11 @@ void Network::handleIpifyFinished()
     QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());
     if (!reply)
         return;
+    if (reply->error() != QNetworkReply::NoError) {
+        qWarning() << "Request to" << reply->url().toString() << "failed:" << reply->errorString();
+        reply->deleteLater();
+        return;
+    }

     QVariant response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute);
     Q_ASSERT(response.isValid());
@@ -473,6 +483,11 @@ void Network::handleMixpanelFinished()
     QNetworkReply *reply = qobject_cast<QNetworkReply *>(sender());
     if (!reply)
         return;
+    if (reply->error() != QNetworkReply::NoError) {
+        qWarning() << "Request to" << reply->url().toString() << "failed:" << reply->errorString();
+        reply->deleteLater();
+        return;
+    }

     QVariant response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute);
     Q_ASSERT(response.isValid());
@@ -511,6 +526,11 @@ void Network::handleHealthFinished()
     QNetworkReply *healthReply = qobject_cast<QNetworkReply *>(sender());
     if (!healthReply)
         return;
+    if (healthReply->error() != QNetworkReply::NoError) {
+        qWarning() << "Request to" << healthReply->url().toString() << "failed:" << healthReply->errorString();
+        healthReply->deleteLater();
+        return;
+    }

     QVariant response = healthReply->attribute(QNetworkRequest::HttpStatusCodeAttribute);
     Q_ASSERT(response.isValid());
@@ -11,7 +11,14 @@
 #include <QSslError>
 #include <QString>
 #include <QVariant>
-#include <QVector>
+#include <QVariantMap> // IWYU pragma: keep
+#include <QVector> // IWYU pragma: keep
+
+// IWYU pragma: no_forward_declare QByteArray
+// IWYU pragma: no_forward_declare QNetworkReply
+// IWYU pragma: no_forward_declare QSslError
+class QUrl;


 struct KeyValue {
     QString key;
@@ -4,9 +4,10 @@
 #include "chatmodel.h"
 #include "modellist.h"
 #include "mysettings.h"
-#include "utils.h"
+#include "utils.h" // IWYU pragma: keep

 #include <fmt/format.h>
+#include <gpt4all-backend/llmodel.h>

 #include <QByteArray>
 #include <QCborArray>
@@ -15,32 +16,38 @@
 #include <QDateTime>
 #include <QDebug>
 #include <QHostAddress>
+#include <QHttpHeaders>
 #include <QHttpServer>
+#include <QHttpServerRequest>
 #include <QHttpServerResponder>
 #include <QJsonArray>
 #include <QJsonDocument>
 #include <QJsonObject>
 #include <QJsonValue>
 #include <QLatin1StringView>
-#include <QPair>
+#include <QPair> // IWYU pragma: keep
+#include <QTcpServer>
 #include <QVariant>
 #include <Qt>
+#include <QtAssert>
 #include <QtCborCommon>
-#include <QtGlobal>
 #include <QtLogging>
+#include <QtMinMax>
+#include <QtPreprocessorSupport>
+#include <QtTypes>

 #include <cstdint>
+#include <exception>
 #include <iostream>
 #include <optional>
+#include <span>
 #include <stdexcept>
 #include <string>
-#include <type_traits>
+#include <string_view>
 #include <unordered_map>
 #include <utility>
-#if QT_VERSION >= QT_VERSION_CHECK(6, 8, 0)
-# include <QTcpServer>
-#endif
+#include <variant>
+#include <vector>

 using namespace std::string_literals;
 using namespace Qt::Literals::StringLiterals;
@@ -451,23 +458,17 @@ static QJsonObject requestFromJson(const QByteArray &request)
 void Server::start()
 {
     m_server = std::make_unique<QHttpServer>(this);
-#if QT_VERSION >= QT_VERSION_CHECK(6, 8, 0)
     auto *tcpServer = new QTcpServer(m_server.get());
-#else
-    auto *tcpServer = m_server.get();
-#endif

     auto port = MySettings::globalInstance()->networkPort();
     if (!tcpServer->listen(QHostAddress::LocalHost, port)) {
         qWarning() << "Server ERROR: Failed to listen on port" << port;
         return;
     }
-#if QT_VERSION >= QT_VERSION_CHECK(6, 8, 0)
     if (!m_server->bind(tcpServer)) {
         qWarning() << "Server ERROR: Failed to HTTP server to socket" << port;
         return;
     }
-#endif

     m_server->route("/v1/models", QHttpServerRequest::Method::Get,
         [](const QHttpServerRequest &) {
@@ -607,19 +608,12 @@ void Server::start()
         }
     );

-#if QT_VERSION >= QT_VERSION_CHECK(6, 8, 0)
     m_server->addAfterRequestHandler(this, [](const QHttpServerRequest &req, QHttpServerResponse &resp) {
         Q_UNUSED(req);
         auto headers = resp.headers();
         headers.append("Access-Control-Allow-Origin"_L1, "*"_L1);
         resp.setHeaders(std::move(headers));
     });
-#else
-    m_server->afterRequest([](QHttpServerResponse &&resp) {
-        resp.addHeader("Access-Control-Allow-Origin", "*");
-        return std::move(resp);
-    });
-#endif

     connect(this, &Server::requestResetResponseState, m_chat, &Chat::resetResponseState, Qt::BlockingQueuedConnection);
 }
@@ -694,7 +688,8 @@ auto Server::handleCompletionRequest(const CompletionRequest &request)
             promptCtx,
             /*usedLocalDocs*/ false);
     } catch (const std::exception &e) {
-        emit responseChanged(e.what());
+        m_chatModel->setResponseValue(e.what());
+        m_chatModel->setError();
         emit responseStopped(0);
         return makeError(QHttpServerResponder::StatusCode::InternalServerError);
     }
@@ -772,16 +767,16 @@ auto Server::handleChatRequest(const ChatRequest &request)
     Q_ASSERT(!request.messages.isEmpty());

     // adds prompt/response items to GUI
-    QList<ChatItem> chatItems;
+    std::vector<MessageInput> messages;
     for (auto &message : request.messages) {
         using enum ChatRequest::Message::Role;
         switch (message.role) {
-            case System:    chatItems.emplace_back(ChatItem::system_tag,   message.content); break;
-            case User:      chatItems.emplace_back(ChatItem::prompt_tag,   message.content); break;
-            case Assistant: chatItems.emplace_back(ChatItem::response_tag, message.content); break;
+            case System:    messages.push_back({ MessageInput::Type::System,   message.content }); break;
+            case User:      messages.push_back({ MessageInput::Type::Prompt,   message.content }); break;
+            case Assistant: messages.push_back({ MessageInput::Type::Response, message.content }); break;
         }
     }
-    auto subrange = m_chatModel->appendResponseWithHistory(chatItems);
+    auto startOffset = m_chatModel->appendResponseWithHistory(messages);

     // FIXME(jared): taking parameters from the UI inhibits reproducibility of results
     LLModel::PromptContext promptCtx {
@@ -801,9 +796,10 @@ auto Server::handleChatRequest(const ChatRequest &request)
     for (int i = 0; i < request.n; ++i) {
         ChatPromptResult result;
         try {
-            result = promptInternalChat(m_collections, promptCtx, subrange);
+            result = promptInternalChat(m_collections, promptCtx, startOffset);
         } catch (const std::exception &e) {
-            emit responseChanged(e.what());
+            m_chatModel->setResponseValue(e.what());
+            m_chatModel->setError();
             emit responseStopped(0);
             return makeError(QHttpServerResponder::StatusCode::InternalServerError);
         }
@@ -8,7 +8,7 @@
 #include <QHttpServerResponse>
 #include <QJsonObject>
 #include <QList>
-#include <QObject>
+#include <QObject> // IWYU pragma: keep
 #include <QString>

 #include <memory>
gpt4all-chat/src/tool.cpp (new file, 76 lines)
@@ -0,0 +1,76 @@
#include "tool.h"

#include <QDataStream>
#include <QtTypes>

#include <string>

using json = nlohmann::ordered_json;


json::object_t Tool::jinjaValue() const
{
    json::array_t paramList;
    const QList<ToolParamInfo> p = parameters();
    for (auto &info : p) {
        std::string typeStr;
        switch (info.type) {
            using enum ToolEnums::ParamType;
            case String:  typeStr = "string";  break;
            case Number:  typeStr = "number";  break;
            case Integer: typeStr = "integer"; break;
            case Object:  typeStr = "object";  break;
            case Array:   typeStr = "array";   break;
            case Boolean: typeStr = "boolean"; break;
            case Null:    typeStr = "null";    break;
        }
        paramList.emplace_back(json::initializer_list_t {
            { "name",        info.name.toStdString()        },
            { "type",        typeStr                        },
            { "description", info.description.toStdString() },
            { "required",    info.required                  },
        });
    }

    return {
        { "name",           name().toStdString()           },
        { "description",    description().toStdString()    },
        { "function",       function().toStdString()       },
        { "parameters",     paramList                      },
        { "symbolicFormat", symbolicFormat().toStdString() },
        { "examplePrompt",  examplePrompt().toStdString()  },
        { "exampleCall",    exampleCall().toStdString()    },
        { "exampleReply",   exampleReply().toStdString()   },
    };
}

void ToolCallInfo::serialize(QDataStream &stream, int version)
{
    stream << name;
    stream << params.size();
    for (auto param : params) {
        stream << param.name;
        stream << param.type;
        stream << param.value;
    }
    stream << result;
    stream << error;
    stream << errorString;
}

bool ToolCallInfo::deserialize(QDataStream &stream, int version)
{
    stream >> name;
    qsizetype count;
    stream >> count;
    for (int i = 0; i < count; ++i) {
        ToolParam p;
        stream >> p.name;
        stream >> p.type;
        stream >> p.value;
    }
    stream >> result;
    stream >> error;
    stream >> errorString;
    return true;
}
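To make the shape of jinjaValue()'s result concrete, here is a hypothetical dump for the code-interpreter tool this PR adds. The field names match the code above; the values are invented stand-ins, not copied from codeinterpreter.cpp:

#include <nlohmann/json.hpp>

#include <iostream>

using json = nlohmann::ordered_json;

int main()
{
    const json tool = {
        { "name",           "Code Interpreter" },
        { "description",    "compute the result of a JavaScript program" },  // invented
        { "function",       "javascript_interpret" },
        { "parameters",     json::array({ {
              { "name",        "code" },                                    // invented parameter
              { "type",        "string" },
              { "description", "the JavaScript program to run" },
              { "required",    true },
        } }) },
        { "symbolicFormat", "<javascript_interpret>...</javascript_interpret>" },
        { "examplePrompt",  "What is 12345 * 6789?" },
        { "exampleCall",    "console.log(12345 * 6789)" },
        { "exampleReply",   "The answer is 83810205." },
    };
    std::cout << tool.dump(2) << '\n';  // inspect what the Jinja chat template sees
}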
gpt4all-chat/src/tool.h (new file, 137 lines)
@@ -0,0 +1,137 @@
#ifndef TOOL_H
#define TOOL_H

#include <nlohmann/json.hpp>

#include <QList>
#include <QObject>
#include <QString>
#include <QVariant>
#include <QtGlobal>

class QDataStream;

using json = nlohmann::ordered_json;


namespace ToolEnums
{
    Q_NAMESPACE
    enum class Error
    {
        NoError = 0,
        TimeoutError = 2,
        UnknownError = 499,
    };
    Q_ENUM_NS(Error)

    enum class ParamType { String, Number, Integer, Object, Array, Boolean, Null }; // json schema types
    Q_ENUM_NS(ParamType)

    enum class ParseState {
        None,
        InTagChoice,
        InStart,
        Partial,
        Complete,
    };
    Q_ENUM_NS(ParseState)
}

struct ToolParamInfo
{
    QString name;
    ToolEnums::ParamType type;
    QString description;
    bool required;
};
Q_DECLARE_METATYPE(ToolParamInfo)

struct ToolParam
{
    QString name;
    ToolEnums::ParamType type;
    QVariant value;
    bool operator==(const ToolParam& other) const
    {
        return name == other.name && type == other.type && value == other.value;
    }
};
Q_DECLARE_METATYPE(ToolParam)

struct ToolCallInfo
{
    QString name;
    QList<ToolParam> params;
    QString result;
    ToolEnums::Error error = ToolEnums::Error::NoError;
    QString errorString;

    void serialize(QDataStream &stream, int version);
    bool deserialize(QDataStream &stream, int version);

    bool operator==(const ToolCallInfo& other) const
    {
        return name == other.name && result == other.result && params == other.params
            && error == other.error && errorString == other.errorString;
    }
};
Q_DECLARE_METATYPE(ToolCallInfo)

class Tool : public QObject
{
    Q_OBJECT
    Q_PROPERTY(QString name READ name CONSTANT)
    Q_PROPERTY(QString description READ description CONSTANT)
    Q_PROPERTY(QString function READ function CONSTANT)
    Q_PROPERTY(QList<ToolParamInfo> parameters READ parameters CONSTANT)
    Q_PROPERTY(QString examplePrompt READ examplePrompt CONSTANT)
    Q_PROPERTY(QString exampleCall READ exampleCall CONSTANT)
    Q_PROPERTY(QString exampleReply READ exampleReply CONSTANT)

public:
    Tool() : QObject(nullptr) {}
    virtual ~Tool() {}

    virtual void run(const QList<ToolParam> &params) = 0;
    virtual bool interrupt() = 0;

    // Tools should set these if they encounter errors. For instance, a tool depending upon the network
    // might set these error variables if the network is not available.
    virtual ToolEnums::Error error() const { return ToolEnums::Error::NoError; }
    virtual QString errorString() const { return QString(); }

    // [Required] Human readable name of the tool.
    virtual QString name() const = 0;

    // [Required] Human readable description of what the tool does. Use this tool to: {{description}}
    virtual QString description() const = 0;

    // [Required] Must be unique. Name of the function to invoke. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
    virtual QString function() const = 0;

    // [Optional] List describing the tool's parameters. An empty list specifies no parameters.
    virtual QList<ToolParamInfo> parameters() const { return {}; }

    // [Optional] The symbolic format of the toolcall.
    virtual QString symbolicFormat() const { return QString(); }

    // [Optional] A human generated example of a prompt that could result in this tool being called.
    virtual QString examplePrompt() const { return QString(); }

    // [Optional] An example of this tool call that pairs with the example query. It should be the
    // complete string that the model must generate.
    virtual QString exampleCall() const { return QString(); }

    // [Optional] An example of the reply the model might generate given the result of the tool call.
    virtual QString exampleReply() const { return QString(); }

    bool operator==(const Tool &other) const { return function() == other.function(); }

    json::object_t jinjaValue() const;

Q_SIGNALS:
    void runComplete(const ToolCallInfo &info);
};

#endif // TOOL_H
gpt4all-chat/src/toolcallparser.cpp (new file, 202 lines)
@@ -0,0 +1,202 @@
#include "toolcallparser.h"

#include "tool.h"

#include <QChar>
#include <QSet>
#include <QtAssert>
#include <QtTypes>

#include <stdexcept>


ToolCallParser::ToolCallParser()
    : ToolCallParser(ToolCallConstants::AllTagNames)
{}

ToolCallParser::ToolCallParser(const QStringList &tagNames)
{
    QSet<QChar> firstChars;
    for (auto &name : tagNames) {
        if (name.isEmpty())
            throw std::invalid_argument("ToolCallParser(): tag names must not be empty");
        if (firstChars.contains(name.at(0)))
            throw std::invalid_argument("ToolCallParser(): tag names must not share any prefix");
        firstChars << name.at(0);
        m_possibleStartTags << makeStartTag(name).toUtf8();
        m_possibleEndTags   << makeEndTag  (name).toUtf8();
    }
    reset();
}

void ToolCallParser::reset()
{
    // Resets the search state, but not the buffer or global state
    resetSearchState();

    // These are global states maintained between update calls
    m_buffers.clear();
    m_buffers << QByteArray();
}

void ToolCallParser::resetSearchState()
{
    m_expected = {'<'};
    m_expectedIndex = 0;
    m_state = ToolEnums::ParseState::None;

    m_toolCall.clear();
    m_startTagBuffer.clear();
    m_endTagBuffer.clear();

    m_currentTagIndex = -1;
    m_startIndex = -1;
    m_endIndex = -1;
}

bool ToolCallParser::isExpected(char c) const
{
    return m_expected.isEmpty() || m_expected.contains(c);
}

void ToolCallParser::setExpected(const QList<QByteArray> &tags)
{
    m_expected.clear();
    for (const auto &tag : tags) {
        Q_ASSERT(tag.size() > m_expectedIndex);
        m_expected << tag.at(m_expectedIndex);
    }
}

QByteArray ToolCallParser::startTag() const
{
    if (m_currentTagIndex < 0)
        return {};
    return m_possibleStartTags.at(m_currentTagIndex);
}

QByteArray ToolCallParser::endTag() const
{
    if (m_currentTagIndex < 0)
        return {};
    return m_possibleEndTags.at(m_currentTagIndex);
}

QByteArray &ToolCallParser::currentBuffer()
{
    return m_buffers.last();
}

// This method is called with an arbitrary string and a current state. This method should take the
// current state into account and then parse through the update character by character to arrive at
// the new state.
void ToolCallParser::update(const QByteArray &update)
{
    currentBuffer().append(update);

    for (qsizetype i = currentBuffer().size() - update.size(); i < currentBuffer().size(); ++i) {
        const char c = currentBuffer()[i];
        const bool foundMatch = isExpected(c);
        if (!foundMatch) {
            resetSearchState();
            continue;
        }

        switch (m_state) {
            case ToolEnums::ParseState::None:
            {
                m_expectedIndex = 1;
                setExpected(m_possibleStartTags);
                m_state = ToolEnums::ParseState::InTagChoice;
                m_startIndex = i;
                break;
            }
            case ToolEnums::ParseState::InTagChoice:
            {
                for (int i = 0; i < m_possibleStartTags.size(); ++i) {
                    const auto &tag = m_possibleStartTags.at(i);
                    if (c == tag.at(1)) m_currentTagIndex = i;
                }
                if (m_currentTagIndex >= 0) {
                    m_expectedIndex = 2;
                    setExpected({m_possibleStartTags.at(m_currentTagIndex)});
                    m_state = ToolEnums::ParseState::InStart;
                } else
                    resetSearchState();
                break;
            }
            case ToolEnums::ParseState::InStart:
            {
                m_startTagBuffer.append(c);

                const auto startTag = this->startTag();
                Q_ASSERT(!startTag.isEmpty());
                if (m_expectedIndex == startTag.size() - 1) {
                    m_expectedIndex = 0;
                    setExpected({});
                    m_state = ToolEnums::ParseState::Partial;
                } else {
                    ++m_expectedIndex;
                    Q_ASSERT(m_currentTagIndex >= 0);
                    setExpected({startTag});
                }
                break;
            }
            case ToolEnums::ParseState::Partial:
            {
                Q_ASSERT(m_currentTagIndex >= 0);
                const auto endTag = this->endTag();
                Q_ASSERT(!endTag.isEmpty());
                m_toolCall.append(c);
                m_endTagBuffer.append(c);
                if (m_endTagBuffer.size() > endTag.size())
                    m_endTagBuffer.remove(0, 1);
                if (m_endTagBuffer == endTag) {
                    m_endIndex = i + 1;
                    m_toolCall.chop(endTag.size());
                    m_state = ToolEnums::ParseState::Complete;
                    m_endTagBuffer.clear();
                }
                break;
            }
            case ToolEnums::ParseState::Complete:
            {
                // Already complete, do nothing further
                break;
            }
        }
    }
}

bool ToolCallParser::splitIfPossible()
{
    // The first split happens when we're in a partial state
    if (m_buffers.size() < 2 && m_state == ToolEnums::ParseState::Partial) {
        Q_ASSERT(m_startIndex >= 0);
        const auto beforeToolCall = currentBuffer().left(m_startIndex);
        const auto toolCall       = currentBuffer().mid (m_startIndex);
        m_buffers = { beforeToolCall, toolCall };
        return true;
    }

    // The second split happens when we're in the complete state
    if (m_buffers.size() < 3 && m_state == ToolEnums::ParseState::Complete) {
        Q_ASSERT(m_endIndex >= 0);
        const auto &beforeToolCall = m_buffers.first();
        const auto toolCall      = currentBuffer().left(m_endIndex);
        const auto afterToolCall = currentBuffer().mid (m_endIndex);
        m_buffers = { beforeToolCall, toolCall, afterToolCall };
        return true;
    }

    return false;
}

QStringList ToolCallParser::buffers() const
{
    QStringList result;
    result.reserve(m_buffers.size());
    for (const auto &buffer : m_buffers)
        result << QString::fromUtf8(buffer);
    return result;
}
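A hedged sketch of driving this parser over a streamed reply. The chunk boundaries below are arbitrary on purpose, since update() is explicitly designed to be fed partial tags:

#include "tool.h"            // ToolEnums::ParseState
#include "toolcallparser.h"

#include <QDebug>

int main()
{
    ToolCallParser parser;  // defaults to watching <javascript_interpret> and <think>

    // Pretend the model streamed its reply in three awkwardly split chunks.
    for (const char *chunk : { "Sure. <javascript_interp",
                               "ret>console.log(6 * 7)</javascript",
                               "_interpret> Done." }) {
        parser.update(chunk);
        parser.splitIfPossible();  // separates pre-call text from the call as soon as possible
    }

    if (parser.state() == ToolEnums::ParseState::Complete)
        qDebug() << "tool call body:" << parser.toolCall();  // "console.log(6 * 7)"
    qDebug() << parser.buffers();  // ("Sure. ", "<javascript_interpret>...</javascript_interpret>", " Done.")
}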
gpt4all-chat/src/toolcallparser.h (new file, 73 lines)
@@ -0,0 +1,73 @@
#ifndef TOOLCALLPARSER_H
#define TOOLCALLPARSER_H

#include <QByteArray>
#include <QList>
#include <QString>
#include <QStringList> // IWYU pragma: keep

namespace ToolEnums { enum class ParseState; }

using namespace Qt::Literals::StringLiterals;


class ToolCallParser
{
public:
    ToolCallParser();
    ToolCallParser(const QStringList &tagNames);

    void reset();
    void update(const QByteArray &update);
    QString toolCall() const { return QString::fromUtf8(m_toolCall); }
    int startIndex() const { return m_startIndex; }
    ToolEnums::ParseState state() const { return m_state; }
    QByteArray startTag() const;
    QByteArray endTag() const;

    bool splitIfPossible();
    QStringList buffers() const;
    int numberOfBuffers() const { return m_buffers.size(); }

    static QString makeStartTag(const QString &name) { return u"<%1>"_s .arg(name); }
    static QString makeEndTag  (const QString &name) { return u"</%1>"_s.arg(name); }

private:
    QByteArray &currentBuffer();
    void resetSearchState();
    bool isExpected(char c) const;
    void setExpected(const QList<QByteArray> &tags);

    QList<QByteArray> m_possibleStartTags;
    QList<QByteArray> m_possibleEndTags;
    QByteArray m_startTagBuffer;
    QByteArray m_endTagBuffer;
    int m_currentTagIndex;

    QList<char> m_expected;
    int m_expectedIndex;
    ToolEnums::ParseState m_state;
    QList<QByteArray> m_buffers;
    QByteArray m_toolCall;
    int m_startIndex;
    int m_endIndex;
};

namespace ToolCallConstants
{
    // NB: the parsing code assumes the first char of the various tags differ

    inline const QString CodeInterpreterFunction = u"javascript_interpret"_s;
    inline const QString CodeInterpreterStartTag = ToolCallParser::makeStartTag(CodeInterpreterFunction);
    inline const QString CodeInterpreterEndTag   = ToolCallParser::makeEndTag  (CodeInterpreterFunction);
    inline const QString CodeInterpreterPrefix   = u"%1\n```javascript\n"_s.arg(CodeInterpreterStartTag);
    inline const QString CodeInterpreterSuffix   = u"```\n%1"_s          .arg(CodeInterpreterEndTag  );

    inline const QString ThinkTagName  = u"think"_s;
    inline const QString ThinkStartTag = ToolCallParser::makeStartTag(ThinkTagName);
    inline const QString ThinkEndTag   = ToolCallParser::makeEndTag  (ThinkTagName);

    inline const QStringList AllTagNames { CodeInterpreterFunction, ThinkTagName };
}

#endif // TOOLCALLPARSER_H
gpt4all-chat/src/toolmodel.cpp (new file, 32 lines)
@@ -0,0 +1,32 @@
#include "toolmodel.h"

#include "codeinterpreter.h"

#include <QCoreApplication>
#include <QEvent>
#include <QGlobalStatic>


class MyToolModel: public ToolModel { };
Q_GLOBAL_STATIC(MyToolModel, toolModelInstance)
ToolModel *ToolModel::globalInstance()
{
    return toolModelInstance();
}

ToolModel::ToolModel()
    : QAbstractListModel(nullptr)
{
    QCoreApplication::instance()->installEventFilter(this);

    Tool* codeInterpreter = new CodeInterpreter;
    m_tools.append(codeInterpreter);
    m_toolMap.insert(codeInterpreter->function(), codeInterpreter);
}

bool ToolModel::eventFilter(QObject *obj, QEvent *ev)
{
    if (obj == QCoreApplication::instance() && ev->type() == QEvent::LanguageChange)
        emit dataChanged(index(0, 0), index(m_tools.size() - 1, 0));
    return false;
}
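main.cpp (earlier in this diff) registers this model with QML as the ToolList singleton; from C++ the same lookup goes through the function-name map. A small hedged sketch:

#include "tool.h"
#include "toolcallparser.h"  // ToolCallConstants::CodeInterpreterFunction
#include "toolmodel.h"

#include <QDebug>

// Resolve the built-in code interpreter by its function id; get() returns
// nullptr when no tool matches, so callers must check.
static Tool *codeInterpreter()
{
    Tool *tool = ToolModel::globalInstance()->get(ToolCallConstants::CodeInterpreterFunction);
    if (tool)
        qDebug() << "tool available:" << tool->name() << "/" << tool->function();
    return tool;
}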
gpt4all-chat/src/toolmodel.h (new file, 111 lines)
@@ -0,0 +1,111 @@
#ifndef TOOLMODEL_H
#define TOOLMODEL_H

#include "tool.h"

#include <QAbstractListModel>
#include <QByteArray>
#include <QHash>
#include <QList>
#include <QString>
#include <QVariant>
#include <QtPreprocessorSupport>


class ToolModel : public QAbstractListModel
{
    Q_OBJECT
    Q_PROPERTY(int count READ count NOTIFY countChanged)

public:
    static ToolModel *globalInstance();

    enum Roles {
        NameRole = Qt::UserRole + 1,
        DescriptionRole,
        FunctionRole,
        ParametersRole,
        SymbolicFormatRole,
        ExamplePromptRole,
        ExampleCallRole,
        ExampleReplyRole,
    };

    int rowCount(const QModelIndex &parent = QModelIndex()) const override
    {
        Q_UNUSED(parent)
        return m_tools.size();
    }

    QVariant data(const QModelIndex &index, int role = Qt::DisplayRole) const override
    {
        if (!index.isValid() || index.row() < 0 || index.row() >= m_tools.size())
            return QVariant();

        const Tool *item = m_tools.at(index.row());
        switch (role) {
            case NameRole:
                return item->name();
            case DescriptionRole:
                return item->description();
            case FunctionRole:
                return item->function();
            case ParametersRole:
                return QVariant::fromValue(item->parameters());
            case SymbolicFormatRole:
                return item->symbolicFormat();
            case ExamplePromptRole:
                return item->examplePrompt();
            case ExampleCallRole:
                return item->exampleCall();
            case ExampleReplyRole:
                return item->exampleReply();
        }

        return QVariant();
    }

    QHash<int, QByteArray> roleNames() const override
    {
        QHash<int, QByteArray> roles;
        roles[NameRole] = "name";
        roles[DescriptionRole] = "description";
        roles[FunctionRole] = "function";
        roles[ParametersRole] = "parameters";
        roles[SymbolicFormatRole] = "symbolicFormat";
        roles[ExamplePromptRole] = "examplePrompt";
        roles[ExampleCallRole] = "exampleCall";
        roles[ExampleReplyRole] = "exampleReply";
        return roles;
    }

    Q_INVOKABLE Tool* get(int index) const
    {
        if (index < 0 || index >= m_tools.size()) return nullptr;
        return m_tools.at(index);
    }

    Q_INVOKABLE Tool *get(const QString &id) const
    {
        if (!m_toolMap.contains(id)) return nullptr;
        return m_toolMap.value(id);
    }

    int count() const { return m_tools.size(); }

Q_SIGNALS:
    void countChanged();
    void valueChanged(int index, const QString &value);

protected:
    bool eventFilter(QObject *obj, QEvent *ev) override;

private:
    explicit ToolModel();
    ~ToolModel() {}
    friend class MyToolModel;
    QList<Tool*> m_tools;
    QHash<QString, Tool*> m_toolMap;
};

#endif // TOOLMODEL_H
@@ -5,7 +5,7 @@

 #include <QByteArray>
 #include <QJsonValue>
-#include <QLatin1StringView>
+#include <QLatin1StringView> // IWYU pragma: keep
 #include <QString>
 #include <QStringView>
 #include <QUtf8StringView>
@@ -13,8 +13,9 @@

 #include <initializer_list>
 #include <string_view>
-#include <utility>
+#include <utility> // IWYU pragma: keep

+// IWYU pragma: no_forward_declare QJsonValue
 class QJsonObject;


@@ -40,4 +41,4 @@ MAKE_FORMATTER(QVariant, value.toString().toUtf8());
 // alternative to QJsonObject's initializer_list constructor that accepts Latin-1 strings
 QJsonObject makeJsonObject(std::initializer_list<std::pair<QLatin1StringView, QJsonValue>> args);

-#include "utils.inl"
+#include "utils.inl" // IWYU pragma: export
@@ -1,5 +1,6 @@
 #include <QJsonObject>

+
 inline QJsonObject makeJsonObject(std::initializer_list<std::pair<QLatin1StringView, QJsonValue>> args)
 {
     QJsonObject obj;
@@ -7,15 +7,16 @@
 #include <xlsxformat.h>
 #include <xlsxworksheet.h>

+#include <QChar>
 #include <QDateTime>
 #include <QDebug>
+#include <QLatin1StringView>
 #include <QList>
 #include <QRegularExpression>
 #include <QString>
-#include <QStringList>
+#include <QStringList> // IWYU pragma: keep
 #include <QStringView>
 #include <QVariant>
-#include <QtGlobal>
 #include <QtLogging>

 #include <memory>
@@ -4,6 +4,7 @@
 class QIODevice;
 class QString;

+
 class XLSXToMD
 {
 public:
(Five additional file diffs were suppressed because they are too large, and some files are not shown because too many files changed in this diff.)