Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-07-22 00:02:24 -04:00

Compare commits: e129be4301 ... 2fda274601 (17 commits)
Commits:

2fda274601
1721cdbf0c
177172fe00
3444a47cad
89a59e7f99
f5dd74bcf0
78d930516d
83b8eea611
1bebe78c56
b75a209374
e90263c23f
f414c28589
7e5e84fbb7
37b007603a
c25dc51935
34daf240f9
721d854095
README.md (23 changed lines)

@@ -1,11 +1,9 @@
 <h1 align="center">GPT4All</h1>
 
-<p align="center">Open-source assistant-style large language models that run locally on your CPU</p>
-
-<p align="center"><strong>New</strong>: Now with Nomic Vulkan Universal GPU support. <a href="https://blog.nomic.ai/posts/gpt4all-gpu-inference-with-vulkan">Learn more</a>.</p>
+<p align="center">Open-source large language models that run locally on your CPU and nearly any GPU</p>
 
 <p align="center">
-    <a href="https://gpt4all.io">GPT4All Website</a>
+    <a href="https://gpt4all.io">GPT4All Website and Models</a>
 </p>
 
 <p align="center">
@@ -32,14 +30,25 @@ Run on an M1 macOS Device (not sped up!)
 </p>
 
 ## GPT4All: An ecosystem of open-source on-edge large language models.
-GPT4All is an ecosystem to train and deploy **powerful** and **customized** large language models that run locally on consumer grade CPUs. Note that your CPU needs to support [AVX or AVX2 instructions](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions).
+
+> [!IMPORTANT]
+> GPT4All v2.5.0 and newer only supports models in GGUF format (.gguf). Models used with a previous version of GPT4All (.bin extension) will no longer work.
+
+GPT4All is an ecosystem to run **powerful** and **customized** large language models that work locally on consumer grade CPUs and any GPU. Note that your CPU needs to support [AVX or AVX2 instructions](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions).
 
 Learn more in the [documentation](https://docs.gpt4all.io).
 
 The goal is simple - be the best instruction tuned assistant-style language model that any person or enterprise can freely use, distribute and build on.
 
 A GPT4All model is a 3GB - 8GB file that you can download and plug into the GPT4All open-source ecosystem software. **Nomic AI** supports and maintains this software ecosystem to enforce quality and security alongside spearheading the effort to allow any person or enterprise to easily train and deploy their own on-edge large language models.
 
+### What's New ([Issue Tracker](https://github.com/orgs/nomic-ai/projects/2))
+- **October 19th, 2023**: GGUF Support Launches with Support for:
+    - Mistral 7b base model, an updated model gallery on [gpt4all.io](https://gpt4all.io), several new local code models including Rift Coder v1.5
+    - [Nomic Vulkan](https://blog.nomic.ai/posts/gpt4all-gpu-inference-with-vulkan) support for Q4_0, Q6 quantizations in GGUF.
+    - Offline build support for running old versions of the GPT4All Local LLM Chat Client.
+- **September 18th, 2023**: [Nomic Vulkan](https://blog.nomic.ai/posts/gpt4all-gpu-inference-with-vulkan) launches supporting local LLM inference on AMD, Intel, Samsung, Qualcomm and NVIDIA GPUs.
+- **August 15th, 2023**: GPT4All API launches allowing inference of local LLMs from docker containers.
+- **July 2023**: Stable support for LocalDocs, a GPT4All Plugin that allows you to privately and locally chat with your data.
 
 ### Chat Client
 Run any GPT4All model natively on your home desktop with the auto-updating desktop chat client. See <a href="https://gpt4all.io">GPT4All Website</a> for a full list of open-source models you can run with this powerful desktop application.
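The IMPORTANT note above is the thread running through most of this compare: every model reference below moves from ggml `.bin` files to GGUF. As a quick orientation, here is a minimal sketch of loading a GGUF-era model with the updated Python bindings (the model name is one used in the binding examples later in this diff):

```python
from gpt4all import GPT4All

# GPT4All v2.5.0+ only loads GGUF model files; older ggml .bin files no longer work.
model = GPT4All("orca-mini-3b-gguf2-q4_0.gguf")
output = model.generate("The capital of France is ", max_tokens=3)
print(output)
```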
@@ -18,6 +18,6 @@ COPY gpt4all_api/app /app
 RUN mkdir -p /models
 
 # Include the following line to bake a model into the image and not have to download it on API start.
-RUN wget -q --show-progress=off https://gpt4all.io/models/gguf/${MODEL_BIN} -P /models \
+RUN wget -q --show-progress=off https://gpt4all.io/models/${MODEL_BIN} -P /models \
     && md5sum /models/${MODEL_BIN}
@@ -1 +1 @@
-Subproject commit a8ed8c858985ef94d97a3cf2c97085b680c6d5d0
+Subproject commit 2dee60214b0001cf03e1cec0a53a61a17b55c1eb
@@ -10,6 +10,7 @@
 #include <cassert>
 #include <cstdlib>
 #include <sstream>
+#include <regex>
 #ifdef _MSC_VER
 #include <intrin.h>
 #endif
@@ -81,6 +82,13 @@ const std::vector<LLModel::Implementation> &LLModel::Implementation::implementat
 static auto* libs = new std::vector<Implementation>([] () {
     std::vector<Implementation> fres;
 
+    std::string impl_name_re = "(bert|llama|gptj|llamamodel-mainline)";
+    if (requires_avxonly()) {
+        impl_name_re += "-avxonly";
+    } else {
+        impl_name_re += "-(default|metal)";
+    }
+    std::regex re(impl_name_re);
     auto search_in_directory = [&](const std::string& paths) {
         std::stringstream ss(paths);
         std::string path;
@@ -90,7 +98,10 @@ const std::vector<LLModel::Implementation> &LLModel::Implementation::implementat
         // Iterate over all libraries
         for (const auto& f : std::filesystem::directory_iterator(fs_path)) {
             const std::filesystem::path& p = f.path();
+
+            if (p.extension() != LIB_FILE_EXT) continue;
+            if (!std::regex_search(p.stem().string(), re)) continue;
 
             // Add to list if model implementation
             try {
                 Dlhandle dl(p.string());
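The effect of the llmodel.cpp change above: instead of attempting to dlopen every file on the search path, the loader now only opens libraries whose file stem names a known implementation plus the variant built for the current CPU. A minimal sketch of the equivalent filename filter, written in Python for brevity (the pattern is the non-AVX-only branch from the diff; the sample stems are hypothetical):

```python
import re

# Pattern assembled by the C++ code on a machine with AVX/AVX2:
# known implementation names plus a "-default" or "-metal" variant suffix.
impl_name_re = re.compile(r"(bert|llama|gptj|llamamodel-mainline)-(default|metal)")

# Hypothetical library stems (platform extension already stripped, mirroring p.stem()).
for stem in ["libllama-default", "libgptj-metal", "libllama-avxonly", "libcurl"]:
    verdict = "load" if impl_name_re.search(stem) else "skip"
    print(f"{stem}: {verdict}")
# libllama-default: load, libgptj-metal: load,
# libllama-avxonly: skip (wrong variant), libcurl: skip (not a model library)
```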
@@ -40,5 +40,5 @@ directory, if necessary.
 If you have already saved a model beforehand, specify its path with the `-m`/`--model` argument,
 for example:
 ```shell
-python app.py repl --model /home/user/my-gpt4all-models/GPT4All-13B-snoozy.ggmlv3.q4_0.bin
+python app.py repl --model /home/user/my-gpt4all-models/gpt4all-13b-snoozy-q4_0.gguf
 ```
gpt4all-bindings/cli/app.py (3 changed lines; mode: normal file → executable file)
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 """GPT4All CLI
 
 The GPT4All CLI is a self-contained script based on the `gpt4all` and `typer` packages. It offers a
@@ -53,7 +54,7 @@ def repl(
     model: Annotated[
         str,
         typer.Option("--model", "-m", help="Model to use for chatbot"),
-    ] = "ggml-gpt4all-j-v1.3-groovy",
+    ] = "mistral-7b-instruct-v0.1.Q4_0.gguf",
     n_threads: Annotated[
         int,
         typer.Option("--n-threads", "-t", help="Number of threads to use for chatbot"),
@@ -1,3 +1,4 @@
+#!/bin/sh
 mkdir -p runtimes
 rm -rf runtimes/linux-x64
 mkdir -p runtimes/linux-x64/native
@@ -50,7 +50,7 @@ Test it out! In a Python script or console:
 
 ```python
 from gpt4all import GPT4All
-model = GPT4All("orca-mini-3b.ggmlv3.q4_0.bin")
+model = GPT4All("orca-mini-3b-gguf2-q4_0.gguf")
 output = model.generate("The capital of France is ", max_tokens=3)
 print(output)
 ```
@@ -59,7 +59,7 @@ print(output)
 GPU Usage
 ```python
 from gpt4all import GPT4All
-model = GPT4All("orca-mini-3b.ggmlv3.q4_0.bin", device='gpu') # device='amd', device='intel'
+model = GPT4All("orca-mini-3b-gguf2-q4_0.gguf", device='gpu') # device='amd', device='intel'
 output = model.generate("The capital of France is ", max_tokens=3)
 print(output)
 ```
@@ -166,7 +166,7 @@ If you want to use a different model, you can do so with the `-m`/`--model` para
 model file name is provided, it will again check in `.cache/gpt4all/` and might start downloading.
 If instead given a path to an existing model, the command could for example look like this:
 ```shell
-python app.py repl --model /home/user/my-gpt4all-models/GPT4All-13B-snoozy.ggmlv3.q4_0.bin
+python app.py repl --model /home/user/my-gpt4all-models/gpt4all-13b-snoozy-q4_0.gguf
 ```
 
 When you're done and want to end a session, simply type `/exit`.
@@ -11,7 +11,7 @@ pip install gpt4all
 === "GPT4All Example"
     ``` py
     from gpt4all import GPT4All
-    model = GPT4All("orca-mini-3b.ggmlv3.q4_0.bin")
+    model = GPT4All("orca-mini-3b-gguf2-q4_0.gguf")
    output = model.generate("The capital of France is ", max_tokens=3)
     print(output)
     ```
@@ -35,7 +35,7 @@ Use the GPT4All `chat_session` context manager to hold chat conversations with t
 
 === "GPT4All Example"
     ``` py
-    model = GPT4All(model_name='orca-mini-3b.ggmlv3.q4_0.bin')
+    model = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf')
     with model.chat_session():
         response1 = model.generate(prompt='hello', temp=0)
         response2 = model.generate(prompt='write me a short poem', temp=0)
@@ -89,7 +89,7 @@ To interact with GPT4All responses as the model generates, use the `streaming=Tr
 === "GPT4All Streaming Example"
     ``` py
     from gpt4all import GPT4All
-    model = GPT4All("orca-mini-3b.ggmlv3.q4_0.bin")
+    model = GPT4All("orca-mini-3b-gguf2-q4_0.gguf")
     tokens = []
     for token in model.generate("The capital of France is", max_tokens=20, streaming=True):
         tokens.append(token)
@@ -135,7 +135,7 @@ is the same as if it weren't provided; that is, `~/.cache/gpt4all/` is the defau
     ``` py
     from pathlib import Path
     from gpt4all import GPT4All
-    model = GPT4All(model_name='orca-mini-3b.ggmlv3.q4_0.bin',
+    model = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf',
                     model_path=(Path.home() / '.cache' / 'gpt4all'),
                     allow_download=False)
     response = model.generate('my favorite 3 fruits are:', temp=0)
@@ -152,7 +152,7 @@ If you want to point it at the chat GUI's default folder, it should be:
     from pathlib import Path
     from gpt4all import GPT4All
 
-    model_name = 'orca-mini-3b.ggmlv3.q4_0.bin'
+    model_name = 'orca-mini-3b-gguf2-q4_0.gguf'
     model_path = Path.home() / 'Library' / 'Application Support' / 'nomic.ai' / 'GPT4All'
     model = GPT4All(model_name, model_path)
     ```
@@ -161,7 +161,7 @@ If you want to point it at the chat GUI's default folder, it should be:
     from pathlib import Path
     from gpt4all import GPT4All
     import os
-    model_name = 'orca-mini-3b.ggmlv3.q4_0.bin'
+    model_name = 'orca-mini-3b-gguf2-q4_0.gguf'
     model_path = Path(os.environ['LOCALAPPDATA']) / 'nomic.ai' / 'GPT4All'
     model = GPT4All(model_name, model_path)
     ```
@@ -170,7 +170,7 @@ If you want to point it at the chat GUI's default folder, it should be:
     from pathlib import Path
     from gpt4all import GPT4All
 
-    model_name = 'orca-mini-3b.ggmlv3.q4_0.bin'
+    model_name = 'orca-mini-3b-gguf2-q4_0.gguf'
     model_path = Path.home() / '.local' / 'share' / 'nomic.ai' / 'GPT4All'
     model = GPT4All(model_name, model_path)
     ```
@@ -182,7 +182,7 @@ from pathlib import Path
 import gpt4all.gpt4all
 gpt4all.gpt4all.DEFAULT_MODEL_DIRECTORY = Path.home() / 'my' / 'models-directory'
 from gpt4all import GPT4All
-model = GPT4All('orca-mini-3b.ggmlv3.q4_0.bin')
+model = GPT4All('orca-mini-3b-gguf2-q4_0.gguf')
 ...
 ```
@@ -193,7 +193,7 @@ Session templates can be customized when starting a `chat_session` context:
 === "GPT4All Custom Session Templates Example"
     ``` py
     from gpt4all import GPT4All
-    model = GPT4All('ggml-Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_1.bin')
+    model = GPT4All('wizardlm-13b-v1.2.Q4_0.gguf')
     system_template = 'A chat between a curious user and an artificial intelligence assistant.'
     # many models use triple hash '###' for keywords, Vicunas are simpler:
     prompt_template = 'USER: {0}\nASSISTANT: '
@@ -222,7 +222,7 @@ To do the same outside a session, the input has to be formatted manually. For ex
 
 === "GPT4All Templates Outside a Session Example"
     ``` py
-    model = GPT4All('ggml-Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_1.bin')
+    model = GPT4All('wizardlm-13b-v1.2.Q4_0.gguf')
     system_template = 'A chat between a curious user and an artificial intelligence assistant.'
     prompt_template = 'USER: {0}\nASSISTANT: '
     prompts = ['name 3 colors', 'now name 3 fruits', 'what were the 3 colors in your earlier response?']
@@ -285,7 +285,7 @@ customized in a subclass. As an example:
 ```
 === "GPT4All Custom Subclass Example"
     ``` py
-    model = RotatingTemplateGPT4All('ggml-Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_1.bin')
+    model = RotatingTemplateGPT4All('wizardlm-13b-v1.2.Q4_0.gguf')
     with model.chat_session(): # starting a session is optional in this example
         response1 = model.generate("hi, who are you?")
         print(response1)
@@ -345,7 +345,7 @@ logging infrastructure offers [many more customization options][py-logging-cookb
     import logging
     from gpt4all import GPT4All
     logging.basicConfig(level=logging.INFO)
-    model = GPT4All('nous-hermes-13b.ggmlv3.q4_0.bin')
+    model = GPT4All('nous-hermes-llama2-13b.Q4_0.gguf')
     with model.chat_session('You are a geography expert.\nBe terse.',
                             '### Instruction:\n{0}\n### Response:\n'):
         response = model.generate('who are you?', temp=0)
@@ -414,7 +414,7 @@ If you know exactly when a model should stop responding, you can add a custom ca
 === "GPT4All Custom Stop Callback"
     ``` py
     from gpt4all import GPT4All
-    model = GPT4All('orca-mini-3b.ggmlv3.q4_0.bin')
+    model = GPT4All('orca-mini-3b-gguf2-q4_0.gguf')
 
     def stop_on_token_callback(token_id, token_string):
         # one sentence is enough:
@@ -75,7 +75,7 @@ class GPT4All:
         Constructor
 
         Args:
-            model_name: Name of GPT4All or custom model. Including ".bin" file extension is optional but encouraged.
+            model_name: Name of GPT4All or custom model. Including ".gguf" file extension is optional but encouraged.
             model_path: Path to directory containing model file or, if file does not exist, where to download model.
                 Default is None, in which case models will be stored in `~/.cache/gpt4all/`.
             model_type: Model architecture. This argument currently does not have any functionality and is just used as
@@ -141,7 +141,7 @@ class GPT4All:
             Model config.
         """
 
-        model_filename = append_bin_suffix_if_missing(model_name)
+        model_filename = append_extension_if_missing(model_name)
 
         # get the config for the model
         config: ConfigType = DEFAULT_MODEL_CONFIG
@@ -201,7 +201,7 @@ class GPT4All:
         Download model from https://gpt4all.io.
 
         Args:
-            model_filename: Filename of model (with .bin extension).
+            model_filename: Filename of model (with .gguf extension).
             model_path: Path to download model to.
             verbose: If True (default), print debug messages.
             url: the models remote url (e.g. may be hosted on HF)
@@ -456,7 +456,7 @@ def empty_chat_session(system_prompt: str = "") -> List[MessageType]:
     return [{"role": "system", "content": system_prompt}]
 
 
-def append_bin_suffix_if_missing(model_name):
+def append_extension_if_missing(model_name):
     if not model_name.endswith((".bin", ".gguf")):
-        model_name += ".bin"
+        model_name += ".gguf"
     return model_name
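The renamed helper only changes the default extension; a short sketch of the resulting behavior (inputs are illustrative):

```python
def append_extension_if_missing(model_name):
    # As in the diff: bare model names now default to .gguf rather than .bin.
    if not model_name.endswith((".bin", ".gguf")):
        model_name += ".gguf"
    return model_name

print(append_extension_if_missing("orca-mini-3b-gguf2-q4_0"))  # -> orca-mini-3b-gguf2-q4_0.gguf
print(append_extension_if_missing("legacy-model.bin"))         # -> legacy-model.bin (unchanged)
```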
gpt4all-bindings/python/gpt4all/tests/test_embed_timings.py (1 changed line; mode: normal file → executable file)
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import sys
 import time
 from io import StringIO
@@ -8,7 +8,7 @@ import pytest
 
 
 def test_inference():
-    model = GPT4All(model_name='orca-mini-3b.ggmlv3.q4_0.bin')
+    model = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf')
     output_1 = model.generate('hello', top_k=1)
 
     with model.chat_session():
@@ -47,49 +47,44 @@ def do_long_input(model):
 
 
 def test_inference_long_orca_3b():
-    model = GPT4All(model_name="orca-mini-3b.ggmlv3.q4_0.bin")
+    model = GPT4All(model_name="orca-mini-3b-gguf2-q4_0.gguf")
     do_long_input(model)
 
 
 def test_inference_long_falcon():
-    model = GPT4All(model_name='ggml-model-gpt4all-falcon-q4_0.bin')
+    model = GPT4All(model_name='gpt4all-falcon-q4_0.gguf')
     do_long_input(model)
 
 
 def test_inference_long_llama_7b():
-    model = GPT4All(model_name="orca-mini-7b.ggmlv3.q4_0.bin")
+    model = GPT4All(model_name="mistral-7b-openorca.Q4_0.gguf")
     do_long_input(model)
 
 
 def test_inference_long_llama_13b():
-    model = GPT4All(model_name='ggml-nous-hermes-13b.ggmlv3.q4_0.bin')
+    model = GPT4All(model_name='nous-hermes-llama2-13b.Q4_0.gguf')
     do_long_input(model)
 
 
 def test_inference_long_mpt():
-    model = GPT4All(model_name='ggml-mpt-7b-chat.bin')
+    model = GPT4All(model_name='mpt-7b-chat-q4_0.gguf')
     do_long_input(model)
 
 
 def test_inference_long_replit():
-    model = GPT4All(model_name='ggml-replit-code-v1-3b.bin')
-    do_long_input(model)
-
-
-def test_inference_long_groovy():
-    model = GPT4All(model_name='ggml-gpt4all-j-v1.3-groovy.bin')
+    model = GPT4All(model_name='replit-code-v1_5-3b-q4_0.gguf')
     do_long_input(model)
 
 
 def test_inference_hparams():
-    model = GPT4All(model_name='orca-mini-3b.ggmlv3.q4_0.bin')
+    model = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf')
 
     output = model.generate("The capital of france is ", max_tokens=3)
     assert 'Paris' in output
 
 
 def test_inference_falcon():
-    model = GPT4All(model_name='ggml-model-gpt4all-falcon-q4_0.bin')
+    model = GPT4All(model_name='gpt4all-falcon-q4_0.gguf')
     prompt = 'hello'
     output = model.generate(prompt)
     assert isinstance(output, str)
@@ -97,7 +92,7 @@ def test_inference_falcon():
 
 
 def test_inference_mpt():
-    model = GPT4All(model_name='ggml-mpt-7b-chat.bin')
+    model = GPT4All(model_name='mpt-7b-chat-q4_0.gguf')
     prompt = 'hello'
     output = model.generate(prompt)
     assert isinstance(output, str)
@@ -61,7 +61,7 @@ copy_prebuilt_C_lib(SRC_CLIB_DIRECtORY,
 
 setup(
     name=package_name,
-    version="2.0.0",
+    version="2.0.1",
     description="Python bindings for GPT4All",
     author="Nomic and the Open Source Community",
     author_email="support@nomic.ai",
@@ -15,7 +15,8 @@ Napi::Function NodeModelWrapper::GetClass(Napi::Env env) {
         InstanceMethod("initGpuByString", &NodeModelWrapper::InitGpuByString),
         InstanceMethod("hasGpuDevice", &NodeModelWrapper::HasGpuDevice),
         InstanceMethod("listGpu", &NodeModelWrapper::GetGpuDevices),
-        InstanceMethod("memoryNeeded", &NodeModelWrapper::GetRequiredMemory)
+        InstanceMethod("memoryNeeded", &NodeModelWrapper::GetRequiredMemory),
+        InstanceMethod("dispose", &NodeModelWrapper::Dispose)
     });
     // Keep a static reference to the constructor
     //
@@ -313,7 +314,9 @@ Napi::Value NodeModelWrapper::GetRequiredMemory(const Napi::CallbackInfo& info)
     threadSafeContext->nativeThread = std::thread(threadEntry, threadSafeContext);
     return threadSafeContext->deferred_.Promise();
   }
 
+  void NodeModelWrapper::Dispose(const Napi::CallbackInfo& info) {
+    llmodel_model_destroy(inference_);
+  }
   void NodeModelWrapper::SetThreadCount(const Napi::CallbackInfo& info) {
     if(info[0].IsNumber()) {
       llmodel_setThreadCount(GetInference(), info[0].As<Napi::Number>().Int64Value());
@@ -24,6 +24,7 @@ public:
      */
     Napi::Value Prompt(const Napi::CallbackInfo& info);
     void SetThreadCount(const Napi::CallbackInfo& info);
+    void Dispose(const Napi::CallbackInfo& info);
     Napi::Value getName(const Napi::CallbackInfo& info);
     Napi::Value ThreadCount(const Napi::CallbackInfo& info);
     Napi::Value GenerateEmbedding(const Napi::CallbackInfo& info);
gpt4all-bindings/typescript/scripts/build_unix.sh (0 changed lines; mode: normal file → executable file)
@@ -42,6 +42,8 @@ const completion2 = await createCompletion(model, [
 
 console.log(completion2.choices[0].message)
 
+//CALLING DISPOSE WILL INVALID THE NATIVE MODEL. USE THIS TO CLEANUP
+model.dispose()
 // At the moment, from testing this code, concurrent model prompting is not possible.
 // Behavior: The last prompt gets answered, but the rest are cancelled
 // my experience with threading is not the best, so if anyone who is good is willing to give this a shot,
gpt4all-bindings/typescript/src/gpt4all.d.ts (16 changed lines; vendored)
@@ -61,6 +61,11 @@ declare class InferenceModel {
         prompt: string,
         options?: Partial<LLModelPromptContext>
     ): Promise<string>;
+
+    /**
+     * delete and cleanup the native model
+     */
+    dispose(): void
 }
 
 declare class EmbeddingModel {
@@ -69,6 +74,12 @@ declare class EmbeddingModel {
     config: ModelConfig;
 
     embed(text: string): Float32Array;
+
+    /**
+     * delete and cleanup the native model
+     */
+    dispose(): void
+
 }
 
 /**
@@ -163,6 +174,11 @@ declare class LLModel {
      * @returns
      */
     listGpu() : GpuDevice[]
+
+    /**
+     * delete and cleanup the native model
+     */
+    dispose(): void
 }
 /**
  * an object that contains gpu data on this machine.
@@ -15,6 +15,10 @@ class InferenceModel {
         const result = this.llm.raw_prompt(prompt, normalizedPromptContext, () => {});
         return result;
     }
+
+    dispose() {
+        this.llm.dispose();
+    }
 }
 
 class EmbeddingModel {
@@ -29,6 +33,10 @@ class EmbeddingModel {
     embed(text) {
         return this.llm.embed(text)
     }
+
+    dispose() {
+        this.llm.dispose();
+    }
 }
 
@@ -18,7 +18,7 @@ endif()
 
 set(APP_VERSION_MAJOR 2)
 set(APP_VERSION_MINOR 5)
-set(APP_VERSION_PATCH 1)
+set(APP_VERSION_PATCH 2)
 set(APP_VERSION "${APP_VERSION_MAJOR}.${APP_VERSION_MINOR}.${APP_VERSION_PATCH}")
 
 # Include the binary directory for the generated header file
gpt4all-chat/cmake/sign_dmg.py (1 changed line; mode: normal file → executable file)
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import os
 import subprocess
 import tempfile
@@ -282,8 +282,8 @@ Window {
                 highlighted: comboBox.highlightedIndex === index
             }
             Accessible.role: Accessible.ComboBox
-            Accessible.name: qsTr("ComboBox for displaying/picking the current model")
-            Accessible.description: qsTr("Use this for picking the current model to use; the first item is the current model")
+            Accessible.name: qsTr("List of available models")
+            Accessible.description: qsTr("The top item is the current model")
             onActivated: function (index) {
                 currentChat.stopGenerating()
                 currentChat.reset();
@@ -307,7 +307,7 @@ Window {
             running: parent.visible
             Accessible.role: Accessible.Animation
             Accessible.name: qsTr("Busy indicator")
-            Accessible.description: qsTr("Displayed when the model is loading")
+            Accessible.description: qsTr("loading model...")
         }
 
         Label {
@@ -339,8 +339,8 @@ Window {
             padding: 15
 
             Accessible.role: Accessible.ButtonMenu
-            Accessible.name: qsTr("Hamburger button")
-            Accessible.description: qsTr("Hamburger button that reveals a drawer on the left of the application")
+            Accessible.name: qsTr("Main menu")
+            Accessible.description: qsTr("Navigation drawer with options")
 
             background: Item {
                 anchors.centerIn: parent
@@ -389,7 +389,7 @@ Window {
             Item {
                 Accessible.role: Accessible.Dialog
                 Accessible.name: qsTr("Network dialog")
-                Accessible.description: qsTr("Dialog for opt-in to sharing feedback/conversations")
+                Accessible.description: qsTr("opt-in to share feedback/conversations")
             }
         }
 
@@ -405,7 +405,7 @@ Window {
             padding: 15
             toggled: MySettings.networkIsActive
             source: "qrc:/gpt4all/icons/network.svg"
-            Accessible.name: qsTr("Network button")
+            Accessible.name: qsTr("Network")
             Accessible.description: qsTr("Reveals a dialogue where you can opt-in for sharing data over network")
 
             onClicked: {
@@ -441,8 +441,8 @@ Window {
             padding: 15
             toggled: currentChat.collectionList.length
             source: "qrc:/gpt4all/icons/db.svg"
-            Accessible.name: qsTr("Add collections of documents to the chat")
-            Accessible.description: qsTr("Provides a button to add collections of documents to the chat")
+            Accessible.name: qsTr("Add documents")
+            Accessible.description: qsTr("add collections of documents to the chat")
 
             onClicked: {
                 collectionsDialog.open()
@@ -460,8 +460,8 @@ Window {
             z: 200
             padding: 15
             source: "qrc:/gpt4all/icons/settings.svg"
-            Accessible.name: qsTr("Settings button")
-            Accessible.description: qsTr("Reveals a dialogue where you can change various settings")
+            Accessible.name: qsTr("Settings")
+            Accessible.description: qsTr("Reveals a dialogue with settings")
 
             onClicked: {
                 settingsDialog.open()
@@ -528,7 +528,7 @@ Window {
             z: 200
             padding: 15
             source: "qrc:/gpt4all/icons/copy.svg"
-            Accessible.name: qsTr("Copy button")
+            Accessible.name: qsTr("Copy")
             Accessible.description: qsTr("Copy the conversation to the clipboard")
 
             TextEdit{
@@ -595,7 +595,7 @@ Window {
                 source: "qrc:/gpt4all/icons/regenerate.svg"
 
                 Accessible.name: text
-                Accessible.description: qsTr("Reset the context which erases current conversation")
+                Accessible.description: qsTr("Reset the context and erase current conversation")
 
                 onClicked: {
                     Network.sendResetContext(chatModel.count)
@@ -623,7 +623,7 @@ Window {
                 font.pixelSize: theme.fontSizeLarge
                 Accessible.role: Accessible.Dialog
                 Accessible.name: text
-                Accessible.description: qsTr("Dialog indicating an error")
+                Accessible.description: qsTr("Error dialog")
             }
             background: Rectangle {
                 anchors.fill: parent
@@ -641,7 +641,7 @@ Window {
             height: window.height - (window.height * .1)
             Item {
                 Accessible.role: Accessible.Dialog
-                Accessible.name: qsTr("Download new models dialog")
+                Accessible.name: qsTr("Download new models")
                 Accessible.description: qsTr("Dialog for downloading new models")
             }
         }
@@ -740,8 +740,8 @@ Window {
                 ScrollBar.vertical: ScrollBar { policy: ScrollBar.AlwaysOn }
 
                 Accessible.role: Accessible.List
-                Accessible.name: qsTr("List of prompt/response pairs")
-                Accessible.description: qsTr("This is the list of prompt/response pairs comprising the actual conversation with the model")
+                Accessible.name: qsTr("Conversation with the model")
+                Accessible.description: qsTr("prompt / response pairs from the conversation")
 
                 delegate: TextArea {
                     id: myTextArea
@@ -811,7 +811,7 @@ Window {
                     running: (currentResponse ? true : false) && value === "" && currentChat.responseInProgress
                     Accessible.role: Accessible.Animation
                     Accessible.name: qsTr("Busy indicator")
-                    Accessible.description: qsTr("Displayed when the model is thinking")
+                    Accessible.description: qsTr("The model is thinking")
                 }
                 Label {
                     anchors.verticalCenter: parent.verticalCenter
@@ -1053,7 +1053,7 @@ Window {
                 }
                 Accessible.role: Accessible.EditableText
                 Accessible.name: placeholderText
-                Accessible.description: qsTr("Textfield for sending messages/prompts to the model")
+                Accessible.description: qsTr("Send messages/prompts to the model")
                 Keys.onReturnPressed: (event)=> {
                     if (event.modifiers & Qt.ControlModifier || event.modifiers & Qt.ShiftModifier)
                         event.accepted = false;
@@ -1090,7 +1090,7 @@ Window {
                 height: 30
                 visible: !currentChat.isServer
                 source: "qrc:/gpt4all/icons/send_message.svg"
-                Accessible.name: qsTr("Send the message button")
+                Accessible.name: qsTr("Send message")
                 Accessible.description: qsTr("Sends the message/prompt contained in textfield to the model")
 
                 onClicked: {
@@ -94,17 +94,17 @@
     },
     {
         "order": "h",
-        "md5sum": "f5bc6a52f72efd9128efb2eeed802c86",
+        "md5sum": "cf5e8f73747f9d7c6fe72a629808c1de",
         "name": "MPT Chat",
-        "filename": "mpt-7b-chat-q4_0.gguf",
-        "filesize": "3911522272",
+        "filename": "mpt-7b-chat-merges-q4_0.gguf",
+        "filesize": "3796133728",
         "requires": "2.5.0",
         "ramrequired": "8",
         "parameters": "7 billion",
         "quant": "q4_0",
         "type": "MPT",
         "description": "<strong>Good model with novel architecture</strong><br><ul><li>Fast responses<li>Chat based<li>Trained by Mosaic ML<li>Cannot be used commercially</ul>",
-        "url": "https://gpt4all.io/models/gguf/mpt-7b-chat-q4_0.gguf",
+        "url": "https://gpt4all.io/models/gguf/mpt-7b-chat-merges-q4_0.gguf",
         "promptTemplate": "<|im_start|>user\n%1<|im_end|><|im_start|>assistant\n",
         "systemPrompt": "<|im_start|>system\n- You are a helpful assistant chatbot trained by MosaicML.\n- You answer questions.\n- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.\n- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.<|im_end|>"
     },
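A note on the two template fields in that entry: the chat client substitutes the user's message for the `%1` placeholder in `promptTemplate` and places the `systemPrompt` at the start of the context. A rough Python sketch of the assembly (illustrative only; the client's exact concatenation logic may differ):

```python
prompt_template = "<|im_start|>user\n%1<|im_end|><|im_start|>assistant\n"
system_prompt = "<|im_start|>system\n- You are a helpful assistant chatbot trained by MosaicML.<|im_end|>"

user_message = "Write me a haiku."  # hypothetical user input
full_prompt = system_prompt + "\n" + prompt_template.replace("%1", user_message)
print(full_prompt)
```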
@@ -550,6 +550,21 @@
 * Jared Van Bortel (Nomic AI)
 * Adam Treat (Nomic AI)
 * Community (beta testers, bug reporters, bindings authors)
 "
     },
+    {
+        "version": "2.5.1",
+        "notes":
+"
+* Accessibility fixes
+* Bugfix for crasher on Windows
+",
+        "contributors":
+"
+* Aaron Miller (Nomic AI)
+* Jared Van Bortel (Nomic AI)
+* Victor Tsaran <vtsaran@yahoo.com>
+* Community (beta testers, bug reporters, bindings authors)
+"
+    }
 ]
@@ -35,8 +35,8 @@ MySettingsTab {
         Layout.fillWidth: false
         model: ["Dark", "Light"]
         Accessible.role: Accessible.ComboBox
-        Accessible.name: qsTr("ComboBox for displaying/picking the color theme")
-        Accessible.description: qsTr("Use this for picking the color theme for the chat client to use")
+        Accessible.name: qsTr("Color theme")
+        Accessible.description: qsTr("Color theme for the chat client to use")
         function updateModel() {
             themeBox.currentIndex = themeBox.indexOfValue(MySettings.chatTheme);
         }
@@ -70,8 +70,8 @@ MySettingsTab {
         Layout.fillWidth: false
         model: ["Small", "Medium", "Large"]
         Accessible.role: Accessible.ComboBox
-        Accessible.name: qsTr("ComboBox for displaying/picking the font size")
-        Accessible.description: qsTr("Use this for picking the font size of the chat client")
+        Accessible.name: qsTr("Font size")
+        Accessible.description: qsTr("Font size of the chat client")
         function updateModel() {
             fontBox.currentIndex = fontBox.indexOfValue(MySettings.fontSize);
         }
@@ -105,8 +105,8 @@ MySettingsTab {
         Layout.fillWidth: false
         model: MySettings.deviceList
         Accessible.role: Accessible.ComboBox
-        Accessible.name: qsTr("ComboBox for displaying/picking the device")
-        Accessible.description: qsTr("Use this for picking the device of the chat client")
+        Accessible.name: qsTr("Device")
+        Accessible.description: qsTr("Device of the chat client")
         function updateModel() {
             deviceBox.currentIndex = deviceBox.indexOfValue(MySettings.device);
         }
@@ -143,8 +143,8 @@ MySettingsTab {
         Layout.fillWidth: true
         model: ModelList.userDefaultModelList
         Accessible.role: Accessible.ComboBox
-        Accessible.name: qsTr("ComboBox for displaying/picking the default model")
-        Accessible.description: qsTr("Use this for picking the default model to use; the first item is the current default model")
+        Accessible.name: qsTr("Default model")
+        Accessible.description: qsTr("Default model to use; the first item is the current default model")
         function updateModel() {
             comboBox.currentIndex = comboBox.indexOfValue(MySettings.userDefaultModel);
         }
@@ -194,7 +194,7 @@ MySettingsTab {
         Layout.row: 5
         Layout.column: 2
         text: qsTr("Browse")
-        Accessible.description: qsTr("Opens a folder picker dialog to choose where to save model files")
+        Accessible.description: qsTr("Choose where to save model files")
         onClicked: {
             openFolderDialog("file://" + MySettings.modelPath, function(selectedFolder) {
                 MySettings.modelPath = selectedFolder
@@ -31,8 +31,8 @@ Drawer {
     anchors.margins: 10
 
     Accessible.role: Accessible.Pane
-    Accessible.name: qsTr("Drawer on the left of the application")
-    Accessible.description: qsTr("Drawer that is revealed by pressing the hamburger button")
+    Accessible.name: qsTr("Drawer")
+    Accessible.description: qsTr("Main navigation drawer")
 
     MyButton {
         id: newChat
@@ -42,7 +42,7 @@ Drawer {
         topPadding: 20
         bottomPadding: 20
         text: qsTr("\uFF0B New chat")
-        Accessible.description: qsTr("Use this to create a new chat")
+        Accessible.description: qsTr("Create a new chat")
         background: Rectangle {
             border.color: newChat.down ? theme.backgroundLightest : theme.buttonBorder
             border.width: 2
@@ -135,7 +135,7 @@ Drawer {
             }
             Accessible.role: Accessible.Button
             Accessible.name: qsTr("Select the current chat")
-            Accessible.description: qsTr("Provides a button to select the current chat or edit the chat when in edit mode")
+            Accessible.description: qsTr("Select the current chat or edit the chat when in edit mode")
         }
         Row {
             id: buttons
@@ -155,8 +155,7 @@ Drawer {
                     chatName.readOnly = false
                     chatName.selectByMouse = true
                 }
-                Accessible.name: qsTr("Edit the chat name")
-                Accessible.description: qsTr("Provides a button to edit the chat name")
+                Accessible.name: qsTr("Edit chat name")
             }
             MyToolButton {
                 id: trashButton
@@ -168,8 +167,7 @@ Drawer {
                    trashQuestionDisplayed = true
                    timer.start()
                }
-                Accessible.name: qsTr("Delete of the chat")
-                Accessible.description: qsTr("Provides a button to delete the chat")
+                Accessible.name: qsTr("Delete chat")
            }
        }
        Rectangle {
@@ -207,8 +205,7 @@ Drawer {
                    Network.sendRemoveChat()
                }
                Accessible.role: Accessible.Button
-                Accessible.name: qsTr("Confirm delete of the chat")
-                Accessible.description: qsTr("Provides a button to confirm delete of the chat")
+                Accessible.name: qsTr("Confirm chat deletion")
            }
            Button {
                id: cancel
@@ -230,8 +227,7 @@ Drawer {
                    trashQuestionDisplayed = false
                }
                Accessible.role: Accessible.Button
-                Accessible.name: qsTr("Cancel the delete of the chat")
-                Accessible.description: qsTr("Provides a button to cancel delete of the chat")
+                Accessible.name: qsTr("Cancel chat deletion")
            }
        }
    }
@@ -256,7 +252,7 @@ Drawer {
        anchors.bottomMargin: 10
        text: qsTr("Updates")
        font.pixelSize: theme.fontSizeLarge
-        Accessible.description: qsTr("Use this to launch an external application that will check for updates to the installer")
+        Accessible.description: qsTr("Launch an external application that will check for updates to the installer")
        onClicked: {
            if (!LLM.checkForUpdates())
                checkForUpdatesError.open()
@@ -270,7 +266,7 @@ Drawer {
        anchors.bottom: aboutButton.top
        anchors.bottomMargin: 10
        text: qsTr("Downloads")
-        Accessible.description: qsTr("Use this to launch a dialog to download new models")
+        Accessible.description: qsTr("Launch a dialog to download new models")
        onClicked: {
            downloadClicked()
        }
@@ -282,7 +278,7 @@ Drawer {
        anchors.right: parent.right
        anchors.bottom: parent.bottom
        text: qsTr("About")
-        Accessible.description: qsTr("Use this to launch a dialog to show the about page")
+        Accessible.description: qsTr("Launch a dialog to show the about page")
        onClicked: {
            aboutClicked()
        }
@@ -83,7 +83,7 @@ MySettingsTab {
         text: qsTr("Add")
         Accessible.role: Accessible.Button
         Accessible.name: text
-        Accessible.description: qsTr("Add button")
+        Accessible.description: qsTr("Add collection")
         onClicked: {
             var isError = false;
             if (root.collection === "") {
@@ -125,7 +125,7 @@ MyDialog {
         Layout.fillWidth: true
         Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
         visible: !isChatGPT && !installed && !calcHash && downloadError === ""
-        Accessible.description: qsTr("Cancel/Resume/Download button to stop/restart/start the download")
+        Accessible.description: qsTr("Stop/restart/start the download")
         background: Rectangle {
             border.color: downloadButton.down ? theme.backgroundLightest : theme.buttonBorder
             border.width: 2
@@ -151,7 +151,7 @@ MyDialog {
         Layout.fillWidth: true
         Layout.alignment: Qt.AlignTop | Qt.AlignHCenter
         visible: installed || downloadError !== ""
-        Accessible.description: qsTr("Remove button to remove model from filesystem")
+        Accessible.description: qsTr("Remove model from filesystem")
         background: Rectangle {
             border.color: removeButton.down ? theme.backgroundLightest : theme.buttonBorder
             border.width: 2
@@ -186,8 +186,8 @@ MyDialog {
             Download.installModel(filename, openaiKey.text);
         }
         Accessible.role: Accessible.Button
-        Accessible.name: qsTr("Install button")
-        Accessible.description: qsTr("Install button to install chatgpt model")
+        Accessible.name: qsTr("Install")
+        Accessible.description: qsTr("Install chatGPT model")
     }
 
     ColumnLayout {
@@ -385,7 +385,7 @@ MyDialog {
         linkColor: theme.textColor
         Accessible.role: Accessible.Paragraph
         Accessible.name: qsTr("Description")
-        Accessible.description: qsTr("The description of the file")
+        Accessible.description: qsTr("File description")
         onLinkActivated: Qt.openUrlExternally(link)
     }
 }
@@ -456,7 +456,7 @@ MyDialog {
     }
     MyButton {
         text: qsTr("Browse")
-        Accessible.description: qsTr("Opens a folder picker dialog to choose where to save model files")
+        Accessible.description: qsTr("Choose where to save model files")
         onClicked: modelPathDialog.open()
     }
 }
@@ -69,7 +69,7 @@ Item {
     font.pixelSize: theme.fontSizeLarge
     Accessible.role: Accessible.Button
     Accessible.name: text
-    Accessible.description: qsTr("Restores the settings dialog to a default state")
+    Accessible.description: qsTr("Restores settings dialog to a default state")
     onClicked: {
         root.restoreDefaultsClicked();
     }
@@ -89,7 +89,7 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O
     }
     Accessible.role: Accessible.EditableText
     Accessible.name: qsTr("Attribution (optional)")
-    Accessible.description: qsTr("Textfield for providing attribution")
+    Accessible.description: qsTr("Provide attribution")
     onEditingFinished: {
         MySettings.networkAttribution = attribution.text;
     }
@@ -103,12 +103,12 @@ NOTE: By turning on this feature, you will be sending your data to the GPT4All O
     spacing: 10
     MyButton {
         text: qsTr("Enable")
-        Accessible.description: qsTr("Enable opt-in button")
+        Accessible.description: qsTr("Enable opt-in")
         DialogButtonBox.buttonRole: DialogButtonBox.AcceptRole
     }
     MyButton {
         text: qsTr("Cancel")
-        Accessible.description: qsTr("Cancel opt-in button")
+        Accessible.description: qsTr("Cancel opt-in")
         DialogButtonBox.buttonRole: DialogButtonBox.RejectRole
     }
     background: Rectangle {
@@ -21,8 +21,8 @@ MyDialog {
 
     Item {
         Accessible.role: Accessible.Dialog
-        Accessible.name: qsTr("Settings dialog")
-        Accessible.description: qsTr("Dialog containing various application settings")
+        Accessible.name: qsTr("Settings")
+        Accessible.description: qsTr("Contains various application settings")
     }
 
     ListModel {
@@ -133,7 +133,6 @@ model release that uses your data!")
     font.pixelSize: theme.fontSizeLarge
     Accessible.role: Accessible.Paragraph
     Accessible.name: qsTr("Opt-in for anonymous usage statistics")
-    Accessible.description: qsTr("Label for opt-in")
 }
 
 ButtonGroup {
@@ -162,7 +161,7 @@ model release that uses your data!")
     font.pixelSize: theme.fontSizeLarge
     Accessible.role: Accessible.RadioButton
     Accessible.name: qsTr("Opt-in for anonymous usage statistics")
-    Accessible.description: qsTr("Radio button to allow opt-in for anonymous usage statistics")
+    Accessible.description: qsTr("Allow opt-in for anonymous usage statistics")
 
     background: Rectangle {
         color: "transparent"
@@ -203,7 +202,7 @@ model release that uses your data!")
     font.pixelSize: theme.fontSizeLarge
     Accessible.role: Accessible.RadioButton
     Accessible.name: qsTr("Opt-out for anonymous usage statistics")
-    Accessible.description: qsTr("Radio button to allow opt-out for anonymous usage statistics")
+    Accessible.description: qsTr("Allow opt-out for anonymous usage statistics")
 
     background: Rectangle {
         color: "transparent"
@@ -249,7 +248,7 @@ model release that uses your data!")
     font.pixelSize: theme.fontSizeLarge
     Accessible.role: Accessible.Paragraph
     Accessible.name: qsTr("Opt-in for network")
-    Accessible.description: qsTr("Checkbox to allow opt-in for network")
+    Accessible.description: qsTr("Allow opt-in for network")
 }
 
 ButtonGroup {
@@ -276,7 +275,7 @@ model release that uses your data!")
     font.pixelSize: theme.fontSizeLarge
     Accessible.role: Accessible.RadioButton
     Accessible.name: qsTr("Opt-in for network")
-    Accessible.description: qsTr("Radio button to allow opt-in anonymous sharing of chats to the GPT4All Datalake")
+    Accessible.description: qsTr("Allow opt-in anonymous sharing of chats to the GPT4All Datalake")
 
     background: Rectangle {
         color: "transparent"
@@ -317,7 +316,7 @@ model release that uses your data!")
     font.pixelSize: theme.fontSizeLarge
     Accessible.role: Accessible.RadioButton
     Accessible.name: qsTr("Opt-out for network")
-    Accessible.description: qsTr("Radio button to allow opt-out anonymous sharing of chats to the GPT4All Datalake")
+    Accessible.description: qsTr("Allow opt-out anonymous sharing of chats to the GPT4All Datalake")
 
     background: Rectangle {
         color: "transparent"
gpt4all-training/build_map.py (1 changed line; mode: normal file → executable file)
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import numpy as np
 from nomic import atlas
 import glob
gpt4all-training/clean.py (1 changed line; mode: normal file → executable file)
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import numpy as np
 import glob
 import os
gpt4all-training/create_hostname.sh (0 changed lines; mode: normal file → executable file)
gpt4all-training/eval_figures.py (1 changed line; mode: normal file → executable file)
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import glob
 import pickle
 import numpy as np
gpt4all-training/eval_self_instruct.py (1 changed line; mode: normal file → executable file)
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import json
 import torch
 import pickle
gpt4all-training/generate.py (1 changed line; mode: normal file → executable file)
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from peft import PeftModelForCausalLM
 from read import read_config
gpt4all-training/inference.py (1 changed line; mode: normal file → executable file)
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 import torch.nn as nn
gpt4all-training/launcher.sh (0 changed lines; mode: normal file → executable file)
gpt4all-training/train.py (1 changed line; mode: normal file → executable file)
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import os
 from transformers import AutoModelForCausalLM, AutoTokenizer, get_scheduler
 import torch