Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-07-21 00:02:15 -04:00

Compare commits

No commits in common. "10f9b49313d01d53006b3d7c78608da8e3c62257" and "aed206834224701d7ba0b311c54c5f3d3fffa38b" have entirely different histories: 10f9b49313 ... aed2068342

Changed files include README.md (11 changed lines).
@@ -92,17 +92,6 @@ Example tags: `backend`, `bindings`, `python-bindings`, `documentation`, etc.
 <a href="https://s3.amazonaws.com/static.nomic.ai/gpt4all/2023_GPT4All_Technical_Report.pdf">:green_book: Technical Report 1: GPT4All</a>
 </p>
 
-## Star History
-
-<a href="https://star-history.com/#nomic-ai/gpt4all&Date">
- <picture>
-   <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=nomic-ai/gpt4all&type=Date&theme=dark" />
-   <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=nomic-ai/gpt4all&type=Date" />
-   <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=nomic-ai/gpt4all&type=Date" />
- </picture>
-</a>
-
-
 ## Citation
 
 If you utilize this repository, models or data in a downstream project, please consider citing it with:
@@ -1 +1 @@
-Subproject commit 500689ad356a81a471a7fb68cc70f7aee5a5f56e
+Subproject commit 3742085b0429cbe0ede49bcb9f891e4a5e25a724
@@ -242,8 +242,6 @@ if (LLAMA_KOMPUTE)
         kompute/op_mul_mat_mat_f16.comp
         kompute/op_mul_mat_mat_q8_0.comp
         kompute/op_mul_mat_mat_q4_0.comp
-        kompute/op_mul_mat_mat_q4_1.comp
-        kompute/op_mul_mat_mat_q6_k.comp
         kompute/op_mul_mat_f16.comp
         kompute/op_mul_mat_q8_0.comp
         kompute/op_mul_mat_q4_0.comp
@@ -278,8 +276,6 @@ if (LLAMA_KOMPUTE)
         shaderop_mul_mat_mat_f16.h
         shaderop_mul_mat_mat_q8_0.h
         shaderop_mul_mat_mat_q4_0.h
-        shaderop_mul_mat_mat_q4_1.h
-        shaderop_mul_mat_mat_q6_k.h
         shaderop_mul_mat_f16.h
         shaderop_mul_mat_q8_0.h
         shaderop_mul_mat_q4_0.h
@@ -4,13 +4,13 @@ The GPT4All CLI is a self-contained script based on the `gpt4all` and `typer` packages.
 REPL to communicate with a language model similar to the chat GUI application, but more basic.
 """
 
-import importlib.metadata
 import io
+import pkg_resources  # should be present as a dependency of gpt4all
 import sys
+import typer
 
 from collections import namedtuple
 from typing_extensions import Annotated
 
-import typer
 from gpt4all import GPT4All
 
@@ -79,7 +79,7 @@ def repl(
 
     use_new_loop = False
     try:
-        version = importlib.metadata.version('gpt4all')
+        version = pkg_resources.Environment()['gpt4all'][0].version
         version_major = int(version.split('.')[0])
         if version_major >= 1:
             use_new_loop = True
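This hunk swaps how the CLI detects the installed gpt4all version. As a rough standalone sketch (not part of the diff), the two lookups compare as follows; the helper names are illustrative and a gpt4all distribution is assumed to be installed:

# Sketch only: compares the two version-lookup styles shown in the hunk above.
import importlib.metadata

import pkg_resources  # provided by setuptools

def major_version_stdlib(dist: str = "gpt4all") -> int:
    # Newer style: standard-library metadata lookup (Python 3.8+).
    version = importlib.metadata.version(dist)
    return int(version.split(".")[0])

def major_version_pkg_resources(dist: str = "gpt4all") -> int:
    # Older style: setuptools' environment scan, as in the right-hand side of the hunk.
    version = pkg_resources.Environment()[dist][0].version
    return int(version.split(".")[0])

if __name__ == "__main__":
    use_new_loop = major_version_stdlib() >= 1
    print("use_new_loop =", use_new_loop)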
@@ -8,8 +8,9 @@ import java.io.ByteArrayOutputStream;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.util.*;
-import java.util.stream.Collectors;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 public class LLModel implements AutoCloseable {
 
@@ -305,197 +306,6 @@ public class LLModel implements AutoCloseable {
         };
     }
 
-    /**
-     * The array of messages for the conversation.
-     */
-    public static class Messages {
-
-        private final List<PromptMessage> messages = new ArrayList<>();
-
-        public Messages(PromptMessage...messages) {
-            this.messages.addAll(Arrays.asList(messages));
-        }
-
-        public Messages(List<PromptMessage> messages) {
-            this.messages.addAll(messages);
-        }
-
-        public Messages addPromptMessage(PromptMessage promptMessage) {
-            this.messages.add(promptMessage);
-            return this;
-        }
-
-        List<PromptMessage> toList() {
-            return Collections.unmodifiableList(this.messages);
-        }
-
-        List<Map<String, String>> toListMap() {
-            return messages.stream()
-                    .map(PromptMessage::toMap).collect(Collectors.toList());
-        }
-
-    }
-
-    /**
-     * A message in the conversation, identical to OpenAI's chat message.
-     */
-    public static class PromptMessage {
-
-        private static final String ROLE = "role";
-        private static final String CONTENT = "content";
-
-        private final Map<String, String> message = new HashMap<>();
-
-        public PromptMessage() {
-        }
-
-        public PromptMessage(Role role, String content) {
-            addRole(role);
-            addContent(content);
-        }
-
-        public PromptMessage addRole(Role role) {
-            return this.addParameter(ROLE, role.type());
-        }
-
-        public PromptMessage addContent(String content) {
-            return this.addParameter(CONTENT, content);
-        }
-
-        public PromptMessage addParameter(String key, String value) {
-            this.message.put(key, value);
-            return this;
-        }
-
-        public String content() {
-            return this.parameter(CONTENT);
-        }
-
-        public Role role() {
-            String role = this.parameter(ROLE);
-            return Role.from(role);
-        }
-
-        public String parameter(String key) {
-            return this.message.get(key);
-        }
-
-        Map<String, String> toMap() {
-            return Collections.unmodifiableMap(this.message);
-        }
-
-    }
-
-    public enum Role {
-
-        SYSTEM("system"), ASSISTANT("assistant"), USER("user");
-
-        private final String type;
-
-        String type() {
-            return this.type;
-        }
-
-        static Role from(String type) {
-
-            if (type == null) {
-                return null;
-            }
-
-            switch (type) {
-                case "system": return SYSTEM;
-                case "assistant": return ASSISTANT;
-                case "user": return USER;
-                default: throw new IllegalArgumentException(
-                        String.format("You passed %s type but only %s are supported",
-                                type, Arrays.toString(Role.values())
-                        )
-                );
-            }
-        }
-
-        Role(String type) {
-            this.type = type;
-        }
-
-        @Override
-        public String toString() {
-            return type();
-        }
-    }
-
-    /**
-     * The result of the completion, similar to OpenAI's format.
-     */
-    public static class CompletionReturn {
-        private String model;
-        private Usage usage;
-        private Choices choices;
-
-        public CompletionReturn(String model, Usage usage, Choices choices) {
-            this.model = model;
-            this.usage = usage;
-            this.choices = choices;
-        }
-
-        public Choices choices() {
-            return choices;
-        }
-
-        public String model() {
-            return model;
-        }
-
-        public Usage usage() {
-            return usage;
-        }
-    }
-
-    /**
-     * The generated completions.
-     */
-    public static class Choices {
-
-        private final List<CompletionChoice> choices = new ArrayList<>();
-
-        public Choices(List<CompletionChoice> choices) {
-            this.choices.addAll(choices);
-        }
-
-        public Choices(CompletionChoice...completionChoices){
-            this.choices.addAll(Arrays.asList(completionChoices));
-        }
-
-        public Choices addCompletionChoice(CompletionChoice completionChoice) {
-            this.choices.add(completionChoice);
-            return this;
-        }
-
-        public CompletionChoice first() {
-            return this.choices.get(0);
-        }
-
-        public int totalChoices() {
-            return this.choices.size();
-        }
-
-        public CompletionChoice get(int index) {
-            return this.choices.get(index);
-        }
-
-        public List<CompletionChoice> choices() {
-            return Collections.unmodifiableList(choices);
-        }
-    }
-
-    /**
-     * A completion choice, similar to OpenAI's format.
-     */
-    public static class CompletionChoice extends PromptMessage {
-        public CompletionChoice(Role role, String content) {
-            super(role, content);
-        }
-    }
 
     public static class ChatCompletionResponse {
         public String model;
@@ -513,41 +323,6 @@ public class LLModel implements AutoCloseable {
         // Getters and setters
     }
 
-    public CompletionReturn chatCompletionResponse(Messages messages,
-                                                   GenerationConfig generationConfig) {
-        return chatCompletion(messages, generationConfig, false, false);
-    }
-
-    /**
-     * chatCompletion formats the existing chat conversation into a template to be
-     * easier to process for chat UIs. It is not absolutely necessary as generate method
-     * may be directly used to make generations with gpt models.
-     *
-     * @param messages object to create theMessages to send to GPT model
-     * @param generationConfig How to decode/process the generation.
-     * @param streamToStdOut Send tokens as they are calculated Standard output.
-     * @param outputFullPromptToStdOut Should full prompt built out of messages be sent to Standard output.
-     * @return CompletionReturn contains stats and generated Text.
-     */
-    public CompletionReturn chatCompletion(Messages messages,
-                                           GenerationConfig generationConfig, boolean streamToStdOut,
-                                           boolean outputFullPromptToStdOut) {
-
-        String fullPrompt = buildPrompt(messages.toListMap());
-
-        if(outputFullPromptToStdOut)
-            System.out.print(fullPrompt);
-
-        String generatedText = generate(fullPrompt, generationConfig, streamToStdOut);
-
-        final CompletionChoice promptMessage = new CompletionChoice(Role.ASSISTANT, generatedText);
-        final Choices choices = new Choices(promptMessage);
-
-        final Usage usage = getUsage(fullPrompt, generatedText);
-        return new CompletionReturn(this.modelName, usage, choices);
-
-    }
-
     public ChatCompletionResponse chatCompletion(List<Map<String, String>> messages,
                                                  GenerationConfig generationConfig) {
         return chatCompletion(messages, generationConfig, false, false);
@@ -577,23 +352,19 @@
         ChatCompletionResponse response = new ChatCompletionResponse();
         response.model = this.modelName;
 
-        response.usage = getUsage(fullPrompt, generatedText);
+        Usage usage = new Usage();
+        usage.promptTokens = fullPrompt.length();
+        usage.completionTokens = generatedText.length();
+        usage.totalTokens = fullPrompt.length() + generatedText.length();
+        response.usage = usage;
 
         Map<String, String> message = new HashMap<>();
         message.put("role", "assistant");
         message.put("content", generatedText);
 
         response.choices = List.of(message);
 
         return response;
 
-    }
-
-    private Usage getUsage(String fullPrompt, String generatedText) {
-        Usage usage = new Usage();
-        usage.promptTokens = fullPrompt.length();
-        usage.completionTokens = generatedText.length();
-        usage.totalTokens = fullPrompt.length() + generatedText.length();
-        return usage;
     }
 
     protected static String buildPrompt(List<Map<String, String>> messages) {
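The removed javadoc above explains the intent of this API: chatCompletion flattens the accumulated role/content messages into a single prompt so chat UIs do not have to, then wraps the generation in an OpenAI-style response with a usage block computed from string lengths. For orientation only, here is a minimal Python sketch of that same shape; the names and the fake generate callback are illustrative, not from the repository:

# Hypothetical sketch of the OpenAI-style chat-completion shape modelled by the
# removed Java classes (Messages/PromptMessage/CompletionReturn).
from typing import Callable, Dict, List

def build_prompt(messages: List[Dict[str, str]]) -> str:
    # Flatten role/content pairs into one prompt string for the model.
    return "".join(f"### {m['role']}:\n{m['content']}\n" for m in messages)

def chat_completion(messages: List[Dict[str, str]],
                    generate: Callable[[str], str],
                    model_name: str = "example-model") -> dict:
    full_prompt = build_prompt(messages)
    generated = generate(full_prompt)
    return {
        "model": model_name,
        # Like the Java code above, "usage" is just string lengths, not real token counts.
        "usage": {
            "prompt_tokens": len(full_prompt),
            "completion_tokens": len(generated),
            "total_tokens": len(full_prompt) + len(generated),
        },
        "choices": [{"role": "assistant", "content": generated}],
    }

if __name__ == "__main__":
    msgs = [{"role": "system", "content": "You are a helpful assistant"},
            {"role": "user", "content": "Add 2+2"}]
    print(chat_completion(msgs, generate=lambda prompt: "4"))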
@@ -28,33 +28,6 @@ import static org.mockito.Mockito.*;
 @ExtendWith(MockitoExtension.class)
 public class BasicTests {
 
-    @Test
-    public void simplePromptWithObject(){
-
-        LLModel model = Mockito.spy(new LLModel());
-
-        LLModel.GenerationConfig config =
-                LLModel.config()
-                        .withNPredict(20)
-                        .build();
-
-        // The generate method will return "4"
-        doReturn("4").when( model ).generate(anyString(), eq(config), eq(true));
-
-        LLModel.PromptMessage promptMessage1 = new LLModel.PromptMessage(LLModel.Role.SYSTEM, "You are a helpful assistant");
-        LLModel.PromptMessage promptMessage2 = new LLModel.PromptMessage(LLModel.Role.USER, "Add 2+2");
-
-        LLModel.Messages messages = new LLModel.Messages(promptMessage1, promptMessage2);
-
-        LLModel.CompletionReturn response = model.chatCompletion(
-                messages, config, true, true);
-
-        assertTrue( response.choices().first().content().contains("4") );
-
-        // Verifies the prompt and response are certain length.
-        assertEquals( 224 , response.usage().totalTokens );
-    }
-
     @Test
     public void simplePrompt(){
 
@@ -39,7 +39,7 @@ class Embed4All:
         Args:
             n_threads: number of CPU threads used by GPT4All. Default is None, then the number of threads are determined automatically.
         """
-        self.gpt4all = GPT4All(model_name or 'all-MiniLM-L6-v2-f16.gguf', n_threads=n_threads, **kwargs)
+        self.gpt4all = GPT4All(model_name or 'ggml-all-MiniLM-L6-v2-f16.gguf', n_threads=n_threads, **kwargs)
 
     def embed(self, text: str) -> List[float]:
         """
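The only change in this hunk is the default embedding model filename ('all-MiniLM-L6-v2-f16.gguf' versus the older 'ggml-all-MiniLM-L6-v2-f16.gguf'). A short usage sketch, assuming the gpt4all Python package is installed and passing the model name explicitly so the result does not depend on which default the installed version ships:

# Sketch only: explicit model_name avoids relying on either commit's default.
from gpt4all import Embed4All

embedder = Embed4All(model_name="all-MiniLM-L6-v2-f16.gguf", n_threads=4)
vector = embedder.embed("The quick brown fox jumps over the lazy dog")
print(len(vector))  # dimensionality of the returned embedding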
@@ -1,27 +1,23 @@
-import atexit
 import ctypes
-import importlib.resources
 import logging
 import os
 import platform
+from queue import Queue
 import re
 import subprocess
 import sys
 import threading
-from contextlib import ExitStack
-from queue import Queue
 from typing import Callable, Iterable, List
 
+import pkg_resources
 
 logger: logging.Logger = logging.getLogger(__name__)
 
 
-file_manager = ExitStack()
-atexit.register(file_manager.close)  # clean up files on exit
-
 # TODO: provide a config file to make this more robust
-MODEL_LIB_PATH = file_manager.enter_context(importlib.resources.as_file(
-    importlib.resources.files("gpt4all") / "llmodel_DO_NOT_MODIFY" / "build",
-))
+LLMODEL_PATH = os.path.join("llmodel_DO_NOT_MODIFY", "build").replace("\\", "\\\\")
+MODEL_LIB_PATH = str(pkg_resources.resource_filename("gpt4all", LLMODEL_PATH)).replace("\\", "\\\\")
 
 def load_llmodel_library():
     system = platform.system()
@@ -40,7 +36,9 @@ def load_llmodel_library():
 
     llmodel_file = "libllmodel" + "." + c_lib_ext
 
-    llmodel_dir = str(MODEL_LIB_PATH / llmodel_file).replace("\\", r"\\")
+    llmodel_dir = str(pkg_resources.resource_filename("gpt4all", os.path.join(LLMODEL_PATH, llmodel_file))).replace(
+        "\\", "\\\\"
+    )
 
     llmodel_lib = ctypes.CDLL(llmodel_dir)
 
@@ -133,7 +131,7 @@ llmodel.llmodel_set_implementation_search_path.restype = None
 llmodel.llmodel_threadCount.argtypes = [ctypes.c_void_p]
 llmodel.llmodel_threadCount.restype = ctypes.c_int32
 
-llmodel.llmodel_set_implementation_search_path(str(MODEL_LIB_PATH).replace("\\", r"\\").encode("utf-8"))
+llmodel.llmodel_set_implementation_search_path(MODEL_LIB_PATH.encode("utf-8"))
 
 llmodel.llmodel_available_gpu_devices.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.POINTER(ctypes.c_int32)]
 llmodel.llmodel_available_gpu_devices.restype = ctypes.POINTER(LLModelGPUDevice)
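Taken together, the three hunks above switch how the Python bindings locate the bundled llmodel native library: one side resolves the llmodel_DO_NOT_MODIFY/build directory with importlib.resources behind an ExitStack, the other with pkg_resources.resource_filename, and both hand the resulting path to ctypes. A standalone sketch of the two lookups follows; the load_native helper and the .so filename are illustrative, while the package and directory names are the ones used in the diff:

# Sketch only: two ways to resolve a native library bundled inside a package.
import atexit
import ctypes
import importlib.resources
import os
from contextlib import ExitStack

import pkg_resources

_files = ExitStack()
atexit.register(_files.close)  # release any temporary extraction on exit

def lib_dir_importlib():
    # Newer style: importlib.resources yields a Traversable; as_file() gives a real path.
    return _files.enter_context(importlib.resources.as_file(
        importlib.resources.files("gpt4all") / "llmodel_DO_NOT_MODIFY" / "build"
    ))

def lib_dir_pkg_resources() -> str:
    # Older style: setuptools resolves the subpath relative to the installed package.
    subpath = os.path.join("llmodel_DO_NOT_MODIFY", "build")
    return pkg_resources.resource_filename("gpt4all", subpath)

def load_native(filename: str = "libllmodel.so") -> ctypes.CDLL:
    # Illustrative loader: join the resolved directory with a platform-specific filename.
    return ctypes.CDLL(str(os.path.join(lib_dir_importlib(), filename)))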
@@ -32,8 +32,13 @@ One click installers for macOS, Linux, and Windows at https://gpt4all.io
 * Multi-chat - a list of current and past chats and the ability to save/delete/export and switch between
 * Text to speech - have the AI response with voice
 * Speech to text - give the prompt with your voice
+* Python bindings
+* Typescript bindings
 * Plugin support for langchain other developer tools
-* chat gui headless operation mode
+* Save your prompt/responses to disk
+* Upload prompt/response manually/automatically to nomic.ai to aid future training runs
+* Syntax highlighting support for programming languages, etc.
+* REST API with a built-in webserver in the chat gui itself with a headless operation mode as well
 * Advanced settings for changing temperature, topk, etc. (DONE)
 * * Improve the accessibility of the installer for screen reader users
 * YOUR IDEA HERE
@@ -385,11 +385,7 @@ bool Chat::serialize(QDataStream &stream, int version) const
     stream << m_modelInfo.filename();
     if (version > 2)
         stream << m_collections;
-    const bool serializeKV = MySettings::globalInstance()->saveChatsContext();
-    if (version > 5)
-        stream << serializeKV;
-    if (!m_llmodel->serialize(stream, version, serializeKV))
+    if (!m_llmodel->serialize(stream, version, true /*serializeKV*/))
         return false;
     if (!m_chatModel->serialize(stream, version))
         return false;
@@ -417,6 +413,7 @@ bool Chat::deserialize(QDataStream &stream, int version)
     if (!m_modelInfo.id().isEmpty())
         emit modelInfoChanged();
 
+    bool deserializeKV = true; // make this a setting
     bool discardKV = m_modelInfo.id().isEmpty();
 
     // Prior to version 2 gptj models had a bug that fixed the kv_cache to F32 instead of F16 so
@@ -428,11 +425,6 @@ bool Chat::deserialize(QDataStream &stream, int version)
         stream >> m_collections;
         emit collectionListChanged(m_collections);
     }
-
-    bool deserializeKV = true;
-    if (version > 5)
-        stream >> deserializeKV;
 
     m_llmodel->setModelInfo(m_modelInfo);
     if (!m_llmodel->deserialize(stream, version, deserializeKV, discardKV))
         return false;
@@ -54,8 +54,6 @@ public:
     }
     ChatModel *chatModel() { return m_chatModel; }
 
-    bool isNewChat() const { return m_name == tr("New Chat") && !m_chatModel->count(); }
-
     Q_INVOKABLE void reset();
     Q_INVOKABLE void processSystemPrompt();
     Q_INVOKABLE bool isModelLoaded() const;
@@ -5,7 +5,7 @@
 #include <QDataStream>
 
 #define CHAT_FORMAT_MAGIC 0xF5D553CC
-#define CHAT_FORMAT_VERSION 6
+#define CHAT_FORMAT_VERSION 5
 
 class MyChatListModel: public ChatListModel { };
 Q_GLOBAL_STATIC(MyChatListModel, chatListModelInstance)
@@ -17,10 +17,11 @@ ChatListModel *ChatListModel::globalInstance()
 ChatListModel::ChatListModel()
     : QAbstractListModel(nullptr)
     , m_newChat(nullptr)
+    , m_dummyChat(nullptr)
     , m_serverChat(nullptr)
     , m_currentChat(nullptr)
 {
-    addChat();
+    addDummyChat();
 
     ChatsRestoreThread *thread = new ChatsRestoreThread;
     connect(thread, &ChatsRestoreThread::chatRestored, this, &ChatListModel::restoreChat);
@@ -58,7 +59,10 @@ void ChatListModel::saveChats()
     for (Chat *chat : m_chats) {
         if (chat == m_serverChat)
             continue;
-        if (chat->isNewChat())
+        const bool isChatGPT = chat->modelInfo().isChatGPT;
+        if (!isChatGPT && !MySettings::globalInstance()->saveChats())
+            continue;
+        if (isChatGPT && !MySettings::globalInstance()->saveChatGPTChats())
             continue;
         toSave.append(chat);
     }
@@ -245,13 +249,35 @@ void ChatListModel::restoreChat(Chat *chat)
     chat->setParent(this);
     connect(chat, &Chat::nameChanged, this, &ChatListModel::nameChanged);
 
+    if (m_dummyChat) {
+        beginResetModel();
+        m_chats = QList<Chat*>({chat});
+        setCurrentChat(chat);
+        delete m_dummyChat;
+        m_dummyChat = nullptr;
+        endResetModel();
+    } else {
         beginInsertRows(QModelIndex(), m_chats.size(), m_chats.size());
         m_chats.append(chat);
         endInsertRows();
     }
+}
 
 void ChatListModel::chatsRestoredFinished()
 {
+    if (m_dummyChat) {
+        beginResetModel();
+        Chat *dummy = m_dummyChat;
+        m_dummyChat = nullptr;
+        m_chats.clear();
+        addChat();
+        delete dummy;
+        endResetModel();
+    }
+
+    if (m_chats.isEmpty())
+        addChat();
+
     addServerChat();
 }
 
@@ -84,7 +84,7 @@ public:
     Q_INVOKABLE void addChat()
     {
         // Don't add a new chat if we already have one
-        if (m_newChat)
+        if (m_newChat || m_dummyChat)
             return;
 
         // Create a new chat pointer and connect it to determine when it is populated
@@ -101,6 +101,18 @@ public:
         setCurrentChat(m_newChat);
     }
 
+    Q_INVOKABLE void addDummyChat()
+    {
+        // Create a new dummy chat pointer and don't connect it
+        m_dummyChat = new Chat(this);
+        beginInsertRows(QModelIndex(), 0, 0);
+        m_chats.prepend(m_dummyChat);
+        endInsertRows();
+        emit countChanged();
+        m_currentChat = m_dummyChat;
+        emit currentChatChanged();
+    }
+
     Q_INVOKABLE void addServerChat()
     {
         // Create a new dummy chat pointer and don't connect it
@@ -240,6 +252,7 @@ private Q_SLOTS:
 
 private:
     Chat* m_newChat;
+    Chat* m_dummyChat;
     Chat* m_serverChat;
     Chat* m_currentChat;
     QList<Chat*> m_chats;
@@ -110,17 +110,17 @@
     },
     {
         "order": "i",
-        "md5sum": "0e769317b90ac30d6e09486d61fefa26",
+        "md5sum": "aae346fe095e60139ca39b3fda4ac7ae",
         "name": "Mini Orca (Small)",
-        "filename": "orca-mini-3b-gguf2-q4_0.gguf",
-        "filesize": "1979946720",
+        "filename": "orca-mini-3b.q4_0.gguf",
+        "filesize": "1928648352",
         "requires": "2.5.0",
         "ramrequired": "4",
         "parameters": "3 billion",
         "quant": "q4_0",
         "type": "OpenLLaMa",
-        "description": "<strong>Small version of new model with novel dataset</strong><br><ul><li>Instruction based<li>Explain tuned datasets<li>Orca Research Paper dataset construction approaches<li>Cannot be used commercially</ul>",
-        "url": "https://gpt4all.io/models/gguf/orca-mini-3b-gguf2-q4_0.gguf",
+        "description": "<strong>Small version of new model with novel dataset</strong><br><ul><li>Instruction based<li>Explain tuned datasets<li>Orca Research Paper dataset construction approaches<li>Licensed for commercial use</ul>",
+        "url": "https://gpt4all.io/models/gguf/orca-mini-3b.q4_0.gguf",
         "promptTemplate": "### User:\n%1\n### Response:\n",
         "systemPrompt": "### System:\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\n\n"
     },
@@ -10,7 +10,8 @@
 #include <QUrl>
 
 static int default_threadCount = std::min(4, (int32_t) std::thread::hardware_concurrency());
-static bool default_saveChatsContext = false;
+static bool default_saveChats = false;
+static bool default_saveChatGPTChats = true;
 static bool default_serverChat = false;
 static QString default_userDefaultModel = "Application default";
 static bool default_forceMetal = false;
@@ -102,7 +103,8 @@ void MySettings::restoreApplicationDefaults()
     setFontSize(default_fontSize);
     setDevice(default_device);
     setThreadCount(default_threadCount);
-    setSaveChatsContext(default_saveChatsContext);
+    setSaveChats(default_saveChats);
+    setSaveChatGPTChats(default_saveChatGPTChats);
     setServerChat(default_serverChat);
     setModelPath(defaultLocalModelsPath());
     setUserDefaultModel(default_userDefaultModel);
@@ -395,22 +397,40 @@ void MySettings::setThreadCount(int c)
     emit threadCountChanged();
 }
 
-bool MySettings::saveChatsContext() const
+bool MySettings::saveChats() const
 {
     QSettings setting;
     setting.sync();
-    return setting.value("saveChatsContext", default_saveChatsContext).toBool();
+    return setting.value("saveChats", default_saveChats).toBool();
 }
 
-void MySettings::setSaveChatsContext(bool b)
+void MySettings::setSaveChats(bool b)
 {
-    if (saveChatsContext() == b)
+    if (saveChats() == b)
         return;
 
     QSettings setting;
-    setting.setValue("saveChatsContext", b);
+    setting.setValue("saveChats", b);
     setting.sync();
-    emit saveChatsContextChanged();
+    emit saveChatsChanged();
+}
+
+bool MySettings::saveChatGPTChats() const
+{
+    QSettings setting;
+    setting.sync();
+    return setting.value("saveChatGPTChats", default_saveChatGPTChats).toBool();
+}
+
+void MySettings::setSaveChatGPTChats(bool b)
+{
+    if (saveChatGPTChats() == b)
+        return;
+
+    QSettings setting;
+    setting.setValue("saveChatGPTChats", b);
+    setting.sync();
+    emit saveChatGPTChatsChanged();
 }
 
 bool MySettings::serverChat() const
@@ -10,7 +10,8 @@ class MySettings : public QObject
 {
     Q_OBJECT
     Q_PROPERTY(int threadCount READ threadCount WRITE setThreadCount NOTIFY threadCountChanged)
-    Q_PROPERTY(bool saveChatsContext READ saveChatsContext WRITE setSaveChatsContext NOTIFY saveChatsContextChanged)
+    Q_PROPERTY(bool saveChats READ saveChats WRITE setSaveChats NOTIFY saveChatsChanged)
+    Q_PROPERTY(bool saveChatGPTChats READ saveChatGPTChats WRITE setSaveChatGPTChats NOTIFY saveChatGPTChatsChanged)
     Q_PROPERTY(bool serverChat READ serverChat WRITE setServerChat NOTIFY serverChatChanged)
     Q_PROPERTY(QString modelPath READ modelPath WRITE setModelPath NOTIFY modelPathChanged)
     Q_PROPERTY(QString userDefaultModel READ userDefaultModel WRITE setUserDefaultModel NOTIFY userDefaultModelChanged)
@@ -63,8 +64,10 @@ public:
     // Application settings
     int threadCount() const;
     void setThreadCount(int c);
-    bool saveChatsContext() const;
-    void setSaveChatsContext(bool b);
+    bool saveChats() const;
+    void setSaveChats(bool b);
+    bool saveChatGPTChats() const;
+    void setSaveChatGPTChats(bool b);
     bool serverChat() const;
     void setServerChat(bool b);
     QString modelPath() const;
@@ -119,7 +122,8 @@ Q_SIGNALS:
     void promptTemplateChanged(const ModelInfo &model);
     void systemPromptChanged(const ModelInfo &model);
     void threadCountChanged();
-    void saveChatsContextChanged();
+    void saveChatsChanged();
+    void saveChatGPTChatsChanged();
     void serverChatChanged();
     void modelPathChanged();
     void userDefaultModelChanged();
@@ -317,6 +317,16 @@ void Network::sendNetworkToggled(bool isActive)
     sendMixpanelEvent("network_toggled", QVector<KeyValue>{kv});
 }
 
+void Network::sendSaveChatsToggled(bool isActive)
+{
+    if (!MySettings::globalInstance()->networkUsageStatsActive())
+        return;
+    KeyValue kv;
+    kv.key = QString("isActive");
+    kv.value = QJsonValue(isActive);
+    sendMixpanelEvent("savechats_toggled", QVector<KeyValue>{kv});
+}
+
 void Network::sendNewChat(int count)
 {
     if (!MySettings::globalInstance()->networkUsageStatsActive())
@@ -38,6 +38,7 @@ public Q_SLOTS:
     void sendDownloadFinished(const QString &model, bool success);
     Q_INVOKABLE void sendSettingsDialog();
     Q_INVOKABLE void sendNetworkToggled(bool active);
+    Q_INVOKABLE void sendSaveChatsToggled(bool active);
     Q_INVOKABLE void sendNewChat(int count);
     Q_INVOKABLE void sendRemoveChat();
     Q_INVOKABLE void sendRenameChat();
@@ -234,35 +234,53 @@ MySettingsTab {
             Accessible.description: ToolTip.text
         }
         Label {
-            id: saveChatsContextLabel
-            text: qsTr("Save chats context to disk:")
+            id: saveChatsLabel
+            text: qsTr("Save chats to disk:")
             color: theme.textColor
             font.pixelSize: theme.fontSizeLarge
             Layout.row: 7
             Layout.column: 0
         }
         MyCheckBox {
-            id: saveChatsContextBox
+            id: saveChatsBox
             Layout.row: 7
             Layout.column: 1
-            checked: MySettings.saveChatsContext
+            checked: MySettings.saveChats
             onClicked: {
-                MySettings.saveChatsContext = !MySettings.saveChatsContext
+                Network.sendSaveChatsToggled(saveChatsBox.checked);
+                MySettings.saveChats = !MySettings.saveChats
             }
             ToolTip.text: qsTr("WARNING: Saving chats to disk can be ~2GB per chat")
             ToolTip.visible: hovered
         }
+        Label {
+            id: saveChatGPTChatsLabel
+            text: qsTr("Save ChatGPT chats to disk:")
+            color: theme.textColor
+            font.pixelSize: theme.fontSizeLarge
+            Layout.row: 8
+            Layout.column: 0
+        }
+        MyCheckBox {
+            id: saveChatGPTChatsBox
+            Layout.row: 8
+            Layout.column: 1
+            checked: MySettings.saveChatGPTChats
+            onClicked: {
+                MySettings.saveChatGPTChats = !MySettings.saveChatGPTChats
+            }
+        }
         Label {
             id: serverChatLabel
             text: qsTr("Enable API server:")
             color: theme.textColor
             font.pixelSize: theme.fontSizeLarge
-            Layout.row: 8
+            Layout.row: 9
             Layout.column: 0
         }
         MyCheckBox {
             id: serverChatBox
-            Layout.row: 8
+            Layout.row: 9
             Layout.column: 1
             checked: MySettings.serverChat
             onClicked: {
@@ -272,7 +290,7 @@ MySettingsTab {
             ToolTip.visible: hovered
         }
         Rectangle {
-            Layout.row: 9
+            Layout.row: 10
             Layout.column: 0
             Layout.columnSpan: 3
             Layout.fillWidth: true