Mirror of https://github.com/nomic-ai/gpt4all.git (synced 2025-07-20 00:02:00 -04:00)

Compare commits: 9fb135e020...56c0d2898d

No commits in common. "9fb135e020dc4a714b0e1868e6beb77f8e6f287e" and "56c0d2898dce1984cbbaa2881e712c6a3eaecb89" have entirely different histories.
.github/ISSUE_TEMPLATE/bug-report.yml (vendored): 17 changes
@@ -27,6 +27,21 @@ body:
         - label: "The official example notebooks/scripts"
         - label: "My own modified scripts"

+  - type: checkboxes
+    id: related-components
+    attributes:
+      label: Related Components
+      description: "Select the components related to the issue (if applicable):"
+      options:
+        - label: "backend"
+        - label: "bindings"
+        - label: "python-bindings"
+        - label: "chat-ui"
+        - label: "models"
+        - label: "circleci"
+        - label: "docker"
+        - label: "api"
+
   - type: textarea
     id: reproduction
     validations:
@@ -52,4 +67,4 @@ body:
       required: true
     attributes:
       label: Expected behavior
-      description: "A clear and concise description of what you would expect to happen."
+      description: "A clear and concise description of what you would expect to happen."
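
The added block follows GitHub's issue-forms YAML schema: a checkboxes element whose attributes carry a label, a description, and a list of options. A minimal sketch of how that structure parses, using PyYAML on the snippet embedded inline (the script is illustrative only, not part of the repository):

    # Parse the "Related Components" checkboxes block and list its options.
    import yaml

    snippet = """
    - type: checkboxes
      id: related-components
      attributes:
        label: Related Components
        description: "Select the components related to the issue (if applicable):"
        options:
          - label: "backend"
          - label: "bindings"
          - label: "python-bindings"
          - label: "chat-ui"
          - label: "models"
          - label: "circleci"
          - label: "docker"
          - label: "api"
    """

    body = yaml.safe_load(snippet)
    checkboxes = body[0]
    assert checkboxes["type"] == "checkboxes"
    print([opt["label"] for opt in checkboxes["attributes"]["options"]])
    # ['backend', 'bindings', 'python-bindings', 'chat-ui', 'models',
    #  'circleci', 'docker', 'api']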
@@ -180,8 +180,8 @@ install(TARGETS llmodel DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})

 # We should probably iterate through the list of the cmake for backend, but these need to be installed
 # to the this component's dir for the finicky qt installer to work
-install(TARGETS gptj-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
-install(TARGETS gptj-default DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
+#install(TARGETS gptj-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
+#install(TARGETS gptj-default DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
 install(TARGETS llama-mainline-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
 install(TARGETS llama-mainline-default DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
 install(TARGETS llamamodel-mainline-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
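
This hunk appears to be from the chat client's CMakeLists.txt; the new revision comments out the gptj install() calls while keeping the llama ones. The in-file comment suggests iterating over the backend target list instead of repeating install() per target. A minimal sketch of that idea as a hypothetical generator script (target names taken from the hunk above; nothing here is part of the repository):

    # Emit one CMake install() line per backend target instead of
    # hand-writing each one.
    backends = [
        "llama-mainline-avxonly",
        "llama-mainline-default",
        "llamamodel-mainline-avxonly",
    ]
    for target in backends:
        # ${{...}} in the f-string renders as a literal ${...} for CMake.
        print(f"install(TARGETS {target} DESTINATION lib COMPONENT ${{COMPONENT_NAME_MAIN}})")

In CMake itself the same effect could come from a foreach() loop over a list of target names.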
@@ -190,21 +190,5 @@
     "systemPrompt": " ",
     "description": "<strong>Sbert</strong><br><ul><li>For embeddings",
     "url": "https://gpt4all.io/models/gguf/all-MiniLM-L6-v2-f16.gguf"
   },
-  {
-    "order": "n",
-    "md5sum": "919de4dd6f25351bcb0223790db1932d",
-    "name": "EM German Mistral",
-    "filename": "em_german_mistral_v01.Q4_0.gguf",
-    "filesize": "4108916352",
-    "requires": "2.5.0",
-    "ramrequired": "8",
-    "parameters": "7 billion",
-    "quant": "q4_0",
-    "type": "Mistral",
-    "description": "<strong>Mistral-based model for German-language applications</strong><br><ul><li>Fast responses</li><li>Chat based model</li><li>Trained by ellamind<li>Finetuned on German instruction and chat data</a><li>Licensed for commercial use</ul>",
-    "url": "https://huggingface.co/TheBloke/em_german_mistral_v01-GGUF/resolve/main/em_german_mistral_v01.Q4_0.gguf",
-    "promptTemplate": "USER: %1 ASSISTANT: ",
-    "systemPrompt": "Du bist ein hilfreicher Assistent. "
-  }
 ]
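
This hunk is from the chat client's model catalog JSON; the new revision drops the EM German Mistral entry. Each entry carries enough metadata for a downloader to fetch and verify a model: "url", "filename", "filesize", and "md5sum". A minimal sketch of such a verification step, assuming a local models.json shaped like the entry above (file names and the script itself are illustrative, not the application's actual downloader):

    # Verify a downloaded GGUF file against the md5sum in its catalog entry.
    import hashlib
    import json

    with open("models.json") as f:
        entries = json.load(f)

    entry = next(e for e in entries if e["name"] == "EM German Mistral")

    md5 = hashlib.md5()
    with open(entry["filename"], "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            md5.update(chunk)

    print("ok" if md5.hexdigest() == entry["md5sum"] else "checksum mismatch")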
@@ -75,7 +75,7 @@
 * resumable downloads for models
 * chat list in the drawer drop down
 * add/remove/rename chats
-* persist chats to disk and restore them with full context (WARNING: the average size of each chat on disk is ~1.5GB)
+* perist chats to disk and restore them with full context (WARNING: the average size of each chat on disk is ~1.5GB)
 * NOTE: to turn on the persistent chats feature you need to do so via the settings dialog as it is off by default
 * automatically rename chats using the AI after the first prompt/response pair
 * new usage statistics including more detailed hardware info to help debug problems on older hardware

@@ -524,7 +524,7 @@
         "version": "2.4.19",
         "notes":
         "
-* Fix a crash on systems with corrupted vulkan drivers or corrupted vulkan dlls
+* Fix a crasher on systems with corrupted vulkan drivers or corrupted vulkan dlls
         ",
         "contributors":
         "
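
These two hunks are from what appears to be the chat client's release metadata JSON: an array of objects with "version", "notes", and "contributors" strings, where the notes hold newline-separated bullet text. A minimal sketch of reading that shape, assuming a local release.json (illustrative only, not the application's code):

    # Print the release notes recorded for one version.
    import json

    with open("release.json") as f:
        releases = json.load(f)

    notes = next(r["notes"] for r in releases if r["version"] == "2.4.19")
    print(notes.strip())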
@@ -75,7 +75,7 @@ def train(accelerator, config):
         else DummyOptim
     )

-    # karpathy doesn't decay embedding, maybe we should exclude
+    # karpathy doesn't decay embeddding, maybe we should exclude
     # https://github.com/karpathy/minGPT/commit/bbbdac74fa9b2e55574d70056163ffbae42310c1#diff-2075fa9c224b395be5bda85544dd36572b59c76c54562819eadadbf268602834R157s
     optimizer = optimizer_cls(model.parameters(), lr=config["lr"], weight_decay=config["weight_decay"])
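
The comment in this training-script hunk points at minGPT's practice of excluding embedding parameters from weight decay, while the code shown applies one weight_decay to all of model.parameters(). A minimal sketch of the exclusion the comment contemplates, using PyTorch parameter groups (the name-based filter and the helper are assumptions, not the repository's code):

    # Split parameters into decay / no-decay groups, in the spirit of
    # karpathy's minGPT: embeddings, biases, and norm weights get no decay.
    import torch

    def build_optimizer(model, lr, weight_decay):
        decay, no_decay = [], []
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            if "embed" in name or name.endswith("bias") or "norm" in name:
                no_decay.append(param)
            else:
                decay.append(param)
        return torch.optim.AdamW(
            [
                {"params": decay, "weight_decay": weight_decay},
                {"params": no_decay, "weight_decay": 0.0},
            ],
            lr=lr,
        )

    # Usage mirroring the config keys in the hunk:
    # optimizer = build_optimizer(model, config["lr"], config["weight_decay"])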