Compare commits

...

4 Commits

Author SHA1 Message Date
cebtenzzre
9fb135e020
cmake: install the GPT-J plugin (#1487) 2023-10-10 15:50:03 -04:00
Cebtenzzre
df66226f7d issue template: remove "Related Components" section 2023-10-10 10:39:28 -07:00
Aaron Miller
3c25d81759 make codespell happy 2023-10-10 12:00:06 -04:00
Jan Philipp Harries
4f0cee9330 added EM German Mistral Model 2023-10-10 11:44:43 -04:00
5 changed files with 22 additions and 21 deletions

View File

@ -27,21 +27,6 @@ body:
- label: "The official example notebooks/scripts"
- label: "My own modified scripts"
- type: checkboxes
id: related-components
attributes:
label: Related Components
description: "Select the components related to the issue (if applicable):"
options:
- label: "backend"
- label: "bindings"
- label: "python-bindings"
- label: "chat-ui"
- label: "models"
- label: "circleci"
- label: "docker"
- label: "api"
- type: textarea
id: reproduction
validations:
@ -67,4 +52,4 @@ body:
required: true
attributes:
label: Expected behavior
description: "A clear and concise description of what you would expect to happen."
description: "A clear and concise description of what you would expect to happen."

View File

@ -180,8 +180,8 @@ install(TARGETS llmodel DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
# We should probably iterate through the list of the cmake for backend, but these need to be installed
# to this component's dir for the finicky qt installer to work
#install(TARGETS gptj-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
#install(TARGETS gptj-default DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
install(TARGETS gptj-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
install(TARGETS gptj-default DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
install(TARGETS llama-mainline-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
install(TARGETS llama-mainline-default DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})
install(TARGETS llamamodel-mainline-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN})

View File

@ -190,5 +190,21 @@
"systemPrompt": " ",
"description": "<strong>Sbert</strong><br><ul><li>For embeddings",
"url": "https://gpt4all.io/models/gguf/all-MiniLM-L6-v2-f16.gguf"
},
{
"order": "n",
"md5sum": "919de4dd6f25351bcb0223790db1932d",
"name": "EM German Mistral",
"filename": "em_german_mistral_v01.Q4_0.gguf",
"filesize": "4108916352",
"requires": "2.5.0",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "Mistral",
    "description": "<strong>Mistral-based model for German-language applications</strong><br><ul><li>Fast responses</li><li>Chat based model</li><li>Trained by ellamind<li>Finetuned on German instruction and chat data<li>Licensed for commercial use</ul>",
"url": "https://huggingface.co/TheBloke/em_german_mistral_v01-GGUF/resolve/main/em_german_mistral_v01.Q4_0.gguf",
"promptTemplate": "USER: %1 ASSISTANT: ",
"systemPrompt": "Du bist ein hilfreicher Assistent. "
}
]

View File

@ -75,7 +75,7 @@
* resumable downloads for models
* chat list in the drawer drop down
* add/remove/rename chats
* perist chats to disk and restore them with full context (WARNING: the average size of each chat on disk is ~1.5GB)
* persist chats to disk and restore them with full context (WARNING: the average size of each chat on disk is ~1.5GB)
* NOTE: to turn on the persistent chats feature you need to do so via the settings dialog as it is off by default
* automatically rename chats using the AI after the first prompt/response pair
* new usage statistics including more detailed hardware info to help debug problems on older hardware
@ -524,7 +524,7 @@
"version": "2.4.19",
"notes":
"
* Fix a crasher on systems with corrupted vulkan drivers or corrupted vulkan dlls
* Fix a crash on systems with corrupted vulkan drivers or corrupted vulkan dlls
",
"contributors":
"

View File

@ -75,7 +75,7 @@ def train(accelerator, config):
else DummyOptim
)
# karpathy doesn't decay embeddding, maybe we should exclude
# karpathy doesn't decay embedding, maybe we should exclude
# https://github.com/karpathy/minGPT/commit/bbbdac74fa9b2e55574d70056163ffbae42310c1#diff-2075fa9c224b395be5bda85544dd36572b59c76c54562819eadadbf268602834R157s
optimizer = optimizer_cls(model.parameters(), lr=config["lr"], weight_decay=config["weight_decay"])