diff --git a/settings.yaml b/settings.yaml
index a9a676bdb..0a3121fd7 100644
--- a/settings.yaml
+++ b/settings.yaml
@@ -39,6 +39,7 @@ llm:
   # Should be matching the selected model
   max_new_tokens: 512
   context_window: 3900
+  tokenizer: mistralai/Mistral-7B-Instruct-v0.2
   temperature: 0.1        # The temperature of the model. Increasing the temperature will make the model answer more creatively. A value of 0.1 would be more factual. (Default: 0.1)

 llamacpp: