@@ -93,8 +93,8 @@ class AI(object):
         self.max_context = 2048

         from ..wrappers.langchain_koboldcpp import KoboldCpp
-        self.llm_chat = KoboldCpp(temperature=self.bot.temperature, endpoint_url="http://172.16.85.10:5001/api/latest/generate", max_context=self.max_context, stop=['<|endoftext|>'], verbose=True)
-        self.llm_summary = KoboldCpp(temperature=0.7, repeat_penalty=1.15, top_k = 20, top_p= 0.9, endpoint_url="http://172.16.85.10:5001/api/latest/generate", max_context=self.max_context, stop=['<|endoftext|>'], max_tokens=512, verbose=True)
+        self.llm_chat = KoboldCpp(temperature=self.bot.temperature, endpoint_url="http://172.16.33.10:5001/api/latest/generate", max_context=self.max_context, stop=['<|endoftext|>'], verbose=True)
+        self.llm_summary = KoboldCpp(temperature=0.7, repeat_penalty=1.15, top_k = 20, top_p= 0.9, endpoint_url="http://172.16.33.10:5001/api/latest/generate", max_context=self.max_context, stop=['<|endoftext|>'], max_tokens=512, verbose=True)
         self.llm_chat_model = "pygmalion-7b"
         self.llm_summary_model = "vicuna-13b"
         self.text_wrapper = text_wrapper