Browse Source

remove chunk window for gpu services

master
Hendrik Langer 2 years ago
parent
commit
ef7a60e2d2
  1. 5
      matrix_pygmalion_bot/ai/model_helpers.py

5
matrix_pygmalion_bot/ai/model_helpers.py

@@ -136,7 +136,10 @@ async def get_full_prompt(simple_prompt: str, bot, chat_history, model_name: str
     #prompt += f"{ai_name}:"
     MAX_TOKENS = 2048
-    WINDOW = 600
+    if bot.service_text == "koboldcpp":
+        WINDOW = 600
+    else:
+        WINDOW = 0
     max_new_tokens = 200
     total_num_tokens = await num_tokens(prompt, model_name)
     input_num_tokens = await num_tokens(f"{user_name}: {simple_prompt}\n{ai_name}:", model_name)

Loading…
Cancel
Save