import asyncio
import time
import logging

from langchain import PromptTemplate
from langchain.chains import LLMChain

from .prompts import *
from .langchain_memory import BotConversationSummerBufferWindowMemory

logger = logging.getLogger(__name__)


class AI:

    def __init__(self, bot, text_wrapper, image_wrapper):
        self.name = bot.name
        self.bot = bot

        # Imported here rather than at module level, presumably to avoid a
        # circular import with the wrappers package.
        from ..wrappers.langchain_koboldcpp import KoboldCpp

        # Two KoboldCpp clients against the same endpoint: one for chat using
        # the bot-configured temperature, one for summaries at a low, more
        # deterministic temperature.
        self.llm_chat = KoboldCpp(temperature=self.bot.temperature, endpoint_url="http://172.16.85.10:5001/api/latest/generate", stop=['<|endoftext|>'])
        self.llm_summary = KoboldCpp(temperature=0.2, endpoint_url="http://172.16.85.10:5001/api/latest/generate", stop=['<|endoftext|>'])
        self.text_wrapper = text_wrapper
        self.image_wrapper = image_wrapper

        # Conversation memory bounded between min and max token limits, backed
        # by the low-temperature summary LLM for condensing older history.
        self.memory = BotConversationSummerBufferWindowMemory(llm=self.llm_summary, max_token_limit=1200, min_token_limit=200)

    async def generate(self, input_text):
        # Pass-through prompt: forward the raw input to the chat LLM unchanged.
        prompt_template = "{input}"
        chain = LLMChain(
            llm=self.llm_chat,
            prompt=PromptTemplate.from_template(prompt_template),
        )
        # Note: chain.run() is synchronous and blocks the event loop while the
        # model generates.
        output = chain.run(input_text)
        return output.strip()

    async def generate_roleplay(self, message, reply_fn, typing_fn):
        # Fill the Alpaca-style template with the bot's static fields, but leave
        # {history} and {input} as literal placeholders so the chain's memory
        # can inject the conversation history and the new message at run time.
        prompt = PromptTemplate(
            input_variables=["ai_name", "persona", "scenario", "chat_history", "human_name", "ai_name_chat", "human_input"],
            template=prompt_template_alpaca,
        )
        template_roleplay = prompt.format(
            ai_name=self.bot.name,
            persona=self.bot.persona,
            scenario=self.bot.scenario,
            chat_history="{history}",
            human_name=message.user_name,
            ai_name_chat=self.bot.name,
            human_input="{input}",
        )
        chain = LLMChain(
            llm=self.llm_chat,
            prompt=PromptTemplate.from_template(template_roleplay),
            verbose=True,
            memory=self.memory,
            #stop=['<|endoftext|>', '\nYou:', f"\n{message.user_name}:"],
        )
        output = chain.run(message.message)
        return output.strip()
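

# --- Usage sketch (not part of the original module; illustrative only) ---
# A minimal way to drive AI.generate() from an asyncio entry point, assuming
# the KoboldCpp endpoint above is reachable. `SimpleNamespace` merely stands
# in for the real bot object, which is expected to expose name, persona,
# scenario and temperature attributes; the wrapper arguments are unused here.
if __name__ == "__main__":
    from types import SimpleNamespace

    async def _demo():
        bot = SimpleNamespace(name="Julia", persona="a friendly assistant",
                              scenario="casual chat", temperature=0.7)
        ai = AI(bot, text_wrapper=None, image_wrapper=None)
        print(await ai.generate("Hello!"))

    asyncio.run(_demo())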