@@ -1,7 +1,7 @@
 import asyncio
 import os, time
 from .prompts import *
-from .langchain_memory import CustomMemory  # BotConversationSummaryBufferWindowMemory, TestMemory
+from .langchain_memory import CustomMemory, ChangeNamesMemory  # BotConversationSummaryBufferWindowMemory, TestMemory
 from ..utilities.messages import Message
 from langchain import PromptTemplate
@@ -91,6 +91,8 @@ class AI(object):
         from ..wrappers.langchain_koboldcpp import KoboldCpp
         self.llm_chat = KoboldCpp(temperature=self.bot.temperature, endpoint_url="http://172.16.85.10:5001/api/latest/generate", stop=['<|endoftext|>'])
         self.llm_summary = KoboldCpp(temperature=0.2, endpoint_url="http://172.16.85.10:5002/api/latest/generate", stop=['<|endoftext|>'], max_tokens=512)
+        self.llm_chat_model = "pygmalion-7b"
+        self.llm_summary_model = "vicuna-13b"
         self.text_wrapper = text_wrapper
         self.image_wrapper = image_wrapper
         self.embeddings = SentenceTransformerEmbeddings()
@@ -99,19 +101,21 @@ class AI(object):
         #self.memory = BotConversationSummaryBufferWindowMemory(llm=self.llm_summary, max_token_limit=1200, min_token_limit=200)

-    def get_memory(self, room_id, human_prefix="Human"):
+    def get_memory(self, room_id, human_prefix=None):
         if room_id not in self.rooms:
             self.rooms[room_id] = {}
             if "moving_summary" in self.bot.rooms[room_id]:
                 moving_summary = self.bot.rooms[room_id]['moving_summary']
             else:
                 moving_summary = "No previous events."
+            if not human_prefix:
+                human_prefix = "Human"
             memory = CustomMemory(memory_key="chat_history", input_key="input", human_prefix=human_prefix, ai_prefix=self.bot.name, llm=self.llm_summary, summary_prompt=prompt_progressive_summary, moving_summary_buffer=moving_summary, max_len=1200, min_len=200)
             self.rooms[room_id]["memory"] = memory
             #memory.chat_memory.add_ai_message(self.bot.greeting)
         else:
             memory = self.rooms[room_id]["memory"]
-            if human_prefix != memory.human_prefix:
+            if human_prefix:
                 memory.human_prefix = human_prefix
         return memory
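The get_memory() change above makes the cached prefix sticky: a caller-supplied human_prefix overrides the stored one, while passing None leaves it untouched. A minimal, self-contained sketch of that fallback pattern (names are illustrative, not from this diff):

_rooms = {}

def resolve_prefix(room_id, human_prefix=None):
    # Cache a per-room prefix; only an explicit argument overrides it.
    entry = _rooms.setdefault(room_id, {"human_prefix": "Human"})
    if human_prefix:
        entry["human_prefix"] = human_prefix
    return entry["human_prefix"]

assert resolve_prefix("!a") == "Human"           # default on first use
assert resolve_prefix("!a", "Alice") == "Alice"  # explicit value wins
assert resolve_prefix("!a") == "Alice"           # and persists afterwards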
@@ -185,12 +189,22 @@ class AI(object):
         chat_ai_name = self.bot.name
         chat_human_name = message.additional_kwargs['user_name']
         room_id = message.additional_kwargs['room_id']
-        if False: # model is vicuna
+        if self.llm_chat_model.startswith('vicuna'):
             prompt_chat = prompt_vicuna
             chat_ai_name = "### Assistant"
             chat_human_name = "### Human"
+        elif self.llm_chat_model.startswith('pygmalion'):
+            prompt_chat = prompt_pygmalion
+            chat_human_name = "You"
+        elif self.llm_chat_model.startswith('koboldai'):
+            prompt_chat = prompt_koboldai
+        else:
+            prompt_chat = prompt_alpaca
         conversation_memory = self.get_memory(room_id, chat_human_name)
         readonlymemory = ReadOnlySharedMemory(memory=conversation_memory)
+        custom_memory = ChangeNamesMemory(memory=conversation_memory, replace_ai_chat_names={self.bot.name: chat_ai_name}, replace_human_chat_names={message.additional_kwargs['user_name']: chat_human_name})
         #summary_memory = ConversationSummaryMemory(llm=self.llm_summary, memory_key="summary", input_key="input")
         #combined_memory = CombinedMemory(memories=[conversation_memory, summary_memory])
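ChangeNamesMemory is defined in .langchain_memory and not shown in this diff; conceptually it wraps the shared conversation memory and rewrites speaker names on read, so each model family sees the role labels its template expects. A rough sketch of that idea, assuming a LangChain-style load_memory_variables() interface:

class RenamingMemorySketch:
    """Illustrative only; the real ChangeNamesMemory lives in .langchain_memory."""

    def __init__(self, memory, replacements):
        self.memory = memory              # wrapped, treated as read-only
        self.replacements = replacements  # e.g. {"Julia": "### Assistant"}

    def load_memory_variables(self, inputs):
        variables = self.memory.load_memory_variables(inputs)
        for key, value in variables.items():
            if isinstance(value, str):
                for old, new in self.replacements.items():
                    value = value.replace(f"{old}:", f"{new}:")
                variables[key] = value
        return variables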
@@ -200,20 +214,23 @@
         #when = humanize.naturaltime(t)
         #print(when)
         # ToDo: either use prompt.format() to fill out the pygmalion prompt and
         # feed the resulting template text into the instruct prompt's instruction,
         # or do this with prompt.partial() (see the sketch after this hunk)
-        prompt = prompt_vicuna.partial(
+        prompt = prompt_chat.partial(
             ai_name=self.bot.name,
             persona=self.bot.persona,
             scenario=self.bot.scenario,
-            summary=conversation_memory.moving_summary_buffer,
             human_name=chat_human_name,
             #example_dialogue=replace_all(self.bot.example_dialogue, {"{{user}}": chat_human_name, "{{char}}": chat_ai_name})
             ai_name_chat=chat_ai_name,
         )
+        if "summary" in prompt_chat.input_variables:
+            prompt = prompt.partial(summary=conversation_memory.moving_summary_buffer)
+        if "example_dialogue" in prompt_chat.input_variables:
+            prompt = prompt.partial(
+                example_dialogue=self.bot.example_dialogue.replace("{{user}}", chat_human_name)
+            )
         tmp_prompt_text = prompt.format(chat_history=conversation_memory.buffer, input=message.content)
         prompt_len = self.llm_chat.get_num_tokens(tmp_prompt_text)
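The conditional partial() calls above fill only the variables a template actually declares, so one code path serves templates with different placeholders. A toy demonstration with langchain's PromptTemplate (the template text is invented):

from langchain import PromptTemplate

toy = PromptTemplate(
    input_variables=["summary", "input"],
    template="Summary so far: {summary}\nUser: {input}",
)
# Pre-fill "summary" only if this template declares it.
if "summary" in toy.input_variables:
    toy = toy.partial(summary="No previous events.")
print(toy.format(input="Hello"))  # "summary" is already bound, only "input" remains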
@@ -230,7 +247,7 @@
             llm=self.llm_chat,
             prompt=prompt,
             verbose=True,
-            memory=readonlymemory,
+            memory=custom_memory,
             #stop=['<|endoftext|>', '\nYou:', f"\n{chat_human_name}:"],
         )
@@ -292,7 +309,7 @@
         else:
             input_text = conversation_memory.moving_summary_buffer
-        return await diary_chain.apredict(text=input_text)
+        return await diary_chain.apredict(text=input_text, ai_name=self.bot.name)

     async def agent(self):
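apredict() forwards its keyword arguments as template variables, so the diary prompt must now declare {ai_name} alongside {text}. A small illustration of the call shape, using langchain's FakeListLLM as a stand-in model (prompt text and values are invented):

from langchain import LLMChain, PromptTemplate
from langchain.llms.fake import FakeListLLM

diary_prompt = PromptTemplate(
    input_variables=["ai_name", "text"],
    template="Write {ai_name}'s diary entry about:\n{text}",
)
chain = LLMChain(llm=FakeListLLM(responses=["Dear diary..."]), prompt=diary_prompt)
# Each template variable becomes a keyword argument of predict()/apredict():
print(chain.predict(text="the day's summary", ai_name="Julia"))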
@@ -358,6 +375,7 @@ class AI(object):
     async def sleep(self):
         logger.info(f"{self.bot.name} sleeping now... running background tasks...")
+        # Write date into chat history
         for room_id in self.rooms.keys():
             #fake_message = Message(datetime.now().timestamp(), self.bot.name, "", event_id=None, user_id=None, room_name=None, room_id=room_id)
@@ -389,7 +407,9 @@
             # Update stats
             # Let background tasks run
             conversation_memory.chat_memory_day.clear()
+            await conversation_memory.prune_memory(conversation_memory.min_len)
+        await self.bot.write_conf2(self.bot.rooms)
         logger.info(f"{self.bot.name} done sleeping and ready for the next day...")

     async def prime_llm(self, text):
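prune_memory() belongs to CustomMemory and is not shown in this diff; the new call shrinks each room's buffer down to min_len tokens overnight instead of letting it grow until the next prompt overflows. Conceptually the pruning pass looks something like this sketch (every name here is hypothetical except min_len):

async def prune_to_budget(messages, summarize, count_tokens, min_len):
    # Fold the oldest messages into a running summary until the
    # remaining buffer fits the token budget.
    summary = ""
    while messages and sum(count_tokens(m) for m in messages) > min_len:
        summary = await summarize(summary, messages.pop(0))
    return summary, messages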
@@ -397,6 +417,7 @@
 def replace_all(text, dic):
+    #example_dialogue=replace_all(self.bot.example_dialogue, {"{{user}}": chat_human_name, "{{char}}": chat_ai_name})
     for i, j in dic.items():
         text = text.replace(i, j)
     return text
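For reference, the commented-out call that this helper was written for would behave like this:

placeholders = {"{{user}}": "Alice", "{{char}}": "Julia"}
print(replace_all("{{user}} waves at {{char}}.", placeholders))
# -> Alice waves at Julia.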