@@ -1,7 +1,7 @@
 import asyncio
 import os, time
 from .prompts import *
-#from .langchain_memory import BotConversationSummaryBufferWindowMemory, TestMemory
+from .langchain_memory import CustomMemory  # BotConversationSummaryBufferWindowMemory, TestMemory
 from ..utilities.messages import Message
 from langchain import PromptTemplate
@@ -90,7 +90,7 @@ class AI(object):
         from ..wrappers.langchain_koboldcpp import KoboldCpp
         self.llm_chat = KoboldCpp(temperature=self.bot.temperature, endpoint_url="http://172.16.85.10:5001/api/latest/generate", stop=['<|endoftext|>'])
-        self.llm_summary = KoboldCpp(temperature=0.2, endpoint_url="http://172.16.85.10:5001/api/latest/generate", stop=['<|endoftext|>'], max_tokens=512)
+        self.llm_summary = KoboldCpp(temperature=0.2, endpoint_url="http://172.16.85.10:5002/api/latest/generate", stop=['<|endoftext|>'], max_tokens=512)
         self.text_wrapper = text_wrapper
         self.image_wrapper = image_wrapper
         self.embeddings = SentenceTransformerEmbeddings()
@@ -102,7 +102,7 @@ class AI(object):
     def get_memory(self, room_id, human_prefix="Human"):
         if not room_id in self.rooms:
             self.rooms[room_id] = {}
-            memory = ConversationBufferMemory(memory_key="chat_history", input_key="input", human_prefix=human_prefix, ai_prefix=self.bot.name)
+            memory = CustomMemory(memory_key="chat_history", input_key="input", human_prefix=human_prefix, ai_prefix=self.bot.name, llm=self.llm_summary, summary_prompt=prompt_progressive_summary, max_len=1200, min_len=200)
            self.rooms[room_id]["memory"] = memory
             self.rooms[room_id]["summary"] = "No previous events."
             memory.chat_memory.add_ai_message(self.bot.greeting)
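Note: CustomMemory itself lives in .langchain_memory and is not part of this diff. As orientation only, here is a minimal, langchain-free sketch of the behaviour its call sites assume: a rolling chat buffer that folds overflow into an LLM-written progressive summary (max_len/min_len), plus a per-day log (chat_memory_day, read by diary() further down). Every name and detail below is inferred from usage, not confirmed by the source.

# Hypothetical sketch only -- the real CustomMemory subclasses langchain's
# BaseChatMemory; this strips that away to show the assumed mechanics.
from dataclasses import dataclass, field

@dataclass
class Turn:
    role: str     # speaker name, e.g. the bot name or the Matrix user name
    content: str  # message text

@dataclass
class CustomMemorySketch:
    summarize: callable        # (old_summary, pruned_text) -> new_summary, via llm_summary
    max_len: int = 1200        # prune once the rendered buffer exceeds this
    min_len: int = 200         # ...down to roughly this many characters
    summary: str = "No previous events."
    buffer: list = field(default_factory=list)    # rolling window for the prompt
    day_log: list = field(default_factory=list)   # cleared nightly, read by diary()

    def save_context(self, human_name, human_text, ai_name, ai_text):
        turns = [Turn(human_name, human_text), Turn(ai_name, ai_text)]
        self.buffer += turns
        self.day_log += turns
        self._prune()

    def _render(self, turns):
        return "\n".join(f"{t.role}: {t.content}" for t in turns)

    def _prune(self):
        # Fold the oldest turns into the progressive summary until the
        # rendered buffer is back under min_len characters.
        if len(self._render(self.buffer)) <= self.max_len:
            return
        pruned = []
        while self.buffer and len(self._render(self.buffer)) > self.min_len:
            pruned.append(self.buffer.pop(0))
        self.summary = self.summarize(self.summary, self._render(pruned))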
@@ -181,25 +181,15 @@ class AI(object):
     async def generate_roleplay(self, message, reply_fn, typing_fn):
-        langchain_human_message = HumanMessage(
-            content=message.message,
-            additional_kwargs={
-                "timestamp": message.timestamp,
-                "user_name": message.user_name,
-                "event_id": message.event_id,
-                "user_id": message.user_id,
-                "room_name": message.room_name,
-                "room_id": message.room_id,
-            }
-        )
         chat_ai_name = self.bot.name
-        chat_human_name = message.user_name
+        chat_human_name = message.additional_kwargs['user_name']
+        room_id = message.additional_kwargs['room_id']
         if False:  # model is vicuna
             chat_ai_name = "### Assistant"
             chat_human_name = "### Human"
-        conversation_memory = self.get_memory(message.room_id, message.user_name)
+        conversation_memory = self.get_memory(room_id, chat_human_name)
         conversation_memory.human_prefix = chat_human_name
         readonlymemory = ReadOnlySharedMemory(memory=conversation_memory)
         summary_memory = ConversationSummaryMemory(llm=self.llm_summary, memory_key="summary", input_key="input")
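ReadOnlySharedMemory is stock langchain: it exposes the same memory variables as the wrapped memory but turns writes into no-ops, so the chain can read chat_history without appending turns itself; the explicit save_context call later in this diff stays the single writer. Roughly:

# Stock langchain behaviour, shown for orientation; not part of this diff.
readonly = ReadOnlySharedMemory(memory=conversation_memory)
readonly.load_memory_variables({})  # same chat_history as the shared memory
readonly.save_context({"input": "hi"}, {"output": "ignored"})  # no-op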
@@ -211,11 +201,11 @@ class AI(object):
         async def make_progressive_summary(previous_summary, chat_history_text_string):
             await asyncio.sleep(0)  # yield for matrix-nio
-            #self.rooms[message.room_id]["summary"] = summary_memory.predict_new_summary(conversation_memory.chat_memory.messages, previous_summary).strip()
+            #self.rooms[room_id]["summary"] = summary_memory.predict_new_summary(conversation_memory.chat_memory.messages, previous_summary).strip()
             summary_chain = LLMChain(llm=self.llm_summary, prompt=prompt_progressive_summary, verbose=True)
-            self.rooms[message.room_id]["summary"] = await summary_chain.apredict(summary=previous_summary, chat_history=chat_history_text_string)
+            self.rooms[room_id]["summary"] = await summary_chain.apredict(summary=previous_summary, chat_history=chat_history_text_string)
             # ToDo: maybe add an add_task_done callback and don't access the variable directly from here?
-            logger.info(f"New summary is: \"{self.rooms[message.room_id]['summary']}\"")
+            logger.info(f"New summary is: \"{self.rooms[room_id]['summary']}\"")
             conversation_memory.chat_memory.messages = conversation_memory.chat_memory.messages[-k*2:]
             conversation_memory.load_memory_variables({})
             #summary = summarize(conversation_memory.buffer)
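prompt_progressive_summary comes from .prompts and is not shown in this diff; the apredict(summary=..., chat_history=...) call only implies a PromptTemplate with those two input variables. A hypothetical stand-in, modelled on langchain's stock progressive-summary prompt:

# Hypothetical stand-in -- the real template in .prompts may differ.
from langchain import PromptTemplate

prompt_progressive_summary = PromptTemplate(
    input_variables=["summary", "chat_history"],
    template="""Progressively summarize the conversation, adding to the previous summary.

Current summary:
{summary}

New lines of conversation:
{chat_history}

New summary:""",
)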
@@ -224,11 +214,11 @@ class AI(object):
             logger.info("memory progressive summary scheduled...")
-            await self.bot.schedule(self.bot.queue, make_progressive_summary, self.rooms[message.room_id]["summary"], conversation_memory.buffer)  #.add_done_callback(
+            await self.bot.schedule(self.bot.queue, make_progressive_summary, self.rooms[room_id]["summary"], conversation_memory.buffer)  #.add_done_callback(
-        #t = datetime.fromtimestamp(message.timestamp)
+        #t = datetime.fromtimestamp(message.additional_kwargs['timestamp'])
         #when = humanize.naturaltime(t)
         #print(when)
@@ -241,8 +231,8 @@ class AI(object):
             ai_name=self.bot.name,
             persona=self.bot.persona,
             scenario=self.bot.scenario,
-            summary=self.rooms[message.room_id]["summary"],
-            human_name=message.user_name,
+            summary=self.rooms[room_id]["summary"],
+            human_name=chat_human_name,
             #example_dialogue=replace_all(self.bot.example_dialogue, {"{{user}}": chat_human_name, "{{char}}": chat_ai_name})
             ai_name_chat=chat_ai_name,
         )
@@ -252,48 +242,44 @@ class AI(object):
             prompt=prompt,
             verbose=True,
             memory=readonlymemory,
-            #stop=['<|endoftext|>', '\nYou:', f"\n{message.user_name}:"],
+            #stop=['<|endoftext|>', '\nYou:', f"\n{chat_human_name}:"],
         )
-        # output = llm_chain(inputs={"ai_name": self.bot.name, "persona": self.bot.persona, "scenario": self.bot.scenario, "human_name": message.user_name, "ai_name_chat": self.bot.name, "chat_history": "", "input": message.message})['results'][0]['text']
+        # output = llm_chain(inputs={"ai_name": self.bot.name, "persona": self.bot.persona, "scenario": self.bot.scenario, "human_name": chat_human_name, "ai_name_chat": self.bot.name, "chat_history": "", "input": message.content})['results'][0]['text']
         #roleplay_chain = RoleplayChain(llm_chain=chain, character_name=self.bot.name, persona=self.bot.persona, scenario=self.bot.scenario, ai_name_chat=chat_ai_name, human_name_chat=chat_human_name)
         stop = ['<|endoftext|>', f"\n{chat_human_name}"]
-        #print(f"Message is: \"{message.message}\"")
+        #print(f"Message is: \"{message.content}\"")
         await asyncio.sleep(0)
-        output = await chain.arun({"input": message.message, "stop": stop})
-        output = output.replace("<BOT>", self.bot.name).replace("<USER>", message.user_name)
+        output = await chain.arun({"input": message.content, "stop": stop})
+        output = output.replace("<BOT>", self.bot.name).replace("<USER>", chat_human_name)
         output = output.replace("### Assistant", self.bot.name)
         output = output.replace(f"\n{self.bot.name}:", " ")
         output = output.strip()
+        if "*activates the neural uplink*" in output.casefold():
+            pass  # call agent
+        own_message_resp = await reply_fn(output)
         langchain_ai_message = AIMessage(
             content=output,
             additional_kwargs={
                 "timestamp": datetime.now().timestamp(),
                 "user_name": self.bot.name,
-                "event_id": None,
+                "event_id": own_message_resp.event_id,
                 "user_id": None,
-                "room_name": message.room_name,
-                "room_id": message.room_id,
+                "room_name": message.additional_kwargs['room_name'],
+                "room_id": own_message_resp.room_id,
             }
         )
-        if "*activates the neural uplink*" in output.casefold():
-            pass  # call agent
         #conversation_memory.chat_memory.messages.append(ChatMessage(content=message, role=message.user_name))
-        conversation_memory.chat_memory.add_user_message(message.message)
-        conversation_memory.chat_memory.add_ai_message(output)
+        conversation_memory.save_context({"input": message.content}, {"output": output})
         conversation_memory.load_memory_variables({})
-        if not "messages_today" in self.rooms[message.room_id]:
-            self.rooms[message.room_id]["messages_today"] = []
-        self.rooms[message.room_id]["messages_today"].append(langchain_human_message)
-        self.rooms[message.room_id]["messages_today"].append(langchain_ai_message)
-        return output.strip()
+        return output

     async def summarize(self, text):
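The two chat_memory.add_*_message calls are folded into one save_context call, which in stock langchain BaseChatMemory splits the exchange into a human and an AI message itself, and which presumably gives CustomMemory its hook to prune the buffer and feed chat_memory_day. For orientation, the stock equivalence:

# In stock langchain, with input_key="input" and a single output key:
memory.save_context({"input": "Hi there"}, {"output": "Hello!"})
# ...stores the same two messages as:
memory.chat_memory.add_user_message("Hi there")
memory.chat_memory.add_ai_message("Hello!")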
@@ -306,10 +292,11 @@ class AI(object):
     async def diary(self, room_id):
         await asyncio.sleep(0)  # yield for matrix-nio
         diary_chain = LLMChain(llm=self.llm_summary, prompt=prompt_outline, verbose=True)
+        conversation_memory = self.get_memory(room_id)
         #self.rooms[message.room_id]["summary"]
         string_messages = []
-        for m in self.rooms[room_id]["messages_today"]:
-            string_messages.append(f"{message.user_name}: {message.message}")
+        for m in conversation_memory.chat_memory_day.messages:
+            string_messages.append(f"{m.role}: {m.content}")
         return await diary_chain.apredict(text="\n".join(string_messages))
@@ -397,12 +384,12 @@ class AI(object):
         # Summarize the last day and save a diary entry
         yesterday = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
         for room_id in self.rooms.keys():
-            if "messages_today" in self.rooms[room_id]:
+            conversation_memory = self.get_memory(room_id)  # assumed: mirrors diary(); the extracted hunk left conversation_memory undefined
+            if len(conversation_memory.chat_memory_day.messages) > 0:
                 self.bot.rooms[room_id]["diary"][yesterday] = await self.diary(room_id)
                 # Calculate new goals for the character
                 # Update stats
                 # Let background tasks run
-            self.rooms[room_id]["messages_today"] = []
+            conversation_memory.chat_memory_day.clear()
         await self.bot.write_conf2(self.bot.rooms)