@@ -143,7 +143,7 @@ class AI(object):
         if len(conversation_memory.chat_memory.messages) > max_k * 2:
             async def make_progressive_summary(previous_summary, chat_history_text_string):
-                asyncio.sleep(0) # yield for matrix-nio
+                await asyncio.sleep(0) # yield for matrix-nio
                 #self.rooms[message.room_id]["summary"] = summary_memory.predict_new_summary(conversation_memory.chat_memory.messages, previous_summary).strip()
                 summary_chain = LLMChain(llm=self.llm_summary, prompt=prompt_progressive_summary)
                 self.rooms[message.room_id]["summary"] = await summary_chain.apredict(summary=previous_summary, chat_history=chat_history_text_string)
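The `-`/`+` pair above fixes a subtle bug: calling `asyncio.sleep(0)` without `await` only creates a coroutine object and discards it, so the handler never actually yields to matrix-nio's event loop, and Python emits a "coroutine was never awaited" RuntimeWarning. A minimal standalone sketch of the difference (the names `broken` and `fixed` are illustrative, not from the patch):

    import asyncio

    async def broken():
        asyncio.sleep(0)        # builds a coroutine and drops it: never runs,
                                # never yields, and typically triggers
                                # "RuntimeWarning: coroutine 'sleep' was never awaited"

    async def fixed():
        await asyncio.sleep(0)  # actually suspends, letting other tasks
                                # (e.g. matrix-nio's sync loop) run

    asyncio.run(broken())   # warns on garbage collection of the dropped coroutine
    asyncio.run(fixed())    # completes silently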
@@ -156,7 +156,7 @@ class AI(object):
                 #return summary
 
             logger.info("memory progressive summary scheduled...")
-            await self.bot.schedule(self.bot.queue, make_progressive_summary, self.rooms[message.room_id]["summary"], conversation_memory.buffer)
+            await self.bot.schedule(self.bot.queue, make_progressive_summary, self.rooms[message.room_id]["summary"], conversation_memory.buffer) #.add_done_callback(
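The trailing `#.add_done_callback(` comment hints at attaching a completion callback to the scheduled summary task. Assuming `self.bot.schedule` wraps the coroutine in an `asyncio.Task` (the patch does not show its return type), the standard pattern looks like this sketch, where `make_progressive_summary` and `on_done` are stand-ins:

    import asyncio

    async def make_progressive_summary() -> str:
        await asyncio.sleep(0)   # stand-in for the real summarisation work
        return "progressive summary"

    def on_done(task: asyncio.Task) -> None:
        # Surface failures instead of letting a background task die silently.
        if task.exception() is not None:
            print(f"summary task failed: {task.exception()!r}")

    async def main() -> None:
        task = asyncio.create_task(make_progressive_summary())
        task.add_done_callback(on_done)
        await task

    asyncio.run(main())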
@@ -191,7 +191,7 @@ class AI(object):
         #roleplay_chain = RoleplayChain(llm_chain=chain, character_name=self.bot.name, persona=self.bot.persona, scenario=self.bot.scenario, ai_name_chat=chat_ai_name, human_name_chat=chat_human_name)
 
-        stop = ['<|endoftext|>', f"\n{chat_human_name}:"]
+        stop = ['<|endoftext|>', f"\n{chat_human_name}"]
 
         #print(f"Message is: \"{message.message}\"")
         output = await chain.arun({"input": message.message, "stop": stop})
        output = output.replace("<BOT>", self.bot.name).replace("<USER>", message.user_name)
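Dropping the colon from the second stop string widens the cut-off: generation now halts as soon as a new line begins with the user's name, with or without the `Name:` turn prefix. A rough emulation of how a backend truncates at stop sequences, using a hypothetical `apply_stop` helper and a made-up name:

    def apply_stop(text: str, stop: list[str]) -> str:
        # Emulate stop-sequence truncation: cut the completion at the
        # earliest occurrence of any stop string.
        cut = len(text)
        for s in stop:
            i = text.find(s)
            if i != -1:
                cut = min(cut, i)
        return text[:cut]

    chat_human_name = "Alice"                           # hypothetical
    stop = ['<|endoftext|>', f"\n{chat_human_name}"]
    print(apply_stop("Sure, happy to help.\nAlice: thanks!", stop))
    # -> "Sure, happy to help."

    raw = "Okay.\nAlice waves back"                     # no colon after the name
    print(apply_stop(raw, [f"\n{chat_human_name}:"]))   # old stop: no cut
    print(apply_stop(raw, stop))                        # new stop: cuts at "\nAlice"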