@@ -102,24 +102,23 @@ class AI(object):
     def get_memory(self, room_id, human_prefix="Human"):
         if not room_id in self.rooms:
             self.rooms[room_id] = {}
-            memory = CustomMemory(memory_key="chat_history", input_key="input", human_prefix=human_prefix, ai_prefix=self.bot.name, llm=self.llm_summary, summary_prompt=prompt_progressive_summary, max_len=1200, min_len=200)
+            if "moving_summary" in self.bot.rooms[room_id]:
+                moving_summary = self.bot.rooms[room_id]['moving_summary']
+            else:
+                moving_summary = "No previous events."
+            memory = CustomMemory(memory_key="chat_history", input_key="input", human_prefix=human_prefix, ai_prefix=self.bot.name, llm=self.llm_summary, summary_prompt=prompt_progressive_summary, moving_summary_buffer=moving_summary, max_len=1200, min_len=200)
             self.rooms[room_id]["memory"] = memory
-            self.rooms[room_id]["summary"] = "No previous events."
-            #memory.chat_memory.add_ai_message(self.bot.greeting)
-            #memory.save_context({"input": None, "output": self.bot.greeting})
+            memory.chat_memory.add_ai_message(self.bot.greeting)
+            memory.load_memory_variables({})
         else:
             memory = self.rooms[room_id]["memory"]
-        #print(f"memory: {memory.load_memory_variables({})}")
-        #print(f"memory has an estimated {self.llm_chat.get_num_tokens(memory.buffer)} number of tokens")
+        if human_prefix != memory.human_prefix:
+            memory.human_prefix = human_prefix
         return memory
 
     async def add_chat_message(self, message):
-        conversation_memory = self.get_memory(message.room_id)
-        conversation_memory.chat_memory.messages.append(message)
+        room_id = message.additional_kwargs['room_id']
+        langchain_message = message.to_langchain()
+        conversation_memory = self.get_memory(room_id)
+        if message.user_id == self.bot.connection.user_id:
+            langchain_message.role = self.bot.name
+        conversation_memory.chat_memory.messages.append(langchain_message)
 
     async def clear(self, room_id):
         conversation_memory = self.get_memory(room_id)
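(Sketch, not part of the patch: the summary-seeding branch in get_memory() above, with a plain dict standing in for self.bot.rooms and a stub in place of the repo's CustomMemory.)

# Illustrative stub; the real CustomMemory takes the kwargs shown in the hunk above.
class StubMemory:
    def __init__(self, moving_summary_buffer):
        self.moving_summary_buffer = moving_summary_buffer

bot_rooms = {"!abc:example.org": {"moving_summary": "Julia met a traveller."}}

def seed_summary(room_id):
    # Mirrors the new branch: reuse a persisted summary when one exists,
    # otherwise fall back to the default seed text.
    if "moving_summary" in bot_rooms.get(room_id, {}):
        return bot_rooms[room_id]["moving_summary"]
    return "No previous events."

memory = StubMemory(moving_summary_buffer=seed_summary("!abc:example.org"))
print(memory.moving_summary_buffer)  # Julia met a traveller.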
@@ -176,7 +175,7 @@ class AI(object):
             llm=self.llm_chat,
             prompt=PromptTemplate.from_template(prompt_template),
         )
-        output = await chain.arun(message.message)
+        output = await chain.arun(message.content)
         return output.strip()
@@ -190,33 +189,11 @@ class AI(object):
         chat_human_name = "### Human"
 
         conversation_memory = self.get_memory(room_id, chat_human_name)
-        conversation_memory.human_prefix = chat_human_name
         readonlymemory = ReadOnlySharedMemory(memory=conversation_memory)
-        summary_memory = ConversationSummaryMemory(llm=self.llm_summary, memory_key="summary", input_key="input")
+        #summary_memory = ConversationSummaryMemory(llm=self.llm_summary, memory_key="summary", input_key="input")
         #combined_memory = CombinedMemory(memories=[conversation_memory, summary_memory])
 
-        k = 1 # 5
-        max_k = 3 # 12
-        if len(conversation_memory.chat_memory.messages) > max_k*2:
-            async def make_progressive_summary(previous_summary, chat_history_text_string):
-                await asyncio.sleep(0) # yield for matrix-nio
-                #self.rooms[room_id]["summary"] = summary_memory.predict_new_summary(conversation_memory.chat_memory.messages, previous_summary).strip()
-                summary_chain = LLMChain(llm=self.llm_summary, prompt=prompt_progressive_summary, verbose=True)
-                self.rooms[room_id]["summary"] = await summary_chain.apredict(summary=previous_summary, chat_history=chat_history_text_string)
-                # ToDo: maybe add an add_task_done callback and don't access the variable directly from here?
-                logger.info(f"New summary is: \"{self.rooms[room_id]['summary']}\"")
-                conversation_memory.chat_memory.messages = conversation_memory.chat_memory.messages[-k * 2 :]
-                conversation_memory.load_memory_variables({})
-                #summary = summarize(conversation_memory.buffer)
-                #print(summary)
-                #return summary
-
-            logger.info("memory progressive summary scheduled...")
-            await self.bot.schedule(self.bot.queue, make_progressive_summary, self.rooms[room_id]["summary"], conversation_memory.buffer) #.add_done_callback(
+        #await self.bot.schedule(self.bot.queue, make_progressive_summary, self.rooms[room_id]["summary"], conversation_memory.buffer) #.add_done_callback(
 
         #t = datetime.fromtimestamp(message.additional_kwargs['timestamp'])
         #when = humanize.naturaltime(t)
@@ -231,12 +208,21 @@ class AI(object):
             ai_name=self.bot.name,
             persona=self.bot.persona,
             scenario=self.bot.scenario,
-            summary=self.rooms[room_id]["summary"],
+            summary=conversation_memory.moving_summary_buffer,
             human_name=chat_human_name,
             #example_dialogue=replace_all(self.bot.example_dialogue, {"{{user}}": chat_human_name, "{{char}}": chat_ai_name})
             ai_name_chat=chat_ai_name,
         )
 
+        tmp_prompt_text = prompt.format(chat_history=conversation_memory.buffer, input=message.content)
+        prompt_len = self.llm_chat.get_num_tokens(tmp_prompt_text)
+
+        if prompt_len+256 > 2000:
+            logger.warning(f"Prompt too large. Estimated {prompt_len} tokens")
+
+        #roleplay_chain = RoleplayChain(llm_chain=chain, character_name=self.bot.name, persona=self.bot.persona, scenario=self.bot.scenario, ai_name_chat=chat_ai_name, human_name_chat=chat_human_name)
+
         chain = ConversationChain(
             llm=self.llm_chat,
             prompt=prompt,
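(Sketch, not part of the patch: the new guard above reserves room for the reply — with a 2000-token budget and 256 tokens kept for generation, any prompt over 1744 tokens trips the warning. A whitespace split stands in for llm_chat.get_num_tokens().)

def estimated_tokens(text):
    # Crude stand-in for the model tokenizer.
    return len(text.split())

prompt_text = "word " * 1800
prompt_len = estimated_tokens(prompt_text)  # 1800
if prompt_len + 256 > 2000:                 # 2056 > 2000: too large
    print(f"Prompt too large. Estimated {prompt_len} tokens")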
@@ -247,8 +233,6 @@ class AI(object):
 
         # output = llm_chain(inputs={"ai_name": self.bot.name, "persona": self.bot.persona, "scenario": self.bot.scenario, "human_name": chat_human_name, "ai_name_chat": self.bot.name, "chat_history": "", "input": message.content})['results'][0]['text']
-
-        #roleplay_chain = RoleplayChain(llm_chain=chain, character_name=self.bot.name, persona=self.bot.persona, scenario=self.bot.scenario, ai_name_chat=chat_ai_name, human_name_chat=chat_human_name)
 
         stop = ['<|endoftext|>', f"\n{chat_human_name}"]
         #print(f"Message is: \"{message.content}\"")
         await asyncio.sleep(0)
@@ -264,20 +248,26 @@ class AI(object):
 
         own_message_resp = await reply_fn(output)
 
-        langchain_ai_message = AIMessage(
+        output_message = AIMessage(
             content=output,
             additional_kwargs={
                 "timestamp": datetime.now().timestamp(),
                 "user_name": self.bot.name,
                 "event_id": own_message_resp.event_id,
-                "user_id": None,
+                "user_id": self.bot.connection.user_id,
                 "room_name": message.additional_kwargs['room_name'],
                 "room_id": own_message_resp.room_id,
             }
         )
 
-        conversation_memory.save_context({"input": message.content}, {"ouput": output})
-        conversation_memory.load_memory_variables({})
+        await conversation_memory.asave_context(message, output_message)
+
+        summary_len = self.llm_chat.get_num_tokens(conversation_memory.moving_summary_buffer)
+        if summary_len > 400:
+            logger.warning("Summary is getting too long. Refining...")
+            conversation_memory.moving_summary_buffer = await self.summarize(conversation_memory.moving_summary_buffer)
+            new_summary_len = self.llm_chat.get_num_tokens(conversation_memory.moving_summary_buffer)
+            logger.info(f"Refined summary from {summary_len} tokens to {new_summary_len} tokens ({new_summary_len-summary_len} tokens)")
+        self.bot.rooms[room_id]['moving_summary'] = conversation_memory.moving_summary_buffer
 
         return output
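(Sketch, not part of the patch: the write-back path above — refine the moving summary once it passes 400 tokens, then persist it so get_memory() can reseed after a restart. count_tokens and summarize stand in for llm_chat.get_num_tokens() and the class's own summarize().)

async def persist_summary(memory, bot_rooms, room_id, count_tokens, summarize):
    summary_len = count_tokens(memory.moving_summary_buffer)
    if summary_len > 400:
        # Same threshold as the patch: compress before persisting.
        memory.moving_summary_buffer = await summarize(memory.moving_summary_buffer)
    # Persist so the next get_memory() call can reseed from it.
    bot_rooms[room_id]["moving_summary"] = memory.moving_summary_buffer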
@@ -293,11 +283,13 @@ class AI(object):
         await asyncio.sleep(0) # yield for matrix-nio
         diary_chain = LLMChain(llm=self.llm_summary, prompt=prompt_outline, verbose=True)
         conversation_memory = self.get_memory(room_id)
-        #self.rooms[message.room_id]["summary"]
-        string_messages = []
-        for m in conversation_memory.chat_memory_day.messages:
-            string_messages.append(f"{message.role}: {message.content}")
-        return await diary_chain.apredict(text="\n".join(string_messages))
+        if self.llm_summary.get_num_tokens(conversation_memory.buffer_day) < 1600:
+            input_text = conversation_memory.buffer_day
+        else:
+            input_text = conversation_memory.moving_summary_buffer
+        return await diary_chain.apredict(text=input_text)
 
     async def agent(self):
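(Sketch, not part of the patch: the diary now picks its input by token budget — the verbatim day buffer when it fits under 1600 tokens, else the already-compressed moving summary. count_tokens stands in for llm_summary.get_num_tokens().)

def pick_diary_input(buffer_day, moving_summary, count_tokens):
    if count_tokens(buffer_day) < 1600:  # same threshold as the patch
        return buffer_day                # short enough to outline verbatim
    return moving_summary                # too long: use the rolling summary

text = pick_diary_input("Human: hi\nJulia: hello", "No previous events.",
                        lambda s: len(s.split()))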
@@ -371,7 +363,7 @@ class AI(object):
             content=f"~~~~ {datetime.now().strftime('%A, %B %d, %Y')} ~~~~",
             additional_kwargs={
                 "timestamp": datetime.now().timestamp(),
-                "user_name": self.bot.name,
+                "user_name": None,
                 "event_id": None,
                 "user_id": None,
                 "room_name": None,