
bot summary memory

Branch: master
Author: Hendrik Langer, 2 years ago
Commit: 2dbb98cbc8
  1. matrix_pygmalion_bot/bot/ai/langchain.py (89 lines changed)
  2. matrix_pygmalion_bot/bot/ai/prompts.py (66 lines changed)
  3. matrix_pygmalion_bot/bot/core.py (39 lines changed)
  4. matrix_pygmalion_bot/bot/wrappers/langchain_koboldcpp.py (1 line changed)
  5. requirements.txt (1 line changed)

matrix_pygmalion_bot/bot/ai/langchain.py (89 lines changed)

@@ -5,7 +5,7 @@ from .langchain_memory import BotConversationSummerBufferWindowMemory
from langchain import PromptTemplate
from langchain import LLMChain, ConversationChain
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory, CombinedMemory, ConversationSummaryMemory
from langchain.chains.base import Chain
from typing import Dict, List
@@ -15,6 +15,9 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma
import humanize
import datetime as dt
import logging
logger = logging.getLogger(__name__)
@@ -65,15 +68,16 @@ class AI(object):
def get_memory(self, message):
if not message.room_id in self.rooms:
self.rooms[message.room_id] = {}
memory = ConversationBufferMemory(memory_key="chat_history", human_prefix=message.user_name, ai_prefix=self.bot.name)
memory = ConversationBufferMemory(memory_key="chat_history", input_key="input", human_prefix=message.user_name, ai_prefix=self.bot.name)
self.rooms[message.room_id]["memory"] = memory
self.rooms[message.room_id]["summary"] = "No previous events."
memory.chat_memory.add_ai_message(self.bot.greeting)
#memory.save_context({"input": None, "output": self.bot.greeting})
memory.load_memory_variables({})
else:
memory = self.rooms[message.room_id]["memory"]
print(f"memory: {memory.load_memory_variables({})}")
print(f"memory has an estimated {self.llm_chat.get_num_tokens(memory.buffer)} number of tokens")
#print(f"memory: {memory.load_memory_variables({})}")
#print(f"memory has an estimated {self.llm_chat.get_num_tokens(memory.buffer)} number of tokens")
return memory
@@ -120,18 +124,59 @@ class AI(object):
output = await chain.arun(message.message)
return output.strip()
async def generate_roleplay(self, message, reply_fn, typing_fn):
memory = self.get_memory(message)
readonlymemory = ReadOnlySharedMemory(memory=memory)
chat_ai_name = self.bot.name
chat_human_name = message.user_name
if False: # model is vicuna
chat_ai_name = "### Assistant"
chat_human_name = "### Human"
conversation_memory = self.get_memory(message)
readonlymemory = ReadOnlySharedMemory(memory=conversation_memory)
summary_memory = ConversationSummaryMemory(llm=self.llm_summary, memory_key="summary", input_key="input")
#combined_memory = CombinedMemory(memories=[conversation_memory, summary_memory])
k = 5 #5
max_k = 12 #10
if len(conversation_memory.chat_memory.messages) > max_k*2:
async def make_progressive_summary(previous_summary, chat_history_text_string):
await asyncio.sleep(0) # yield for matrix-nio
#self.rooms[message.room_id]["summary"] = summary_memory.predict_new_summary(conversation_memory.chat_memory.messages, previous_summary).strip()
summary_chain = LLMChain(llm=self.llm_summary, prompt=prompt_progressive_summary)
self.rooms[message.room_id]["summary"] = await summary_chain.apredict(summary=previous_summary, chat_history=chat_history_text_string)
# ToDo: maybe add an add_task_done callback and don't access the variable directly from here?
logger.info(f"New summary is: \"{self.rooms[message.room_id]['summary']}\"")
conversation_memory.chat_memory.messages = conversation_memory.chat_memory.messages[-k * 2 :]
conversation_memory.load_memory_variables({})
#summary = summarize(conversation_memory.buffer)
#print(summary)
#return summary
logger.info("memory progressive summary scheduled...")
await self.bot.schedule(self.bot.queue, make_progressive_summary, self.rooms[message.room_id]["summary"], conversation_memory.buffer)
#t = dt.datetime.fromtimestamp(message.timestamp)
#when = humanize.naturaltime(t)
#print(when)
# ToDo: either use prompt.format() to fill out the pygmalion prompt and use
# the resulting template text to feed it into the instruct prompt's instruction
# or do this with the prompt.partial()
prompt = prompt_vicuna.partial(
ai_name=self.bot.name,
persona=self.bot.persona,
scenario=self.bot.scenario,
summary=self.rooms[message.room_id]["summary"],
human_name=message.user_name,
#example_dialogue=replace_all(self.bot.example_dialogue, {"{{user}}": message.user_name, "{{char}}": self.bot.name})
ai_name_chat=self.bot.name,
#example_dialogue=replace_all(self.bot.example_dialogue, {"{{user}}": chat_human_name, "{{char}}": chat_ai_name})
ai_name_chat=chat_ai_name,
)
chain = ConversationChain(
@@ -144,19 +189,35 @@ class AI(object):
# output = llm_chain(inputs={"ai_name": self.bot.name, "persona": self.bot.persona, "scenario": self.bot.scenario, "human_name": message.user_name, "ai_name_chat": self.bot.name, "chat_history": "", "input": message.message})['results'][0]['text']
#roleplay_chain = RoleplayChain(llm_chain=chain, character_name=self.bot.name, persona=self.bot.persona, scenario=self.bot.scenario, ai_name_chat=self.bot.name, human_name_chat=message.user_name)
#roleplay_chain = RoleplayChain(llm_chain=chain, character_name=self.bot.name, persona=self.bot.persona, scenario=self.bot.scenario, ai_name_chat=chat_ai_name, human_name_chat=chat_human_name)
stop = ['<|endoftext|>', f"\n{message.user_name}:"]
print(f"Message is: \"{message.message}\"")
stop = ['<|endoftext|>', f"\n{chat_human_name}:"]
#print(f"Message is: \"{message.message}\"")
output = await chain.arun({"input":message.message, "stop": stop})
memory.chat_memory.add_user_message(message.message)
memory.chat_memory.add_ai_message(output)
output = output.replace("<BOT>", self.bot.name).replace("<USER>", message.user_name)
output = output.replace("### Assistant", self.bot.name)
output = output.replace(f"\n{self.bot.name}:", "")
output = output.strip()
memory.load_memory_variables({})
if "*activates the neural uplink*" in output:
pass # call agent
conversation_memory.chat_memory.add_user_message(message.message)
conversation_memory.chat_memory.add_ai_message(output)
conversation_memory.load_memory_variables({})
return output.strip()
async def summarize(self, text):
summary_chain = LLMChain(llm=llm_summary, prompt=prompt_summary, verbose=True)
return await summary_chain.arun(text=text)
#ToDo: We could summarize the whole dialogue here, leave half of it in the buffer, and skip the next summary until that half has been flushed, too?
async def prime_llm(self, text):
self.llm_chat(text, max_tokens=1)
def replace_all(text, dic):
    for i, j in dic.items():
        text = text.replace(i, j)
    return text
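The ToDo above about prompt.format() / prompt.partial() boils down to binding the static character fields once, so the chain only receives the per-turn inputs. A minimal sketch of the partial() route, with an abbreviated template and made-up example values:

    from langchain import PromptTemplate

    # Abbreviated stand-in for the prompt_vicuna template above.
    template = PromptTemplate.from_template(
        "{persona}\nSummary: {summary}\n{chat_history}\n{human_name}: {input}\n{ai_name_chat}:"
    )

    # partial() binds the static fields once; the ConversationChain then
    # only has to supply chat_history and input on every turn.
    prompt = template.partial(
        persona="A friendly fictional character.",  # example value, not from the repo
        summary="No previous events.",
        human_name="User",
        ai_name_chat="Bot",
    )
    print(prompt.format(chat_history="", input="Hello!"))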

matrix_pygmalion_bot/bot/ai/prompts.py (66 lines changed)

@@ -42,12 +42,15 @@ prompt_alpaca = PromptTemplate(
prompt_vicuna = PromptTemplate.from_template("""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
Roleplay the character {ai_name}, that is described in the following lines. You always stay in character.
Given a context, compose a storytelling dialogue. Also narrate {ai_name}'s thoughts and situation in a vivid and detailed way. Use classic free-form RP online style. Surround narration and thoughts with asterisks.
### Input:
{ai_name}'s Persona: {persona}
Scenario: {scenario}
Summary of previous events:
{summary}
### Response:
Current conversation:
{chat_history}
@@ -78,11 +81,55 @@ prompt_instruct_with_input = PromptTemplate.from_template(
{output}"""
)
prompt_instruct = PromptTemplate.from_template(
"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Response:
{output}"""
)
template_question_simple = """Question: {question}
Answer: Let's think step by step."""
prompt_summary = PromptTemplate.from_template(
"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
Summarize the following text in one paragraph.
### Input:
{text}
### Response:
"""
)
prompt_progressive_summary = PromptTemplate.from_template(
"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
Based on the provided summary and new lines of conversation, give a brief and refined final summary. Only include relevant facts and key takeaways. Skip mundane details of prior events in the final and refined summary.
### Input:
Current summary:
{summary}
New lines of conversation:
{chat_history}
### Response:
New summary:
"""
)
#Progressively summarize the lines of conversation provided, adding onto the previous summary returning a new summary.
# Roleplay the character that is described in the following lines. You always stay in character.
# Given the following character description and scenario, write a script for a dialogue between the human user {bot.user_name} and the fictional AI assistant {bot.name}. Play the role of the character {bot.name}.
@@ -116,3 +163,20 @@ Answer: Let's think step by step."""
#"Explain your reasoning"
#"Provide details to support your answer"
#"Compare and contrast your answer with alternatives"
# From the alpaca dataset:
#Add a single line of dialogue to react to this situation.
#Given two people and their conversations, output a dialogue between them
#Given two people, generate a few sentences of dialogue that convey how they feel.
#Create a dialogue between two people who just met at a party.
#Generate two thousand words of dialogue between two characters.
#Generate the script of a dialogue between two characters with the following situation.
#Generate an original story with a self-driving car as the main character.
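prompt_progressive_summary pairs with the trimming in generate_roleplay() above: once the buffer holds more than max_k exchanges, the history is folded into the running summary and only the last k exchanges stay in the buffer. The bookkeeping in isolation (messages are plain strings here for brevity; summarize_fn stands in for the LLMChain apredict call):

    def fold_history(messages, summary, summarize_fn, k=5, max_k=12):
        # Two messages (user + bot) per exchange; mirrors the k*2 / max_k*2
        # constants used in generate_roleplay() above.
        if len(messages) <= max_k * 2:
            return messages, summary
        new_summary = summarize_fn(summary=summary, chat_history="\n".join(messages))
        return messages[-k * 2:], new_summary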

matrix_pygmalion_bot/bot/core.py (39 lines changed)

@@ -29,16 +29,14 @@ class ChatBot(object):
#print(f"Hello, I'm {name}")
def init_character(self, persona, scenario, greeting, example_dialogue=[], nsfw=False, temperature=0.72):
self.persona = persona
self.scenario = scenario
self.greeting = greeting
self.example_dialogue = example_dialogue
self.persona = persona.replace('\\n', '\n').replace('{{char}}', self.name)
self.scenario = scenario.replace('\\n', '\n').replace('{{char}}', self.name)
self.greeting = greeting.replace('\\n', '\n').replace('{{char}}', self.name)
self.example_dialogue = [i.replace('\\n', '\n').replace('{{char}}', self.name) for i in example_dialogue]
# .replace("\\n", "\n") ??????
self.nsfw = nsfw
self.temperature = temperature
#self.example_dialogue = self.example_dialogue.replace('{{char}}', self.name)
async def persist(self, data_dir):
self.chatlog_path = os.path.join(data_dir, "chatlogs/")
self.images_path = os.path.join(data_dir, "images/")
@@ -146,16 +144,15 @@ class ChatBot(object):
# print(
# f"{room.display_name} |{encrypted_symbol}| {room.user_name(event.sender)}: {event.body}"
# )
if room.is_named:
print(f"room.display_name: {room.display_name}")
if room.is_group:
print(f"room.group_name(): {room.group_name()}")
print(f"room.joined_count: {room.joined_count}")
print(f"room.member_count: {room.member_count}")
print(f"room.encrypted: {room.encrypted}")
print(f"room.users: {room.users}")
print(f"room.room_id: {room.room_id}")
# if room.is_named:
# print(f"room.display_name: {room.display_name}")
# if room.is_group:
# print(f"room.group_name(): {room.group_name()}")
# print(f"room.joined_count: {room.joined_count}")
# print(f"room.member_count: {room.member_count}")
# print(f"room.encrypted: {room.encrypted}")
# print(f"room.users: {room.users}")
# print(f"room.room_id: {room.room_id}")
if self.name.casefold() == message.user_name.casefold():
"""Bot and user have the same name"""
@@ -168,7 +165,7 @@ class ChatBot(object):
self.chatlog.save(message)
return
if "disabled" in self.rooms[message.room_id] and self.rooms[message.room_id]["disabled"] == True and not message.message.startswith('!start'):
if "disabled" in self.rooms[message.room_id] and self.rooms[message.room_id]["disabled"] == True and not (message.message.startswith('!start') or message.message.startswith('!begin')):
return
await self.connection.room_read_markers(room.room_id, event.event_id, event.event_id)
@@ -218,6 +215,8 @@ class ChatBot(object):
elif message.message.startswith('!temperature'):
self.temperature = float( message.message.removeprefix('!temperature').strip() )
elif message.message.startswith('!begin'):
self.rooms[message.room_id]["disabled"] = False
self.write_conf2(self.rooms)
self.chatlog.clear(message.room_id)
# ToDo reset time / ticks
await reply_fn(self.greeting)
@@ -268,3 +267,9 @@ class ChatBot(object):
task.add_done_callback(done_callback)
self.background_tasks.add(task)
task.add_done_callback(self.background_tasks.discard)
# closure
def outer_function(self, x):
    def inner_function(y):
        return x + y
    return inner_function
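init_character() now normalizes the character-card strings: literal \n sequences become real newlines and the {{char}} placeholder is replaced with the bot's name. The same transformation in isolation (the example strings are made up):

    def normalize_card(text, char_name):
        # Literal "\n" from the card file -> real newline,
        # "{{char}}" placeholder -> the configured bot name.
        return text.replace('\\n', '\n').replace('{{char}}', char_name)

    print(normalize_card("{{char}} waves.\\nHello!", "Julia"))
    # Julia waves.
    # Hello!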

matrix_pygmalion_bot/bot/wrappers/langchain_koboldcpp.py (1 line changed)

@@ -68,6 +68,7 @@ class KoboldCpp(LLM):
}
logger.info(f"sending request to koboldcpp.")
logger.warning(f"WARNING: request is blocking. try to use llm's _acall()")
TRIES = 30
for i in range(TRIES):
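TRIES bounds the retry loop around the blocking koboldcpp request. The general shape of such a loop (request_fn, the caught exception, and the pause are assumptions, not the wrapper's actual code):

    import time

    TRIES = 30

    def call_with_retries(request_fn):
        last_error = None
        for i in range(TRIES):
            try:
                return request_fn()
            except ConnectionError as e:  # assumed failure mode
                last_error = e
                time.sleep(1)             # assumed pause between attempts
        raise RuntimeError(f"request failed after {TRIES} tries") from last_error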

requirements.txt (1 line changed)

@@ -9,3 +9,4 @@ webuiapi
langchain
chromadb
sentence-transformers
humanize
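humanize is added for the (still commented-out) timestamp handling in langchain.py, where a message timestamp would be rendered as a relative time:

    import datetime as dt
    import humanize

    timestamp = 1680000000           # example Unix timestamp, not from the repo
    t = dt.datetime.fromtimestamp(timestamp)
    print(humanize.naturaltime(t))   # e.g. "2 years ago"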
