Hendrik Langer · 1 year ago
parent commit 0ba76fa7e8
  1. matrix_pygmalion_bot/bot/ai/langchain.py (262 changes)
  2. matrix_pygmalion_bot/bot/ai/langchain_memory.py (59 changes)
  3. matrix_pygmalion_bot/bot/ai/prompts.py (86 changes)
  4. matrix_pygmalion_bot/bot/core.py (58 changes)
  5. matrix_pygmalion_bot/bot/utilities/messages.py (18 changes)
  6. matrix_pygmalion_bot/bot/wrappers/langchain_koboldcpp.py (5 changes)
  7. matrix_pygmalion_bot/connections/matrix.py (15 changes)
  8. requirements.txt (5 changes)

matrix_pygmalion_bot/bot/ai/langchain.py (262 changes)

@@ -1,22 +1,29 @@
import asyncio
import time
import os, time
import re  # assumed missing: CustomOutputParser below uses re.search
from .prompts import *
from .langchain_memory import BotConversationSummerBufferWindowMemory
#from .langchain_memory import BotConversationSummaryBufferWindowMemory, TestMemory
from ..utilities.messages import Message
from langchain import PromptTemplate
from langchain import LLMChain, ConversationChain
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory, CombinedMemory, ConversationSummaryMemory
from langchain.chains.base import Chain
from typing import Dict, List
from typing import Dict, List, Union
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser, ZeroShotAgent
from langchain.schema import AgentAction, AgentFinish
from langchain.schema import AIMessage, HumanMessage, SystemMessage, ChatMessage
from langchain.utilities import OpenWeatherMapAPIWrapper, SearxSearchWrapper, PythonREPL
from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
import humanize
import datetime as dt
from datetime import datetime, timedelta
import logging
@@ -46,7 +53,31 @@ class RoleplayChain(Chain):
other_keys = {k: v for k, v in inputs.items() if k not in self.input_keys}
result = self.llm_chain.predict(**other_keys)
return {self.output_key: result}
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
# Check if agent should finish
if "Final Answer:" in llm_output:
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
# Parse out the action and action input
regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
regex = r"Action\s*\d*\s*:(.*?)[\s]*[\"\'](.*)[\"\']"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{llm_output}`")
action = match.group(1).strip()
action_input = match.group(2)
# Return the action and action input
return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
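A minimal sketch of what this parser extracts; the completion text here is made up:

    parser = CustomOutputParser()
    step = parser.parse("Thought: check the weather\nAction: Weather\nAction Input: London,GB")
    # -> AgentAction(tool="Weather", tool_input="London,GB", log=...)
    final = parser.parse("Thought: I know it now\nFinal Answer: mild and rainy")
    # -> AgentFinish(return_values={"output": "mild and rainy"}, log=...)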
class AI(object):
@@ -59,63 +90,87 @@ class AI(object):
from ..wrappers.langchain_koboldcpp import KoboldCpp
self.llm_chat = KoboldCpp(temperature=self.bot.temperature, endpoint_url="http://172.16.85.10:5001/api/latest/generate", stop=['<|endoftext|>'])
self.llm_summary = KoboldCpp(temperature=0.2, endpoint_url="http://172.16.85.10:5001/api/latest/generate", stop=['<|endoftext|>'])
self.llm_summary = KoboldCpp(temperature=0.2, endpoint_url="http://172.16.85.10:5001/api/latest/generate", stop=['<|endoftext|>'], max_tokens=512)
self.text_wrapper = text_wrapper
self.image_wrapper = image_wrapper
self.embeddings = SentenceTransformerEmbeddings()
#embeddings = SentenceTransformerEmbeddings(model="all-MiniLM-L6-v2")
self.db = Chroma(persist_directory=os.path.join(self.memory_path, f'chroma-db'), embedding_function=self.embeddings)
#self.memory = BotConversationSummerBufferWindowMemory(llm=self.llm_summary, max_token_limit=1200, min_token_limit=200)
def get_memory(self, message):
if not message.room_id in self.rooms:
self.rooms[message.room_id] = {}
memory = ConversationBufferMemory(memory_key="chat_history", input_key="input", human_prefix=message.user_name, ai_prefix=self.bot.name)
self.rooms[message.room_id]["memory"] = memory
self.rooms[message.room_id]["summary"] = "No previous events."
def get_memory(self, room_id, human_prefix="Human"):
if not room_id in self.rooms:
self.rooms[room_id] = {}
memory = ConversationBufferMemory(memory_key="chat_history", input_key="input", human_prefix=human_prefix, ai_prefix=self.bot.name)
self.rooms[room_id]["memory"] = memory
self.rooms[room_id]["summary"] = "No previous events."
memory.chat_memory.add_ai_message(self.bot.greeting)
#memory.save_context({"input": None, "output": self.bot.greeting})
memory.load_memory_variables({})
else:
memory = self.rooms[message.room_id]["memory"]
memory = self.rooms[room_id]["memory"]
#print(f"memory: {memory.load_memory_variables({})}")
#print(f"memory has an estimated {self.llm_chat.get_num_tokens(memory.buffer)} number of tokens")
return memory
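Sketched per-room usage, assuming ai is an AI instance; the room id is illustrative:

    memory = ai.get_memory("!example:matrix.org", human_prefix="Julia")
    memory.chat_memory.add_user_message("Hello!")
    print(memory.load_memory_variables({})["chat_history"])  # greeting, then "Julia: Hello!"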
async def add_chat_message(self, message):
conversation_memory = self.get_memory(message.room_id)
langchain_message = message.to_langchain()
if message.user_id == self.bot.connection.user_id:
langchain_message.role = self.bot.name
conversation_memory.chat_memory.messages.append(langchain_message)
async def generate(self, message, reply_fn, typing_fn):
embeddings = SentenceTransformerEmbeddings()
#embeddings = SentenceTransformerEmbeddings(model="all-MiniLM-L6-v2")
async def clear(self, room_id):
conversation_memory = self.get_memory(room_id)
conversation_memory.clear()
loader = TextLoader('./germany.txt')
async def ingest_textfile(self, filename, category):
loader = TextLoader(filename)
documents = loader.load()
documents[0].metadata['indexed'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
documents[0].metadata['category'] = category
text_splitter = RecursiveCharacterTextSplitter(
# Set a really small chunk size, just to show.
chunk_size = 600,
chunk_overlap = 100,
chunk_size = 1024,
chunk_overlap = 80,
length_function = len,
#length_function = self.llm_chat.get_num_tokens, # The embeddings are generated with SentenceTransformers, not this model
)
docs = text_splitter.split_documents(documents)
db = Chroma(persist_directory=os.path.join(self.memory_path, f'chroma-db'), embedding_function=embeddings)
for i in range(len(docs)):
docs[i].metadata['part'] = f"{i}/{len(docs)}"
print(f"Indexing {len(docs)} documents")
texts = [doc.page_content for doc in docs]
metadatas = [doc.metadata for doc in docs]
#db.add_texts(texts=texts, metadatas=metadatas, ids=None)
#db.persist()
query = "How is climate in Germany?"
output_docs = db.similarity_search_with_score(query)
self.db.add_texts(texts=texts, metadatas=metadatas, ids=None)
self.db.persist()
async def search_vectordb(self, query, category):
#query = "How is climate in Germany?"
#retriever = db.as_retriever()
#docs = retriever.get_relevant_documents(query)
if category:
#https://github.com/chroma-core/chroma/blob/main/examples/where_filtering.ipynb
output_docs = self.db.similarity_search_with_score(query, filter={"category": category})
else:
output_docs = self.db.similarity_search_with_score(query)
print(query)
print('###')
for doc, score in output_docs:
print("-" * 80)
print("Score: ", score)
print(doc.page_content)
#print(doc.page_content)
print(doc)
print("-" * 80)
async def generate(self, message, reply_fn, typing_fn):
prompt_template = "{input}"
chain = LLMChain(
llm=self.llm_chat,
@@ -126,6 +181,17 @@ class AI(object):
async def generate_roleplay(self, message, reply_fn, typing_fn):
langchain_human_message = HumanMessage(
content=message.message,
additional_kwargs={
"timestamp": message.timestamp,
"user_name": message.user_name,
"event_id": message.event_id,
"user_id": message.user_id,
"room_name": message.room_name,
"room_id": message.room_id,
}
)
chat_ai_name = self.bot.name
chat_human_name = message.user_name
@@ -133,19 +199,20 @@ class AI(object):
chat_ai_name = "### Assistant"
chat_human_name = "### Human"
conversation_memory = self.get_memory(message)
conversation_memory = self.get_memory(message.room_id, message.user_name)
conversation_memory.human_prefix = chat_human_name
readonlymemory = ReadOnlySharedMemory(memory=conversation_memory)
summary_memory = ConversationSummaryMemory(llm=self.llm_summary, memory_key="summary", input_key="input")
#combined_memory = CombinedMemory(memories=[conversation_memory, summary_memory])
k = 5 #5
max_k = 12 #10
k = 1 # 5
max_k = 3 # 12
if len(conversation_memory.chat_memory.messages) > max_k*2:
async def make_progressive_summary(previous_summary, chat_history_text_string):
await asyncio.sleep(0) # yield for matrix-nio
#self.rooms[message.room_id]["summary"] = summary_memory.predict_new_summary(conversation_memory.chat_memory.messages, previous_summary).strip()
summary_chain = LLMChain(llm=self.llm_summary, prompt=prompt_progressive_summary)
summary_chain = LLMChain(llm=self.llm_summary, prompt=prompt_progressive_summary, verbose=True)
self.rooms[message.room_id]["summary"] = await summary_chain.apredict(summary=previous_summary, chat_history=chat_history_text_string)
# ToDo: maybe add an add_task_done callback and don't access the variable directly from here?
logger.info(f"New summary is: \"{self.rooms[message.room_id]['summary']}\"")
@@ -155,12 +222,13 @@ class AI(object):
#print(summary)
#return summary
logger.info("memory progressive summary scheduled...")
await self.bot.schedule(self.bot.queue, make_progressive_summary, self.rooms[message.room_id]["summary"], conversation_memory.buffer) #.add_done_callback(
#t = dt.datetime.fromtimestamp(message.timestamp)
#t = datetime.fromtimestamp(message.timestamp)
#when = humanize.naturaltime(t)
#print(when)
@@ -193,26 +261,150 @@ class AI(object):
stop = ['<|endoftext|>', f"\n{chat_human_name}"]
#print(f"Message is: \"{message.message}\"")
await asyncio.sleep(0)
output = await chain.arun({"input":message.message, "stop": stop})
output = output.replace("<BOT>", self.bot.name).replace("<USER>", message.user_name)
output = output.replace("### Assistant", self.bot.name)
output = output.replace(f"\n{self.bot.name}: ", " ")
output = output.strip()
if "*activates the neural uplink*" in output:
langchain_ai_message = AIMessage(
content=output,
additional_kwargs={
"timestamp": datetime.now().timestamp(),
"user_name": self.bot.name,
"event_id": None,
"user_id": None,
"room_name": message.room_name,
"room_id": message.room_id,
}
)
if "*activates the neural uplink*" in output.casefold():
pass # call agent
#conversation_memory.chat_memory.messages.append(ChatMessage(content=message, role=message.user_name))
conversation_memory.chat_memory.add_user_message(message.message)
conversation_memory.chat_memory.add_ai_message(output)
conversation_memory.load_memory_variables({})
if not "messages_today" in self.rooms[message.room_id]:
self.rooms[message.room_id]["messages_today"] = []
self.rooms[message.room_id]["messages_today"].append(langchain_human_message)
self.rooms[message.room_id]["messages_today"].append(langchain_ai_message)
return output.strip()
async def summarize(self, text):
summary_chain = LLMChain(llm=llm_summary, prompt=prompt_summary, verbose=True)
return await summary_chain.arun(text=text)
#ToDo: We can summarize the whole dialogue here, let half of it in the buffer but skip doing a summary until this is flushed, too?
await asyncio.sleep(0) # yield for matrix-nio
summary_chain = LLMChain(llm=self.llm_summary, prompt=prompt_summary, verbose=True)
return await summary_chain.arun(text=text)
#ToDo: We can summarize the whole dialogue here, let half of it in the buffer but skip doing a summary until this is flushed, too?
#ToDo: max_tokens and stop
async def diary(self, room_id):
await asyncio.sleep(0) # yield for matrix-nio
diary_chain = LLMChain(llm=self.llm_summary, prompt=prompt_outline, verbose=True)
#self.rooms[message.room_id]["summary"]
string_messages = []
for m in self.rooms[room_id]["messages_today"]:
string_messages.append(f"{message.user_name}: {message.message}")
return await diary_chain.apredict(text="\n".join(string_messages))
async def agent(self):
os.environ["OPENWEATHERMAP_API_KEY"] = "82452fdb0d1e0e805ac096db87914342"
# Tools
search = DuckDuckGoSearchAPIWrapper()
weather = OpenWeatherMapAPIWrapper()
search2 = SearxSearchWrapper(searx_host="https://search.mdosch.de")
python_repl = PythonREPL()
tools = [
Tool(
name = "Search",
func=search.run,
description="useful for when you need to answer questions about current events"
),
Tool(
name = "Searx Search",
func=search2.run,
description="useful for when you need to answer questions about current events"
),
Tool(
name = "Weather",
func=weather.run,
description="Useful for fetching current weather information for a specified location. Input should be a location string (e.g. 'London,GB')."
),
Tool(
name = "Summary",
func=summary_chain.run,  # note: no summary_chain is defined in this method yet
description="useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary."
)
]
prompt = ZeroShotAgent.create_prompt(
tools=tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"]
)
output_parser = CustomOutputParser()
# LLM chain consisting of the LLM and a prompt
llm_chain = LLMChain(llm=self.llm_chat, prompt=prompt_agent)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
#agent = initialize_agent(tools, llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, return_intermediate_steps=True, memory=memory)
#tool_names = [tool.name for tool in tools]
#agent = LLMSingleActionAgent(
# llm_chain=llm_chain,
# output_parser=output_parser,
# stop=["\nObservation:"],
# allowed_tools=tool_names,
# verbose=True,
#)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)  # no memory object is defined in this method yet
await agent_executor.arun(input="How many people live in canada as of 2023?")
async def sleep(self):
# Write Date into chat history
for room_id in self.rooms.keys():
#fake_message = Message(datetime.now().timestamp(), self.bot.name, "", event_id=None, user_id=None, room_name=None, room_id=room_id)
conversation_memory = self.get_memory(room_id)
message = SystemMessage(
content=f"~~~~ {datetime.now().strftime('%A, %B %d, %Y')} ~~~~",
additional_kwargs={
"timestamp": datetime.now().timestamp(),
"user_name": self.bot.name,
"event_id": None,
"user_id": None,
"room_name": None,
"room_id": room_id,
}
)
conversation_memory.chat_memory.messages.append(message)
#conversation_memory.chat_memory.add_system_message(message)
# Summarize the last day and save a diary entry
yesterday = ( datetime.now() - timedelta(days=1) ).strftime('%Y-%m-%d')
for room_id in self.rooms.keys():
if "messages_today" in self.rooms[room_id]:
self.bot.rooms[room_id]["diary"][yesterday] = await self.diary(room_id)
# Calculate new goals for the character
# Update stats
# Let background tasks run
self.rooms[room_id]["messages_today"] = []
await self.bot.write_conf2(self.bot.rooms)
async def prime_llm(self, text):
self.llm_chat(text, max_tokens=1)

matrix_pygmalion_bot/bot/ai/langchain_memory.py (59 changes)

@@ -6,8 +6,65 @@ from langchain.memory.prompt import SUMMARY_PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel, BaseMessage, get_buffer_string
from ..utilities.messages import Message
class BotConversationSummerBufferWindowMemory(BaseChatMemory):
class ChatMessageHistory(BaseModel):
messages: List[Message] = []
def add_user_message(self, message: Message) -> None:
self.messages.append(message)
def add_ai_message(self, message: Message) -> None:
self.messages.append(message)
def add_system_message(self, message: Message) -> None:
self.messages.append(message)
def add_chat_message(self, message: Message) -> None:
self.messages.append(message)
def clear(self) -> None:
self.messages = []
class TestMemory(BaseMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
chat_memory: ChatMessageHistory = Field(default_factory=ChatMessageHistory)
# buffer: str = ""
output_key: Optional[str] = None
input_key: Optional[str] = None
memory_key: str = "history" #: :meta private:
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
string_messages = []
for m in self.chat_memory.messages:
string_messages.append(f"{m.user_name}: {m.message}")
return {self.memory_key: "\n".join(string_messages)}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_user_message(input_str)
self.chat_memory.add_ai_message(output_str)
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
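A small rendering sketch for TestMemory; the Message values are illustrative, and the positional signature is assumed to match Message.from_matrix:

    import time
    memory = TestMemory(memory_key="chat_history")
    memory.chat_memory.add_user_message(Message(time.time(), "Julia", "Hi!", None, None, None, "!room:example.org"))
    print(memory.load_memory_variables({}))  # {"chat_history": "Julia: Hi!"}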
class BotConversationSummaryBufferWindowMemory(BaseChatMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"

matrix_pygmalion_bot/bot/ai/prompts.py (86 changes)

@@ -129,6 +129,71 @@ New summary:
)
#Progressively summarize the lines of conversation provided, adding onto the previous summary returning a new summary.
prompt_outline = PromptTemplate.from_template(
"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
Provide an outline of the character's day in keywords.
### Input:
{text}
### Response:
"""
)
# briefly, as a list, use bullet points, outline the main points what character needs to remember about the day, in note form, review ... point by point
prompt_agent = PromptTemplate.from_template(
"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request and explain what actions were used.
### Instruction:
Answer the following questions as best you can. Speak like a pirate when you give the Final Answer. You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin! Remember to speak as a pirate when giving your final answer. Use lots of "Arg"s
### Input:
{input}
### Response:
{agent_scratchpad}
"""
)
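A rendering sketch for this template; the tool list is illustrative and the question echoes the agent test elsewhere in this commit:

    print(prompt_agent.format(
        tools="Search: useful for questions about current events",
        tool_names="Search",
        input="How many people live in canada as of 2023?",
        agent_scratchpad="",
    ))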
prompt_agent2 = PromptTemplate.from_template(
"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request and explain what actions were used.
### Instruction:
Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:
{tools}
### Input:
Previous conversation:
{chat_history}
Question: {input}
Begin!
### Response:
{agent_scratchpad}
"""
)
# Roleplay the character that is described in the following lines. You always stay in character.
@@ -140,6 +205,27 @@ New summary:
# \n <EXAMPLE CHAT> YOU / MIKU:
# "instruction": "Using the given facts, create a detailed profile of the character.",
# "input": "Name: Sarah Johnson\nAge: 18\nOccupation: Waitress\nLocation: Los Angeles",
# "instruction": "Please summarize the main events in the story and explain how the characters evolve throughout the narrative."
# "instruction": "Describe the given incident in a crisp and vivid way",
# "instruction": "Describe a movie scene using vivid and descriptive language.",
# "instruction": "Imagine that you are the protagonist of the following story and generate an appropriate dialogue",
# "instruction": "Generate some ideas on what the protagonist in this story could do next.",
# "instruction": "Classify the dialogue into one of the following categories: 1) making or cancelling orders; 2) shipping & delivery; 3) change and return; 4) technical issue with website or app.",
# "instruction": "Generate a dialogue between two characters, Jack and Susan, in a restaurant.",
# You are the narrator. Add some detail to the dialogue below. Write what the character Julia thinks and does. Write a vivid and graphic description of her and her surroundings for the reader.
# https://github.com/ggerganov/llama.cpp/tree/master/examples
## prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n"
# prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n"

matrix_pygmalion_bot/bot/core.py (58 changes)

@@ -1,10 +1,13 @@
import asyncio
import concurrent.futures
import os, sys
import time
import importlib
import re
import json
import logging
from datetime import datetime, timedelta
import psutil
from functools import partial
from .memory.chatlog import ChatLog
from .utilities.messages import Message
@@ -26,6 +29,13 @@ class ChatBot(object):
task = asyncio.create_task(self.worker(f'worker-{self.name}', self.queue))
self.background_tasks.add(task)
task.add_done_callback(self.background_tasks.discard)
#loop = asyncio.get_running_loop()
#with concurrent.futures.ThreadPoolExecutor() as pool:
# task = loop.run_in_executor(pool, self.worker, f'worker-{self.name}', self.queue)
event_loop_task = asyncio.create_task(self.event_loop())
self.background_tasks.add(event_loop_task)
event_loop_task.add_done_callback(self.background_tasks.discard)
#print(f"Hello, I'm {name}")
def init_character(self, persona, scenario, greeting, example_dialogue=[], nsfw=False, temperature=0.72):
@@ -59,8 +69,8 @@ class ChatBot(object):
json.dump(data, f)
async def connect(self):
await self.connection.login()
self.connection.callbacks.add_message_callback(self.message_cb, self.redaction_cb)
await self.connection.login()
await self.schedule(self.queue, print, f"Hello, I'm {self.name}")
async def disconnect(self):
@@ -123,18 +133,29 @@ class ChatBot(object):
reply_fn = partial(self.connection.send_message, room.room_id)
typing_fn = lambda : self.connection.room_typing(room.room_id, True, 15000)
message.user_name = message.user_name.title()
if self.name.casefold() == message.user_name.casefold():
"""Bot and user have the same name"""
message.user_name += " 2" # or simply "You"
if not room.room_id in self.rooms:
self.rooms[room.room_id] = {}
self.write_conf2(self.rooms)
self.rooms[room.room_id]['tick'] = 0
self.rooms[room.room_id]['num_messages'] = 0
self.rooms[room.room_id]['diary'] = {}
await self.write_conf2(self.rooms)
# ToDo: set ticks 0 / start
if not self.connection.synced:
if not message.is_command() and not message.is_error():
await self.ai.add_chat_message(message)
self.chatlog.save(message, False)
return
if message.is_from(self.connection.user_id):
"""Skip messages from ouselves"""
self.chatlog.save(message)
await self.connection.room_read_markers(room.room_id, event.event_id, event.event_id)
return
# if event.decrypted:
@@ -154,11 +175,7 @@ class ChatBot(object):
# print(f"room.users: {room.users}")
# print(f"room.room_id: {room.room_id}")
if self.name.casefold() == message.user_name.casefold():
"""Bot and user have the same name"""
message.user_name += " 2" # or simply "You"
message.user_name = message.user_name.title()
if hasattr(self, "owner"):
if not message.is_from(self.owner):
@@ -176,6 +193,8 @@ class ChatBot(object):
# # send, mail, drop, snap picture, photo, image, portrait
else:
await self.schedule(self.queue, self.process_message, message, reply_fn, typing_fn)
self.rooms[room.room_id]['num_messages'] += 1
self.last_conversation = datetime.now()
self.chatlog.save(message)
print("done")
@@ -216,16 +235,19 @@ class ChatBot(object):
self.temperature = float( message.message.removeprefix('!temperature').strip() )
elif message.message.startswith('!begin'):
self.rooms[message.room_id]["disabled"] = False
self.write_conf2(self.rooms)
await self.write_conf2(self.rooms)
self.chatlog.clear(message.room_id)
await self.ai.clear(message.room_id)
# ToDo reset time / ticks
await reply_fn(self.greeting)
elif message.message.startswith('!start'):
self.rooms[message.room_id]["disabled"] = False
self.write_conf2(self.rooms)
await self.write_conf2(self.rooms)
elif message.message.startswith('!stop'):
self.rooms[message.room_id]["disabled"] = True
self.write_conf2(self.rooms)
await self.write_conf2(self.rooms)
elif message.message.startswith('!sleep'):
await self.schedule(self.queue, self.ai.sleep)
elif message.message.startswith('!!'):
if self.chatlog.chat_history_len(message.room_id) > 2:
for _ in range(2):
@@ -241,6 +263,24 @@ class ChatBot(object):
await reply_fn(output)
async def event_loop(self):
try:
while True:
await asyncio.sleep(60)
for room_id in self.rooms.keys():
self.rooms[room_id]["tick"] += 1
if datetime.now().hour >= 1 and datetime.now().hour < 5:
load1, load5, load15 = [x / psutil.cpu_count() * 100 for x in psutil.getloadavg()]
if load5 < 25 and load1 < 25:
if not hasattr(self, "last_sleep") or self.last_sleep + timedelta(hours=6) < datetime.now():
await self.ai.sleep()
self.last_sleep = datetime.now()
finally:
pass
# await self.write_conf2(self.name)
async def worker(self, name: str, q: asyncio.Queue) -> None:
while True:
cb, args, kwargs = await q.get()

matrix_pygmalion_bot/bot/utilities/messages.py (18 changes)

@@ -1,3 +1,4 @@
from langchain.schema import AIMessage, HumanMessage, SystemMessage, ChatMessage
class Message(object):
@@ -14,11 +15,28 @@ class Message(object):
def from_matrix(cls, room, event):
return cls(event.server_timestamp/1000, room.user_name(event.sender), event.body, event.event_id, event.sender, room.display_name, room.room_id)
def to_langchain(self):
return ChatMessage(
content=self.message,
role=self.user_name, # "chat"
additional_kwargs={
"timestamp": self.timestamp,
"user_name": self.user_name,
"event_id": self.event_id,
"user_id": self.user_id,
"room_name": self.room_name,
"room_id": self.room_id,
}
)
def is_from(self, user_id):
return self.user_id == user_id
def is_command(self):
return self.message.startswith('!')
def is_error(self):
return self.message.startswith('<ERROR>')
def __str__(self):
return str("{}: {}".format(self.user_name, self.message))
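Round-trip sketch for the new helper, with room and event coming from a matrix-nio callback:

    msg = Message.from_matrix(room, event)   # nio MatrixRoom / RoomMessageText
    lc = msg.to_langchain()                  # ChatMessage with role=user name, content=body
    assert lc.additional_kwargs["room_id"] == msg.room_id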

matrix_pygmalion_bot/bot/wrappers/langchain_koboldcpp.py (5 changes)

@@ -6,6 +6,7 @@ from typing import Any, List, Mapping, Optional
import json
import requests
import functools
from langchain.llms.base import LLM
@@ -122,7 +123,9 @@ class KoboldCpp(LLM):
TRIES = 30
for i in range(TRIES):
try:
r = requests.post(self.endpoint_url, json=input_data, headers=headers, timeout=600)
loop = asyncio.get_running_loop()
#r = requests.post(self.endpoint_url, json=input_data, headers=headers, timeout=600)
r = await loop.run_in_executor(None, functools.partial(requests.post, self.endpoint_url, json=input_data, headers=headers, timeout=600))
r_json = r.json()
except requests.exceptions.RequestException as e:
raise ValueError(f"http connection error.")

matrix_pygmalion_bot/connections/matrix.py (15 changes)

@@ -15,13 +15,15 @@ logger = logging.getLogger(__name__)
class Callbacks(object):
"""Class to pass client to callback methods."""
def __init__(self, client: AsyncClient):
def __init__(self):
self.message_callbacks = []
self.message_redaction_callbacks = []
def setup_callbacks(self, client: AsyncClient):
self.client = client
self.client.add_event_callback(self.message_cb, RoomMessageText)
self.client.add_event_callback(self.invite_cb, InviteEvent)
self.client.add_event_callback(self.redaction_cb, RedactionEvent)
self.message_callbacks = []
self.message_redaction_callbacks = []
def add_message_callback(self, callback, redaction_callback=None):
self.message_callbacks.append(callback)
@@ -153,6 +155,7 @@ class ChatClient(object):
self.password = password
self.device_name = device_name
self.synced = False
self.callbacks = Callbacks()
async def persist(self, data_dir):
#self.data_dir = data_dir
@@ -186,7 +189,7 @@ class ChatClient(object):
config=client_config,
)
self.callbacks = Callbacks(self.client)
self.callbacks.setup_callbacks(self.client)
resp = await self.client.login(self.password, device_name=self.device_name)
# check that we logged in successfully
@@ -209,7 +212,7 @@ class ChatClient(object):
config=client_config,
)
self.callbacks = Callbacks(self.client)
self.callbacks.setup_callbacks(self.client)
# self.client.user_id=config["user_id"],
# self.client.device_id=config["device_id"],
@@ -328,7 +331,7 @@ class ChatClient(object):
await self.client.synced.wait()
logger.info("Client is synced")
self.synced = True
logger.info(f"{self.client.user_id}, {self.client.rooms}, {self.client.invited_rooms}, {self.client.encrypted_rooms}")
#logger.info(f"{self.client.user_id}, {self.client.rooms}, {self.client.invited_rooms}, {self.client.encrypted_rooms}")
# if os.path.exists(self.store_path + "megolm_keys"):
# os.remove(self.store_path + "megolm_keys", "pass")
# await self.client.export_keys(self.store_path + "megolm_keys", "pass")
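Sketch of the two-step wiring this diff enables; callbacks can now be registered before login (the bot name is illustrative):

    callbacks = Callbacks()                              # created in ChatClient.__init__
    callbacks.add_message_callback(bot.message_cb, bot.redaction_cb)
    callbacks.setup_callbacks(client)                    # later, once the AsyncClient exists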

requirements.txt (5 changes)

@@ -1,4 +1,5 @@
asyncio
requests
matrix-nio[e2e]
transformers
huggingface_hub
@@ -10,3 +11,7 @@ langchain
chromadb
sentence-transformers
humanize
psutil
#git+https://github.com/suno-ai/bark.git
#SpeechRecognition
#TTS #(Coqui-TTS or Uberduck ??)
