
langchain test

master
Hendrik Langer, 2 years ago
commit 65bdce3ad2
Changed files:
  1. matrix_pygmalion_bot/bot/ai/langchain.py (30 lines)
  2. matrix_pygmalion_bot/bot/ai/prompts.py (1 line)
  3. matrix_pygmalion_bot/bot/core.py (7 lines)
  4. matrix_pygmalion_bot/bot/wrappers/langchain_koboldcpp.py (6 lines)

matrix_pygmalion_bot/bot/ai/langchain.py (30 lines)

@@ -52,6 +52,7 @@ class AI(object):
         self.name = bot.name
         self.bot = bot
         self.memory_path = memory_path
+        self.rooms = {}
         from ..wrappers.langchain_koboldcpp import KoboldCpp
         self.llm_chat = KoboldCpp(temperature=self.bot.temperature, endpoint_url="http://172.16.85.10:5001/api/latest/generate", stop=['<|endoftext|>'])
@@ -60,8 +61,20 @@ class AI(object):
         self.image_wrapper = image_wrapper
         #self.memory = BotConversationSummerBufferWindowMemory(llm=self.llm_summary, max_token_limit=1200, min_token_limit=200)
         self.memory = ConversationBufferMemory(memory_key="chat_history", human_prefix="You", ai_prefix=self.bot.name)
+    def get_memory(self, message):
+        if not message.room_id in self.rooms:
+            self.rooms[message.room_id] = {}
+            memory = ConversationBufferMemory(memory_key="chat_history", human_prefix=message.user_name, ai_prefix=self.bot.name)
+            self.rooms[message.room_id]["memory"] = memory
+            memory.chat_memory.add_ai_message(self.bot.greeting)
+            #memory.save_context({"input": None, "output": self.bot.greeting})
+            memory.load_memory_variables({})
+        else:
+            memory = self.rooms[message.room_id]["memory"]
+        print(f"memory: {memory.load_memory_variables({})}")
+        print(f"memory has an estimated {estimate_num_tokens(memory.buffer)} number of tokens")
+        return memory
     async def generate(self, message, reply_fn, typing_fn):
@@ -109,13 +122,14 @@ class AI(object):
     async def generate_roleplay(self, message, reply_fn, typing_fn):
-        self.memory.human_prefix = message.user_name
+        memory = self.get_memory(message)
         prompt = prompt_vicuna.partial(
             ai_name=self.bot.name,
             persona=self.bot.persona,
             scenario=self.bot.scenario,
             human_name=message.user_name,
+            #example_dialogue=replace_all(self.bot.example_dialogue, {"{{user}}": message.user_name, "{{char}}": self.bot.name})
             ai_name_chat=self.bot.name,
         )
@@ -123,7 +137,7 @@ class AI(object):
             llm=self.llm_chat,
             prompt=prompt,
             verbose=True,
-            memory=self.memory,
+            memory=memory,
             #stop=['<|endoftext|>', '\nYou:', f"\n{message.user_name}:"],
         )
@@ -131,9 +145,17 @@ class AI(object):
         #roleplay_chain = RoleplayChain(llm_chain=chain, character_name=self.bot.name, persona=self.bot.persona, scenario=self.bot.scenario, ai_name_chat=self.bot.name, human_name_chat=message.user_name)
-        output = chain.run({"input":message.message, "stop": ['<|endoftext|>', f"\n{message.user_name}:"]})
+        stop = ['<|endoftext|>', f"\n{message.user_name}:"]
+        print(f"Message is: \"{message.message}\"")
+        output = chain.run({"input":message.message, "stop": stop})
         return output.strip()
+def estimate_num_tokens(input_text: str):
+    return len(input_text)//4+1
+def replace_all(text, dic):
+    for i, j in dic.items():
+        text = text.replace(i, j)
+    return text
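
Note: the hunks above move conversation state from the single shared ConversationBufferMemory to one memory object per Matrix room, seeded with the bot's greeting, and add a rough 4-characters-per-token estimate. A minimal standalone sketch of the same pattern, assuming the langchain 0.0.x ConversationBufferMemory API (the function arguments here are illustrative, not the bot's real signatures):

# Sketch only: per-room ConversationBufferMemory, seeded with the greeting.
from langchain.memory import ConversationBufferMemory

rooms = {}  # room_id -> {"memory": ConversationBufferMemory}

def get_memory(room_id, user_name, bot_name, greeting):
    if room_id not in rooms:
        memory = ConversationBufferMemory(
            memory_key="chat_history",   # fills the {chat_history} slot in the prompt
            human_prefix=user_name,
            ai_prefix=bot_name,
        )
        memory.chat_memory.add_ai_message(greeting)  # greeting becomes the first AI turn
        rooms[room_id] = {"memory": memory}
    return rooms[room_id]["memory"]

def estimate_num_tokens(input_text: str) -> int:
    # same heuristic as the commit: roughly 4 characters per token
    return len(input_text) // 4 + 1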

matrix_pygmalion_bot/bot/ai/prompts.py (1 line)

@@ -49,6 +49,7 @@ Roleplay the character {ai_name}, that is described in the following lines. You
 Scenario: {scenario}
 ### Response:
 Current conversation:
+{chat_history}
 {human_name}: {input}
 {ai_name_chat}:"""
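
Note: the single added line threads the buffered conversation ({chat_history}, the memory_key used above) into the Vicuna-style template between the scenario block and the new user turn. A hedged sketch of how such a template can be built and partially bound with langchain's PromptTemplate (the template text is abbreviated and the character values are placeholders):

# Sketch only: prompt template with a {chat_history} slot, partially bound per character.
from langchain.prompts import PromptTemplate

template = """Roleplay the character {ai_name}.
Scenario: {scenario}
### Response:
Current conversation:
{chat_history}
{human_name}: {input}
{ai_name_chat}:"""

prompt_vicuna = PromptTemplate(
    template=template,
    input_variables=["ai_name", "scenario", "chat_history",
                     "human_name", "input", "ai_name_chat"],
)

# static character fields bound once; chat_history and input are supplied per message
prompt = prompt_vicuna.partial(ai_name="Bot", scenario="...",
                               human_name="User", ai_name_chat="Bot")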

matrix_pygmalion_bot/bot/core.py (7 lines)

@@ -36,6 +36,8 @@ class ChatBot(object):
         self.nsfw = nsfw
         self.temperature = temperature
         #self.example_dialogue = self.example_dialogue.replace('{{char}}', self.name)
     def persist(self, data_dir):
         self.chatlog_path = f"{data_dir}/chatlogs"
         self.images_path = f"{data_dir}/images"
@@ -143,7 +145,7 @@ class ChatBot(object):
         if self.name.casefold() == message.user_name.casefold():
             """Bot and user have the same name"""
-            message.user_name += "2" # or simply "You"
+            message.user_name += " 2" # or simply "You"
             message.user_name = message.user_name.title()
@@ -228,10 +230,11 @@ class ChatBot(object):
         while True:
             cb, args, kwargs = await q.get()
             start = time.perf_counter()
-            logger.info("queued task started")
             if asyncio.iscoroutinefunction(cb):
+                logger.info("queued task started (coroutine)")
                 await cb(*args, **kwargs)
             else:
+                logger.info("queued task started (function)")
                 cb(*args, **kwargs)
             q.task_done()
             elapsed = time.perf_counter() - start
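
Note: the last hunk only moves the "queued task started" log line so it records whether the queued callback is a coroutine function (awaited) or a plain callable. The surrounding worker pattern, as a self-contained sketch (the finishing log line is illustrative):

# Sketch only: asyncio queue worker that awaits coroutine functions and calls plain functions.
import asyncio, logging, time

logger = logging.getLogger(__name__)

async def worker(q: asyncio.Queue):
    while True:
        cb, args, kwargs = await q.get()
        start = time.perf_counter()
        if asyncio.iscoroutinefunction(cb):
            logger.info("queued task started (coroutine)")
            await cb(*args, **kwargs)
        else:
            logger.info("queued task started (function)")
            cb(*args, **kwargs)
        q.task_done()
        logger.info("queued task finished in %.2fs", time.perf_counter() - start)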

matrix_pygmalion_bot/bot/wrappers/langchain_koboldcpp.py (6 lines)

@@ -85,9 +85,9 @@ class KoboldCpp(LLM):
                 time.sleep(5)
             else:
                 raise ValueError(f"http error. unknown response code")
-        for s in self.stop:
-            response = response.rstrip().removesuffix(s)
-        return response
+        for s in input_data["stop_sequence"]:
+            response = response.removesuffix(s).rstrip()
+        return response.lstrip()
 
     @property
     def _identifying_params(self) -> Mapping[str, Any]:
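
Note: the wrapper now strips the per-request stop_sequence list (rather than the wrapper-level self.stop) from the completion, removing each stop string as a suffix before trimming trailing whitespace, and finally left-strips the result. Roughly the same logic as a standalone helper (names are illustrative):

# Sketch only: remove per-request stop sequences from the raw completion text.
def clean_completion(response: str, stop_sequences: list) -> str:
    for s in stop_sequences:                          # e.g. ['<|endoftext|>', '\nUser:']
        response = response.removesuffix(s).rstrip()  # removesuffix needs Python 3.9+
    return response.lstrip()

print(clean_completion("  Hi there!\nUser:", ["<|endoftext|>", "\nUser:"]))
# -> "Hi there!"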
