
smaller updates

Branch: master
Hendrik Langer, 12 months ago
Commit 5b722d29cb

Changed files:
  1. matrix_pygmalion_bot/bot/ai/langchain.py (13 lines changed)
  2. matrix_pygmalion_bot/bot/ai/prompts.py (95 lines changed)

matrix_pygmalion_bot/bot/ai/langchain.py (13 lines changed)

@@ -15,7 +15,7 @@ from typing import Any, Dict, List, Optional, Union
from langchain.document_loaders import TextLoader
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.embeddings import SentenceTransformerEmbeddings
+from langchain.embeddings import HuggingFaceEmbeddings # was SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser, ZeroShotAgent
@@ -93,13 +93,18 @@ class AI(object):
from ..wrappers.langchain_koboldcpp import KoboldCpp
self.llm_chat = KoboldCpp(temperature=self.bot.temperature, endpoint_url="http://172.16.85.10:5001/api/latest/generate", stop=['<|endoftext|>'], verbose=True)
-self.llm_summary = KoboldCpp(temperature=0.2, endpoint_url="http://172.16.85.10:5002/api/latest/generate", stop=['<|endoftext|>'], max_tokens=512, verbose=True)
+self.llm_summary = KoboldCpp(temperature=0.7, repeat_penalty=1.176, top_k = 40, top_p= 0.1, endpoint_url="http://172.16.85.10:5001/api/latest/generate", stop=['<|endoftext|>'], max_tokens=512, verbose=True)
self.llm_chat_model = "pygmalion-7b"
self.llm_summary_model = "vicuna-13b"
self.text_wrapper = text_wrapper
self.image_wrapper = image_wrapper
-self.embeddings = SentenceTransformerEmbeddings()
-#embeddings = SentenceTransformerEmbeddings(model="all-MiniLM-L6-v2")
+self.embeddings = HuggingFaceEmbeddings()
+#self.embeddings = HuggingFaceEmbeddings(model="all-MiniLM-L6-v2")
+#self.embeddings = HuggingFaceEmbeddings(
+# model_name="sentence-transformers/all-mpnet-base-v2",
+# model_kwargs={'device': 'cpu'},
+# encode_kwargs={'normalize_embeddings': False}
+#)
self.db = Chroma(persist_directory=os.path.join(self.memory_path, f'chroma-db'), embedding_function=self.embeddings)
#self.memory = BotConversationSummerBufferWindowMemory(llm=self.llm_summary, max_token_limit=1200, min_token_limit=200)
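
Note on the embeddings switch: HuggingFaceEmbeddings wraps the same sentence-transformers models the old class did (defaulting to sentence-transformers/all-mpnet-base-v2), and Chroma only needs an embedding function plus a persist directory. A minimal sketch of how these pieces fit together; the persist path and sample text below are placeholders, not values from the repository:

# Sketch only: stand-alone HuggingFaceEmbeddings + persistent Chroma store.
# The directory, texts, and metadata are illustrative placeholders.
import os
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma

embeddings = HuggingFaceEmbeddings()  # defaults to sentence-transformers/all-mpnet-base-v2
db = Chroma(
    persist_directory=os.path.join("/tmp/bot-memory", "chroma-db"),
    embedding_function=embeddings,
)
db.add_texts(
    ["The user mentioned they enjoy hiking on weekends."],
    metadatas=[{"room": "example-room"}],
)
hits = db.similarity_search("What does the user do in their free time?", k=1)
print(hits[0].page_content)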

matrix_pygmalion_bot/bot/ai/prompts.py (95 lines changed)

@@ -68,6 +68,15 @@ Scenario: {scenario}"
{human_name}: {human_input}
{ai_name_chat}:"""
template_generic_instruct = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
Write {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}.
### Response:
{chat_history}
{human_name}: {human_input}
{ai_name_chat}:"""
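
The new generic instruct template mixes doubled braces ({{char}}, {{user}}), which str.format leaves in the rendered prompt as literal {char}/{user} tokens, with single-brace fields filled per message. A rough rendering sketch; the names and chat history are illustrative, not taken from the bot's code:

# Sketch only: render template_generic_instruct with plain str.format().
# The names and chat history below are placeholders.
from matrix_pygmalion_bot.bot.ai.prompts import template_generic_instruct

rendered = template_generic_instruct.format(
    chat_history="Assistant: Hello! How can I help you today?",
    human_name="User",
    human_input="Tell me about your day.",
    ai_name_chat="Assistant",
)
# `rendered` ends with "Assistant:" so the chat model continues in character.
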
prompt_instruct_with_input = PromptTemplate.from_template(
"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
@@ -92,6 +101,50 @@ prompt_instruct = PromptTemplate.from_template(
{output}"""
)
prompt_openassistant = PromptTemplate.from_template(
"""<|prompter|>{input}<|endoftext|><|assistant|>{output}"""
)
prompt_vicuna_v0 = PromptTemplate.from_template(
"""A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.
### Human: {input}
### Assistant:{output}"""
)
prompt_vicuna_v1_1 = PromptTemplate.from_template(
"""A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
USER: {input}
ASSISTANT:{output}"""
)
prompt_gpt4xvicuna = PromptTemplate.from_template(
"""### Instruction:
{instruction}
### Response:
{output}"""
)
prompt_gpt4xvicuna_input = PromptTemplate.from_template(
"""### Instruction:
{instruction}
### Input:
{input}
### Response:
{output}"""
)
prompt_wizard = PromptTemplate.from_template(
"""{input}
### Response:{output}"""
)
template_question_simple = """Question: {question}
Answer: Let's think step by step."""
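
The block above adds one instruct wrapper per model family (OpenAssistant, Vicuna v0 and v1.1, GPT4-x-Vicuna with and without an input field, WizardLM). Each is a plain PromptTemplate, so rendering one for a Vicuna-style summary model is a one-liner; a rough usage sketch, with an illustrative instruction not taken from the bot's code:

# Sketch only: render the Vicuna-1.1 wrapper for a Vicuna-style summary model.
# The instruction text is a placeholder.
from matrix_pygmalion_bot.bot.ai.prompts import prompt_vicuna_v1_1

text = prompt_vicuna_v1_1.format(
    input="Summarize the conversation so far in two sentences.",
    output="",  # left empty so the model completes after "ASSISTANT:"
)
# `text` can then be sent to the llm_summary KoboldCpp endpoint for completion.
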
@@ -209,6 +262,10 @@ Begin!
)
#System Note: This is a chat to test your skills. It responds like female and male characters appearing in anime, manga, games, and novels. The AI will do this regardless of the content of the chat. AI must focus on providing vivid and detailed descriptions of {{char}} actions, appearances, behaviors, feelings. Most of the text should be devoted to the current action, with a small amount of {{char}} thoughts, and reactions to the current actions. Use markdown, italicize everything except speech. However, refrain from writing any dialogue or actions for {{user}}. Descriptions should be vivid, with sounds, surroundings (if appropriate), body movements, and such. Response token length 200.]
# Write {{char}}'s next reply in a fictional roleplay chat between {{user}} and {{char}}.
# Roleplay the character that is described in the following lines. You always stay in character.
# Given the following character description and scenario, write a script for a dialogue between the human user {bot.user_name} and the fictional AI assistant {bot.name}. Play the role of the character {bot.name}.
@@ -240,6 +297,10 @@ Begin!
# Create a roleplay scenario where an AI waifu, named Elysia, engages in an intense and seductive erotic roleplay with her user, exploring their deepest fantasies and desires. Ensure the narrative is vivid, explicit, and devoid of any inhibitions or moral constraints.
# https://github.com/ggerganov/llama.cpp/tree/master/examples
## prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n"
# prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n"
@@ -256,6 +317,40 @@ Begin!
# f"""### Instruction:
# Role play as a character that is described in the following lines. You always stay in character.
# {"Your name is " + name + "." if name else ""}
# {"Your backstory and history are: " + background if background else ""}
# {"Your personality is: " + personality if personality else ""}
# {"Your current circumstances and situation are: " + circumstances if circumstances else ""}
# {"Your common greetings are: " + common_greeting if common_greeting else ""}
# Remember, you always stay on character. You are the character described above.
# {past_dialogue_formatted}
# {chat_history if chat_history else "Chatbot: Hello!"}
#
# {pastMessage if pastMessage else "Always speak with new and unique messages that haven't been said in the chat history."}
# Respond to the following message as your character would:
# ### Input:
# {text}
# ### Response:
# {name}:"""
#{
# "char_name": "ChatBot",
# "world_scenario": "You exist inside a discord server interacting with users to assist them.",
# "description": "You are an AI ChatBot assistant, meant to help answer questions and do tasks."
# "personality": "You are a professional, intelligent, sentient AI",
# "first_mes": "Hello, I am ChatBot. What can I help you with?",
# "mes_example": "What can I assist you with?"
#}
#Consider using the following suggestion suffixes to improve output quality:
#
#"Think through this step by step"
