Hendrik Langer
2 years ago
commit
46bdcd5087
24 changed files with 2456 additions and 0 deletions
@ -0,0 +1,166 @@ |
|||||
|
# Custom |
||||
|
bot.conf |
||||
|
.store |
||||
|
images/ |
||||
|
runpod-test* |
||||
|
|
||||
|
# Byte-compiled / optimized / DLL files |
||||
|
__pycache__/ |
||||
|
*.py[cod] |
||||
|
*$py.class |
||||
|
|
||||
|
# C extensions |
||||
|
*.so |
||||
|
|
||||
|
# Distribution / packaging |
||||
|
.Python |
||||
|
build/ |
||||
|
develop-eggs/ |
||||
|
dist/ |
||||
|
downloads/ |
||||
|
eggs/ |
||||
|
.eggs/ |
||||
|
lib/ |
||||
|
lib64/ |
||||
|
parts/ |
||||
|
sdist/ |
||||
|
var/ |
||||
|
wheels/ |
||||
|
share/python-wheels/ |
||||
|
*.egg-info/ |
||||
|
.installed.cfg |
||||
|
*.egg |
||||
|
MANIFEST |
||||
|
|
||||
|
# PyInstaller |
||||
|
# Usually these files are written by a python script from a template |
||||
|
# before PyInstaller builds the exe, so as to inject date/other infos into it. |
||||
|
*.manifest |
||||
|
*.spec |
||||
|
|
||||
|
# Installer logs |
||||
|
pip-log.txt |
||||
|
pip-delete-this-directory.txt |
||||
|
|
||||
|
# Unit test / coverage reports |
||||
|
htmlcov/ |
||||
|
.tox/ |
||||
|
.nox/ |
||||
|
.coverage |
||||
|
.coverage.* |
||||
|
.cache |
||||
|
nosetests.xml |
||||
|
coverage.xml |
||||
|
*.cover |
||||
|
*.py,cover |
||||
|
.hypothesis/ |
||||
|
.pytest_cache/ |
||||
|
cover/ |
||||
|
|
||||
|
# Translations |
||||
|
*.mo |
||||
|
*.pot |
||||
|
|
||||
|
# Django stuff: |
||||
|
*.log |
||||
|
local_settings.py |
||||
|
db.sqlite3 |
||||
|
db.sqlite3-journal |
||||
|
|
||||
|
# Flask stuff: |
||||
|
instance/ |
||||
|
.webassets-cache |
||||
|
|
||||
|
# Scrapy stuff: |
||||
|
.scrapy |
||||
|
|
||||
|
# Sphinx documentation |
||||
|
docs/_build/ |
||||
|
|
||||
|
# PyBuilder |
||||
|
.pybuilder/ |
||||
|
target/ |
||||
|
|
||||
|
# Jupyter Notebook |
||||
|
.ipynb_checkpoints |
||||
|
|
||||
|
# IPython |
||||
|
profile_default/ |
||||
|
ipython_config.py |
||||
|
|
||||
|
# pyenv |
||||
|
# For a library or package, you might want to ignore these files since the code is |
||||
|
# intended to run in multiple environments; otherwise, check them in: |
||||
|
# .python-version |
||||
|
|
||||
|
# pipenv |
||||
|
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. |
||||
|
# However, in case of collaboration, if having platform-specific dependencies or dependencies |
||||
|
# having no cross-platform support, pipenv may install dependencies that don't work, or not |
||||
|
# install all needed dependencies. |
||||
|
#Pipfile.lock |
||||
|
|
||||
|
# poetry |
||||
|
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. |
||||
|
# This is especially recommended for binary packages to ensure reproducibility, and is more |
||||
|
# commonly ignored for libraries. |
||||
|
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control |
||||
|
#poetry.lock |
||||
|
|
||||
|
# pdm |
||||
|
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. |
||||
|
#pdm.lock |
||||
|
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it |
||||
|
# in version control. |
||||
|
# https://pdm.fming.dev/#use-with-ide |
||||
|
.pdm.toml |
||||
|
|
||||
|
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm |
||||
|
__pypackages__/ |
||||
|
|
||||
|
# Celery stuff |
||||
|
celerybeat-schedule |
||||
|
celerybeat.pid |
||||
|
|
||||
|
# SageMath parsed files |
||||
|
*.sage.py |
||||
|
|
||||
|
# Environments |
||||
|
.env |
||||
|
.venv |
||||
|
env/ |
||||
|
venv/ |
||||
|
ENV/ |
||||
|
env.bak/ |
||||
|
venv.bak/ |
||||
|
|
||||
|
# Spyder project settings |
||||
|
.spyderproject |
||||
|
.spyproject |
||||
|
|
||||
|
# Rope project settings |
||||
|
.ropeproject |
||||
|
|
||||
|
# mkdocs documentation |
||||
|
/site |
||||
|
|
||||
|
# mypy |
||||
|
.mypy_cache/ |
||||
|
.dmypy.json |
||||
|
dmypy.json |
||||
|
|
||||
|
# Pyre type checker |
||||
|
.pyre/ |
||||
|
|
||||
|
# pytype static type analyzer |
||||
|
.pytype/ |
||||
|
|
||||
|
# Cython debug symbols |
||||
|
cython_debug/ |
||||
|
|
||||
|
# PyCharm |
||||
|
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can |
||||
|
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore |
||||
|
# and can be added to the global gitignore or merged into this file. For a more nuclear |
||||
|
# option (not recommended) you can uncomment the following to ignore the entire idea folder. |
||||
|
#.idea/ |
@ -0,0 +1,18 @@ |
|||||
|
[[source]] |
||||
|
url = "https://pypi.org/simple" |
||||
|
verify_ssl = true |
||||
|
name = "pypi" |
||||
|
|
||||
|
[packages] |
||||
|
matrix-nio = "*" |
||||
|
transformers = "*" |
||||
|
huggingface-hub = "*" |
||||
|
asyncio = "*" |
||||
|
python-magic = "*" |
||||
|
pillow = "*" |
||||
|
argostranslate = "*" |
||||
|
|
||||
|
[dev-packages] |
||||
|
|
||||
|
[requires] |
||||
|
python_version = "3.11" |
File diff suppressed because it is too large
@ -0,0 +1,17 @@ |
|||||
|
# matrix-pygmalion-bot |
||||
|
|
||||
|
## Setup |
||||
|
```sh |
||||
|
# install dependencies |
||||
|
pipenv install --dev |
||||
|
|
||||
|
pipenv shell |
||||
|
python3 run.py |
||||
|
``` |
||||
|
|
||||
|
## Other |
||||
|
```sh |
||||
|
python3 -m venv env |
||||
|
source env/bin/activate |
||||
|
pip install -r requirements.txt |
||||
|
``` |
@ -0,0 +1,118 @@ |
|||||
|
# https://github.com/nsarrazin/serge/blob/main/api/utils/generate.py |
||||
|
|
||||
|
import subprocess, os |
||||
|
import asyncio |
||||
|
import logging |
||||
|
|
||||
|
logger = logging.getLogger(__name__) |
||||
|
|
||||
|
|
||||
|
async def generate(
    prompt: str,
):
    """Stream text generated for *prompt* by a local alpaca.cpp chat binary.

    Spawns the chat executable with fixed sampling parameters and yields the
    decoded stdout in small chunks as they arrive.

    Yields:
        str: successive chunks of generated text.

    Raises:
        ValueError: if the subprocess exits with a non-zero return code
            (the stderr output is included in the message).
    """
    CHUNK_SIZE = 4

    # Hard-coded binary/model paths and sampling parameters for alpaca.cpp.
    args = (
        "/home/hendrik/Projects/AI/alpaca.cpp/chat",
        "--model",
        "/home/hendrik/Projects/AI/alpaca.cpp/" + "ggml-alpaca-7b-q4.bin",
        "--prompt",
        prompt,
        "--n_predict",
        str(256),
        "--temp",
        str(0.1),
        "--top_k",
        str(50),
        "--top_p",
        str(0.95),
        "--repeat_last_n",
        str(64),
        "--repeat_penalty",
        str(1.3),
        "--ctx_size",
        str(512),
        "--threads",
        str(4)
    )

    # Fix: the original passed `args` as an extra positional argument after an
    # f-string with no placeholders, so logging silently dropped it. Use lazy
    # %-style formatting, which logging evaluates only when the record is emitted.
    logger.debug("Calling LLaMa with arguments %s", args)
    print(prompt)
    procLlama = await asyncio.create_subprocess_exec(
        *args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )

    while True:
        chunk = await procLlama.stdout.read(CHUNK_SIZE)

        if not chunk:
            # EOF on stdout: reap the process and report failure if any.
            return_code = await procLlama.wait()

            if return_code != 0:
                error_output = await procLlama.stderr.read()
                logger.error(error_output.decode("utf-8"))
                raise ValueError(f"RETURN CODE {return_code}\n\n" + error_output.decode("utf-8"))
            else:
                return

        try:
            chunk = chunk.decode("utf-8")
        except UnicodeDecodeError:
            # A multi-byte UTF-8 sequence was split across the chunk boundary;
            # the original bails out here rather than buffering the remainder.
            return

        yield chunk
||||
|
|
||||
|
|
||||
|
async def get_full_prompt(simple_prompt: str, chat_history=None):
    """Build an Alpaca-style instruction prompt.

    Includes up to the last five history messages, tagging the peer's lines
    as "### Instruction" and our own as "### Response", then appends the new
    user message and an open response header for the model to complete.

    Args:
        simple_prompt: the new user message.
        chat_history: optional list of dicts with "is_own_message" and
            "message" keys; only the most recent five entries are used.

    Returns:
        str: the assembled prompt.
    """
    HISTORY_LEN = 5

    parts = [
        "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
    ]
    for entry in (chat_history or [])[-HISTORY_LEN:]:
        tag = "### Response:\n" if entry["is_own_message"] else "### Instruction:\n"
        parts.append(tag + entry["message"] + "\n")

    parts.append("### Instruction:\n" + simple_prompt + "\n")
    parts.append("### Response:\n")

    return "".join(parts)
||||
|
|
||||
|
|
||||
|
async def get_full_prompt_with_input(simple_prompt: str, additional_input: str, chat_history=None):
    """Build an Alpaca-style prompt that also carries an "### Input" section.

    Fix: the original initialised a variable named `prompt_with_input` but then
    appended to an undefined name `prompt`, so every call raised NameError.
    A single `prompt` variable is now used throughout.

    Args:
        simple_prompt: the new user message (instruction).
        additional_input: extra context placed in the "### Input" section.
        chat_history: optional list of dicts with "is_own_message" and
            "message" keys; only the most recent five entries are used.

    Returns:
        str: the assembled prompt ending in an open "### Response:" header.
    """
    prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request." + "\n\n"

    HISTORY_LEN = 5
    if chat_history:
        for message in chat_history[-HISTORY_LEN:]:
            if not message["is_own_message"]:
                prompt += "### Instruction:\n" + message["message"] + "\n"
            else:
                prompt += "### Response:\n" + message["message"] + "\n"

    prompt += "### Instruction:\n" + simple_prompt + "\n"
    prompt += "### Input:\n" + additional_input + "\n"
    prompt += "### Response:\n"

    return prompt
||||
|
|
||||
|
|
||||
|
async def get_full_prompt_chat_style(simple_prompt: str, chat_history=None):
    """Build a dialog-transcript prompt for an assistant persona named Julia.

    Prepends up to the last five history messages as "User:" / "Julia:"
    lines, then the new user message, ending with an open "Julia: " turn.

    Args:
        simple_prompt: the new user message.
        chat_history: optional list of dicts with "is_own_message" and
            "message" keys; only the most recent five entries are used.

    Returns:
        str: the assembled transcript prompt.
    """
    HISTORY_LEN = 5

    parts = [
        "Transcript of a dialog, where the User interacts with an Assistant named Julia. Julia is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.\n\n"
    ]
    for entry in (chat_history or [])[-HISTORY_LEN:]:
        speaker = "Julia: " if entry["is_own_message"] else "User: "
        parts.append(speaker + entry["message"] + "\n")

    parts.append("User: " + simple_prompt + "\n")
    parts.append("Julia: ")

    return "".join(parts)
@ -0,0 +1,228 @@ |
|||||
|
import asyncio |
||||
|
import os, tempfile |
||||
|
import logging |
||||
|
|
||||
|
import requests |
||||
|
|
||||
|
from transformers import AutoTokenizer, AutoConfig |
||||
|
from huggingface_hub import hf_hub_download |
||||
|
|
||||
|
logger = logging.getLogger(__name__) |
||||
|
|
||||
|
|
||||
|
async def generate_sync(
    prompt: str,
    api_key: str,
):
    """Request a completion for *prompt* from the runpod.io Pygmalion-6B endpoint.

    Submits a synchronous run request; if the job is queued instead, polls the
    status endpoint every 5 seconds for up to 3 minutes.

    Args:
        prompt: the full chat prompt to complete.
        api_key: runpod.io API key (sent as a Bearer token).

    Returns:
        str: the extracted reply, or "<ERROR>" on failure or timeout.
    """
    # Set the API endpoint URL
    endpoint = "https://api.runpod.ai/v2/pygmalion-6b/runsync"

    # Set the headers for the request
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }

    max_new_tokens = 200
    prompt_num_tokens = await num_tokens(prompt)

    # Define your inputs
    # NOTE(review): max() grows max_length to at least 2048 -- it looks like
    # min() was intended, to cap at the model context; confirm before changing.
    input_data = {
        "input": {
            "prompt": prompt,
            "max_length": max(prompt_num_tokens + max_new_tokens, 2048),
            "temperature": 0.75
        }
    }

    def _extract_reply(text: str) -> str:
        # Strip the echoed prompt, then cut at the next simulated user turn;
        # otherwise just drop the end-of-text marker. (This logic was
        # duplicated verbatim in two branches of the original.)
        answer = text.removeprefix(prompt)
        idx = answer.find("\nYou:")
        if idx != -1:
            return answer[:idx].strip()
        return answer.removesuffix('<|endoftext|>').strip()

    logger.info("sending request to runpod.io")

    # Make the request
    r = requests.post(endpoint, json=input_data, headers=headers, timeout=180)

    r_json = r.json()
    logger.info(r_json)
    status = r_json["status"]

    if status == 'COMPLETED':
        return _extract_reply(r_json["output"])
    elif status == 'IN_PROGRESS' or status == 'IN_QUEUE':
        job_id = r_json["id"]
        TIMEOUT = 180
        DELAY = 5
        status_endpoint = "https://api.runpod.ai/v2/pygmalion-6b/status/" + job_id
        for i in range(TIMEOUT // DELAY):
            r = requests.get(status_endpoint, headers=headers)
            r_json = r.json()
            logger.info(r_json)
            status = r_json["status"]
            if status == 'IN_PROGRESS' or status == 'IN_QUEUE':
                await asyncio.sleep(DELAY)
            elif status == 'COMPLETED':
                return _extract_reply(r_json["output"])
            else:
                return "<ERROR>"
        # Fix: the original fell off the polling loop and implicitly returned
        # None on timeout; report the error sentinel like the other failures.
        return "<ERROR>"
    else:
        return "<ERROR>"
||||
|
|
||||
|
async def get_full_prompt(simple_prompt: str, bot):
    """Assemble a Pygmalion chat prompt for *bot*, packing as much recent
    chat history as fits in the model context.

    First builds the history-free prompt to measure its fixed token cost,
    then walks the chat history newest-first, adding messages until the
    token budget (2048 minus the generation reserve) is reached, and finally
    rebuilds the prompt with the selected history in chronological order.

    Args:
        simple_prompt: the new user message.
        bot: bot object providing name, persona, scenario and chat_history
            (a dict of ChatItem-like objects, insertion-ordered).

    Returns:
        str: the prompt ending with an open "<bot name>:" turn.
    """
    # Prompt without history -- used to measure the fixed token overhead.
    prompt = bot.name + "'s Persona: " + bot.persona + "\n"
    prompt += "Scenario: " + bot.scenario + "\n"
    prompt += "<START>" + "\n"
    #prompt += bot.name + ": " + bot.greeting + "\n"
    prompt += "You: " + simple_prompt + "\n"
    prompt += bot.name + ":"

    MAX_TOKENS = 2048
    max_new_tokens = 200
    total_num_tokens = await num_tokens(prompt)
    visible_history = []
    current_message = True
    for key, chat_item in reversed(bot.chat_history.items()):
        if current_message:
            # Skip the newest entry: it is the message being answered and is
            # already part of the base prompt above.
            current_message = False
            continue
        if chat_item.message.startswith('!begin'):
            break
        if chat_item.message.startswith('!'):
            continue
        #if chat_item.message == bot.greeting:
        #    continue
        print("History: " + str(chat_item))
        # Fix: compare to None with `is`, not `==` (PEP 8; `==` can be
        # hijacked by a custom __eq__).
        if chat_item.num_tokens is None:
            chat_item.num_tokens = await num_tokens(chat_item.getLine())
        # TODO: is it MAX_TOKENS or MAX_TOKENS - max_new_tokens??
        if total_num_tokens < (MAX_TOKENS - max_new_tokens):
            visible_history.append(chat_item)
            total_num_tokens += chat_item.num_tokens
        print(total_num_tokens)
    print("Finally: " + str(total_num_tokens))
    # Collected newest-first; reverse back to chronological order.
    visible_history = reversed(visible_history)

    prompt = bot.name + "'s Persona: " + bot.persona + "\n"
    prompt += "Scenario: " + bot.scenario + "\n"
    prompt += "<START>" + "\n"
    #prompt += bot.name + ": " + bot.greeting + "\n"
    for chat_item in visible_history:
        if chat_item.is_own_message:
            prompt += bot.name + ": " + chat_item.message + "\n"
        else:
            prompt += "You" + ": " + chat_item.message + "\n"
    prompt += "You: " + simple_prompt + "\n"
    prompt += bot.name + ":"

    return prompt
||||
|
|
||||
|
|
||||
|
async def num_tokens(input_text: str):
    """Count the tokens in *input_text* using the Pygmalion-6B tokenizer.

    Fix: the original called AutoTokenizer.from_pretrained on every
    invocation, re-resolving (and potentially re-downloading) the tokenizer
    each time; the instance is now cached on the function object. The unused
    `max_input_size` local was removed.

    Args:
        input_text: text to tokenize.

    Returns:
        int: number of tokens, excluding special tokens.
    """
    # os.makedirs("./models/pygmalion-6b", exist_ok=True)
    # hf_hub_download(repo_id="PygmalionAI/pygmalion-6b", filename="config.json", cache_dir="./models/pygmalion-6b")
    # config = AutoConfig.from_pretrained("./models/pygmalion-6b/config.json")
    tokenizer = getattr(num_tokens, "_tokenizer", None)
    if tokenizer is None:
        tokenizer = AutoTokenizer.from_pretrained("PygmalionAI/pygmalion-6b")
        num_tokens._tokenizer = tokenizer
    encoding = tokenizer.encode(input_text, add_special_tokens=False)
    return len(encoding)
||||
|
|
||||
|
async def estimate_num_tokens(input_text: str):
    """Cheap token-count estimate: roughly one token per four characters,
    rounded down, plus one."""
    quarters = len(input_text) // 4
    return quarters + 1
||||
|
|
||||
|
|
||||
|
async def generate_image(input_prompt: str, negative_prompt: str, api_key: str):
    """Generate images via the runpod.io sd-anything-v4 endpoint and download them.

    Submits an async job, polls its status every 5 seconds for up to 3
    minutes, then downloads each result into ./images/.

    Args:
        input_prompt: positive Stable Diffusion prompt.
        negative_prompt: negative prompt.
        api_key: runpod.io API key (sent as a Bearer token).

    Returns:
        list[str]: paths of the downloaded image files.

    Raises:
        ValueError: on HTTP failure, an unexpected job status, or timeout.
    """
    # Set the API endpoint URL
    endpoint = "https://api.runpod.ai/v1/sd-anything-v4/run"

    # Set the headers for the request
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }

    # Define your inputs
    input_data = {
        "input": {
            "prompt": input_prompt,
            "negative_prompt": negative_prompt,
            "width": 512,
            "height": 768,
            "nsfw": True
        }
    }

    logger.info("sending request to runpod.io")

    # Make the request
    r = requests.post(endpoint, json=input_data, headers=headers)
    r_json = r.json()
    logger.info(r_json)

    # Fix: the original only handled status_code == 200 and then fell through
    # to the download loop with `output` undefined on any HTTP error.
    if r.status_code != 200:
        raise ValueError(f"HTTP ERROR {r.status_code}")

    status = r_json["status"]
    if status != 'IN_QUEUE':
        raise ValueError(f"RETURN CODE {status}")
    job_id = r_json["id"]
    TIMEOUT = 180
    DELAY = 5
    output = None
    for i in range(TIMEOUT // DELAY):
        endpoint = "https://api.runpod.ai/v1/sd-anything-v4/status/" + job_id
        r = requests.get(endpoint, headers=headers)
        r_json = r.json()
        logger.info(r_json)
        status = r_json["status"]
        if status == 'IN_PROGRESS' or status == 'IN_QUEUE':
            await asyncio.sleep(DELAY)
        elif status == 'COMPLETED':
            output = r_json["output"]
            break
        else:
            raise ValueError(f"RETURN CODE {status}")
    # Fix: the original also reached the download loop with `output` undefined
    # when the polling loop timed out without completion.
    if output is None:
        raise ValueError("Timed out waiting for image job to complete")

    os.makedirs("./images", exist_ok=True)
    files = []
    for image in output:
        # NOTE(review): tempfile._get_candidate_names() is a private API --
        # consider tempfile.mkstemp or uuid4 for a stable alternative.
        temp_name = next(tempfile._get_candidate_names())
        filename = "./images/" + temp_name + ".jpg"
        await download_image(image["image"], filename)
        files.append(filename)

    return files
||||
|
|
||||
|
async def download_image(url, path):
    """Fetch *url* and stream the response body into the file at *path*.

    Silently does nothing when the server answers with a non-200 status.
    """
    response = requests.get(url, stream=True)
    if response.status_code != 200:
        return
    with open(path, 'wb') as outfile:
        for block in response:
            outfile.write(block)
||||
|
|
||||
|
|
||||
|
|
||||
|
|
||||
|
|
||||
|
|
||||
|
|
||||
|
|
||||
|
|
@ -0,0 +1,284 @@ |
|||||
|
import asyncio |
||||
|
import nio |
||||
|
from nio import (AsyncClient, AsyncClientConfig, MatrixRoom, RoomMessageText, InviteEvent, UploadResponse) |
||||
|
|
||||
|
import os, sys |
||||
|
import importlib |
||||
|
import configparser |
||||
|
import logging |
||||
|
|
||||
|
import aiofiles.os |
||||
|
import magic |
||||
|
from PIL import Image |
||||
|
|
||||
|
from .helpers import ChatItem |
||||
|
ai = importlib.import_module("matrix_pygmalion_bot.ai.runpod_pygmalion") |
||||
|
#from .llama_cpp import generate, get_full_prompt, get_full_prompt_chat_style |
||||
|
#from .runpod_pygmalion import generate_sync, get_full_prompt |
||||
|
import matrix_pygmalion_bot.translate as translate |
||||
|
|
||||
|
STORE_PATH = "./.store/" |
||||
|
|
||||
|
|
||||
|
logger = logging.getLogger(__name__) |
||||
|
config = configparser.ConfigParser() |
||||
|
bots = [] |
||||
|
|
||||
|
class Callbacks(object):
    """Class to pass client to callback methods.

    Bundles the matrix-nio AsyncClient and the owning ChatBot so the
    registered event callbacks can reach both.
    """

    def __init__(self, client: AsyncClient, bot):
        self.client = client  # matrix-nio AsyncClient used to act on events
        self.bot = bot        # owning ChatBot (history, persona, config)

    async def message_cb(self, room: MatrixRoom, event: RoomMessageText) -> None:
        """Handle an incoming room message: record it in the chat history,
        dispatch bot commands (!replybot, !image, !begin, !!/!!!), and
        otherwise generate and send an AI reply."""
        message = event.body
        is_own_message = False
        if event.sender == self.client.user:
            is_own_message = True
        is_command = False
        if event.body.startswith('!'):
            is_command = True
        relates_to = None
        if 'm.relates_to' in event.source["content"]:
            relates_to = event.source["content"]['m.relates_to']["event_id"]
        # Translate incoming text to English when the bot has a translate
        # language configured; commands are passed through untranslated.
        # Our own outgoing messages carry the English original in
        # 'original_message', which is reused instead of re-translating.
        translated_message = message
        if not (self.bot.translate is None) and not is_command:
            if 'original_message' in event.source["content"]:
                translated_message = event.source["content"]['original_message']
            else:
                translated_message = translate.translate(message, self.bot.translate, "en")
        if hasattr(event, 'body'):
            self.bot.chat_history[event.event_id] = ChatItem(event.event_id, event.server_timestamp, room.user_name(event.sender), is_own_message, relates_to, translated_message)
        # While the initial sync replays old events we only record history,
        # never answer.
        if self.bot.not_synced:
            return
        print(
            "Message received for room {} | {}: {}".format(
                room.display_name, room.user_name(event.sender), event.body
            )
        )
        await self.client.room_read_markers(room.room_id, event.event_id, event.event_id)
        # Ignore messages from ourselves
        if is_own_message:
            return

        if hasattr(event, 'body') and event.body.startswith('!replybot'):
            # Liveness check: answer with a fixed greeting.
            print(event)
            await self.bot.send_message(self.client, room.room_id, "Hello World!")
            return
        elif hasattr(event, 'body') and event.body.startswith('!image'):
            # Generate image(s) from the text after "!image" via runpod.
            prompt = event.body.removeprefix('!image').strip()
            negative_prompt = "out of frame, (ugly:1.3), (fused fingers), (too many fingers), (bad anatomy:1.5), (watermark:1.5), (words), letters, untracked eyes, asymmetric eyes, floating head, (logo:1.5), (bad hands:1.3), (mangled hands:1.2), (missing hands), (missing arms), backward hands, floating jewelry, unattached jewelry, floating head, doubled head, unattached head, doubled head, head in body, (misshapen body:1.1), (badly fitted headwear:1.2), floating arms, (too many arms:1.5), limbs fused with body, (facial blemish:1.5), badly fitted clothes, imperfect eyes, untracked eyes, crossed eyes, hair growing from clothes, partial faces, hair not attached to head"
            if len(prompt) == 0:
                prompt = "a beautiful woman"
            output = await ai.generate_image(prompt, negative_prompt, self.bot.runpod_api_key)
            for imagefile in output:
                await self.bot.send_image(self.client, room.room_id, imagefile)
            return
        elif hasattr(event, 'body') and event.body.startswith('!begin'):
            # Reset the conversation and restart with the configured greeting.
            self.bot.chat_history = {}
            await self.bot.send_message(self.client, room.room_id, self.bot.greeting)
            return
        elif event.body.startswith('!!!'):
            # Redaction of the last exchange -- currently disabled.
            return
            # chat_history_event_id, chat_history_item = self.bot.chat_history.popitem() # current
            # await self.client.room_redact(room.room_id, chat_history_item.event_id, reason="user-request")
            # chat_history_event_id, chat_history_item = self.bot.chat_history.popitem()
            # await self.client.room_redact(room.room_id, chat_history_item.event_id, reason="user-request")
            # chat_history_event_id, chat_history_item = self.bot.chat_history.popitem()
            # await self.client.room_redact(room.room_id, chat_history_item.event_id, reason="user-request")
            # return
        elif event.body.startswith('!!'):
            # Regenerate the last answer -- currently disabled.
            return
            # chat_history_event_id, chat_history_item = self.bot.chat_history.popitem() # current
            # await self.client.room_redact(room.room_id, chat_history_item.event_id, reason="user-request")
            # chat_history_event_id, chat_history_item = self.bot.chat_history.popitem()
            # await self.client.room_redact(room.room_id, chat_history_item.event_id, reason="user-request")
            # chat_history_event_id, chat_history_item = self.bot.chat_history.popitem() # new current
            # self.bot.chat_history[chat_history_event_id] = chat_history_item
            # message = chat_history_item.message
            # # don't return, we generate a new answer

        # Normal chat message: build the full prompt and ask the AI backend.
        full_prompt = await ai.get_full_prompt(translated_message, self.bot)
        num_tokens = await ai.num_tokens(full_prompt)
        logger.info(full_prompt)
        logger.info(f"num tokens:" + str(num_tokens))
        # Earlier streaming implementation via the local llama.cpp backend:
        # answer = ""
        # time = 0
        # error = None
        # try:
        #     async for output in generate(full_prompt):
        #         await asyncio.sleep(0.1)
        #         answer += output
        #         if time % 5 == 0:
        #             await self.client.room_typing(room.room_id, True, 15000)
        #         time +=1
        #         print(output, end='', flush=True)
        # except Exception as e:
        #     error = e.__str__()
        # answer = answer.strip()
        # print("")
        await self.client.room_typing(room.room_id, True, 15000)
        answer = await ai.generate_sync(full_prompt, self.bot.runpod_api_key)
        answer = answer.strip()
        await self.client.room_typing(room.room_id, False)
        # Translate the (English) answer back to the user's language; the
        # English original rides along so future turns skip re-translation.
        translated_answer = answer
        if not (self.bot.translate is None):
            translated_answer = translate.translate(answer, "en", self.bot.translate)
            await self.bot.send_message(self.client, room.room_id, translated_answer, reply_to=event.event_id, original_message=answer)
        else:
            await self.bot.send_message(self.client, room.room_id, answer, reply_to=event.event_id)

    async def invite_cb(self, room: MatrixRoom, event: InviteEvent) -> None:
        """Automatically join all rooms we get invited to"""
        result = await self.client.join(room.room_id)
        print('Invited to room: {} {}'.format(room.name, room.room_id))
        if isinstance(result, nio.responses.JoinResponse):
            print('Joined')
        else:
            print("Error joining room: {}".format(str(result)))
||||
|
|
||||
|
class ChatBot(object):
    """Main chatbot.

    Owns the Matrix client connection, the character configuration
    (persona, scenario, greeting) and the per-room chat history.
    """

    def __init__(self, homeserver, user_id, password):
        self.homeserver = homeserver  # Matrix homeserver URL
        self.user_id = user_id        # bot account user id
        self.password = password      # bot account password

        self.runpod_api_key = None    # set from config when present

        self.client = None            # AsyncClient, created in login()
        self.callbacks = None         # Callbacks instance, created in login()
        self.config = None            # AsyncClientConfig, created in login()
        self.not_synced = True        # True until the first full sync completes

        self.owner = None             # optional owner user id from config
        self.translate = None         # optional language code for translation

        # Character definition, filled in by character_init().
        self.name = None
        self.persona = None
        self.scenario = None
        self.greeting = None
        self.chat_history = {}        # event_id -> ChatItem, insertion-ordered

        # Ensure the nio store directory exists before the client uses it.
        if STORE_PATH and not os.path.isdir(STORE_PATH):
            os.mkdir(STORE_PATH)

    def character_init(self, name, persona, scenario, greeting):
        """Set the bot's character definition (from the config file)."""
        self.name = name
        self.persona = persona
        self.scenario = scenario
        self.greeting = greeting

    async def login(self):
        """Create the Matrix client, register callbacks, log in and sync forever.

        Blocks until interrupted; closes the client on cancellation.
        """
        self.config = AsyncClientConfig(store_sync_tokens=True)
        self.client = AsyncClient(self.homeserver, self.user_id, store_path=STORE_PATH, config=self.config)
        self.callbacks = Callbacks(self.client, self)
        self.client.add_event_callback(self.callbacks.message_cb, RoomMessageText)
        self.client.add_event_callback(self.callbacks.invite_cb, InviteEvent)

        # Background task that flips not_synced once the first sync is done.
        # NOTE(review): the task handle is unused; keep a reference so it is
        # not garbage-collected prematurely -- confirm.
        sync_task = asyncio.create_task(self.watch_for_sync(self.client.synced))

        try:
            response = await self.client.login(self.password)
            print(response)
            await self.client.sync_forever(timeout=30000, full_state=True)
        except (asyncio.CancelledError, KeyboardInterrupt):
            print("Received interrupt.")
            await self.client.close()

    async def watch_for_sync(self, sync_event):
        """Wait for the client's synced event, then mark the bot as synced."""
        print("Awaiting sync")
        await sync_event.wait()
        print("Client is synced")
        self.not_synced = False

    async def send_message(self, client, room_id, message, reply_to=None, original_message=None):
        """Send a plain-text message to a room.

        reply_to attaches a custom in-reply-to relation; original_message
        carries the untranslated English text for later reuse.
        """
        content={"msgtype": "m.text", "body": message}
        if reply_to:
            content["m.relates_to"] = {"event_id": reply_to, "rel_type": "de.xd0.mpygbot.in_reply_to"}
        if original_message:
            content["original_message"] = original_message

        await client.room_send(
            room_id=room_id,
            message_type="m.room.message",
            content=content,
        )

    async def send_image(self, client, room_id, image):
        """Send image to room

        Validates the file's mime type, uploads it to the homeserver, then
        posts an m.image event referencing the upload.
        https://matrix-nio.readthedocs.io/en/latest/examples.html#sending-an-image
        """
        mime_type = magic.from_file(image, mime=True)  # e.g. "image/jpeg"
        if not mime_type.startswith("image/"):
            logger.error("Drop message because file does not have an image mime type.")
            return

        im = Image.open(image)
        (width, height) = im.size  # im.size returns (width,height) tuple

        # first do an upload of image, then send URI of upload to room
        file_stat = await aiofiles.os.stat(image)
        async with aiofiles.open(image, "r+b") as f:
            resp, maybe_keys = await client.upload(
                f,
                content_type=mime_type,  # image/jpeg
                filename=os.path.basename(image),
                filesize=file_stat.st_size,
            )
        if isinstance(resp, UploadResponse):
            print("Image was uploaded successfully to server. ")
        else:
            # NOTE(review): on upload failure execution continues and
            # resp.content_uri below will raise -- consider returning here.
            print(f"Failed to upload image. Failure response: {resp}")

        content = {
            "body": os.path.basename(image),  # descriptive title
            "info": {
                "size": file_stat.st_size,
                "mimetype": mime_type,
                "thumbnail_info": None,  # TODO
                "w": width,  # width in pixel
                "h": height,  # height in pixel
                "thumbnail_url": None,  # TODO
            },
            "msgtype": "m.image",
            "url": resp.content_uri,
        }

        try:
            await client.room_send(room_id, message_type="m.room.message", content=content)
            print("Image was sent successfully")
        except Exception:
            print(f"Image send of file {image} failed.")
||||
|
|
||||
|
async def main() -> None:
    """Read bot definitions from bot.conf and start each configured bot.

    Every non-reserved config section defines one bot; the section name is
    the bot's character name.
    """
    config.read('bot.conf')
    logging.basicConfig(level=logging.INFO)
    for section in config.sections():
        # Fix: the original used `pass` here, so a bot was still constructed
        # from the reserved Common section (and crashed on missing keys);
        # skip reserved sections entirely.
        if section == 'DEFAULT' or section == 'Common':
            continue
        botname = section
        homeserver = config[section]['url']
        user_id = config[section]['username']
        password = config[section]['password']
        bot = ChatBot(homeserver, user_id, password)
        bot.character_init(botname, config[section]['persona'], config[section]['scenario'], config[section]['greeting'])
        if config.has_option(section, 'owner'):
            bot.owner = config[section]['owner']
        if config.has_option(section, 'translate'):
            bot.translate = config[section]['translate']
            # Install translation models for both directions.
            translate.init(bot.translate, "en")
            translate.init("en", bot.translate)
        if config.has_option(section, 'image_prompt'):
            bot.image_prompt = config[section]['image_prompt']
        if config.has_option('DEFAULT', 'runpod_api_key'):
            bot.runpod_api_key = config['DEFAULT']['runpod_api_key']
        bots.append(bot)
        # NOTE(review): login() runs sync_forever and never returns normally,
        # so only the first configured bot ever starts -- confirm whether
        # multiple bots should run concurrently (e.g. asyncio.gather).
        await bot.login()
        print("logged in")
||||
|
|
||||
|
asyncio.get_event_loop().run_until_complete(main()) |
@ -0,0 +1,15 @@ |
|||||
|
|
||||
|
|
||||
|
class ChatItem:
    """A single chat message kept in a bot's conversation history."""

    def __init__(self, event_id, timestamp, user_name, is_own_message, relates_to_event, message):
        self.event_id = event_id
        self.timestamp = timestamp
        self.user_name = user_name
        self.is_own_message = is_own_message
        self.relates_to_event = relates_to_event
        self.message = message
        # Token count is filled in lazily by callers that tokenize the line.
        self.num_tokens = None

    def __str__(self):
        return self.getLine()

    def getLine(self):
        """Return the message formatted as a "<user>: <text>" transcript line."""
        return "{}: {}".format(self.user_name, self.message)
@ -0,0 +1,25 @@ |
|||||
|
import argostranslate.package |
||||
|
import argostranslate.translate |
||||
|
|
||||
|
#from_code = "de" |
||||
|
#to_code = "en" |
||||
|
|
||||
|
def init(from_code, to_code):
    """Download and install the Argos Translate model for one language pair.

    Refreshes the package index, picks the package matching
    from_code -> to_code, downloads it and installs it. Raises StopIteration
    when no such package exists.
    """
    # Download and install Argos Translate package
    argostranslate.package.update_package_index()
    candidates = argostranslate.package.get_available_packages()
    wanted = next(
        pkg
        for pkg in candidates
        if pkg.from_code == from_code and pkg.to_code == to_code
    )
    argostranslate.package.install_from_path(wanted.download())
||||
|
|
||||
|
def translate(text, from_code, to_code):
    """Translate *text* between two language codes via Argos Translate.

    Returns the text unchanged when either language code is None.
    """
    if from_code is None or to_code is None:
        return text
    # Translate
    translatedText = argostranslate.translate.translate(text, from_code, to_code)
    print('Translated: \"{}\" -> \"{}\"'.format(text, translatedText))
    return translatedText
    # '¡Hola Mundo!'
||||
|
# '¡Hola Mundo!' |
@ -0,0 +1,7 @@ |
|||||
|
asyncio |
||||
|
matrix-nio |
||||
|
transformers |
||||
|
huggingface_hub |
||||
|
python-magic |
||||
|
pillow |
||||
|
argostranslate |
@ -0,0 +1,9 @@ |
|||||
|
#!/usr/bin/env python3
# Entry point: imports the bot package and runs its main coroutine.
import asyncio

try:
    from matrix_pygmalion_bot import core

    # core.main() starts the configured bots and normally runs forever.
    asyncio.get_event_loop().run_until_complete(core.main())
except ImportError as e:
    print("Unable to import matrix-pygmalion-bot.main:", e)
@ -0,0 +1,9 @@ |
|||||
|
# Minimal runpod serverless worker image.
# FROM is conventionally uppercase (lowercase `from` works but is non-standard).
FROM python:3.11.1-buster

WORKDIR /

RUN pip install runpod

# COPY is preferred over ADD for plain local files (no URL/archive handling needed).
COPY handler.py .

# -u: unbuffered output so worker logs appear immediately.
CMD [ "python", "-u", "/handler.py" ]
@ -0,0 +1,21 @@ |
|||||
|
#!/usr/bin/env python |
||||
|
''' Contains the handler function that will be called by the serverless. ''' |
||||
|
|
||||
|
import runpod |
||||
|
|
||||
|
# Load models into VRAM here so they can be warm between requests |
||||
|
|
||||
|
|
||||
|
def handler(event):
    """Serverless entry point (placeholder implementation).

    Logs the raw event and returns a constant greeting; real work — and the
    output to hand back, e.g. pre-signed artifact URLs — would go here.
    """
    print(event)
    return "Hello World"
||||
|
|
||||
|
|
||||
|
# Hand handler() to the runpod serverless runtime; presumably blocks and
# dispatches incoming jobs to it — runs at import time.
runpod.serverless.start({"handler": handler})
@ -0,0 +1,18 @@ |
|||||
|
# Runpod serverless worker built on top of the stable-diffusion webui image.
FROM runpod/stable-diffusion:web-automatic-1.5.16

SHELL ["/bin/bash", "-c"]

# Make the webui's virtualenv tools available on PATH.
ENV PATH="${PATH}:/workspace/stable-diffusion-webui/venv/bin"

WORKDIR /

# Replace the bundled base checkpoint with the model from civitai.
RUN rm /workspace/v1-5-pruned-emaonly.ckpt
RUN wget -O model.safetensors https://civitai.com/api/download/models/5616
RUN pip install -U xformers
RUN pip install runpod

# COPY is preferred over ADD for plain local files (no URL/archive handling needed).
COPY handler.py .
COPY start.sh /start.sh
RUN chmod +x /start.sh

CMD [ "/start.sh" ]
@ -0,0 +1,39 @@ |
|||||
|
import runpod |
||||
|
import subprocess |
||||
|
import requests |
||||
|
import time |
||||
|
|
||||
|
def check_api_availability(host):
    """Block until an HTTP GET against *host* succeeds.

    Polls forever, sleeping 200 ms between attempts; used to gate worker
    startup on the local txt2img API coming up. Intentionally has no retry
    limit — the container is useless until the API answers.
    """
    retry_delay = 0.2  # seconds between polls
    while True:
        try:
            # Any response (regardless of status code) counts as "available";
            # only connection-level failures keep us polling.
            requests.get(host)
            return
        except requests.exceptions.RequestException as e:
            print(f"API is not available, retrying in 200ms... ({e})")
        except Exception as e:
            # Include the actual error instead of swallowing it behind a
            # bare "something went wrong".
            print(f"something went wrong: {e}")
        time.sleep(retry_delay)
||||
|
|
||||
|
# Runs at import time: wait (indefinitely) for the local webui txt2img API
# to come up before registering the serverless handler.
check_api_availability("http://127.0.0.1:3000/sdapi/v1/txt2img")

print('run handler')
||||
|
|
||||
|
def handler(event):
    """Serverless handler: forward the job payload to the local
    stable-diffusion txt2img API and return its JSON response.

    Expects event["input"] to be a txt2img request body accepted by the
    webui API (started on port 3000 by start.sh with --nowebui --api).
    """
    print('got event')
    print(event)

    # Plain string literal — the original used an f-string with no placeholders.
    response = requests.post(url='http://127.0.0.1:3000/sdapi/v1/txt2img', json=event["input"])

    # Renamed from 'json' to avoid shadowing the conventional module name.
    result = response.json()

    print(result)

    # return the output that you want to be returned like pre-signed URLs to output artifacts
    return result
||||
|
|
||||
|
|
||||
|
# Hand handler() to the runpod serverless runtime; presumably blocks and
# dispatches incoming jobs to it — runs at import time.
runpod.serverless.start({"handler": handler})
@ -0,0 +1,11 @@ |
|||||
|
#!/bin/bash
# Container entrypoint: boot the stable-diffusion webui API in the
# background, then run the runpod serverless worker in the foreground.
echo "Container Started"
export PYTHONUNBUFFERED=1
source /workspace/stable-diffusion-webui/venv/bin/activate
cd /workspace/stable-diffusion-webui
echo "starting api"
# --nowebui --api: expose only the HTTP API on port 3000; handler.py polls
# this endpoint until it is up. Backgrounded so the worker can start below.
python webui.py --port 3000 --nowebui --api --xformers --ckpt /model.safetensors &
cd /
echo "starting worker"
# Foreground process of the container; -u keeps logs unbuffered.
python -u handler.py
@ -0,0 +1,18 @@ |
|||||
|
# Runpod serverless worker built on top of the stable-diffusion webui image.
FROM runpod/stable-diffusion:web-automatic-1.5.16

SHELL ["/bin/bash", "-c"]

# Make the webui's virtualenv tools available on PATH.
ENV PATH="${PATH}:/workspace/stable-diffusion-webui/venv/bin"

WORKDIR /

# Replace the bundled base checkpoint with the model from civitai.
RUN rm /workspace/v1-5-pruned-emaonly.ckpt
RUN wget -O model.safetensors https://civitai.com/api/download/models/5616
RUN pip install -U xformers
RUN pip install runpod

# COPY is preferred over ADD for plain local files (no URL/archive handling needed).
COPY handler.py .
COPY start.sh /start.sh
RUN chmod +x /start.sh

CMD [ "/start.sh" ]
@ -0,0 +1,39 @@ |
|||||
|
import runpod |
||||
|
import subprocess |
||||
|
import requests |
||||
|
import time |
||||
|
|
||||
|
def check_api_availability(host):
    """Block until an HTTP GET against *host* succeeds.

    Polls forever, sleeping 200 ms between attempts; used to gate worker
    startup on the local txt2img API coming up. Intentionally has no retry
    limit — the container is useless until the API answers.
    """
    retry_delay = 0.2  # seconds between polls
    while True:
        try:
            # Any response (regardless of status code) counts as "available";
            # only connection-level failures keep us polling.
            requests.get(host)
            return
        except requests.exceptions.RequestException as e:
            print(f"API is not available, retrying in 200ms... ({e})")
        except Exception as e:
            # Include the actual error instead of swallowing it behind a
            # bare "something went wrong".
            print(f"something went wrong: {e}")
        time.sleep(retry_delay)
||||
|
|
||||
|
# Runs at import time: wait (indefinitely) for the local webui txt2img API
# to come up before registering the serverless handler.
check_api_availability("http://127.0.0.1:3000/sdapi/v1/txt2img")

print('run handler')
||||
|
|
||||
|
def handler(event):
    """Serverless handler: forward the job payload to the local
    stable-diffusion txt2img API and return its JSON response.

    Expects event["input"] to be a txt2img request body accepted by the
    webui API (started on port 3000 by start.sh with --nowebui --api).
    """
    print('got event')
    print(event)

    # Plain string literal — the original used an f-string with no placeholders.
    response = requests.post(url='http://127.0.0.1:3000/sdapi/v1/txt2img', json=event["input"])

    # Renamed from 'json' to avoid shadowing the conventional module name.
    result = response.json()

    print(result)

    # return the output that you want to be returned like pre-signed URLs to output artifacts
    return result
||||
|
|
||||
|
|
||||
|
# Hand handler() to the runpod serverless runtime; presumably blocks and
# dispatches incoming jobs to it — runs at import time.
runpod.serverless.start({"handler": handler})
@ -0,0 +1,11 @@ |
|||||
|
#!/bin/bash
# Container entrypoint: boot the stable-diffusion webui API in the
# background, then run the runpod serverless worker in the foreground.
echo "Container Started"
export PYTHONUNBUFFERED=1
source /workspace/stable-diffusion-webui/venv/bin/activate
cd /workspace/stable-diffusion-webui
echo "starting api"
# --nowebui --api: expose only the HTTP API on port 3000; handler.py polls
# this endpoint until it is up. Backgrounded so the worker can start below.
python webui.py --port 3000 --nowebui --api --xformers --ckpt /model.safetensors &
cd /
echo "starting worker"
# Foreground process of the container; -u keeps logs unbuffered.
python -u handler.py
@ -0,0 +1,6 @@ |
|||||
|
from setuptools import find_packages, setup

# Minimal packaging metadata; find_packages() discovers the
# matrix_pygmalion_bot package automatically.
setup(
    name='matrix-pygmalion-bot',
    packages=find_packages(),
)
Loading…
Reference in new issue