Hendrik Langer
2 years ago
15 changed files with 520 additions and 224 deletions
@@ -0,0 +1,59 @@
import asyncio
import os
import requests
import json
import logging

logger = logging.getLogger(__name__)


class KoboldCppTextWrapper(object):
    """Base class for koboldcpp"""

    def __init__(self, endpoint_name: str, model_name: str):
        self.endpoint_name = endpoint_name
        self.model_name = model_name

    def setup(self):
        os.system("mkdir -p repositories && (cd repositories && git clone https://github.com/LostRuins/koboldcpp.git)")
        os.system("apt update && apt-get install libopenblas-dev libclblast-dev libmkl-dev")
        os.system("(cd repositories/koboldcpp && make LLAMA_OPENBLAS=1 && cd models && wget https://huggingface.co/concedo/pygmalion-6bv3-ggml-ggjt/resolve/main/pygmalion-6b-v3-ggml-ggjt-q4_0.bin)")
        # python3 koboldcpp.py models/pygmalion-6b-v3-ggml-ggjt-q4_0.bin
        # python3 koboldcpp.py --smartcontext models/pygmalion-6b-v3-ggml-ggjt-q4_0.bin

    async def generate(self, prompt: str, typing_fn, temperature=0.72, max_new_tokens=200, timeout=180):
        # Set the API endpoint URL
        endpoint = f"http://{self.endpoint_name}/api/latest/generate"

        # Set the headers for the request
        headers = {
            "Content-Type": "application/json",
        }

        # Define your inputs
        input_data = {
            "prompt": prompt,
            "max_context_length": 2048,
            "max_length": max_new_tokens,
            "temperature": temperature,
            "top_k": 50,
            "top_p": 0.85,
            "rep_pen": 1.08,
            "rep_pen_range": 1024,
            "stop_sequence": ['<|endoftext|>'],
        }

        logger.info(f"sending request to koboldcpp. endpoint=\"{self.endpoint_name}\"")

        TRIES = 30
        for i in range(TRIES):
            r = requests.post(endpoint, json=input_data, headers=headers, timeout=timeout)
            r_json = r.json()
            logger.info(r_json)
            if r.status_code == 200:
                output = r_json["results"][0]["text"]
                return output
            elif r.status_code == 503:
                logger.info("api is busy. waiting...")
                await asyncio.sleep(5)

        raise ValueError("<ERROR> TIMEOUT / NO OUTPUT")
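For context, a minimal sketch of how the new koboldcpp wrapper might be driven. The host/port, model label, import path, and no-op typing_fn below are illustrative assumptions, not part of this change:

import asyncio
from koboldcpp import KoboldCppTextWrapper  # hypothetical import path

async def main():
    # Assumes a koboldcpp instance is already listening on localhost:5001 (its default port).
    wrapper = KoboldCppTextWrapper("localhost:5001", "pygmalion-6b")

    async def typing_fn():
        pass  # stand-in for the bot's typing-indicator callback

    reply = await wrapper.generate("You are Julia. Say hello.", typing_fn,
                                   temperature=0.72, max_new_tokens=200)
    print(reply)

asyncio.run(main())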
@@ -1,42 +0,0 @@
import asyncio
import requests
import os, tempfile
from .runpod import RunpodWrapper
import logging

logger = logging.getLogger(__name__)


class RunpodImageWrapper(RunpodWrapper):
    async def download_image(self, url, path):
        r = requests.get(url, stream=True)
        if r.status_code == 200:
            with open(path, 'wb') as f:
                for chunk in r:
                    f.write(chunk)

    async def generate(self, input_prompt: str, negative_prompt: str, endpoint_name: str, typing_fn, timeout=180):
        # Define your inputs
        input_data = {
            "input": {
                "prompt": input_prompt,
                "negative_prompt": negative_prompt,
                "width": 512,
                "height": 768,
                "num_outputs": 3,
                # "nsfw": True
            },
        }

        output = await super().generate(input_data, endpoint_name, typing_fn, timeout)

        os.makedirs("./.data/images", exist_ok=True)
        files = []
        for image in output:
            temp_name = next(tempfile._get_candidate_names())
            filename = "./.data/images/" + temp_name + ".jpg"
            await self.download_image(image["image"], filename)
            files.append(filename)

        return files
@@ -1,82 +0,0 @@
import asyncio
import requests
import os, tempfile
from .runpod import RunpodWrapper

import io
import base64
from PIL import Image, PngImagePlugin

import logging

logger = logging.getLogger(__name__)


class RunpodImageAutomaticWrapper(RunpodWrapper):

    async def generate(self, input_prompt: str, negative_prompt: str, endpoint_name: str, typing_fn, timeout=180):
        # Define your inputs
        input_data = {
            "input": {
                "prompt": input_prompt,
                "negative_prompt": negative_prompt,
                "steps": 25,
                "cfg_scale": 7,
                "seed": -1,
                "width": 512,
                "height": 768,
                "batch_size": 3,
                # "sampler_index": "DPM++ 2M Karras",
                # "enable_hr": True,
                # "hr_scale": 2,
                # "hr_upscaler": "ESRGAN_4x",  # "Latent"
                # "denoising_strength": 0.5,
                # "hr_second_pass_steps": 15,
                "restore_faces": True,
                # "gfpgan_visibility": 0.5,
                # "codeformer_visibility": 0.5,
                # "codeformer_weight": 0.5,
                ## "override_settings": {
                ##     "filter_nsfw": False,
                ## },
                "api_endpoint": "txt2img",
            },
            "cmd": "txt2img"
        }

        output = await super().generate(input_data, endpoint_name, typing_fn, timeout)

        upscale = False
        if upscale:
            count = 0
            for i in output['images']:
                payload = {
                    "init_images": [i],
                    "prompt": input_prompt,
                    "negative_prompt": negative_prompt,
                    "steps": 20,
                    "seed": -1,
                    # "sampler_index": "Euler",
                    # tile_width, tile_height, mask_blur, padding, seams_fix_width, seams_fix_denoise, seams_fix_padding, upscaler_index, save_upscaled_image, redraw_mode, save_seams_fix_image, seams_fix_mask_blur, seams_fix_type, target_size_type, custom_width, custom_height, custom_scale
                    # "script_args": ["",512,0,8,32,64,0.275,32,3,False,0,True,8,3,2,1080,1440,1.875],
                    # "script_name": "Ultimate SD upscale",
                }
                upscaled_output = await serverless_automatic_request(payload, "img2img", api_url, api_key, typing_fn)
                output['images'][count] = upscaled_output['images'][count]
                count += 1

        os.makedirs("./.data/images", exist_ok=True)
        files = []
        for i in output['images']:
            temp_name = next(tempfile._get_candidate_names())
            filename = "./.data/images/" + temp_name + ".png"
            image = Image.open(io.BytesIO(base64.b64decode(i.split(",", 1)[0])))
            info = output['info']
            parameters = output['parameters']
            pnginfo = PngImagePlugin.PngInfo()
            pnginfo.add_text("parameters", info)
            image.save(filename, pnginfo=pnginfo)
            files.append(filename)

        return files
@@ -1,31 +0,0 @@
import asyncio
import json
from .runpod import RunpodWrapper
import logging

logger = logging.getLogger(__name__)


class RunpodTextWrapper(RunpodWrapper):

    def __init__(self, api_key, endpoint):
        self.api_key = api_key
        self.endpoint = endpoint

    async def generate(self, prompt, endpoint_name, typing_fn, temperature=0.72, max_new_tokens=200, timeout=180):
        # Define your inputs
        input_data = {
            "input": {
                "prompt": prompt,
                "max_length": min(max_new_tokens, 2048),
                "temperature": temperature,
                "do_sample": True,
            }
        }
        output = await super().generate(input_data, endpoint_name, typing_fn, timeout)
        output = output.removeprefix(prompt)
        return output

    async def generate2(self, prompt, typing_fn, temperature=0.72, max_new_tokens=200, timeout=180):
        return await self.generate(prompt, self.endpoint, typing_fn, temperature, max_new_tokens, timeout)
@@ -0,0 +1,148 @@
import asyncio
import requests
import json
import os, tempfile
import logging

logger = logging.getLogger(__name__)


class StableHordeWrapper(object):
    """Base class for stablehorde"""

    def __init__(self, api_key: str, endpoint_name: str, model_name: str):
        self.api_key = api_key
        self.endpoint_name = endpoint_name
        self.model_name = model_name

    async def generate(self, input_data: dict, typing_fn, timeout=180):
        # Set the API endpoint URL
        endpoint = "https://stablehorde.net/api/v2/generate/async"
        #endpoint = "https://koboldai.net/api/v2/generate/async"

        # Set the headers for the request
        headers = {
            "Content-Type": "application/json",
            "accept": "application/json",
            "apikey": f"{self.api_key}",
        }

        logger.info(f"sending request to stablehorde.net. endpoint=\"{self.endpoint_name}\"")

        # Make the request
        try:
            r = requests.post(endpoint, json=input_data, headers=headers, timeout=timeout)
        except requests.exceptions.RequestException as e:
            raise ValueError("<HTTP ERROR>") from e
        r_json = r.json()
        logger.debug(r_json)

        if r.status_code == 202:
            #status = r_json["message"]
            job_id = r_json["id"]
            TIMEOUT = 360
            DELAY = 11
            output = None
            for i in range(TIMEOUT // DELAY):
                endpoint = f"https://stablehorde.net/api/v2/generate/status/{job_id}"
                #endpoint = f"https://koboldai.net/api/v2/generate/text/status/{job_id}"
                r = requests.get(endpoint, headers=headers)
                r_json = r.json()
                logger.info(r_json)
                #status = r_json["message"]
                if "done" not in r_json:
                    raise ValueError("<ERROR>")
                if "faulted" in r_json and r_json["faulted"] == True:
                    raise ValueError("<ERROR> Faulted")
                if r_json["done"] == True:
                    output = r_json["generations"]
                    break
                else:
                    if "processing" in r_json and r_json["processing"] == 1:
                        await typing_fn()
                    elif "wait_time" in r_json and r_json["wait_time"] < 20 and r_json["wait_time"] != 0 and r_json["queue_position"] < 100:
                        await typing_fn()
                    await asyncio.sleep(DELAY)
        else:
            raise ValueError(f"<ERROR> HTTP code {r.status_code}")

        if not output:
            raise ValueError("<ERROR> TIMEOUT / NO OUTPUT")
        return output


class StableHordeTextWrapper(StableHordeWrapper):

    async def generate(self, prompt, typing_fn, temperature=0.72, max_new_tokens=200, timeout=180):
        # Define your inputs
        input_data = {
            "prompt": prompt,
            "params": {
                "n": 1,
                # "frmtadsnsp": False,
                # "frmtrmblln": False,
                # "frmtrmspch": False,
                # "frmttriminc": False,
                "max_context_length": 1024,
                "max_length": max_new_tokens,
                "rep_pen": 1.1,
                "rep_pen_range": 1024,
                "rep_pen_slope": 0.7,
                # "singleline": False,
                # "soft_prompt": "",
                "temperature": temperature,
                "tfs": 1.0,
                "top_a": 0.0,
                "top_k": 0,
                "top_p": 0.9,
                "typical": 1.0,
                # "sampler_order": [0],
            },
            "softprompts": [],
            "trusted_workers": False,
            "nsfw": True,
            # "workers": [],
            "models": [f"{self.endpoint_name}"],
        }

        output = await super().generate(input_data, typing_fn, timeout)
        output = output[0]["text"].removeprefix(prompt)
        return output


class StableHordeImageWrapper(StableHordeWrapper):

    async def download_image(self, url, path):
        r = requests.get(url, stream=True)
        if r.status_code == 200:
            with open(path, 'wb') as f:
                for chunk in r:
                    f.write(chunk)

    async def generate(self, input_prompt: str, negative_prompt: str, typing_fn, timeout=180):
        # Define your inputs
        input_data = {
            "prompt": input_prompt,
            "params": {
                # "negative_prompt": negative_prompt,
                "width": 512,
                "height": 512,
            },
            "nsfw": True,
            "trusted_workers": False,
            # "workers": [],
            "models": [f"{self.endpoint_name}"],
        }

        output = await super().generate(input_data, typing_fn, timeout)

        os.makedirs("./.data/images", exist_ok=True)
        files = []
        for image in output:
            temp_name = next(tempfile._get_candidate_names())
            filename = "./.data/images/" + temp_name + ".jpg"
            await self.download_image(image["img"], filename)
            files.append(filename)

        return files
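Likewise, a minimal usage sketch for the two Stable Horde wrappers. "0000000000" is the Horde's shared anonymous API key; the import path, model names, and prompts are illustrative assumptions:

import asyncio
from stablehorde import StableHordeTextWrapper, StableHordeImageWrapper  # hypothetical import path

async def main():
    async def typing_fn():
        pass  # stand-in for the bot's typing-indicator callback

    text = StableHordeTextWrapper("0000000000", "PygmalionAI/pygmalion-6b", "pygmalion-6b")
    print(await text.generate("You are Julia. Say hello.", typing_fn))

    image = StableHordeImageWrapper("0000000000", "Deliberate", "deliberate")
    print(await image.generate("a watercolor fox", "blurry, lowres", typing_fn))

asyncio.run(main())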