Browse Source

fix Dockerfile

master
Hendrik Langer 2 years ago
parent
commit
51c5e881e6
  1. 27
      runpod/runpod-worker-oobabooga-api/Dockerfile

27
runpod/runpod-worker-oobabooga-api/Dockerfile

@@ -10,9 +10,6 @@ FROM ${DEV_IMAGE} as builder
# DOCKER_BUILDKIT=1 docker build --build-arg MODEL_NAME="PygmalionAI/pygmalion-350m" -t magn418/runpod-oobabooga-pygmalion:test .
# docker builder prune
ARG MODEL_NAME="PygmalionAI/pygmalion-350m"
ENV MODEL_NAME=${MODEL_NAME}
WORKDIR /
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
ENV DEBIAN_FRONTEND noninteractive\
@@ -73,23 +70,35 @@ RUN mkdir -p /workspace
WORKDIR /workspace
#RUN mkdir /workspace &&
RUN cd /workspace && git clone https://github.com/oobabooga/text-generation-webui.git && \
RUN --mount=type=cache,target=/root/.cache,sharing=locked pip3 \
cd /workspace && git clone --depth 1 https://github.com/oobabooga/text-generation-webui.git && \
cd /workspace/text-generation-webui && pip3 install -r requirements.txt && \
cd extensions/api && pip3 install -r requirements.txt
# pip3 cache purge
RUN cd /workspace/text-generation-webui/ && mkdir repositories && cd repositories && \
RUN --mount=type=cache,target=/root/.cache,sharing=locked pip3 \
cd /workspace/text-generation-webui/ && mkdir repositories && cd repositories && \
# https://github.com/oobabooga/GPTQ-for-LLaMa
git clone --branch cuda --single-branch https://github.com/qwopqwop200/GPTQ-for-LLaMa.git && \
(cd GPTQ-for-LLaMa && python3 setup_cuda.py bdist_wheel -d .)
# git clone --branch cuda --single-branch https://github.com/qwopqwop200/GPTQ-for-LLaMa.git && \
# (cd GPTQ-for-LLaMa && python3 setup_cuda.py bdist_wheel -d .)
git clone --depth 1 https://github.com/qwopqwop200/GPTQ-for-LLaMa.git && \
(cd GPTQ-for-LLaMa && pip3 install -r requirements.txt)
# && python3 setup_cuda.py install
FROM builder AS modeldownloader
RUN cd /workspace/text-generation-webui && python3 download-model.py ${MODEL_NAME}
ARG MODEL_NAME="PygmalionAI/pygmalion-350m"
ENV MODEL_NAME=${MODEL_NAME}
#RUN cd /workspace/text-generation-webui && python3 download-model.py ${MODEL_NAME}
#RUN git lfs install && \
# git clone --depth 1 https://huggingface.co/${MODEL_NAME}
RUN wget -P /workspace/text-generation-webui/models/ https://raw.githubusercontent.com/BlinkDL/ChatRWKV/main/v2/20B_tokenizer.json && \
wget -P /workspace/text-generation-webui/models/ https://huggingface.co/BlinkDL/rwkv-4-raven/resolve/main/RWKV-4-Raven-7B-v9-Eng99%25-Other1%25-20230412-ctx8192.pth
#https://huggingface.co/BlinkDL/rwkv-4-pile-7b/resolve/main/RWKV-4-Pile-7B-20230406-ctx8192-test949.pth
#RUN cd /workspace/text-generation-webui && python3 download-model.py MetaIX/GPT4-X-Alpaca-30B-Int4 --text-only && \
# wget -P /workspace/text-generation-webui/models/GPT4-X-Alpaca-30B-Int4/ https://huggingface.co/MetaIX/GPT4-X-Alpaca-30B-Int4/resolve/main/gpt4-x-alpaca-30b-4bit.safetensors
FROM ${BASE_IMAGE}
#ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX"

Loading…
Cancel
Save