# syntax=docker/dockerfile:1

ARG DEV_IMAGE=nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04
ARG BASE_IMAGE=nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04
#ARG BASE_IMAGE=nvidia/cuda:11.6.2-cudnn8-devel-ubuntu20.04
#ARG BASE_IMAGE=runpod/pytorch:3.10-2.0.0-117
#ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:23.03-py3

# ---------------------------------------------------------------------------
# Stage 1: builder — full CUDA devel image.  Compiles the GPTQ-for-LLaMa CUDA
# extension into a wheel and pre-downloads the model so the runtime stage can
# stay on the (much smaller) CUDA runtime base.
# Based on: https://github.com/runpod/containers/blob/main/oobabooga/Dockerfile
# Build with:
#   DOCKER_BUILDKIT=1 docker build \
#     --build-arg MODEL_NAME="PygmalionAI/pygmalion-350m" \
#     -t magn418/runpod-oobabooga-pygmalion:test .
# ---------------------------------------------------------------------------
FROM ${DEV_IMAGE} AS builder

ARG MODEL_NAME="PygmalionAI/pygmalion-350m"
ENV MODEL_NAME=${MODEL_NAME}

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# DEBIAN_FRONTEND is build-time only, so it is an ARG rather than an ENV and
# does not leak into the runtime environment.  SHELL is exported for
# interactive runpod sessions.
ARG DEBIAN_FRONTEND=noninteractive
ENV SHELL=/bin/bash

# OS build dependencies.  `apt-get` (not `apt`) is the scriptable interface.
# No blanket `apt-get upgrade`: security fixes come from bumping the base
# image tag.  apt lists are removed in the same layer; the package cache
# itself lives on a BuildKit cache mount, outside the image.
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    apt-get update --yes && \
    apt-get install --yes --no-install-recommends \
        bash \
        build-essential \
        ca-certificates \
        cmake \
        curl \
        git \
        git-lfs \
        openssh-server \
        software-properties-common \
        wget && \
    rm -rf /var/lib/apt/lists/*

# Ubuntu 22.04 ships Python 3.10 natively, so the deadsnakes-PPA and
# update-alternatives workaround that was needed on 20.04 is gone.
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    apt-get update --yes && \
    apt-get install --yes --no-install-recommends \
        python3 \
        python3-dev \
        python3-pip \
        python3-venv && \
    rm -rf /var/lib/apt/lists/*

# Compile the CUDA extension for every SM the image should support;
# "8.6+PTX" also embeds PTX for forward compatibility with newer GPUs.
ARG TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX"
ENV TORCH_CUDA_ARCH_LIST=${TORCH_CUDA_ARCH_LIST}

# Python stack.  The pip cache is kept on a build-cache mount instead of in a
# layer, so no `pip cache purge` is required.
RUN --mount=type=cache,target=/root/.cache,sharing=locked \
    pip3 install --upgrade pip setuptools && \
    pip3 install cuda-python==11.8.0 && \
    pip3 install torch torchvision torchaudio \
        --extra-index-url=https://download.pytorch.org/whl/cu118 && \
    pip3 install \
        accelerate \
        bitsandbytes \
        diffusers \
        huggingface-hub \
        runpod \
        safetensors \
        sentencepiece \
        triton \
        xformers && \
    pip3 install git+https://github.com/huggingface/transformers.git

# WORKDIR creates the directory itself — no `mkdir -p` / `cd` needed.
WORKDIR /workspace

# Web UI plus its API extension's requirements.
RUN --mount=type=cache,target=/root/.cache,sharing=locked \
    git clone https://github.com/oobabooga/text-generation-webui.git && \
    cd /workspace/text-generation-webui && \
    pip3 install -r requirements.txt && \
    (cd extensions/api && pip3 install -r requirements.txt)

# Build GPTQ-for-LLaMa (see https://github.com/oobabooga/GPTQ-for-LLaMa) as a
# wheel so the runtime stage can install it without a CUDA compiler.
WORKDIR /workspace/text-generation-webui/repositories
RUN git clone --branch cuda --single-branch \
        https://github.com/qwopqwop200/GPTQ-for-LLaMa.git && \
    cd GPTQ-for-LLaMa && \
    python3 setup_cuda.py bdist_wheel -d .
# Alternative to the wheel build above:
#   python3 setup_cuda.py install

# Pre-download the model into the builder so runtime cold starts do not have
# to hit the Hugging Face Hub.
WORKDIR /workspace/text-generation-webui
RUN python3 download-model.py ${MODEL_NAME}
#RUN git lfs install && \
#    git clone --depth 1 https://huggingface.co/${MODEL_NAME}

# ---------------------------------------------------------------------------
# Stage 2: runtime — CUDA *runtime* base; compiled artifacts and the model
# are copied over from `builder`.
# ---------------------------------------------------------------------------
FROM ${BASE_IMAGE}
#ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX"

# WORKDIR creates /workspace itself — no `mkdir -p` needed.
WORKDIR /workspace

# make/g++ stay in the runtime image because some pip packages (e.g. triton)
# JIT-compile at run time.  apt lists are removed in the same layer.
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    apt-get update --yes && \
    apt-get install --yes --no-install-recommends \
        g++ \
        git \
        make \
        python3 \
        python3-dev \
        python3-pip \
        python3-venv && \
    rm -rf /var/lib/apt/lists/*

RUN --mount=type=cache,target=/root/.cache,sharing=locked \
    pip3 install --upgrade pip setuptools && \
    pip3 install cuda-python==11.8.0 && \
    pip3 install torch torchvision torchaudio \
        --extra-index-url=https://download.pytorch.org/whl/cu118 && \
    pip3 install bitsandbytes && \
# Workaround: overwrite the CPU fallback library with the CUDA 11.8 backend
# so bitsandbytes uses the GPU even if its environment detection fails.
    cp /usr/local/lib/python3.10/dist-packages/bitsandbytes/libbitsandbytes_cuda118.so \
       /usr/local/lib/python3.10/dist-packages/bitsandbytes/libbitsandbytes_cpu.so && \
    pip3 install \
        accelerate \
        diffusers \
        huggingface-hub \
        runpod \
        rwkv \
        safetensors \
        sentencepiece \
        triton \
        xformers && \
    pip3 install git+https://github.com/huggingface/transformers.git

# Web UI, the prebuilt GPTQ-for-LLaMa wheel, and the pre-downloaded model.
COPY --from=builder /workspace/text-generation-webui /workspace/text-generation-webui

RUN --mount=type=cache,target=/root/.cache,sharing=locked \
    cd /workspace/text-generation-webui && \
    pip3 install -r requirements.txt && \
    (cd extensions/api && pip3 install -r requirements.txt) && \
    (cd repositories/GPTQ-for-LLaMa && pip3 install -r requirements.txt) && \
    pip3 install /workspace/text-generation-webui/repositories/GPTQ-for-LLaMa/*.whl

COPY runpod_infer.py /workspace/
# --chmod sets the execute bit at copy time, avoiding a separate chmod layer.
COPY --chmod=755 start.sh /

# NOTE(review): the container runs as root; runpod serverless workers
# conventionally expect this, but add a non-root USER if the platform allows.
CMD [ "/start.sh" ]