ARG DEV_IMAGE=nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04
ARG BASE_IMAGE=nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04
#ARG BASE_IMAGE=nvidia/cuda:11.6.2-cudnn8-devel-ubuntu20.04
#ARG BASE_IMAGE=runpod/pytorch:3.10-2.0.0-117
#ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:23.03-py3
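
# Build stage: install the CUDA toolchain, compile the GPTQ-for-LLaMa kernels, and pre-download the model weights.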
FROM ${DEV_IMAGE} as builder
ARG MODEL_NAME
ENV MODEL_NAME=${MODEL_NAME}
WORKDIR /
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
ENV DEBIAN_FRONTEND=noninteractive \
    SHELL=/bin/bash
RUN apt-get update --yes && \
    # apt-get upgrade is run to patch known vulnerabilities in packages, as the
    # ubuntu base image is sometimes rebuilt too seldom (less than once a month)
    apt-get upgrade --yes && \
    apt-get install --yes --no-install-recommends \
    build-essential \
    cmake \
    ca-certificates \
    git \
    git-lfs \
    wget \
    curl \
    bash \
    # libgl1 \
    software-properties-common \
    openssh-server && \
    apt-get clean && rm -rf /var/lib/apt/lists/* && \
    echo "en_US.UTF-8 UTF-8" > /etc/locale.gen
#RUN apt-key del 7fa2af80 && \
# apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub
#
#RUN add-apt-repository ppa:deadsnakes/ppa && \
# apt-get install python3.10 python3.10-dev python3.10-venv python3-pip -y --no-install-recommends && \
# update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.10 1 && \
# update-alternatives --install /usr/bin/python python /usr/bin/python3 1 && \
## update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
# update-alternatives --set python3 /usr/bin/python3.10 && \
# update-alternatives --set python /usr/bin/python3 && \
# apt-get clean && rm -rf /var/lib/apt/lists/*
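# Use the distro-provided Python 3 instead of the deadsnakes build commented out above.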
RUN apt-get update --yes && \
    apt-get install --yes --no-install-recommends \
    python3 python3-dev python3-venv python3-pip && \
    apt-get clean && rm -rf /var/lib/apt/lists/*
RUN pip3 install --upgrade pip && \
    pip3 install torch torchvision torchaudio --extra-index-url=https://download.pytorch.org/whl/cu118 && \
    pip3 install bitsandbytes && \
    pip3 install safetensors && \
    pip3 install sentencepiece && \
    pip3 install diffusers && \
    pip3 install accelerate xformers triton && \
    pip3 install git+https://github.com/huggingface/transformers.git && \
    pip3 install huggingface-hub && \
    pip3 install runpod && \
    pip3 cache purge
RUN mkdir -p /workspace
WORKDIR /workspace
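# Build the GPTQ CUDA extension for a broad set of GPU architectures (with a PTX fallback for newer cards).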
ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX"
RUN mkdir repositories && git clone --branch cuda --single-branch https://github.com/qwopqwop200/GPTQ-for-LLaMa.git repositories/GPTQ-for-LLaMa && \
    # (cd repositories/GPTQ-for-LLaMa && git reset --hard 437154dd434c3f9d5c9c4e6f401d6d71116ac248) && \
    #RUN mkdir repositories && git clone --depth 1 https://github.com/AlpinDale/gptq-gptj.git repositories/GPTQ-for-LLaMa && \
    (cd repositories/GPTQ-for-LLaMa && python3 setup_cuda.py install)
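# Pre-fetch the model into the Hugging Face cache so the final image ships with the weights baked in.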
COPY model_fetcher.py /workspace/
RUN python3 model_fetcher.py --model_name=${MODEL_NAME}
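
# Runtime stage: a slimmer CUDA runtime image; the GPTQ-for-LLaMa checkout and the model cache are copied in from the builder.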
FROM ${BASE_IMAGE}

# Re-declare MODEL_NAME in this stage; build args do not carry across FROM boundaries,
# so without this the shell-form CMD below would expand ${MODEL_NAME} to an empty string.
ARG MODEL_NAME
ENV MODEL_NAME=${MODEL_NAME}

RUN mkdir -p /workspace
WORKDIR /workspace
RUN apt-get update --yes && \
    apt-get install --yes --no-install-recommends \
    python3 python3-dev python3-venv python3-pip && \
    apt-get clean && rm -rf /var/lib/apt/lists/*
RUN pip3 install --upgrade pip && \
    pip3 install torch torchvision torchaudio --extra-index-url=https://download.pytorch.org/whl/cu118 && \
    pip3 install bitsandbytes && \
    pip3 install safetensors && \
    pip3 install sentencepiece && \
    pip3 install diffusers && \
    pip3 install accelerate xformers triton && \
    pip3 install git+https://github.com/huggingface/transformers.git && \
    pip3 install rwkv && \
    pip3 install huggingface-hub && \
    pip3 install runpod && \
    pip3 cache purge
RUN mkdir -p /workspace/repositories && mkdir -p /root/.cache/huggingface
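# Carry over the GPTQ-for-LLaMa checkout and the pre-downloaded model cache from the build stage.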
COPY --from=builder /workspace/repositories /workspace/repositories/
COPY --from=builder /root/.cache/huggingface /root/.cache/huggingface
#RUN git lfs install && \
# git clone --depth 1 https://huggingface.co/${MODEL_NAME}
COPY model_fetcher.py /workspace/
COPY runpod_infer.py /workspace/
COPY RWKV.py /workspace/
COPY test_input.json /workspace/
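# Start the RunPod serverless handler; ${MODEL_NAME} is expanded at container start by the shell-form CMD.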
CMD python3 -u runpod_infer.py --model_name=${MODEL_NAME}