diff --git a/loadmodel/Containerfile-model b/loadmodel/Containerfile-model
new file mode 100644
index 00000000..cfa2b6be
--- /dev/null
+++ b/loadmodel/Containerfile-model
@@ -0,0 +1,14 @@
+# This containerfile downloads a model into a minimal (ubi-micro) image
+FROM registry.access.redhat.com/ubi9/ubi:9.3-1476 AS model
+WORKDIR /model
+ARG MODEL_URL=https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_K_M.gguf?download=true
+ARG MODEL_FILE=llama-2-7b-chat.Q4_K_M.gguf
+RUN dnf install -y wget && dnf clean all
+RUN wget -O "$MODEL_FILE" "$MODEL_URL"
+
+FROM registry.access.redhat.com/ubi9-micro:9.3-9
+WORKDIR /model
+ARG MODEL_URL=https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_K_M.gguf?download=true
+ARG MODEL_FILE=llama-2-7b-chat.Q4_K_M.gguf
+ENV MODEL_FILE=$MODEL_FILE
+COPY --from=model /model/${MODEL_FILE} /model/
diff --git a/Containerfile-rh b/loadmodel/Containerfile-with-app
similarity index 57%
rename from Containerfile-rh
rename to loadmodel/Containerfile-with-app
index d4add508..d9042df3 100644
--- a/Containerfile-rh
+++ b/loadmodel/Containerfile-with-app
@@ -1,20 +1,19 @@
+# This containerfile downloads a model and embeds a text generation application
 FROM registry.access.redhat.com/ubi9:9.3-1361.1699548029 AS model
-
+WORKDIR /locallm
 ARG MODEL_URL=https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_K_M.gguf?download=true
 ARG MODEL_FILE=llama-2-7b-chat.Q4_K_M.gguf
-
 RUN dnf install -y wget
 RUN wget -O $MODEL_FILE $MODEL_URL
 
 FROM registry.access.redhat.com/ubi9/python-311:1-34.1699551735
-
+WORKDIR /locallm
 ARG MODEL_FILE=llama-2-7b-chat.Q4_K_M.gguf
-ENV MODEL_FILE=$MODEL_FILE
-COPY --from=model $MODEL_FILE .
-ADD app.py .
-ADD chat.py .
-ADD requirements.txt .
-ADD run_locallm.py . 
+COPY requirements.txt /locallm/requirements.txt
 RUN pip install --upgrade pip
-RUN pip install --no-cache-dir --upgrade -r requirements.txt
-CMD python app.py
+RUN pip install --no-cache-dir --upgrade -r /locallm/requirements.txt
+ENV MODEL_FILE=$MODEL_FILE
+COPY --from=model /locallm/${MODEL_FILE} /locallm/
+COPY src/ /locallm
+ENTRYPOINT [ "python", "app.py" ]
+