Update Dockerfile
Dockerfile (changed): +6 -1
@@ -1,5 +1,7 @@
 # Using the specified base image that's suited for llama-cpp-python
-FROM ghcr.io/abetlen/llama-cpp-python:latest
+FROM ghcr.io/abetlen/llama-cpp-python:latest
+
+VOLUME ["/models"]
 
 # Environment variables for model details
 ENV MODEL_NAME="llava-1.6-mistral-7b-gguf"
@@ -24,6 +26,9 @@ RUN mkdir -p /models
 RUN curl -L "${MODEL_URL}" -o /models/${DEFAULT_MODEL_FILE} && \
     curl -L "${CLIP_MODEL_URL}" -o /models/${DEFAULT_CLIP_MODEL_FILE}
 
+
+ENV HOST=0.0.0.0
+ENV PORT=8000
 # Expose the port the server will run on
 EXPOSE 8000
 
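The commit declares /models as a volume and sets HOST/PORT environment variables, presumably so the llama-cpp-python server binds on all interfaces at the exposed port and so the downloaded GGUF files can be persisted or overridden from the host. A minimal usage sketch under those assumptions (the llava-server image tag and the llava-models volume name are illustrative, not part of this commit):

# Build the image from this Dockerfile (tag name is an assumption)
docker build -t llava-server .

# Run it, keeping the model files in a named volume mounted at /models
# (the VOLUME declared above) and publishing the port from EXPOSE 8000.
docker run --rm -v llava-models:/models -p 8000:8000 llava-server

With HOST=0.0.0.0 the server should be reachable from outside the container at http://localhost:8000; the named volume lets the baked-in model files survive container restarts or be swapped for locally provided ones.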