Spaces:
Sleeping
Sleeping
first commit
Browse files- Dockerfile +24 -0
- app.py +48 -0
- requirements.txt +6 -0
Dockerfile
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Use an official Python runtime as the base image
FROM python:3.9-slim

# Set the working directory in the container
WORKDIR /app

# Install system packages using apt-get.
# FIX: the original ran `apt-get install -y git grep cut`. `cut` is not a
# Debian package (it is part of coreutils, already in the base image) and
# `grep` is likewise preinstalled — the bogus package name made this layer
# fail the whole build. Only git actually needs installing.
RUN apt-get update \
    && apt-get install -y --no-install-recommends git \
    && rm -rf /var/lib/apt/lists/*

# Copy the requirements file to the working directory
COPY requirements.txt .

# Install the Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy the entire project directory to the working directory
COPY . .

# Default port Hugging Face Spaces expects the app to serve on
EXPOSE 7860

CMD ["streamlit", "run", "app.py", "--server.port", "7860"]
app.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain import PromptTemplate, LLMChain
from gpt4all import GPT4All
from huggingface_hub import hf_hub_download
import streamlit as st
import os
import subprocess as sp
# One-off model bootstrap (currently disabled): instantiate / download the
# groovy GGML weights into the working directory.
#gpt=GPT4All("ggml-gpt4all-j-v1.3-groovy")
#hf_hub_download(repo_id="dnato/ggml-gpt4all-j-v1.3-groovy.bin", filename="ggml-gpt4all-j-v1.3-groovy.bin", local_dir=".")
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# Prompt template for the (disabled) chat chain: instructs the model to give
# short conversational answers; {question} is filled by LLMChain.
template = """
You are a friendly chatbot assistant that responds in a conversational
manner to users questions. Keep the answers short, unless specifically
asked by the user to elaborate on something.

Question: {question}

Answer:"""

# Expected on-disk location of the GGML model weights (see bootstrap above).
local_path = os.getcwd() + "/ggml-gpt4all-j-v1.3-groovy.bin"

prompt = PromptTemplate(template=template, input_variables=["question"])
# NOTE(review): this mid-module import shadows the `gpt4all.GPT4All` imported
# at the top — from here on, GPT4All refers to the LangChain wrapper. Since
# only the commented-out code uses either binding, consider removing the
# unused one once the model path is settled.
from langchain.llms import GPT4All
# LLM + chain construction — disabled until the model file is available
# (main() currently does not call llm_chain; see that function's comments).
#llm = GPT4All(
#    model=local_path,
#    callbacks=[StreamingStdOutCallbackHandler()]
#)

#llm_chain = LLMChain(prompt=prompt, llm=llm)
31 |
+
def main():
    """Render a minimal Streamlit chat UI: read a question, show a response.

    SECURITY FIX: the original passed the raw user input straight to
    subprocess.check_output(query), executing arbitrary user-supplied text
    as a command — a critical command-injection vulnerability (any visitor
    could run shell commands inside the container). It was almost certainly
    a debugging leftover, since the real llm_chain call sits commented out
    beside it. Until the LLM chain (disabled at module level) is re-enabled,
    display a safe placeholder instead of executing anything.
    """
    st.title("GPT4All Chatbot")

    # User input
    query = st.text_input("Enter your message:")

    # Generate response
    if st.button("Submit"):
        # Intended LLM path, currently disabled (llm_chain is commented out
        # at module level — TODO re-enable once the model file is in place):
        #response = llm_chain(query)
        #response = gptj.chat_completion(messages)
        #answer = response['choices'][0]['message']['content']
        response = f"LLM backend is not enabled yet; you asked: {query!r}"

        # Display the response
        st.text_area("Bot Response:", value=response, height=100)


if __name__ == "__main__":
    main()
requirements.txt
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
streamlit
|
2 |
+
gpt4all
|
3 |
+
langchain
|
4 |
+
huggingface
|
5 |
+
huggingface_hub
|
6 |
+
radon
|