try5
Browse files- app.py +6 -3
- requirements.txt +1 -0
app.py
CHANGED
@@ -1,7 +1,8 @@
|
|
1 |
from langchain import PromptTemplate, LLMChain
|
2 |
-
from
|
3 |
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
|
4 |
-
|
|
|
5 |
template = """
|
6 |
You are a friendly chatbot assistant that responds in a conversational
|
7 |
manner to users questions. Keep the answers short, unless specifically
|
@@ -10,8 +11,10 @@ asked by the user to elaborate on something.
|
|
10 |
Question: {question}
|
11 |
|
12 |
Answer:"""
|
13 |
-
prompt = PromptTemplate(template=template, input_variables=["question"])
|
14 |
|
|
|
|
|
|
|
15 |
llm = GPT4All(
|
16 |
model='/root/.cache/gpt4all/ggml-gpt4all-j-v1.3-groovy.bin',
|
17 |
callbacks=[StreamingStdOutCallbackHandler()]
|
|
|
1 |
from langchain import PromptTemplate, LLMChain
|
2 |
+
from gpt4all import GPT4All
|
3 |
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
|
4 |
+
import subprocess as sp
|
5 |
+
sp.run("ls")
|
6 |
template = """
|
7 |
You are a friendly chatbot assistant that responds in a conversational
|
8 |
manner to users questions. Keep the answers short, unless specifically
|
|
|
11 |
Question: {question}
|
12 |
|
13 |
Answer:"""
|
|
|
14 |
|
15 |
+
gpt=GPT4All("ggml-gpt4all-j-v1.3-groovy")
|
16 |
+
prompt = PromptTemplate(template=template, input_variables=["question"])
|
17 |
+
from langchain.llms import GPT4All
|
18 |
llm = GPT4All(
|
19 |
model='/root/.cache/gpt4all/ggml-gpt4all-j-v1.3-groovy.bin',
|
20 |
callbacks=[StreamingStdOutCallbackHandler()]
|
requirements.txt
CHANGED
@@ -1,3 +1,4 @@
|
|
1 |
streamlit
|
2 |
gpt4all
|
3 |
langchain
|
|
|
|
1 |
streamlit
|
2 |
gpt4all
|
3 |
langchain
|
4 |
+
# NOTE(review): "subprocess" removed from requirements — it is part of the Python
# standard library and must not be pip-installed; a same-named PyPI package would
# be an unrelated (potentially typosquatted) distribution.
|