thedamn committed
Commit 539199b · 1 Parent(s): b604eb9
Files changed (2)
  1. app.py +4 -4
  2. requirements.txt +2 -1
app.py CHANGED
@@ -1,9 +1,9 @@
  from langchain import PromptTemplate, LLMChain
  from gpt4all import GPT4All
- gpt=GPT4All("ggml-gpt4all-j-v1.3-groovy")
+ from huggingface_hub import hf_hub_download
+ #gpt=GPT4All("ggml-gpt4all-j-v1.3-groovy")
+ hf_hub_download(repo_id="dnato/ggml-gpt4all-j-v1.3-groovy.bin", filename="ggml-gpt4all-j-v1.3-groovy.bin", local_dir=".")
  from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
- import subprocess as sp
- sp.check_output("ls")
  template = """
  You are a friendly chatbot assistant that responds in a conversational
  manner to users questions. Keep the answers short, unless specifically
@@ -17,7 +17,7 @@ Answer:"""
  prompt = PromptTemplate(template=template, input_variables=["question"])
  from langchain.llms import GPT4All
  llm = GPT4All(
- model='/root/.cache/gpt4all/ggml-gpt4all-j-v1.3-groovy.bin',
+ model='ggml-gpt4all-j-v1.3-groovy.bin',
  callbacks=[StreamingStdOutCallbackHandler()]
  )
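Taken together, the app.py changes drop the subprocess call and the hard-coded /root/.cache/gpt4all/ path in favour of an explicit hf_hub_download at startup. Below is a minimal sketch of how the post-commit file plausibly fits together, using only the legacy langchain and huggingface_hub APIs already imported in the diff; the middle of the prompt template (lines 10-16) and the chain wiring are outside the diff context, so those parts are paraphrased rather than taken from the commit.

# Sketch of the post-commit flow (assumed wiring; only the diffed lines above are confirmed).
from huggingface_hub import hf_hub_download
from langchain import PromptTemplate, LLMChain
from langchain.llms import GPT4All
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# Fetch the quantized GPT4All-J weights into the working directory, as app.py now does.
hf_hub_download(
    repo_id="dnato/ggml-gpt4all-j-v1.3-groovy.bin",
    filename="ggml-gpt4all-j-v1.3-groovy.bin",
    local_dir=".",
)

# Prompt template: lines 10-16 of app.py are not in the diff, so the middle is paraphrased.
template = """
You are a friendly chatbot assistant that responds in a conversational
manner to users questions. Keep the answers short, unless specifically
asked to elaborate.

Question: {question}

Answer:"""
prompt = PromptTemplate(template=template, input_variables=["question"])

# The LangChain wrapper now loads the file downloaded above instead of a cache path.
llm = GPT4All(
    model="ggml-gpt4all-j-v1.3-groovy.bin",
    callbacks=[StreamingStdOutCallbackHandler()],
)

# Chain wiring is not shown in the diff; this is the usual LLMChain pattern.
chain = LLMChain(prompt=prompt, llm=llm)
print(chain.run("What is GPT4All-J?"))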
 
requirements.txt CHANGED
@@ -1,4 +1,5 @@
  streamlit
  gpt4all
  langchain
- subprocess
+ huggingface
+ huggingface_hub
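The dependency change matches the code change: subprocess is part of the Python standard library and is not a pip-installable requirement, so removing it is correct, while huggingface_hub is the package that actually provides hf_hub_download (the extra huggingface entry is probably redundant). As a possible follow-up, not part of this commit: hf_hub_download returns the local path of the downloaded file, so app.py could reuse that return value instead of repeating the filename.

# Sketch of that follow-up (assumed, not in the commit).
from huggingface_hub import hf_hub_download
from langchain.llms import GPT4All
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# hf_hub_download returns the path to the downloaded file,
# so the filename only has to be spelled out once.
model_path = hf_hub_download(
    repo_id="dnato/ggml-gpt4all-j-v1.3-groovy.bin",
    filename="ggml-gpt4all-j-v1.3-groovy.bin",
    local_dir=".",
)
llm = GPT4All(model=model_path, callbacks=[StreamingStdOutCallbackHandler()])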