update

- app.py +34 -6
- models/ggml-gpt4all-l13b-snoozy.bin +3 -0
app.py
CHANGED
@@ -1,15 +1,43 @@
-from langchain
-from transformers import AutoTokenizer, AutoModelForCausalLM
-import gradio as gr
+from langchain import PromptTemplate, LLMChain
 from langchain.llms import GPT4All
+from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+import gradio as gr
+import requests
+from pathlib import Path
+from tqdm import tqdm
+
+template = """Question: {question}
+
+Answer: Let's think step by step."""
+
+prompt = PromptTemplate(template=template, input_variables=["question"])
+
+local_path = './models/ggml-gpt4all-l13b-snoozy.bin'  # replace with your desired local file path
+Path(local_path).parent.mkdir(parents=True, exist_ok=True)
+
+# Example model. Check https://github.com/nomic-ai/pygpt4all for the latest models.
+url = 'http://gpt4all.io/models/ggml-gpt4all-l13b-snoozy.bin'
+
+# send a GET request to the URL to download the file. Stream since it's large
+response = requests.get(url, stream=True)
+
+# open the file in binary mode and write the contents of the response to it in chunks
+# This is a large file, so be prepared to wait.
+with open(local_path, 'wb') as f:
+    for chunk in tqdm(response.iter_content(chunk_size=8192)):
+        if chunk:
+            f.write(chunk)
 
-
+# Callbacks support token-wise streaming
+callbacks = [StreamingStdOutCallbackHandler()]
+# Verbose is required to pass to the callback manager
+llm = GPT4All(model=local_path, callbacks=callbacks, verbose=True)
 
-
+llm_chain = LLMChain(prompt=prompt, llm=llm)
 
 # Define the Gradio interface
 def chatbot_interface(input_text):
-    response =
+    response = llm_chain.run(input_text)
     return response
 
 # Define the Gradio app
models/ggml-gpt4all-l13b-snoozy.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bd7b54ce0079c451e51ff79a1d20aafd78782933e42b771cc82ff47579c8c09
+size 169508864
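
These three lines are a Git LFS pointer, not the model weights themselves: oid names the blob on the LFS server and size is its byte count, and running git lfs pull in a checkout is what materializes the real binary. A small sketch (an assumed convenience check, not part of the commit) for telling a still-unresolved pointer apart from the downloaded file:

    # Sketch: detect whether the checked-out .bin is still an LFS pointer.
    # Pointer files are tiny text files that start with the spec line; the
    # resolved model binary is orders of magnitude larger.
    from pathlib import Path

    LFS_SPEC = "version https://git-lfs.github.com/spec/v1"

    def is_lfs_pointer(path: str) -> bool:
        p = Path(path)
        if not p.exists() or p.stat().st_size > 1024:
            return False  # missing, or too large to be a pointer file
        return p.read_text(errors="ignore").startswith(LFS_SPEC)

    print(is_lfs_pointer('./models/ggml-gpt4all-l13b-snoozy.bin'))
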