Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,15 +1,16 @@
|
|
1 |
import gradio as gr
|
2 |
from llama_cpp import Llama
|
3 |
from huggingface_hub import hf_hub_download
|
|
|
4 |
|
5 |
hf_hub_download(repo_id="LLukas22/gpt4all-lora-quantized-ggjt", filename="ggjt-model.bin", local_dir=".")
|
6 |
|
7 |
-
llm = Llama(model_path="./ggjt-model.bin", n_threads=
|
8 |
|
9 |
def chat(input):
|
10 |
resp = llm(input)
|
11 |
return resp['choices'][0]['text']
|
12 |
|
13 |
-
g = gr.Interface(fn=chat, inputs="text", outputs="text", title="GPT4ALL", description="gpt4all: an ecosystem of open-source chatbots trained on a massive collections of clean assistant data including code, stories and dialogue"
|
14 |
g.queue(concurrency_count=1)
|
15 |
g.launch()
|
|
|
1 |
import gradio as gr
|
2 |
from llama_cpp import Llama
|
3 |
from huggingface_hub import hf_hub_download
|
4 |
+
import multiprocessing
|
5 |
|
6 |
# Fetch the quantized GPT4All model file into the working directory.
# hf_hub_download is idempotent: it reuses the cached/downloaded file on restart.
hf_hub_download(repo_id="LLukas22/gpt4all-lora-quantized-ggjt", filename="ggjt-model.bin", local_dir=".")

# Load the model once at module import so every chat request reuses it.
# n_threads=cpu_count() uses all available cores for token generation;
# NOTE(review): on shared hosts this may oversubscribe — confirm it is desired.
llm = Llama(model_path="./ggjt-model.bin", n_threads=multiprocessing.cpu_count())
|
9 |
|
10 |
def chat(input):
    """Run one prompt through the local Llama model and return its text.

    The parameter is named ``input`` (shadowing the builtin) to keep the
    existing call interface; Gradio supplies the textbox value positionally.
    """
    completion = llm(input)
    first_choice = completion['choices'][0]
    return first_choice['text']
|
13 |
|
14 |
+
# Wire the chat function to a simple text-in/text-out Gradio UI.
g = gr.Interface(fn=chat, inputs="text", outputs="text", title="GPT4ALL", description="gpt4all: an ecosystem of open-source chatbots trained on a massive collections of clean assistant data including code, stories and dialogue")
# Serialize requests: the single shared Llama instance is not safe to call
# concurrently. NOTE(review): `concurrency_count` is Gradio 3.x API — it was
# removed in Gradio 4; confirm the pinned gradio version before upgrading.
g.queue(concurrency_count=1)
# Start the web server (blocks until the process is stopped).
g.launch()
|