# GPT4ALL / app.py
import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download
# Download the quantized GGJT model weights from the Hugging Face Hub into the working directory
hf_hub_download(repo_id="LLukas22/gpt4all-lora-quantized-ggjt", filename="ggjt-model.bin", local_dir=".")
# Load the model with llama.cpp, using 2 CPU threads
llm = Llama(model_path="./ggjt-model.bin", n_threads=2)
def chat(prompt):
    # Generate a completion and return only the generated text
    resp = llm(prompt)
    return resp['choices'][0]['text']
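
# A variant of chat() with explicit sampling controls, shown as a sketch of how the
# llama-cpp-python completion call can be tuned. The parameter values and the "###"
# stop sequence are illustrative assumptions (Alpaca-style prompts), not settings
# used by the original Space; the interface below still uses chat().
def chat_tuned(prompt, max_tokens=128, temperature=0.7):
    resp = llm(prompt, max_tokens=max_tokens, temperature=temperature, stop=["###"])
    return resp['choices'][0]['text']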
gr.Interface(
    fn=chat,
    inputs="text",
    outputs="text",
    description="Assistant-style large language model with ~800k GPT-3.5-Turbo Generations",
    examples=[['What is a three word topic describing the following keywords: baseball, football, soccer'], ['List 10 dogs.'], ['Reverse a string in python.']],
).launch()