Satyam-Singh committed on
Commit e89e37f
1 Parent(s): 86babc4

Update app.py

Files changed (1):
  1. app.py +20 -1
app.py CHANGED
@@ -1,3 +1,22 @@
  import gradio as gr
+ from transformers import pipeline
 
- gr.load("models/mistralai/Mistral-7B-Instruct-v0.1").launch()
+ # Load the Meta-Llama-3.1-8B-Instruct-GGUF model
+ model_name = "lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF"
+ model = pipeline("text-generation", model=model_name, device=-1)  # -1 for CPU
+
+ # Define the Gradio interface
+ def generate_text(prompt):
+     output = model(prompt)[0]["generated_text"]
+     return output
+
+ iface = gr.Interface(
+     fn=generate_text,
+     inputs=gr.Textbox(label="Prompt"),
+     outputs=gr.Textbox(label="Generated Text"),
+     title="Meta-Llama-3.1-8B-Instruct-GGUF Text Generation",
+     description="Enter a prompt to generate text using the Meta-Llama-3.1-8B-Instruct-GGUF model.",
+ )
+
+ # Launch the Gradio app
+ iface.launch()
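
Note on the new loader: the repository referenced by model_name ships GGUF weights rather than a standard transformers checkpoint, so pipeline("text-generation", model=model_name) will likely fail to resolve a config on its own. Recent transformers releases can load GGUF weights when the file is named explicitly via gguf_file. The sketch below illustrates that route under stated assumptions: the quantization filename is a guess and should be taken from the repo's file list, and generate_text/max_new_tokens are illustrative choices, not part of the commit.

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

repo_id = "lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF"
gguf_file = "Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf"  # assumed filename; pick one from the repo's file list

# transformers dequantizes the GGUF weights into a regular torch model at load time
tokenizer = AutoTokenizer.from_pretrained(repo_id, gguf_file=gguf_file)
model = AutoModelForCausalLM.from_pretrained(repo_id, gguf_file=gguf_file)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)  # -1 = CPU

def generate_text(prompt):
    # Cap generation length so CPU inference stays responsive
    return generator(prompt, max_new_tokens=256)[0]["generated_text"]

gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Prompt"),
    outputs=gr.Textbox(label="Generated Text"),
    title="Meta-Llama-3.1-8B-Instruct-GGUF Text Generation",
).launch()

Because this path dequantizes the weights to full precision, a GGUF-native runtime such as llama-cpp-python is a common alternative when the goal is to keep the model quantized on CPU.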