gaur3009 committed (verified)
Commit d99b89b · 1 Parent(s): 2bdb809

Update app.py

Files changed (1)
  1. app.py +21 -0
app.py CHANGED
@@ -0,0 +1,21 @@
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import gradio as gr
+
+ model = AutoModelForCausalLM.from_pretrained("meta-llama/llama-3-1-405b", torch_dtype=torch.bfloat16, device_map="auto")  # shard the 405B weights across available devices
+ tokenizer = AutoTokenizer.from_pretrained("meta-llama/llama-3-1-405b")
+
+ def generate_text(prompt):
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)  # tokenize the prompt and move it to the model's device
+     output = model.generate(**inputs, max_new_tokens=128)  # cap the generated length
+     return tokenizer.decode(output[0], skip_special_tokens=True)  # decode the first generated sequence
+
+ demo = gr.Interface(
+     fn=generate_text,
+     inputs="text",
+     outputs="text",
+     title="LLaMA Text Generation",
+     description="Enter a prompt to generate text using the LLaMA model."
+ )
+
+ demo.launch()
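
Below is a minimal sketch, assuming the app is running locally on Gradio's default port (7860) and that the gradio_client package is installed, of how the interface could be exercised outside the browser; the URL and prompt are illustrative:

from gradio_client import Client

# Connect to the locally running demo (a hosted Space id would also work here)
client = Client("http://127.0.0.1:7860/")

# gr.Interface exposes its single function under the default endpoint "/predict"
result = client.predict("Once upon a time", api_name="/predict")
print(result)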