Update app.py
app.py CHANGED
@@ -1,9 +1,10 @@
-from transformers import GPT2LMHeadModel, GPT2Tokenizer
 import gradio as gr
 
-# Load
-
-
+# Load model directly
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
+model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
 
 def eval_text(text):
     # Encode the input text
@@ -29,6 +30,6 @@ def eval_text(text):
     return(f"Result: {generation[0]['generated_text']}")
 
 
-demo = gr.Interface(fn=eval_text, inputs="text", outputs="text", title="
+demo = gr.Interface(fn=eval_text, inputs="text", outputs="text", title="Llama2")
 
 demo.launch(share=True)
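Putting the two hunks together, here is a minimal sketch of what the updated app.py could look like end to end. The body of eval_text is not part of the diff, so the version shown here is an assumption: it presumes a transformers text-generation pipeline, which is one way to produce the generation[0]['generated_text'] structure the return statement reads; the real file may instead call model.generate and tokenizer.decode directly.

import gradio as gr

# Load model directly (from the diff). Note: this checkpoint is gated on the
# Hugging Face Hub, so loading it requires an access token for an account that
# has accepted the Llama 2 license.
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")

# Assumed glue code: wrap the loaded model and tokenizer in a text-generation
# pipeline, which returns a list of dicts keyed by 'generated_text'.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

def eval_text(text):
    # Assumed body: generate a continuation of the input text.
    generation = generator(text, max_new_tokens=128)
    return f"Result: {generation[0]['generated_text']}"

demo = gr.Interface(fn=eval_text, inputs="text", outputs="text", title="Llama2")

demo.launch(share=True)

As a practical aside, loading a 7B chat model with plain from_pretrained keeps full-precision weights in memory, so a Space running this app would need substantial RAM or a GPU (possibly with quantization) to serve requests.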