Update app.py
Browse files
app.py
CHANGED
@@ -1,10 +1,9 @@
|
|
1 |
import pandas as pd
|
2 |
import numpy as np
|
3 |
import gradio as gr
|
4 |
-
|
5 |
-
|
6 |
-
import streamlit as st
|
7 |
from transformers import AutoModelForMultipleChoice, AutoTokenizer
|
|
|
8 |
|
9 |
# Load the model and tokenizer
|
10 |
model_path = "/kaggle/input/deberta-v3-large-hf-weights"
|
@@ -43,32 +42,10 @@ iface = gr.Interface(
|
|
43 |
description="Enter the prompt and options (A to E) below and get predictions.",
|
44 |
)
|
45 |
|
46 |
-
#
|
47 |
-
|
48 |
-
st.title("LLM Science Exam Demo")
|
49 |
-
st.markdown("Enter the prompt and options (A to E) below and get predictions.")
|
50 |
-
|
51 |
-
prompt = st.text_area("Prompt", value="This is the prompt", height=100)
|
52 |
-
option_a = st.text_input("Option A", value="Option A text")
|
53 |
-
option_b = st.text_input("Option B", value="Option B text")
|
54 |
-
option_c = st.text_input("Option C", value="Option C text")
|
55 |
-
option_d = st.text_input("Option D", value="Option D text")
|
56 |
-
option_e = st.text_input("Option E", value="Option E text")
|
57 |
-
|
58 |
-
sample_data = {
|
59 |
-
"prompt": prompt,
|
60 |
-
"A": option_a,
|
61 |
-
"B": option_b,
|
62 |
-
"C": option_c,
|
63 |
-
"D": option_d,
|
64 |
-
"E": option_e,
|
65 |
-
}
|
66 |
-
|
67 |
-
predictions = iface.process(sample_data)
|
68 |
-
st.markdown("### Predictions:")
|
69 |
-
st.write(predictions)
|
70 |
|
71 |
-
#
|
72 |
-
|
73 |
-
|
74 |
-
|
|
|
1 |
import pandas as pd
|
2 |
import numpy as np
|
3 |
import gradio as gr
|
4 |
+
import torch
|
|
|
|
|
5 |
from transformers import AutoModelForMultipleChoice, AutoTokenizer
|
6 |
+
from huggingface_hub import hf_hub_url, Repository
|
7 |
|
8 |
# Load the model and tokenizer
|
9 |
model_path = "/kaggle/input/deberta-v3-large-hf-weights"
|
|
|
42 |
description="Enter the prompt and options (A to E) below and get predictions.",
|
43 |
)
|
44 |
|
45 |
+
# Launch the interface (share=True also creates a temporary public share link, not only a local server)
|
46 |
+
iface.launch(share=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
47 |
|
48 |
+
# Once you have verified that the interface works as expected, proceed to create the Hugging Face space:
# NOTE(review): the snippet below does not match the huggingface_hub API — hf_hub_url() requires both a
# repo_id and a filename, and Repository has no from_hf_hub()/push() methods; pushing is normally done via
# model.push_to_hub(...)/tokenizer.push_to_hub(...) or Repository(...).push_to_hub(). Verify against the docs.
|
49 |
+
repo_url = hf_hub_url("your-username/your-repo-name")
|
50 |
+
repo = Repository.from_hf_hub(repo_url)
|
51 |
+
repo.push(path="./my_model", model=model, tokenizer=tokenizer, config=model.config)
|