Spaces:
Sleeping
Sleeping
kz209
committed on
Commit
β’
f9f4138
1
Parent(s):
111801d
switch models
Browse files
- app.py +2 -1
- pages/summarization_playground.py +15 -1
app.py
CHANGED
@@ -3,6 +3,7 @@ import gradio as gr
|
|
3 |
from pages.arena import create_arena
|
4 |
from pages.summarization_playground import create_summarization_interface
|
5 |
from pages.leaderboard import create_leaderboard
|
|
|
6 |
|
7 |
def welcome_message():
|
8 |
return """
|
@@ -27,7 +28,7 @@ with gr.Blocks() as demo:
|
|
27 |
with gr.TabItem("Leaderboard"):
|
28 |
create_leaderboard()
|
29 |
with gr.TabItem("Batch_Evaluation"):
|
30 |
-
|
31 |
with gr.TabItem("Demo_of_Streaming"):
|
32 |
create_arena()
|
33 |
|
|
|
3 |
from pages.arena import create_arena
|
4 |
from pages.summarization_playground import create_summarization_interface
|
5 |
from pages.leaderboard import create_leaderboard
|
6 |
+
from pages.batch_evaluation import create_batch_evaluation_interface
|
7 |
|
8 |
def welcome_message():
|
9 |
return """
|
|
|
28 |
with gr.TabItem("Leaderboard"):
|
29 |
create_leaderboard()
|
30 |
with gr.TabItem("Batch_Evaluation"):
|
31 |
+
create_batch_evaluation_interface()
|
32 |
with gr.TabItem("Demo_of_Streaming"):
|
33 |
create_arena()
|
34 |
|
pages/summarization_playground.py
CHANGED
@@ -5,8 +5,12 @@ import random
|
|
5 |
from utils.model import Model
|
6 |
from utils.data import dataset
|
7 |
|
|
|
|
|
|
|
8 |
load_dotenv()
|
9 |
|
|
|
10 |
model = {model_name: Model(model_name) for model_name in Model.__model_list__}
|
11 |
|
12 |
random_label = 'π Random dialogue from dataset'
|
@@ -28,7 +32,17 @@ Back in Boston, Kidd is going to rely on Lively even more. He'll play close to 3
|
|
28 |
}
|
29 |
|
30 |
def generate_answer(sources, model, model_name, prompt):
    """Run the selected model on the prompt-prefixed source text.

    Args:
        sources: Input text (e.g. a dialogue) to be summarized.
        model: Mapping of model name -> model wrapper exposing ``.gen()``.
        model_name: Key into ``model`` selecting which model to run.
        prompt: Instruction text placed before ``sources``.

    Returns:
        The string produced by the selected model's ``.gen()`` call.
    """
    # Join prompt and sources with a single newline, then delegate to
    # the chosen model's generation method.
    query = '\n'.join((prompt, sources))
    return model[model_name].gen(query)
|
|
|
5 |
from utils.model import Model
|
6 |
from utils.data import dataset
|
7 |
|
8 |
+
import gc
|
9 |
+
import torch
|
10 |
+
|
11 |
load_dotenv()
|
12 |
|
13 |
+
__model_on_gpu__ = ''
|
14 |
model = {model_name: Model(model_name) for model_name in Model.__model_list__}
|
15 |
|
16 |
random_label = 'π Random dialogue from dataset'
|
|
|
32 |
}
|
33 |
|
34 |
def generate_answer(sources, model, model_name, prompt):
    """Generate an answer for *sources* with the named model, swapping
    models so that only one occupies the GPU at a time.

    The currently resident model is tracked in the module-level
    ``__model_on_gpu__`` string; when a different model is requested,
    the resident one is moved to CPU and CUDA memory is released before
    the new one is moved to GPU.

    Args:
        sources: Input text (e.g. a dialogue) to be summarized.
        model: Mapping of model name -> model wrapper exposing
            ``.cpu()``, ``.gpu()`` and ``.gen()``.
        model_name: Key into ``model`` selecting which model to run.
        prompt: Instruction text placed before ``sources``.

    Returns:
        The string produced by the selected model's ``.gen()`` call.
    """
    global __model_on_gpu__

    content = prompt + '\n' + sources + '\n\n'

    if __model_on_gpu__ != model_name:
        # Evict the previously resident model first. On the very first
        # call __model_on_gpu__ is '' and has no entry in *model*, so
        # guard the lookup to avoid a KeyError.
        if __model_on_gpu__ in model:
            model[__model_on_gpu__].cpu()
            gc.collect()
            torch.cuda.empty_cache()

        model[model_name].gpu()
        __model_on_gpu__ = model_name

    answer = model[model_name].gen(content)

    return answer
|