chansung committed
Commit aa17481 · verified · 1 Parent(s): ddca22a

Update app.py

Files changed (1):
  1. app.py +0 -187
app.py CHANGED
@@ -8,144 +8,6 @@ from utils import load_prompt, setup_gemini_client
  from configs.responses import SummaryResponses
  from google.genai import types
 
- def parse_args():
-     parser = argparse.ArgumentParser()
-     parser.add_argument("--ai-studio-api-key", type=str, default=os.getenv("GEMINI_API_KEY"))
-     parser.add_argument("--vertexai", action="store_true", default=False)
-     parser.add_argument("--vertexai-project", type=str, default="gcp-ml-172005")
-     parser.add_argument("--vertexai-location", type=str, default="us-central1")
-     parser.add_argument("--model", type=str, default="gemini-2.0-flash", choices=["gemini-1.5-flash", "gemini-2.0-flash", "gemini-2.0-flash-001"])
-     parser.add_argument("--seed", type=int, default=2025)
-     parser.add_argument("--prompt-tmpl-path", type=str, default="configs/prompts.toml")
-     parser.add_argument("--css-path", type=str, default="statics/styles.css")
-     args = parser.parse_args()
-     return args
-
- def find_attached_file(filename, attached_files):
-     for file in attached_files:
-         if file['name'] == filename:
-             return file
-     return None
-
- async def echo(message, history, state, persona):
-     attached_file = None
-     system_instruction = Template(prompt_tmpl['summarization']['system_prompt']).safe_substitute(persona=persona)
-
-     if message['files']:
-         path_local = message['files'][0]
-         filename = os.path.basename(path_local)
-
-         attached_file = find_attached_file(filename, state["attached_files"])
-         if attached_file is None:
-             path_gcp = await client.files.upload(path=path_local)
-             state["attached_files"].append({
-                 "name": filename,
-                 "path_local": path_local,
-                 "gcp_entity": path_gcp,
-                 "path_gcp": path_gcp.name,
-                 "mime_type": path_gcp.mime_type,
-                 "expiration_time": path_gcp.expiration_time,
-             })
-             attached_file = path_gcp
-
-     user_message = [message['text']]
-     if attached_file: user_message.append(attached_file)
-
-     chat_history = state['messages']
-     chat_history = chat_history + user_message
-     state['messages'] = chat_history
-
-     response_chunks = ""
-     model_content_stream = await client.models.generate_content_stream(
-         model=args.model,
-         contents=state['messages'],
-         config=types.GenerateContentConfig(
-             system_instruction=system_instruction, seed=args.seed
-         ),
-     )
-     async for chunk in model_content_stream:
-         response_chunks += chunk.text
-         # When the model generates too fast, Gradio cannot render the stream in real time.
-         await asyncio.sleep(0.1)
-         yield (
-             response_chunks,
-             state,
-             state['summary_diff_history'][-1] if len(state['summary_diff_history']) > 1 else "",
-             state['summary_history'][-1] if len(state['summary_history']) > 1 else "",
-             gr.Slider(
-                 visible=False if len(state['summary_history']) <= 1 else True,
-                 interactive=False if len(state['summary_history']) <= 1 else True,
-             ),
-         )
-
-     # Generate the updated summary.
-     response = await client.models.generate_content(
-         model=args.model,
-         contents=[
-             Template(
-                 prompt_tmpl['summarization']['prompt']
-             ).safe_substitute(
-                 previous_summary=state['summary'],
-                 latest_conversation=str({"user": message['text'], "assistant": response_chunks})
-             )
-         ],
-         config=types.GenerateContentConfig(
-             system_instruction=system_instruction,
-             seed=args.seed,
-             response_mime_type='application/json',
-             response_schema=SummaryResponses
-         )
-     )
-
-     prev_summary = state['summary_history'][-1] if len(state['summary_history']) >= 1 else ""
-
-     state['summary'] = (
-         response.parsed.summary
-         if getattr(response.parsed, "summary", None) is not None
-         else response.text
-     )
-     state['summary_history'].append(
-         response.parsed.summary
-         if getattr(response.parsed, "summary", None) is not None
-         else response.text
-     )
-     state['summary_diff_history'].append(
-         [
-             (token[2:], token[0] if token[0] != " " else None)
-             for token in Differ().compare(prev_summary, state['summary'])
-         ]
-     )
-
-     yield (
-         response_chunks,
-         state,
-         state['summary_diff_history'][-1],
-         state['summary_history'][-1],
-         gr.Slider(
-             maximum=len(state['summary_history']),
-             value=len(state['summary_history']),
-             visible=False if len(state['summary_history']) == 1 else True,
-             interactive=True
-         ),
-     )
-
- def change_view_toggle(view_toggle):
-     if view_toggle == "Diff":
-         return (
-             gr.HighlightedText(visible=True),
-             gr.Markdown(visible=False)
-         )
-     else:
-         return (
-             gr.HighlightedText(visible=False),
-             gr.Markdown(visible=True)
-         )
-
- def navigate_to_summary(summary_num, state):
-     return (
-         state['summary_diff_history'][summary_num-1],
-         state['summary_history'][summary_num-1]
-     )
-
  def main(args):
      style_css = open(args.css_path, "r").read()
 
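Note on the removed logic above: echo() built each summary_diff_history entry with difflib.Differ, comparing the previous and new summaries character by character and converting each token into a (text, label) pair that gr.HighlightedText can color. A minimal standalone sketch of that transformation (the function name and sample inputs are illustrative, not from the original file):

from difflib import Differ

def summary_diff_tokens(prev_summary: str, new_summary: str):
    # Differ.compare over two strings walks them character by character;
    # each emitted token looks like "+ x", "- x", or "  x".
    # Keep the character (token[2:]) and turn the marker into a label:
    # "+" for insertions, "-" for deletions, None for unchanged text.
    return [
        (token[2:], token[0] if token[0] != " " else None)
        for token in Differ().compare(prev_summary, new_summary)
    ]

# The "+"/"-" labels line up with the color_map={"-": "red", "+": "green"}
# of the gr.HighlightedText component defined in the next hunk.
print(summary_diff_tokens("summary v1", "summary v2"))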
@@ -168,55 +30,6 @@ def main(args):
  gr.Markdown("# Adaptive Summarization")
  gr.Markdown("AdaptSum stands for Adaptive Summarization. This project focuses on developing an LLM-powered system for dynamic summarization. Instead of generating entirely new summaries with each update, the system intelligently identifies and modifies only the necessary parts of the existing summary. This approach aims to create a more efficient and fluid summarization process within a continuous chat interaction with an LLM.")
 
- with gr.Column():
-     with gr.Accordion("Adaptively Summarized Conversation", elem_id="adaptive-summary-accordion", open=False):
-         with gr.Row(elem_id="view-toggle-btn-container"):
-             view_toggle_btn = gr.Radio(
-                 choices=["Diff", "Markdown"],
-                 value="Markdown",
-                 interactive=True,
-                 elem_id="view-toggle-btn"
-             )
-
-         summary_diff = gr.HighlightedText(
-             label="Summary so far",
-             # value="No summary yet. As you chat with the assistant, the summary will be updated automatically.",
-             combine_adjacent=True,
-             show_legend=True,
-             color_map={"-": "red", "+": "green"},
-             elem_classes=["summary-window"],
-             visible=False
-         )
-
-         summary_md = gr.Markdown(
-             label="Summary so far",
-             value="No summary yet. As you chat with the assistant, the summary will be updated automatically.",
-             elem_classes=["summary-window"],
-             visible=True
-         )
-
-         summary_num = gr.Slider(label="summary history", minimum=1, maximum=1, step=1, show_reset_button=False, visible=False)
-
-         view_toggle_btn.change(change_view_toggle, inputs=[view_toggle_btn], outputs=[summary_diff, summary_md])
-         summary_num.release(navigate_to_summary, inputs=[summary_num, state], outputs=[summary_diff, summary_md])
-
- with gr.Column("persona-dropdown-container", elem_id="persona-dropdown-container"):
-     persona = gr.Dropdown(
-         ["expert", "novice", "regular practitioner", "high schooler"],
-         label="Summary Persona",
-         info="Control the tonality of the conversation.",
-         min_width="auto",
-     )
-
- with gr.Column("chat-window", elem_id="chat-window"):
-     gr.ChatInterface(
-         multimodal=True,
-         type="messages",
-         fn=echo,
-         additional_inputs=[state, persona],
-         additional_outputs=[state, summary_diff, summary_md, summary_num],
-     )
-
  return demo
 
  if __name__ == "__main__":
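For context on the summarization call removed in the first hunk: it used the google.genai structured-output path, passing response_mime_type='application/json' plus response_schema=SummaryResponses, then preferred the schema-parsed response.parsed.summary over raw response.text. A self-contained sketch of that pattern follows; the SummaryResponses model here is an assumed minimal stand-in (configs/responses.py is not part of this diff), and the API key and prompt are placeholders:

from pydantic import BaseModel
from google import genai
from google.genai import types

# Assumed minimal schema; the real one lives in configs/responses.py (not shown in this diff).
class SummaryResponses(BaseModel):
    summary: str

client = genai.Client(api_key="YOUR_GEMINI_API_KEY")  # placeholder credential

# Synchronous variant for brevity; the removed code awaited the call instead.
response = client.models.generate_content(
    model="gemini-2.0-flash",
    contents=["Update the previous summary with the latest conversation turn."],
    config=types.GenerateContentConfig(
        response_mime_type="application/json",
        response_schema=SummaryResponses,
    ),
)

# Prefer the schema-validated field, falling back to raw text,
# mirroring the getattr(...) guard in the removed echo() handler.
summary = (
    response.parsed.summary
    if getattr(response.parsed, "summary", None) is not None
    else response.text
)
print(summary)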
 