chansung committed · verified
Commit 7a61f75 · Parent(s): f78dcdf

Upload folder using huggingface_hub

Files changed (2):
1. README.md +24 -0
2. app.py +33 -23
README.md CHANGED
@@ -155,3 +155,27 @@ $ python main.py # or gradio main.py
 
 # Acknowledgments
 This is a project built during the Vertex sprints held by Google's ML Developer Programs team. We are grateful to have been granted a generous amount of GCP credits for this project.
+# AdaptSum
+
+AdaptSum stands for Adaptive Summarization. This project focuses on developing an LLM-powered system for dynamic summarization. Instead of generating an entirely new summary with each update, the system intelligently identifies and modifies only the necessary parts of the existing summary. This approach aims to create a more efficient and fluid summarization process within a continuous chat interaction with an LLM.
+
+# Instructions
+
+1. Install dependencies
+```shell
+$ pip install -r requirements.txt
+```
+
+2. Set up the Gemini API key
+```shell
+$ export GEMINI_API_KEY=xxxxx
+```
+> Note that the Gemini API key should be obtained from Google AI Studio. Vertex AI is not supported at the moment, because the Gemini SDK does not yet provide file-uploading functionality for Vertex AI.
+
+3. Run the Gradio app
+```shell
+$ python main.py # or gradio main.py
+```
+
+# Acknowledgments
+This is a project built during the Vertex sprints held by Google's ML Developer Programs team. We are grateful to have been granted a generous amount of GCP credits for this project.
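To make the adaptive idea above concrete, here is a minimal sketch of one update step, assuming the google-genai SDK used by this repo; the template, helper name, and model string are illustrative stand-ins, not the prompts actually shipped in app.py:

```python
import os
from string import Template

from google import genai

# Assumes GEMINI_API_KEY was exported as in step 2 of the instructions.
client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])

# Hypothetical stand-in for the project's real summarization prompt.
UPDATE_TMPL = Template(
    "Current summary:\n$previous_summary\n\n"
    "Latest exchange:\n$latest_conversation\n\n"
    "Revise ONLY the parts of the summary affected by the latest exchange."
)

def update_summary(previous_summary, user_msg, assistant_msg):
    """One adaptive step: patch the existing summary rather than rewrite it."""
    prompt = UPDATE_TMPL.safe_substitute(
        previous_summary=previous_summary,
        latest_conversation=str({"user": user_msg, "assistant": assistant_msg}),
    )
    # Model name is a placeholder; the app takes it from a CLI argument.
    response = client.models.generate_content(
        model="gemini-2.0-flash", contents=[prompt]
    )
    return response.text
```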
app.py CHANGED
@@ -27,10 +27,11 @@ def find_attached_file(filename, attached_files):
             return file
     return None
 
-async def echo(message, history, state, persona):
+async def echo(message, history, state, persona, use_generated_summaries):
     attached_file = None
     system_instruction = Template(prompt_tmpl['summarization']['system_prompt']).safe_substitute(persona=persona)
-
+    use_generated_summaries = True if use_generated_summaries == "Yes" else False
+
     if message['files']:
         path_local = message['files'][0]
         filename = os.path.basename(path_local)
@@ -38,31 +39,38 @@ async def echo(message, history, state, persona):
         attached_file = find_attached_file(filename, state["attached_files"])
         if attached_file is None:
             path_gcp = await client.files.upload(path=path_local)
-            path_wrap = types.Part.from_uri(
-                file_uri=path_gcp.uri, mime_type=path_gcp.mime_type
-            )
             state["attached_files"].append({
                 "name": filename,
                 "path_local": path_local,
                 "gcp_entity": path_gcp,
-                "path_gcp": path_wrap,
-                "mime_type": path_gcp.mime_type,
+                "path_gcp": path_gcp.name,
+                "mime_type": path_gcp.mime_type,
                 "expiration_time": path_gcp.expiration_time,
             })
-            attached_file = path_wrap
+            attached_file = path_gcp
 
-    user_message_parts = [types.Part.from_text(text=message['text'])]
-    if attached_file: user_message_parts.append(attached_file)
-    user_message = [types.Content(role='user', parts=user_message_parts)]
-
-    state['messages'] = state['messages'] + user_message
-    print(state['messages'])
+    user_message = [message['text']]
+    if attached_file: user_message.append(attached_file)
+
+    chat_history = state['messages']
+    chat_history = chat_history + user_message
+    state['messages'] = chat_history
 
     response_chunks = ""
+    model_contents = ""
+    if use_generated_summaries:
+        if "summary_history" in state and len(state["summary_history"]):
+            model_contents += state["summary_history"][-1]
+        else:
+            model_contents = state['messages']
+    else:
+        model_contents = state['messages']
     model_content_stream = await client.models.generate_content_stream(
         model=args.model,
-        contents=state['messages'],
-        config=types.GenerateContentConfig(seed=args.seed),
+        contents=model_contents,
+        config=types.GenerateContentConfig(
+            system_instruction=system_instruction, seed=args.seed
+        ),
     )
     async for chunk in model_content_stream:
         response_chunks += chunk.text
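The heart of this commit is the contents-selection branch added above: when the new "Feed back the generated summaries" toggle is on and a summary already exists, only the latest summary is sent to the model instead of the whole history. Below is a minimal sketch of that selection logic in isolation; the `state` shape follows the diff, while the helper name and sample values are hypothetical:

```python
# Sketch of the selection logic from the hunk above; state.get(...) with a
# truthiness check is equivalent to the diff's `"summary_history" in state
# and len(state["summary_history"])`.
def select_model_contents(state: dict, use_generated_summaries: bool):
    """Return what to feed the model: the latest rolling summary if the
    feedback toggle is on and one exists, otherwise the full history."""
    if use_generated_summaries and state.get("summary_history"):
        return state["summary_history"][-1]
    return state["messages"]

state = {
    "messages": ["hello", "hi there", "tell me more"],
    "summary_history": ["User greeted the assistant and asked for details."],
}
print(select_model_contents(state, use_generated_summaries=True))
# -> only the latest summary, keeping the prompt short
print(select_model_contents(state, use_generated_summaries=False))
# -> the whole conversation history
```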
@@ -80,10 +88,6 @@ async def echo(message, history, state, persona):
         ),
         gr.DownloadButton(visible=False)
     )
-
-    state['messages'] = state['messages'] + [
-        types.Content(role='model', parts=[types.Part.from_text(text=response_chunks)])
-    ]
 
     # make summary
     response = await client.models.generate_content(
@@ -92,7 +96,7 @@ async def echo(message, history, state, persona):
             Template(
                 prompt_tmpl['summarization']['prompt']
             ).safe_substitute(
-                previous_summary=state['summary'],
+                previous_summary=state['summary'],
                 latest_conversation=str({"user": message['text'], "assistant": response_chunks})
             )
         ],
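The summary-refresh call above builds its prompt with string.Template from Python's standard library; safe_substitute fills known placeholders and leaves unknown ones intact rather than raising. A self-contained illustration with a made-up template (the real one is loaded into prompt_tmpl['summarization']['prompt']):

```python
from string import Template

# Hypothetical stand-in for prompt_tmpl['summarization']['prompt'].
tmpl = Template(
    "Previous summary:\n$previous_summary\n\nLatest turn:\n$latest_conversation"
)

prompt = tmpl.safe_substitute(
    previous_summary="User asked about GCP credits.",
    latest_conversation=str({"user": "thanks!", "assistant": "you're welcome"}),
)
print(prompt)

# safe_substitute never raises on missing keys: an omitted placeholder is left
# verbatim in the output, whereas substitute() would raise KeyError.
```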
@@ -233,14 +237,20 @@ def main(args):
                 label="Summary Persona",
                 info="Control the tonality of the conversation.",
                 min_width="auto",
-            )
+            )
+            use_generated_summaries = gr.Dropdown(
+                ["No", "Yes"],
+                label="Feed back the generated summaries",
+                info="Set this to 'Yes' to ONLY feed the generated summaries back to the model instead of the whole conversation.",
+                min_width="auto",
+            )
 
         with gr.Column("chat-window", elem_id="chat-window"):
             gr.ChatInterface(
                 multimodal=True,
                 type="messages",
                 fn=echo,
-                additional_inputs=[state, persona],
+                additional_inputs=[state, persona, use_generated_summaries],
                 additional_outputs=[state, last_user_msg, summary_diff, summary_md, summary_num, download_summary_md],
             )
 
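For readers unfamiliar with the wiring in the last hunk: gr.ChatInterface forwards each component listed in additional_inputs to fn as extra positional arguments, after message and history, which is how the new dropdown reaches echo. A stripped-down sketch of that mechanism; the labels and toy fn are illustrative, not the app's real ones:

```python
import gradio as gr

def echo(message, history, persona, use_generated_summaries):
    # Extra inputs arrive positionally, in the order given to additional_inputs.
    mode = "summary-only" if use_generated_summaries == "Yes" else "full history"
    return f"[{persona} persona, {mode}] you said: {message['text']}"

with gr.Blocks() as demo:
    persona = gr.Dropdown(["neutral", "formal"], label="Summary Persona")
    use_generated_summaries = gr.Dropdown(
        ["No", "Yes"], label="Feed back the generated summaries"
    )
    gr.ChatInterface(
        multimodal=True,
        type="messages",
        fn=echo,
        additional_inputs=[persona, use_generated_summaries],
    )

if __name__ == "__main__":
    demo.launch()
```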