awacke1 committed on
Commit 8228332 · verified · 1 Parent(s): 8aa93ff

Update app.py

Files changed (1):
  1. app.py +347 -242
app.py CHANGED
@@ -1,13 +1,23 @@
  import streamlit as st
- import anthropic
- import openai
- import base64
- import os
- import re
- import asyncio
  from datetime import datetime
  from gradio_client import Client
- from collections import defaultdict
  import edge_tts

  # 🎯 1. Core Configuration & Setup
@@ -22,22 +32,12 @@ st.set_page_config(
      'About': "🚲BikeAI🏆 Claude/GPT Research AI"
  }
  )
- st.markdown("""
- <style>
-     .main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
-     .stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
-     .stButton>button {
-         margin-right: 0.5rem;
-     }
- </style>
- """, unsafe_allow_html=True)
-
- # 🔑 2. API Setup & Clients
- from dotenv import load_dotenv
  load_dotenv()

  openai_api_key = os.getenv('OPENAI_API_KEY', "")
  anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "")
  if 'OPENAI_API_KEY' in st.secrets:
      openai_api_key = st.secrets['OPENAI_API_KEY']
  if 'ANTHROPIC_API_KEY' in st.secrets:
@@ -45,7 +45,9 @@ if 'ANTHROPIC_API_KEY' in st.secrets:

  openai.api_key = openai_api_key
  claude_client = anthropic.Anthropic(api_key=anthropic_key)
- openai_client = openai  # Using OpenAI directly

  # 📝 3. Session State Management
  if 'transcript_history' not in st.session_state:
@@ -53,9 +55,17 @@ if 'transcript_history' not in st.session_state:
  if 'chat_history' not in st.session_state:
      st.session_state['chat_history'] = []
  if 'openai_model' not in st.session_state:
-     st.session_state['openai_model'] = "gpt-4"  # Update as needed
  if 'messages' not in st.session_state:
      st.session_state['messages'] = []
  if 'viewing_prefix' not in st.session_state:
      st.session_state['viewing_prefix'] = None
  if 'should_rerun' not in st.session_state:
@@ -63,7 +73,23 @@ if 'should_rerun' not in st.session_state:
  if 'old_val' not in st.session_state:
      st.session_state['old_val'] = None

- # 🧠 4. High-Information Content Extraction
  def get_high_info_terms(text: str) -> list:
      """Extract high-information terms from text, including key phrases."""
      stop_words = set([
@@ -124,7 +150,7 @@ def clean_text_for_filename(text: str) -> str:
      filtered = [w for w in words if len(w)>3 and w not in stop_short]
      return '_'.join(filtered)[:200]

- # 📁 5. File Operations
  def generate_filename(prompt, response, file_type="md"):
      """
      Generate filename with meaningful terms and short dense clips from prompt & response.
@@ -150,7 +176,7 @@ def generate_filename(prompt, response, file_type="md"):
      return filename

  def create_file(prompt, response, file_type="md"):
-     """Create file with an intelligent naming scheme."""
      filename = generate_filename(prompt.strip(), response.strip(), file_type)
      with open(filename, 'w', encoding='utf-8') as f:
          f.write(prompt + "\n\n" + response)
@@ -162,7 +188,7 @@ def get_download_link(file):
      b64 = base64.b64encode(f.read()).decode()
      return f'<a href="data:file/zip;base64,{b64}" download="{os.path.basename(file)}">📂 Download {os.path.basename(file)}</a>'

- # 🔊 6. Audio Processing
  def clean_for_speech(text: str) -> str:
      """Clean text for speech synthesis"""
      text = text.replace("\n", " ")
@@ -172,64 +198,53 @@ def clean_for_speech(text: str) -> str:
      text = re.sub(r"\s+", " ", text).strip()
      return text

- async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0, out_fn="temp.mp3"):
-     """Generate audio using Edge TTS (async)"""
      text = clean_for_speech(text)
      if not text.strip():
          return None
      rate_str = f"{rate:+d}%"
      pitch_str = f"{pitch:+d}Hz"
      communicate = edge_tts.Communicate(text, voice, rate=rate_str, pitch=pitch_str)
      await communicate.save(out_fn)
      return out_fn

- def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0, out_fn="temp.mp3"):
-     """Wrapper for Edge TTS generation (sync)"""
-     return asyncio.run(edge_tts_generate_audio(text, voice, rate, pitch, out_fn))

  def play_and_download_audio(file_path):
-     """Play and provide a download link for audio"""
      if file_path and os.path.exists(file_path):
          st.audio(file_path)
          dl_link = f'<a href="data:audio/mpeg;base64,{base64.b64encode(open(file_path,"rb").read()).decode()}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}</a>'
          st.markdown(dl_link, unsafe_allow_html=True)

- def auto_play_audio(file_path):
-     """Embeds an <audio> tag with autoplay + controls + a download link."""
-     if not file_path or not os.path.exists(file_path):
-         return
-     with open(file_path, "rb") as f:
-         b64_data = base64.b64encode(f.read()).decode("utf-8")
-     filename = os.path.basename(file_path)
-     st.markdown(f"""
-     <audio controls autoplay>
-         <source src="data:audio/mpeg;base64,{b64_data}" type="audio/mpeg">
-         Your browser does not support the audio element.
-     </audio>
-     <br/>
-     <a href="data:audio/mpeg;base64,{b64_data}" download="{filename}">
-         Download {filename}
-     </a>
-     """, unsafe_allow_html=True)
-
- def generate_audio_filename(query, title, summary):
-     """
-     Generate a specialized MP3 filename using query + title + summary.
-     Example: "2310_1205_query_title_summary.mp3"
-     """
-     combined = (query + " " + title + " " + summary).strip().lower()
-     combined = re.sub(r'[^\w\s-]', '', combined)  # Remove special characters
-     combined = "_".join(combined.split())[:80]  # Limit length
-     prefix = datetime.now().strftime("%y%m_%H%M")
-     return f"{prefix}_{combined}.mp3"
-
- # 🎬 7. Media Processing
  def process_image(image_path, user_prompt):
      """Process image with GPT-4V"""
      with open(image_path, "rb") as imgf:
          image_data = imgf.read()
          b64img = base64.b64encode(image_data).decode("utf-8")
-     resp = openai_client.ChatCompletion.create(
          model=st.session_state["openai_model"],
          messages=[
              {"role": "system", "content": "You are a helpful assistant."},
@@ -242,25 +257,24 @@ def process_image(image_path, user_prompt):
      )
      return resp.choices[0].message.content

- def process_audio_with_whisper(audio_path):
      """Process audio with Whisper"""
      with open(audio_path, "rb") as f:
-         transcription = openai_client.Audio.transcriptions.create(model="whisper-1", file=f)
      st.session_state.messages.append({"role": "user", "content": transcription.text})
      return transcription.text

  def process_video(video_path, seconds_per_frame=1):
      """Extract frames from video"""
-     import cv2
      vid = cv2.VideoCapture(video_path)
      total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
      fps = vid.get(cv2.CAP_PROP_FPS)
-     skip = int(fps * seconds_per_frame)
      frames_b64 = []
      for i in range(0, total, skip):
          vid.set(cv2.CAP_PROP_POS_FRAMES, i)
          ret, frame = vid.read()
-         if not ret:
              break
          _, buf = cv2.imencode(".jpg", frame)
          frames_b64.append(base64.b64encode(buf).decode("utf-8"))
@@ -270,61 +284,196 @@ def process_video(video_path, seconds_per_frame=1):
  def process_video_with_gpt(video_path, prompt):
      """Analyze video frames with GPT-4V"""
      frames = process_video(video_path)
-     resp = openai_client.ChatCompletion.create(
          model=st.session_state["openai_model"],
          messages=[
-             {"role": "system", "content": "Analyze video frames."},
-             {"role": "user", "content": [
-                 {"type": "text", "text": prompt},
                  *[{"type":"image_url","image_url":{"url":f"data:image/jpeg;base64,{fr}"}} for fr in frames]
              ]}
          ]
      )
      return resp.choices[0].message.content

- # 🤖 8. AI Model Integration
  def save_full_transcript(query, text):
      """Save full transcript of Arxiv results as a file."""
      create_file(query, text, "md")

  def process_with_gpt(text):
      """Process text with GPT-4"""
-     if not text:
          return
      st.session_state.messages.append({"role":"user","content":text})
      with st.chat_message("user"):
          st.markdown(text)
      with st.chat_message("assistant"):
-         c = openai_client.ChatCompletion.create(
              model=st.session_state["openai_model"],
              messages=st.session_state.messages,
              stream=False
          )
          ans = c.choices[0].message.content
-         st.write("GPT-4: " + ans)
          create_file(text, ans, "md")
          st.session_state.messages.append({"role":"assistant","content":ans})
      return ans

  def process_with_claude(text):
      """Process text with Claude"""
-     if not text:
          return
      with st.chat_message("user"):
          st.markdown(text)
      with st.chat_message("assistant"):
-         r = claude_client.completions.create(
-             prompt=text,
-             model="claude-3",
-             max_tokens=1000
          )
-         ans = r['completion']
          st.write("Claude-3.5: " + ans)
          create_file(text, ans, "md")
          st.session_state.chat_history.append({"user":text,"claude":ans})
      return ans

- # 📂 9. File Management
  def create_zip_of_files(md_files, mp3_files):
      """Create zip with intelligent naming"""
      md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
@@ -358,21 +507,22 @@ def load_files_for_sidebar():
      """Load and group files for sidebar display"""
      md_files = glob.glob("*.md")
      mp3_files = glob.glob("*.mp3")
-     md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']

      all_files = md_files + mp3_files
      groups = defaultdict(list)
      for f in all_files:
          fname = os.path.basename(f)
-         prefix = fname[:10]  # e.g., "2310_1205_"
          groups[prefix].append(f)

      for prefix in groups:
          groups[prefix].sort(key=lambda x: os.path.getmtime(x), reverse=True)

      sorted_prefixes = sorted(groups.keys(),
-                              key=lambda pre: max(os.path.getmtime(x) for x in groups[pre]),
-                              reverse=True)
      return groups, sorted_prefixes

  def extract_keywords_from_md(files):
@@ -412,7 +562,7 @@ def display_file_manager_sidebar(groups, sorted_prefixes):
      if st.button("⬇️ ZipAll"):
          z = create_zip_of_files(all_md, all_mp3)
          if z:
-             st.sidebar.markdown(get_download_link(z), unsafe_allow_html=True)

      for prefix in sorted_prefixes:
          files = groups[prefix]
@@ -435,32 +585,106 @@ def display_file_manager_sidebar(groups, sorted_prefixes):
      ctime = datetime.fromtimestamp(os.path.getmtime(f)).strftime("%Y-%m-%d %H:%M:%S")
      st.write(f"**{fname}** - {ctime}")

- # 🎯 10. Main Application
  def main():
      st.sidebar.markdown("### 🚲BikeAI🏆 Multi-Agent Research")
-     tab_main = st.radio("Action:", ["🎤 Voice","📸 Media","🔍 ArXiv","📝 Editor"], horizontal=True)

-     # Placeholder for custom component if needed
-     # mycomponent = components.declare_component("mycomponent", path="mycomponent")
-     # val = mycomponent(my_input_value="Hello")

-     # Example input handling
-     # if val:
-     #     # Handle custom component input
-     #     pass

      if tab_main == "🔍 ArXiv":
          st.subheader("🔍 Query ArXiv")
          q = st.text_input("🔍 Query:")

          st.markdown("### 🎛 Options")
-         full_audio = st.checkbox("📚 Full Audio", value=False, help="Generate full audio response")
-         full_transcript = st.checkbox("🧾 Full Transcript", value=False, help="Generate a full transcript file")

-         if q and st.button("🔍 Run Query"):
-             perform_ai_lookup(q)
          if full_transcript:
-             create_file(q, "Full transcript generated.", "md")  # Customize as needed

      elif tab_main == "🎤 Voice":
          st.subheader("🎤 Voice Input")
@@ -469,7 +693,7 @@ def main():
          if st.button("📨 Send"):
              process_with_gpt(user_text)
          st.subheader("📜 Chat History")
-         t1, t2 = st.tabs(["Claude History","GPT-4 History"])
          with t1:
              for c in st.session_state.chat_history:
                  st.write("**You:**", c["user"])
@@ -483,51 +707,45 @@ def main():
          st.header("📸 Images & 🎥 Videos")
          tabs = st.tabs(["🖼 Images", "🎥 Video"])
          with tabs[0]:
-             imgs = glob.glob("*.png") + glob.glob("*.jpg") + glob.glob("*.jpeg")
              if imgs:
-                 cols = st.columns(st.slider("Cols", 1, 5, 3))
-                 for i, f in enumerate(imgs):
-                     with cols[i % len(cols)]:
-                         st.image(Image.open(f), use_container_width=True)
                          if st.button(f"👀 Analyze {os.path.basename(f)}", key=f"analyze_{f}"):
-                             a = process_image(f, "Describe this image.")
                              st.markdown(a)
              else:
                  st.write("No images found.")
          with tabs[1]:
-             vids = glob.glob("*.mp4") + glob.glob("*.avi") + glob.glob("*.mov")
              if vids:
                  for v in vids:
                      with st.expander(f"🎥 {os.path.basename(v)}"):
                          st.video(v)
                          if st.button(f"Analyze {os.path.basename(v)}", key=f"analyze_{v}"):
-                             a = process_video_with_gpt(v, "Describe this video.")
                              st.markdown(a)
              else:
                  st.write("No videos found.")

      elif tab_main == "📝 Editor":
-         st.subheader("📝 File Editor")
-         # Example editor logic: list markdown files and allow editing
-         md_files = glob.glob("*.md")
-         if md_files:
-             selected_file = st.selectbox("Select a file to edit:", md_files)
-             with st.form("edit_form"):
-                 new_content = st.text_area("✏️ Content:", open(selected_file, 'r', encoding='utf-8').read(), height=300)
-                 submitted = st.form_submit_button("💾 Save")
-                 if submitted:
-                     with open(selected_file, 'w', encoding='utf-8') as f:
-                         f.write(new_content)
-                     st.success(f"Updated {selected_file}!")
-                     st.session_state.should_rerun = True
          else:
-             st.write("No markdown files available to edit.")

-         # File manager in sidebar
      groups, sorted_prefixes = load_files_for_sidebar()
      display_file_manager_sidebar(groups, sorted_prefixes)

-     # If user clicked "view group"
      if st.session_state.viewing_prefix and st.session_state.viewing_prefix in groups:
          st.write("---")
          st.write(f"**Viewing Group:** {st.session_state.viewing_prefix}")
@@ -536,7 +754,7 @@ def main():
          ext = os.path.splitext(fname)[1].lower().strip('.')
          st.write(f"### {fname}")
          if ext == "md":
-             content = open(f, 'r', encoding='utf-8').read()
              st.markdown(content)
          elif ext == "mp3":
              st.audio(f)
@@ -547,120 +765,7 @@ def main():

      if st.session_state.should_rerun:
          st.session_state.should_rerun = False
-         st.experimental_rerun()
-
- def parse_arxiv_papers(ref_text: str):
-     """
-     Splits the references into paper-level chunks.
-     Each paper starts with a number followed by a parenthesis, e.g., "1) [Title (Year)] Summary..."
-     Returns a list of dictionaries with 'title', 'summary', and 'year'.
-     Limits to 20 papers.
-     """
-     # Split based on patterns like "1) ", "2) ", etc.
-     chunks = re.split(r'\n?\d+\)\s+', ref_text)
-     # Remove any empty strings resulting from split
-     chunks = [chunk.strip() for chunk in chunks if chunk.strip()]
-     papers = []
-     for chunk in chunks[:20]:
-         # Extract title within brackets if present
-         title_match = re.search(r'\[([^\]]+)\]', chunk)
-         title = title_match.group(1).strip() if title_match else "No Title"
-
-         # Extract year (assuming it's a 4-digit number within the title or summary)
-         year_match = re.search(r'\b(20\d{2})\b', chunk)
-         year = int(year_match.group(1)) if year_match else None
-
-         # The entire chunk is considered the summary
-         summary = chunk
-
-         papers.append({
-             'title': title,
-             'summary': summary,
-             'year': year
-         })
-     return papers
-
- def perform_ai_lookup(q):
-     """
-     Performs the Arxiv search and handles the processing of results.
-     Generates audio files for each paper (if year is 2023 or 2024).
-     """
-     st.write(f"## Query: {q}")
-
-     # 1) Query the HF RAG pipeline
-     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
-     refs = client.predict(q, 20, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1", api_name="/update_with_rag_md")[0]
-     r2 = client.predict(q, "mistralai/Mixtral-8x7B-Instruct-v0.1", True, api_name="/ask_llm")
-
-     # 2) Combine for final text output
-     result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"
-     st.markdown(result)
-
-     # 3) Parse references into papers
-     papers = parse_arxiv_papers(refs)
-
-     # 4) Display each paper and generate audio if applicable
-     st.write("## Individual Papers (Up to 20)")
-     for idx, paper in enumerate(papers):
-         year_str = paper["year"] if paper["year"] else "Unknown Year"
-         st.markdown(f"**Paper #{idx+1}: {paper['title']}** \n*Year:* {year_str}")
-         st.markdown(f"*Summary:* {paper['summary']}")
-         st.write("---")
-
-         # Generate TTS if year is 2023 or 2024
-         if paper["year"] in [2023, 2024]:
-             # Combine title and summary for TTS
-             tts_text = f"Title: {paper['title']}. Summary: {paper['summary']}"
-             # Generate a specialized filename
-             mp3_filename = generate_audio_filename(q, paper['title'], paper['summary'])
-             # Generate audio using Edge TTS
-             temp_mp3 = speak_with_edge_tts(tts_text, out_fn=mp3_filename)
-             if temp_mp3 and os.path.exists(mp3_filename):
-                 # Embed the audio player with auto-play and download link
-                 auto_play_audio(mp3_filename)
-
-     # Optionally save the full transcript
-     st.write("### Transcript")
-     st.markdown(result)
-     create_file(q, result, "md")
-
- def process_with_gpt(text):
-     """Process text with GPT-4"""
-     if not text:
-         return
-     st.session_state.messages.append({"role":"user","content":text})
-     with st.chat_message("user"):
-         st.markdown(text)
-     with st.chat_message("assistant"):
-         c = openai_client.ChatCompletion.create(
-             model=st.session_state["openai_model"],
-             messages=st.session_state.messages,
-             stream=False
-         )
-         ans = c.choices[0].message.content
-         st.write("GPT-4: " + ans)
-         create_file(text, ans, "md")
-         st.session_state.messages.append({"role":"assistant","content":ans})
-     return ans
-
- def process_with_claude(text):
-     """Process text with Claude"""
-     if not text:
-         return
-     with st.chat_message("user"):
-         st.markdown(text)
-     with st.chat_message("assistant"):
-         r = claude_client.completions.create(
-             prompt=text,
-             model="claude-3",
-             max_tokens=1000
-         )
-         ans = r['completion']
-         st.write("Claude-3.5: " + ans)
-         create_file(text, ans, "md")
-         st.session_state.chat_history.append({"user":text,"claude":ans})
-     return ans

- # Run the app
- if __name__ == "__main__":
      main()
 
  import streamlit as st
+ import anthropic, openai, base64, cv2, glob, json, math, os, pytz, random, re, requests, textract, time, zipfile
+ import plotly.graph_objects as go
+ import streamlit.components.v1 as components
  from datetime import datetime
+ from audio_recorder_streamlit import audio_recorder
+ from bs4 import BeautifulSoup
+ from collections import defaultdict, deque
+ from dotenv import load_dotenv
  from gradio_client import Client
+ from huggingface_hub import InferenceClient
+ from io import BytesIO
+ from PIL import Image
+ from PyPDF2 import PdfReader
+ from urllib.parse import quote
+ from xml.etree import ElementTree as ET
+ from openai import OpenAI
+ import extra_streamlit_components as stx
+ from streamlit.runtime.scriptrunner import get_script_run_ctx
+ import asyncio
+ import edge_tts

  # 🎯 1. Core Configuration & Setup
 
      'About': "🚲BikeAI🏆 Claude/GPT Research AI"
  }
  )
  load_dotenv()

+ # 🔑 2. API Setup & Clients
  openai_api_key = os.getenv('OPENAI_API_KEY', "")
  anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "")
+ xai_key = os.getenv('xai',"")
  if 'OPENAI_API_KEY' in st.secrets:
      openai_api_key = st.secrets['OPENAI_API_KEY']
  if 'ANTHROPIC_API_KEY' in st.secrets:

  openai.api_key = openai_api_key
  claude_client = anthropic.Anthropic(api_key=anthropic_key)
+ openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID'))
+ HF_KEY = os.getenv('HF_KEY')
+ API_URL = os.getenv('API_URL')

  # 📝 3. Session State Management
  if 'transcript_history' not in st.session_state:
  if 'chat_history' not in st.session_state:
      st.session_state['chat_history'] = []
  if 'openai_model' not in st.session_state:
+     st.session_state['openai_model'] = "gpt-4o-2024-05-13"
  if 'messages' not in st.session_state:
      st.session_state['messages'] = []
+ if 'last_voice_input' not in st.session_state:
+     st.session_state['last_voice_input'] = ""
+ if 'editing_file' not in st.session_state:
+     st.session_state['editing_file'] = None
+ if 'edit_new_name' not in st.session_state:
+     st.session_state['edit_new_name'] = ""
+ if 'edit_new_content' not in st.session_state:
+     st.session_state['edit_new_content'] = ""
  if 'viewing_prefix' not in st.session_state:
      st.session_state['viewing_prefix'] = None
  if 'should_rerun' not in st.session_state:
  if 'old_val' not in st.session_state:
      st.session_state['old_val'] = None

+ # 🎨 4. Custom CSS
+ st.markdown("""
+ <style>
+     .main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
+     .stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
+     .stButton>button {
+         margin-right: 0.5rem;
+     }
+ </style>
+ """, unsafe_allow_html=True)
+
+ FILE_EMOJIS = {
+     "md": "📝",
+     "mp3": "🎵",
+ }
+
+ # 🧠 5. High-Information Content Extraction
  def get_high_info_terms(text: str) -> list:
      """Extract high-information terms from text, including key phrases."""
      stop_words = set([
      filtered = [w for w in words if len(w)>3 and w not in stop_short]
      return '_'.join(filtered)[:200]

+ # 📁 6. File Operations
  def generate_filename(prompt, response, file_type="md"):
      """
      Generate filename with meaningful terms and short dense clips from prompt & response.
      return filename

  def create_file(prompt, response, file_type="md"):
+     """Create file with intelligent naming"""
      filename = generate_filename(prompt.strip(), response.strip(), file_type)
      with open(filename, 'w', encoding='utf-8') as f:
          f.write(prompt + "\n\n" + response)
      b64 = base64.b64encode(f.read()).decode()
      return f'<a href="data:file/zip;base64,{b64}" download="{os.path.basename(file)}">📂 Download {os.path.basename(file)}</a>'

+ # 🔊 7. Audio Processing
  def clean_for_speech(text: str) -> str:
      """Clean text for speech synthesis"""
      text = text.replace("\n", " ")
      text = re.sub(r"\s+", " ", text).strip()
      return text

+ @st.cache_resource
+ def speech_synthesis_html(result):
+     """Create HTML for speech synthesis"""
+     html_code = f"""
+     <html><body>
+     <script>
+     var msg = new SpeechSynthesisUtterance("{result.replace('"', '')}");
+     window.speechSynthesis.speak(msg);
+     </script>
+     </body></html>
+     """
+     components.html(html_code, height=0)
+
+ async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0):
+     """Generate audio using Edge TTS"""
      text = clean_for_speech(text)
      if not text.strip():
          return None
      rate_str = f"{rate:+d}%"
      pitch_str = f"{pitch:+d}Hz"
      communicate = edge_tts.Communicate(text, voice, rate=rate_str, pitch=pitch_str)
+     out_fn = generate_filename(text, text, "mp3")
      await communicate.save(out_fn)
      return out_fn

+ def speak_with_edge_tts(text, voice, rate=0, pitch=0):
+     """Wrapper for edge TTS generation"""
+     try:
+         return asyncio.run(edge_tts_generate_audio(text, voice, rate, pitch))
+     except Exception as e:
+         st.error(f"Error generating audio: {e}")
+         return None
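Note on the new speak_with_edge_tts wrapper above: asyncio.run can raise RuntimeError when called from a thread that already has a running event loop, which the new try/except now surfaces via st.error. A minimal standalone sketch of the same async-save pattern, runnable outside the app (the sample text, voice name, and output filename here are illustrative):

import asyncio
import edge_tts

async def synth(text: str, voice: str, out_fn: str) -> str:
    # Communicate streams the synthesized speech; save() writes it to disk.
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(out_fn)
    return out_fn

if __name__ == "__main__":
    # Synchronous entry point, mirroring speak_with_edge_tts above.
    path = asyncio.run(synth("Hello from Edge TTS.", "en-US-AriaNeural", "sample.mp3"))
    print("wrote", path)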
 
  def play_and_download_audio(file_path):
+     """Play and provide download link for audio"""
      if file_path and os.path.exists(file_path):
          st.audio(file_path)
          dl_link = f'<a href="data:audio/mpeg;base64,{base64.b64encode(open(file_path,"rb").read()).decode()}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}</a>'
          st.markdown(dl_link, unsafe_allow_html=True)

+ # 🎬 8. Media Processing
  def process_image(image_path, user_prompt):
      """Process image with GPT-4V"""
      with open(image_path, "rb") as imgf:
          image_data = imgf.read()
          b64img = base64.b64encode(image_data).decode("utf-8")
+     resp = openai_client.chat.completions.create(
          model=st.session_state["openai_model"],
          messages=[
              {"role": "system", "content": "You are a helpful assistant."},
      )
      return resp.choices[0].message.content

+ def process_audio(audio_path):
      """Process audio with Whisper"""
      with open(audio_path, "rb") as f:
+         transcription = openai_client.audio.transcriptions.create(model="whisper-1", file=f)
      st.session_state.messages.append({"role": "user", "content": transcription.text})
      return transcription.text

  def process_video(video_path, seconds_per_frame=1):
      """Extract frames from video"""
      vid = cv2.VideoCapture(video_path)
      total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
      fps = vid.get(cv2.CAP_PROP_FPS)
+     skip = int(fps*seconds_per_frame)
      frames_b64 = []
      for i in range(0, total, skip):
          vid.set(cv2.CAP_PROP_POS_FRAMES, i)
          ret, frame = vid.read()
+         if not ret:
              break
          _, buf = cv2.imencode(".jpg", frame)
          frames_b64.append(base64.b64encode(buf).decode("utf-8"))

  def process_video_with_gpt(video_path, prompt):
      """Analyze video frames with GPT-4V"""
      frames = process_video(video_path)
+     resp = openai_client.chat.completions.create(
          model=st.session_state["openai_model"],
          messages=[
+             {"role":"system","content":"Analyze video frames."},
+             {"role":"user","content":[
+                 {"type":"text","text":prompt},
                  *[{"type":"image_url","image_url":{"url":f"data:image/jpeg;base64,{fr}"}} for fr in frames]
              ]}
          ]
      )
      return resp.choices[0].message.content

+ # 🤖 9. AI Model Integration
+
  def save_full_transcript(query, text):
      """Save full transcript of Arxiv results as a file."""
      create_file(query, text, "md")

+ def parse_arxiv_refs(ref_text: str):
+     """
+     Parse the multi-line references returned by the RAG pipeline.
+     Typical format lines like:
+     1) [Paper Title 2023] This is the summary ...
+     2) [Another Title (2024)] Another summary text ...
+     We'll attempt to find a year with a small regex or fallback.
+     Return list of dicts: { 'title': str, 'summary': str, 'year': int or None }
+     """
+     lines = ref_text.split('\n')
+     results = []
+     for line in lines:
+         line = line.strip()
+         if not line:
+             continue
+         # Attempt to find [Title ...]
+         title_match = re.search(r"\[([^\]]+)\]", line)
+         if title_match:
+             raw_title = title_match.group(1).strip()
+         else:
+             # If no bracket found, skip or treat entire line as summary
+             raw_title = "No Title"
+
+         # Attempt to find trailing summary after bracket
+         # Example line: " [Paper Title 2024] Paper summary blah blah"
+         # So remove the bracketed portion from the line
+         remainder = line.replace(title_match.group(0), "").strip() if title_match else line
+         summary = remainder
+
+         # Attempt to guess year from the raw title
+         # We look for 4-digit patterns in raw_title or summary
+         year_match = re.search(r'(20\d{2})', raw_title)
+         if not year_match:
+             # fallback: try summary
+             year_match = re.search(r'(20\d{2})', summary)
+         if year_match:
+             year = int(year_match.group(1))
+         else:
+             year = None
+
+         results.append({
+             'title': raw_title,
+             'summary': summary,
+             'year': year
+         })
+     return results
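A quick sanity check of parse_arxiv_refs on a line in the format its docstring describes (the input string is invented for illustration):

sample = "1) [Sample Paper Title 2023] A short summary of the paper."
papers = parse_arxiv_refs(sample)
# papers[0]['title'] == 'Sample Paper Title 2023'
# papers[0]['year']  == 2023
# papers[0]['summary'] keeps the leading "1)" enumeration, since the function
# only strips the bracketed title from the line, not the list numbering.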
+
+ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
+                       titles_summary=True, full_audio=False, selected_voice="en-US-AriaNeural"):
+     """Perform Arxiv search and generate audio summaries."""
+     start = time.time()
+
+     # 🎯 1) Query the HF RAG pipeline
+     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+     refs = client.predict(q,20,"Semantic Search","mistralai/Mixtral-8x7B-Instruct-v0.1",api_name="/update_with_rag_md")[0]
+     r2 = client.predict(q,"mistralai/Mixtral-8x7B-Instruct-v0.1",True,api_name="/ask_llm")
+
+     # 🎯 2) Combine for final text output
+     result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"
+     st.markdown(result)
+
+     # 🎯 3) Generate "all at once" audio if requested
+     if full_audio:
+         complete_text = f"Complete response for query: {q}. {clean_for_speech(r2)} {clean_for_speech(refs)}"
+         audio_file_full = speak_with_edge_tts(complete_text, selected_voice)
+         st.write("### 📚 Full Audio")
+         play_and_download_audio(audio_file_full)
+
+     if vocal_summary:
+         main_text = clean_for_speech(r2)
+         audio_file_main = speak_with_edge_tts(main_text, selected_voice)
+         st.write("### 🎙 Short Audio")
+         play_and_download_audio(audio_file_main)
+
+     if extended_refs:
+         summaries_text = "Extended references: " + refs.replace('"','')
+         summaries_text = clean_for_speech(summaries_text)
+         audio_file_refs = speak_with_edge_tts(summaries_text, selected_voice)
+         st.write("### 📜 Long Refs")
+         play_and_download_audio(audio_file_refs)
+
+     # --------------------------------------
+     # NEW: Parse references, show sorted list
+     # --------------------------------------
+     parsed_refs = parse_arxiv_refs(refs)
+
+     # Sort by year descending (put None at bottom)
+     parsed_refs.sort(key=lambda x: x["year"] if x["year"] else 0, reverse=True)
+
+     st.write("## Individual Papers (Most Recent First)")
+     for idx, paper in enumerate(parsed_refs):
+         year_str = paper["year"] if paper["year"] else "Unknown Year"
+         st.markdown(f"**{idx+1}. {paper['title']}** \n*Year:* {year_str}")
+         st.markdown(f"*Summary:* {paper['summary']}")
+
+         # Two new TTS buttons: Title only or Title+Summary
+         colA, colB = st.columns(2)
+         with colA:
+             if st.button(f"🔊 Title", key=f"title_{idx}"):
+                 text_tts = clean_for_speech(paper['title'])
+                 audio_file_title = speak_with_edge_tts(text_tts, selected_voice)
+                 play_and_download_audio(audio_file_title)
+
+         with colB:
+             if st.button(f"🔊 Title+Summary", key=f"summary_{idx}"):
+                 text_tts = clean_for_speech(paper['title'] + ". " + paper['summary'])
+                 audio_file_title_summary = speak_with_edge_tts(text_tts, selected_voice)
+                 play_and_download_audio(audio_file_title_summary)
+
+         st.write("---")
+
+     # Keep your original block for "Titles Only" if you want:
+     if titles_summary:
+         titles = []
+         for line in refs.split('\n'):
+             m = re.search(r"\[([^\]]+)\]", line)
+             if m:
+                 titles.append(m.group(1))
+         if titles:
+             titles_text = "Titles: " + ", ".join(titles)
+             titles_text = clean_for_speech(titles_text)
+             audio_file_titles = speak_with_edge_tts(titles_text, selected_voice)
+             st.write("### 🔖 Titles (All-In-One)")
+             play_and_download_audio(audio_file_titles)
+
+     elapsed = time.time()-start
+     st.write(f"**Total Elapsed:** {elapsed:.2f} s")
+
+     # Always create a file with the result
+     create_file(q, result, "md")
+
+     return result
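For context, the two client.predict calls above target named endpoints on the awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern Space, and the function takes the first element of the /update_with_rag_md result as the markdown reference list (hence the [0]). A standalone sketch of the same calls, with an illustrative query and under the assumption that the Space's endpoints still return these shapes:

from gradio_client import Client

client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
# The app treats element [0] of this endpoint's result as the reference list.
refs = client.predict("mixture of experts", 20, "Semantic Search",
                      "mistralai/Mixtral-8x7B-Instruct-v0.1",
                      api_name="/update_with_rag_md")[0]
answer = client.predict("mixture of experts",
                        "mistralai/Mixtral-8x7B-Instruct-v0.1", True,
                        api_name="/ask_llm")
print(str(answer)[:200])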
+
  def process_with_gpt(text):
      """Process text with GPT-4"""
+     if not text:
          return
      st.session_state.messages.append({"role":"user","content":text})
      with st.chat_message("user"):
          st.markdown(text)
      with st.chat_message("assistant"):
+         c = openai_client.chat.completions.create(
              model=st.session_state["openai_model"],
              messages=st.session_state.messages,
              stream=False
          )
          ans = c.choices[0].message.content
+         st.write("GPT-4o: " + ans)
          create_file(text, ans, "md")
          st.session_state.messages.append({"role":"assistant","content":ans})
      return ans

  def process_with_claude(text):
      """Process text with Claude"""
+     if not text:
          return
      with st.chat_message("user"):
          st.markdown(text)
      with st.chat_message("assistant"):
+         r = claude_client.messages.create(
+             model="claude-3-sonnet-20240229",
+             max_tokens=1000,
+             messages=[{"role":"user","content":text}]
          )
+         ans = r.content[0].text
          st.write("Claude-3.5: " + ans)
          create_file(text, ans, "md")
          st.session_state.chat_history.append({"user":text,"claude":ans})
      return ans
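The switch above from claude_client.completions.create to claude_client.messages.create is the move to Anthropic's Messages API, whose responses carry a list of content blocks; r.content[0].text assumes the first block is a text block. A slightly more defensive extraction, as a sketch (model name as in the diff, prompt illustrative):

import anthropic

claude = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment
r = claude.messages.create(
    model="claude-3-sonnet-20240229",
    max_tokens=1000,
    messages=[{"role": "user", "content": "Say hello."}],
)
# Join all text blocks rather than assuming block 0 is text.
ans = "".join(block.text for block in r.content if block.type == "text")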
 
+ # 📂 10. File Management
  def create_zip_of_files(md_files, mp3_files):
      """Create zip with intelligent naming"""
      md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']

      """Load and group files for sidebar display"""
      md_files = glob.glob("*.md")
      mp3_files = glob.glob("*.mp3")

+     md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
      all_files = md_files + mp3_files
+
      groups = defaultdict(list)
      for f in all_files:
          fname = os.path.basename(f)
+         prefix = fname[:10]
          groups[prefix].append(f)

      for prefix in groups:
          groups[prefix].sort(key=lambda x: os.path.getmtime(x), reverse=True)

      sorted_prefixes = sorted(groups.keys(),
+                              key=lambda pre: max(os.path.getmtime(x) for x in groups[pre]),
+                              reverse=True)
      return groups, sorted_prefixes
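The fname[:10] prefix above groups files by the 10-character timestamp prefix that the filename helpers emit (e.g. "2310_1205_", per the comment in the old version), so the .md and .mp3 outputs of one run land in the same sidebar group. A tiny standalone illustration with made-up filenames:

from collections import defaultdict

names = ["2310_1205_query_one.md", "2310_1205_query_one.mp3", "2310_1432_other.md"]
groups = defaultdict(list)
for n in names:
    groups[n[:10]].append(n)
# -> {'2310_1205_': ['...one.md', '...one.mp3'], '2310_1432_': ['...other.md']}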
 
  def extract_keywords_from_md(files):

      if st.button("⬇️ ZipAll"):
          z = create_zip_of_files(all_md, all_mp3)
          if z:
+             st.sidebar.markdown(get_download_link(z),unsafe_allow_html=True)

      for prefix in sorted_prefixes:
          files = groups[prefix]

          ctime = datetime.fromtimestamp(os.path.getmtime(f)).strftime("%Y-%m-%d %H:%M:%S")
          st.write(f"**{fname}** - {ctime}")

+ # 🎯 11. Main Application
+ async def get_available_voices():
+     voices = await edge_tts.list_voices()
+     return [voice["shortName"] for voice in voices]
+
+ @st.cache_resource
+ def fetch_voices():
+     return asyncio.run(get_available_voices())
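One thing worth double-checking in get_available_voices above: in current edge-tts releases, edge_tts.list_voices() returns the Microsoft voice catalog with the key spelled "ShortName" (capital S), so voice["shortName"] would raise KeyError. A defensive variant, as a sketch:

import asyncio
import edge_tts

async def get_available_voices():
    voices = await edge_tts.list_voices()
    # Prefer the documented "ShortName" key; fall back to "shortName"
    # in case a different schema is in play.
    return [v.get("ShortName") or v.get("shortName") for v in voices]

print(asyncio.run(get_available_voices())[:5])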
+
  def main():
      st.sidebar.markdown("### 🚲BikeAI🏆 Multi-Agent Research")
+     tab_main = st.radio("Action:",["🎤 Voice","📸 Media","🔍 ArXiv","📝 Editor"],horizontal=True)

+     mycomponent = components.declare_component("mycomponent", path="mycomponent")
+     val = mycomponent(my_input_value="Hello")

+     if 'voices' not in st.session_state:
+         st.session_state['voices'] = fetch_voices()
+
+     st.sidebar.markdown("### 🎤 Select Voice for Audio Generation")
+     selected_voice = st.sidebar.selectbox(
+         "Choose a voice:",
+         options=st.session_state['voices'],
+         index=st.session_state['voices'].index("en-US-AriaNeural") if "en-US-AriaNeural" in st.session_state['voices'] else 0
+     )
+
+     # Show input in a text box for editing if detected
+     if val:
+         val_stripped = val.replace('\n', ' ')
+         edited_input = st.text_area("✏️ Edit Input:", value=val_stripped, height=100)
+         run_option = st.selectbox("Model:", ["Arxiv", "GPT-4o", "Claude-3.5"])
+         col1, col2 = st.columns(2)
+         with col1:
+             autorun = st.checkbox("⚙ AutoRun", value=True)
+         with col2:
+             full_audio = st.checkbox("📚FullAudio", value=False,
+                                      help="Generate full audio response")
+
+         input_changed = (val != st.session_state.old_val)
+
+         if autorun and input_changed:
+             st.session_state.old_val = val
+             if run_option == "Arxiv":
+                 perform_ai_lookup(
+                     edited_input,
+                     vocal_summary=True,
+                     extended_refs=False,
+                     titles_summary=True,
+                     full_audio=full_audio,
+                     selected_voice=selected_voice
+                 )
+             else:
+                 if run_option == "GPT-4o":
+                     process_with_gpt(edited_input)
+                 elif run_option == "Claude-3.5":
+                     process_with_claude(edited_input)
+         else:
+             if st.button("▶ Run"):
+                 st.session_state.old_val = val
+                 if run_option == "Arxiv":
+                     perform_ai_lookup(
+                         edited_input,
+                         vocal_summary=True,
+                         extended_refs=False,
+                         titles_summary=True,
+                         full_audio=full_audio,
+                         selected_voice=selected_voice
+                     )
+                 else:
+                     if run_option == "GPT-4o":
+                         process_with_gpt(edited_input)
+                     elif run_option == "Claude-3.5":
+                         process_with_claude(edited_input)

      if tab_main == "🔍 ArXiv":
          st.subheader("🔍 Query ArXiv")
          q = st.text_input("🔍 Query:")

          st.markdown("### 🎛 Options")
+         vocal_summary = st.checkbox("🎙ShortAudio", value=True)
+         extended_refs = st.checkbox("📜LongRefs", value=False)
+         titles_summary = st.checkbox("🔖TitlesOnly", value=True)
+         full_audio = st.checkbox("📚FullAudio", value=False,
+                                  help="Full audio of results")
+         full_transcript = st.checkbox("🧾FullTranscript", value=False,
+                                       help="Generate a full transcript file")
+
+         if q and st.button("🔍Run"):
+             result = perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs,
+                                        titles_summary=titles_summary, full_audio=full_audio, selected_voice=selected_voice)
+             if full_transcript:
+                 save_full_transcript(q, result)

+         st.markdown("### Change Prompt & Re-Run")
+         q_new = st.text_input("🔄 Modify Query:")
+         if q_new and st.button("🔄 Re-Run with Modified Query"):
+             result = perform_ai_lookup(q_new, vocal_summary=vocal_summary, extended_refs=extended_refs,
+                                        titles_summary=titles_summary, full_audio=full_audio, selected_voice=selected_voice)
              if full_transcript:
+                 save_full_transcript(q_new, result)

      elif tab_main == "🎤 Voice":
          st.subheader("🎤 Voice Input")
          if st.button("📨 Send"):
              process_with_gpt(user_text)
          st.subheader("📜 Chat History")
+         t1,t2=st.tabs(["Claude History","GPT-4o History"])
          with t1:
              for c in st.session_state.chat_history:
                  st.write("**You:**", c["user"])

          st.header("📸 Images & 🎥 Videos")
          tabs = st.tabs(["🖼 Images", "🎥 Video"])
          with tabs[0]:
+             imgs = glob.glob("*.png")+glob.glob("*.jpg")
              if imgs:
+                 c = st.slider("Cols",1,5,3)
+                 cols = st.columns(c)
+                 for i,f in enumerate(imgs):
+                     with cols[i%c]:
+                         st.image(Image.open(f),use_container_width=True)
                          if st.button(f"👀 Analyze {os.path.basename(f)}", key=f"analyze_{f}"):
+                             a = process_image(f,"Describe this image.")
                              st.markdown(a)
              else:
                  st.write("No images found.")
          with tabs[1]:
+             vids = glob.glob("*.mp4")
              if vids:
                  for v in vids:
                      with st.expander(f"🎥 {os.path.basename(v)}"):
                          st.video(v)
                          if st.button(f"Analyze {os.path.basename(v)}", key=f"analyze_{v}"):
+                             a = process_video_with_gpt(v,"Describe video.")
                              st.markdown(a)
              else:
                  st.write("No videos found.")

      elif tab_main == "📝 Editor":
+         if getattr(st.session_state,'current_file',None):
+             st.subheader(f"Editing: {st.session_state.current_file}")
+             new_text = st.text_area("✏️ Content:", st.session_state.file_content, height=300)
+             if st.button("💾 Save"):
+                 with open(st.session_state.current_file,'w',encoding='utf-8') as f:
+                     f.write(new_text)
+                 st.success("Updated!")
+                 st.session_state.should_rerun = True
          else:
+             st.write("Select a file from the sidebar to edit.")

      groups, sorted_prefixes = load_files_for_sidebar()
      display_file_manager_sidebar(groups, sorted_prefixes)

      if st.session_state.viewing_prefix and st.session_state.viewing_prefix in groups:
          st.write("---")
          st.write(f"**Viewing Group:** {st.session_state.viewing_prefix}")
          ext = os.path.splitext(fname)[1].lower().strip('.')
          st.write(f"### {fname}")
          if ext == "md":
+             content = open(f,'r',encoding='utf-8').read()
              st.markdown(content)
          elif ext == "mp3":
              st.audio(f)

      if st.session_state.should_rerun:
          st.session_state.should_rerun = False
+         st.rerun()

+ if __name__=="__main__":
      main()