mgbam committed on
Commit
8d36c79
·
verified ·
1 Parent(s): 33d1b65

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -21
app.py CHANGED
@@ -1,24 +1,22 @@
1
- # app.py
2
  """
3
- AnyCoder / Shasha AI – Gradio back‑end
4
 
5
- Shows the custom front‑end shipped in index.html (plus static/style.css & static/index.js).
6
- • Exposes one JSON endpoint (`POST /run/predict`) that the JS front‑end
7
  calls to run model inference.
8
- • Keeps all existing helpers (hf_client, inference, utils, deploy …).
9
  """
10
 
11
  from pathlib import Path
12
- from typing import List, Tuple, Dict, Any
13
 
14
  import gradio as gr
15
 
16
- # ---- local helpers (unchanged) --------------------------------------------
17
- from inference import chat_completion
18
- from tavily_search import enhance_query_with_search
19
- from deploy import send_to_sandbox
20
- from models import AVAILABLE_MODELS, find_model, ModelInfo
21
- from utils import (
22
  extract_text_from_file,
23
  extract_website_content,
24
  history_to_messages,
@@ -52,10 +50,10 @@ def generate(
52
  enable_search: bool,
53
  history: History | None,
54
  ) -> Tuple[str, History]:
55
- """Called by the JS front‑end via fetch('/run/predict')."""
56
  history = history or []
57
 
58
- # --- build system + messages ------------------------------------------
59
  system_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
60
  messages = history_to_messages(history, system_prompt)
61
 
@@ -65,20 +63,20 @@ def generate(
65
  ctx_parts.append("[File]")
66
  ctx_parts.append(extract_text_from_file(file_path)[:5000])
67
  if website_url:
68
- html = extract_website_content(website_url)
69
- if not html.startswith("Error"):
70
  ctx_parts.append("[Website]")
71
- ctx_parts.append(html[:8000])
72
 
73
  user_query = "\n\n".join(filter(None, ctx_parts))
74
  user_query = enhance_query_with_search(user_query, enable_search)
75
  messages.append({"role": "user", "content": user_query})
76
 
77
- # --- run model ---------------------------------------------------------
78
  model: ModelInfo = find_model(model_id) or AVAILABLE_MODELS[0]
79
  answer = chat_completion(model.id, messages)
80
 
81
- # --- post‑process ------------------------------------------------------
82
  if language == "transformers.js":
83
  files = parse_transformers_js_output(answer)
84
  code = format_transformers_js_output(files)
@@ -96,8 +94,8 @@ HTML_SOURCE = Path("index.html").read_text(encoding="utf-8")
96
 
97
  # ------------------- Gradio UI ---------------------------------------------
98
  with gr.Blocks(css="body{margin:0}", title="AnyCoder AI") as demo:
99
- # 1 visible: your own UI
100
- gr.HTML(HTML_SOURCE, sanitize=False)
101
 
102
  # 2 hidden: API inputs / outputs
103
  with gr.Group(visible=False) as api:
 
 
1
  """
2
+ AnyCoder / Shasha AI – Gradio back‑end
3
 
4
+ Serves the custom front‑end shipped in index.html (+ static/style.css & static/index.js).
5
+ • Exposes one JSON endpoint (`POST /run/predict`) that the JS front‑end
6
  calls to run model inference.
 
7
  """
8
 
9
  from pathlib import Path
10
+ from typing import List, Tuple
11
 
12
  import gradio as gr
13
 
14
+ # ---- local helpers --------------------------------------------------------
15
+ from inference import chat_completion
16
+ from tavily_search import enhance_query_with_search
17
+ from deploy import send_to_sandbox
18
+ from models import AVAILABLE_MODELS, find_model, ModelInfo
19
+ from utils import (
20
  extract_text_from_file,
21
  extract_website_content,
22
  history_to_messages,
 
50
  enable_search: bool,
51
  history: History | None,
52
  ) -> Tuple[str, History]:
53
+ """Called by the JS front‑end via POST /run/predict."""
54
  history = history or []
55
 
56
+ # ----- build system + messages -----------------------------------------
57
  system_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
58
  messages = history_to_messages(history, system_prompt)
59
 
 
63
  ctx_parts.append("[File]")
64
  ctx_parts.append(extract_text_from_file(file_path)[:5000])
65
  if website_url:
66
+ site_html = extract_website_content(website_url)
67
+ if not site_html.startswith("Error"):
68
  ctx_parts.append("[Website]")
69
+ ctx_parts.append(site_html[:8000])
70
 
71
  user_query = "\n\n".join(filter(None, ctx_parts))
72
  user_query = enhance_query_with_search(user_query, enable_search)
73
  messages.append({"role": "user", "content": user_query})
74
 
75
+ # ----- run model --------------------------------------------------------
76
  model: ModelInfo = find_model(model_id) or AVAILABLE_MODELS[0]
77
  answer = chat_completion(model.id, messages)
78
 
79
+ # ----- post‑process output ---------------------------------------------
80
  if language == "transformers.js":
81
  files = parse_transformers_js_output(answer)
82
  code = format_transformers_js_output(files)
 
94
 
95
  # ------------------- Gradio UI ---------------------------------------------
96
  with gr.Blocks(css="body{margin:0}", title="AnyCoder AI") as demo:
97
+ # 1 visible: your custom front‑end
98
+ gr.HTML(HTML_SOURCE) # <- sanitize=False removed
99
 
100
  # 2 hidden: API inputs / outputs
101
  with gr.Group(visible=False) as api: