mgbam committed on
Commit
3a43332
·
verified ·
1 Parent(s): e5b401d

Update api_clients.py

Browse files
Files changed (1) hide show
  1. api_clients.py +155 -155
api_clients.py CHANGED
@@ -1,156 +1,156 @@
1
- import os
2
- from typing import Dict, List, Optional, Tuple
3
-
4
- import gradio as gr
5
- from huggingface_hub import InferenceClient
6
- from tavily import TavilyClient
7
-
8
- from config import (
9
- HTML_SYSTEM_PROMPT, GENERIC_SYSTEM_PROMPT, HTML_SYSTEM_PROMPT_WITH_SEARCH,
10
- GENERIC_SYSTEM_PROMPT_WITH_SEARCH, FollowUpSystemPrompt
11
- )
12
- from chat_processing import (
13
- history_to_messages, messages_to_history,
14
- remove_code_block, apply_search_replace_changes, send_to_sandbox,
15
- history_to_chatbot_messages, get_gradio_language
16
- )
17
- from file_processing import ( # file_processing.py
18
- extract_text_from_file, create_multimodal_message,
19
- )
20
- from web_extraction import extract_website_content
21
-
22
- # HF Inference Client
23
- HF_TOKEN = os.getenv('HF_TOKEN')
24
-
25
- def get_inference_client(model_id):
26
- """Return an InferenceClient with provider based on model_id."""
27
- provider = "groq" if model_id == "moonshotai/Kimi-K2-Instruct" else "auto"
28
- return InferenceClient(
29
- provider=provider,
30
- api_key=HF_TOKEN,
31
- bill_to="huggingface"
32
- )
33
-
34
- # Tavily Search Client
35
- TAVILY_API_KEY = os.getenv('TAVILY_API_KEY')
36
- tavily_client = None
37
- if TAVILY_API_KEY:
38
- try:
39
- tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
40
- except Exception as e:
41
- print(f"Failed to initialize Tavily client: {e}")
42
- tavily_client = None
43
-
44
- def generation_code(query: Optional[str], image: Optional[gr.Image], file: Optional[str], website_url: Optional[str], _setting: Dict[str, str], _history: Optional[List[Tuple[str, str]]], _current_model: Dict, enable_search: bool = False, language: str = "html"):
45
- if query is None:
46
- query = ''
47
- if _history is None:
48
- _history = []
49
-
50
- # Check if there's existing HTML content in history to determine if this is a modification request
51
- has_existing_html = False
52
- if _history:
53
- # Check the last assistant message for HTML content
54
- last_assistant_msg = _history[-1][1] if len(_history) > 0 else ""
55
- if '<!DOCTYPE html>' in last_assistant_msg or '<html' in last_assistant_msg:
56
- has_existing_html = True
57
-
58
- # Choose system prompt based on context
59
- if has_existing_html:
60
- # Use follow-up prompt for modifying existing HTML
61
- system_prompt = FollowUpSystemPrompt
62
- else:
63
- # Use language-specific prompt
64
- if language == "html":
65
- system_prompt = HTML_SYSTEM_PROMPT_WITH_SEARCH if enable_search else HTML_SYSTEM_PROMPT
66
- else:
67
- system_prompt = GENERIC_SYSTEM_PROMPT_WITH_SEARCH.format(language=language) if enable_search else GENERIC_SYSTEM_PROMPT.format(language=language)
68
-
69
- messages = history_to_messages(_history, system_prompt)
70
-
71
- # Extract file text and append to query if file is present
72
- file_text = ""
73
- if file:
74
- file_text = extract_text_from_file(file)
75
- if file_text:
76
- file_text = file_text[:5000] # Limit to 5000 chars for prompt size
77
- query = f"{query}\n\n[Reference file content below]\n{file_text}"
78
-
79
- # Extract website content and append to query if website URL is present
80
- website_text = ""
81
- if website_url and website_url.strip():
82
- website_text = extract_website_content(website_url.strip())
83
- if website_text and not website_text.startswith("Error"):
84
- website_text = website_text[:8000] # Limit to 8000 chars for prompt size
85
- query = f"{query}\n\n[Website content to redesign below]\n{website_text}"
86
- elif website_text.startswith("Error"):
87
- # Provide helpful guidance when website extraction fails
88
- fallback_guidance = """
89
- Since I couldn't extract the website content, please provide additional details about what you'd like to build:
90
- 1. What type of website is this? (e.g., e-commerce, blog, portfolio, dashboard)
91
- 2. What are the main features you want?
92
- 3. What's the target audience?
93
- 4. Any specific design preferences? (colors, style, layout)
94
- This will help me create a better design for you."""
95
- query = f"{query}\n\n[Error extracting website: {website_text}]{fallback_guidance}"
96
-
97
- # Enhance query with search if enabled
98
- enhanced_query = enhance_query_with_search(query, enable_search)
99
-
100
- # Use dynamic client based on selected model
101
- client = get_inference_client(_current_model["id"])
102
-
103
- if image is not None:
104
- messages.append(create_multimodal_message(enhanced_query, image))
105
- else:
106
- messages.append({'role': 'user', 'content': enhanced_query})
107
- try:
108
- completion = client.chat.completions.create(
109
- model=_current_model["id"],
110
- messages=messages,
111
- stream=True,
112
- max_tokens=5000
113
- )
114
- content = ""
115
- for chunk in completion:
116
- if chunk.choices[0].delta.content:
117
- content += chunk.choices[0].delta.content
118
- clean_code = remove_code_block(content)
119
- if has_existing_html:
120
- # Fallback: If the model returns a full HTML file, use it directly
121
- if clean_code.strip().startswith("<!DOCTYPE html>") or clean_code.strip().startswith("<html"):
122
- yield {
123
- "code_output": gr.update(value=clean_code, language=get_gradio_language(language)),
124
- "history_output": history_to_chatbot_messages(_history),
125
- "sandbox": send_to_sandbox(clean_code) if language == "html" else "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your code using the download button above.</div>",
126
- }
127
- else:
128
- last_html = _history[-1][1] if _history else ""
129
- modified_html = apply_search_replace_changes(last_html, clean_code)
130
- clean_html = remove_code_block(modified_html)
131
- yield {
132
- "code_output": gr.update(value=clean_html, language=get_gradio_language(language)),
133
- "history_output": history_to_chatbot_messages(_history),
134
- "sandbox": send_to_sandbox(clean_html) if language == "html" else "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your code using the download button above.</div>",
135
- }
136
- else:
137
- yield {
138
- "code_output": gr.update(value=clean_code, language=get_gradio_language(language)),
139
- "history_output": history_to_chatbot_messages(_history),
140
- "sandbox": send_to_sandbox(clean_code) if language == "html" else "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your code using the download button above.</div>",
141
- }
142
- # Final update
143
- _history = messages_to_history(messages + [{'role': 'assistant', 'content': content}])
144
- yield {
145
- "code_output": remove_code_block(content),
146
- "history": _history,
147
- "sandbox": send_to_sandbox(remove_code_block(content)),
148
- "history_output": history_to_chatbot_messages(_history),
149
- }
150
-
151
- except Exception as e:
152
- error_message = f"Error: {str(e)}"
153
- yield {
154
- "code_output": error_message,
155
- "history_output": history_to_chatbot_messages(_history),
156
  }
 
1
+ import os
2
+ from typing import Dict, List, Optional, Tuple
3
+
4
+ import gradio as gr
5
+ from huggingface_hub import InferenceClient
6
+ from tavily import TavilyClient
7
+
8
+ from config import (
9
+ HTML_SYSTEM_PROMPT, GENERIC_SYSTEM_PROMPT, HTML_SYSTEM_PROMPT_WITH_SEARCH,
10
+ GENERIC_SYSTEM_PROMPT_WITH_SEARCH, FollowUpSystemPrompt
11
+ )
12
+ from chat_processing import (
13
+ history_to_messages, messages_to_history,
14
+ remove_code_block, apply_search_replace_changes, send_to_sandbox,
15
+ history_to_chatbot_messages, get_gradio_language
16
+ )
17
+ from file_processing import ( # file_processing.py
18
+ extract_text_from_file, create_multimodal_message,
19
+ )
20
+ from web_extraction import extract_website_content, enhance_query_with_search
21
+
22
+ # HF Inference Client
23
+ HF_TOKEN = os.getenv('HF_TOKEN')
24
+
25
def get_inference_client(model_id):
    """Build an InferenceClient for *model_id*.

    Kimi-K2 is routed to the Groq provider explicitly; every other model
    uses automatic provider selection. Usage is billed to the
    "huggingface" organization.
    """
    if model_id == "moonshotai/Kimi-K2-Instruct":
        provider = "groq"
    else:
        provider = "auto"
    return InferenceClient(provider=provider, api_key=HF_TOKEN, bill_to="huggingface")
33
+
34
# Tavily Search Client — optional; only instantiated when an API key is
# present in the environment. Search features degrade gracefully when the
# client stays None.
TAVILY_API_KEY = os.getenv('TAVILY_API_KEY')

tavily_client = None
if TAVILY_API_KEY:
    try:
        tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
    except Exception as e:
        # Best-effort: report the failure and continue without search.
        print(f"Failed to initialize Tavily client: {e}")
        tavily_client = None
43
+
44
def generation_code(query: Optional[str], image: Optional[gr.Image], file: Optional[str], website_url: Optional[str], _setting: Dict[str, str], _history: Optional[List[Tuple[str, str]]], _current_model: Dict, enable_search: bool = False, language: str = "html"):
    """Stream generated code for *query*, optionally grounded in an image,
    an uploaded file, or scraped website content.

    Yields dicts of Gradio component updates. Intermediate yields carry
    "code_output", "history_output" and "sandbox"; the final yield also
    carries the updated "history" state. On any exception a single dict
    containing the error message is yielded instead.
    """
    if query is None:
        query = ''
    if _history is None:
        _history = []

    # A trailing assistant message that already contains a full HTML page
    # means this request is a follow-up modification, not a fresh build.
    has_existing_html = False
    if _history:
        last_assistant_msg = _history[-1][1] if len(_history) > 0 else ""
        if '<!DOCTYPE html>' in last_assistant_msg or '<html' in last_assistant_msg:
            has_existing_html = True

    # Choose system prompt based on context.
    if has_existing_html:
        # Use follow-up prompt for modifying existing HTML.
        system_prompt = FollowUpSystemPrompt
    else:
        # Use language-specific prompt, with the search-aware variant when enabled.
        if language == "html":
            system_prompt = HTML_SYSTEM_PROMPT_WITH_SEARCH if enable_search else HTML_SYSTEM_PROMPT
        else:
            system_prompt = GENERIC_SYSTEM_PROMPT_WITH_SEARCH.format(language=language) if enable_search else GENERIC_SYSTEM_PROMPT.format(language=language)

    messages = history_to_messages(_history, system_prompt)

    # Extract file text and append to query if a file is present.
    file_text = ""
    if file:
        file_text = extract_text_from_file(file)
        if file_text:
            file_text = file_text[:5000]  # Limit to 5000 chars for prompt size
            query = f"{query}\n\n[Reference file content below]\n{file_text}"

    # Extract website content and append to query if a website URL is present.
    website_text = ""
    if website_url and website_url.strip():
        website_text = extract_website_content(website_url.strip())
        if website_text and not website_text.startswith("Error"):
            website_text = website_text[:8000]  # Limit to 8000 chars for prompt size
            query = f"{query}\n\n[Website content to redesign below]\n{website_text}"
        elif website_text.startswith("Error"):
            # Provide helpful guidance when website extraction fails.
            fallback_guidance = """
Since I couldn't extract the website content, please provide additional details about what you'd like to build:
1. What type of website is this? (e.g., e-commerce, blog, portfolio, dashboard)
2. What are the main features you want?
3. What's the target audience?
4. Any specific design preferences? (colors, style, layout)
This will help me create a better design for you."""
            query = f"{query}\n\n[Error extracting website: {website_text}]{fallback_guidance}"

    # Enhance query with search results if enabled.
    enhanced_query = enhance_query_with_search(query, enable_search)

    # Use a dynamic client based on the selected model.
    client = get_inference_client(_current_model["id"])

    if image is not None:
        messages.append(create_multimodal_message(enhanced_query, image))
    else:
        messages.append({'role': 'user', 'content': enhanced_query})

    # Single shared placeholder for the sandbox when the language is not HTML
    # (previously duplicated three times inline).
    non_html_preview = "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your code using the download button above.</div>"

    try:
        completion = client.chat.completions.create(
            model=_current_model["id"],
            messages=messages,
            stream=True,
            max_tokens=5000
        )
        content = ""
        for chunk in completion:
            # Guard: some providers emit keep-alive / usage-only chunks with an
            # empty `choices` list; indexing them would raise IndexError
            # mid-stream and abort generation.
            if not chunk.choices:
                continue
            if chunk.choices[0].delta.content:
                content += chunk.choices[0].delta.content
            clean_code = remove_code_block(content)
            if has_existing_html:
                # Fallback: if the model returns a full HTML file, use it directly.
                if clean_code.strip().startswith("<!DOCTYPE html>") or clean_code.strip().startswith("<html"):
                    yield {
                        "code_output": gr.update(value=clean_code, language=get_gradio_language(language)),
                        "history_output": history_to_chatbot_messages(_history),
                        "sandbox": send_to_sandbox(clean_code) if language == "html" else non_html_preview,
                    }
                else:
                    # Otherwise treat the output as search/replace edits against
                    # the last HTML document in history.
                    last_html = _history[-1][1] if _history else ""
                    modified_html = apply_search_replace_changes(last_html, clean_code)
                    clean_html = remove_code_block(modified_html)
                    yield {
                        "code_output": gr.update(value=clean_html, language=get_gradio_language(language)),
                        "history_output": history_to_chatbot_messages(_history),
                        "sandbox": send_to_sandbox(clean_html) if language == "html" else non_html_preview,
                    }
            else:
                yield {
                    "code_output": gr.update(value=clean_code, language=get_gradio_language(language)),
                    "history_output": history_to_chatbot_messages(_history),
                    "sandbox": send_to_sandbox(clean_code) if language == "html" else non_html_preview,
                }
        # Final update: persist the full exchange into history state.
        _history = messages_to_history(messages + [{'role': 'assistant', 'content': content}])
        final_code = remove_code_block(content)  # computed once (was duplicated)
        yield {
            "code_output": final_code,
            "history": _history,
            "sandbox": send_to_sandbox(final_code),
            "history_output": history_to_chatbot_messages(_history),
        }

    except Exception as e:
        error_message = f"Error: {str(e)}"
        yield {
            "code_output": error_message,
            "history_output": history_to_chatbot_messages(_history),
        }