Update app.py
app.py
CHANGED
@@ -13,6 +13,13 @@ import asyncio
 import aiohttp
 import threading
 import json
+import os
+import time
+from huggingface_hub import HfApi
+from huggingface_hub import InferenceClient
+from PIL import Image
+import io
+
 app = FastAPI()
 
 @app.get("/")
@@ -23,7 +30,6 @@ async def root():
 async def health_check():
     return {"status": "OK"}
 
-
 @app.get("/api/search")
 async def search(
     q: str,
@@ -111,7 +117,6 @@ async def videos(
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error during video search: {e}")
 
-
 @app.get("/api/news")
 async def news(
     q: str,
@@ -135,7 +140,6 @@ async def news(
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error during news search: {e}")
 
-
 @app.get("/api/llm")
 async def llm_chat(
     model: str,
@@ -148,13 +152,12 @@ async def llm_chat(
         if system_prompt:
             messages.insert(0, {"role": "system", "content": system_prompt}) # Add system message at the beginning
 
-        llm = LLM(model=model)
+        llm = LLM(model=model)
         response = llm.chat(messages=messages)
         return JSONResponse(content={"response": response})
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error during LLM chat: {e}")
 
-
 @app.get("/api/fastAI")
 async def fast_ai(user: str, model: str = "llama3-70b", system: str = "Answer as concisely as possible."):
     """Get a response from the Snova AI service."""
@@ -171,7 +174,7 @@ async def fast_ai(user: str, model: str = "llama3-70b", system: str = "Answer as
         return StreamingResponse(fastai_stream(user, model, system), media_type="text/event-stream")
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error during Snova AI request: {e}")
-
+
 @app.get("/api/answers")
 async def answers(q: str, proxy: Optional[str] = None):
     """Get instant answers for a query."""
@@ -208,7 +211,7 @@ def extract_text_from_webpage(html_content):
 
 async def fetch_and_extract(url, max_chars, proxy: Optional[str] = None):
     """Fetches a URL and extracts text asynchronously."""
-
+
     async with aiohttp.ClientSession() as session:
         try:
             async with session.get(url, headers={"User-Agent": "Mozilla/5.0"}, proxy=proxy) as response:
@@ -333,17 +336,16 @@ def web_search_and_extract_threading(
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error during search and extraction: {e}")
 
-
 @app.get("/api/adv_web_search")
 async def adv_web_search(
     q: str,
     model: str = "gpt-3.5",
-    max_results: int = 3,
+    max_results: int = 3,
     timelimit: Optional[str] = None,
     safesearch: str = "moderate",
    region: str = "wt-wt",
     backend: str = "html",
-    max_chars: int = 6000,
+    max_chars: int = 6000,
     system_prompt: str = "You are Most Advanced and Powerful Ai chatbot, User ask you questions and you have to answer that, You are also provided with Google Search Results, To increase your accuracy and providing real time data. Your task is to answer in best way to user.",
     proxy: Optional[str] = None
 ):
@@ -353,12 +355,12 @@ async def adv_web_search(
     try:
         with WEBS(proxy=proxy) as webs:
             # 1. Perform the web search
-            search_results = webs.text(keywords=q, region=region,
+            search_results = webs.text(keywords=q, region=region,
                                        safesearch=safesearch,
-                                       timelimit=timelimit, backend=backend,
+                                       timelimit=timelimit, backend=backend,
                                        max_results=max_results)
 
-        # 2. Extract text from top search result URLs asynchronously
+        # 2. Extract text from top search result URLs asynchronously
         extracted_text = ""
         tasks = [fetch_and_extract(result['href'], max_chars, proxy) for result in search_results if 'href' in result]
         extracted_results = await asyncio.gather(*tasks)
@@ -383,7 +385,6 @@ async def adv_web_search(
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error during advanced search: {e}")
 
-
 @app.get("/api/website_summarizer")
 async def website_summarizer(url: str, proxy: Optional[str] = None):
     """Summarizes the content of a given URL using a chat model."""
@@ -408,7 +409,7 @@ async def website_summarizer(url: str, proxy: Optional[str] = None):
         raise HTTPException(status_code=500, detail=f"Error fetching or processing URL: {e}")
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error during summarization: {e}")
-
+
 @app.get("/api/ask_website")
 async def ask_website(url: str, question: str, model: str = "llama-3-70b", proxy: Optional[str] = None):
     """
@@ -438,26 +439,23 @@ async def ask_website(url: str, question: str, model: str = "llama-3-70b", proxy
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error during question answering: {e}")
 
-from huggingface_hub import InferenceClient
 client_sd3 = InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")
 
-from PIL import Image
-
 @app.get("/api/sd3")
 def sd3(prompt :str = "",
-        steps: int =
+        steps: int = 20,
         width: int = 1000,
         height: int = 1000
 ):
     try:
         image = client_sd3.text_to_image(prompt = f"{prompt}, hd, high quality, 4k, masterpiece",
-                                         num_inference_steps = steps,
+                                         num_inference_steps = steps,
                                          width = width, height = height )
         image = Image.open(io.BytesIO(image))
         return image
     except Exception as e:
-        raise HTTPException(status_code=500, detail=f"Error during image generation: {e}")
-
+        raise HTTPException(status_code=500, detail=f"Error during image generation: {e}")
+
 @app.get("/api/maps")
 async def maps(
     q: str,
@@ -511,7 +509,6 @@ def google_translate(q: str, from_: Optional[str] = 'auto', to: str = "en"):
         return JSONResponse(content=jsonable_encoder({"detected_language": from_ , "original": q , "translated": result}))
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error during translation: {e}")
-
 
 @app.get("/api/youtube/transcript")
 async def youtube_transcript(
@@ -527,7 +524,7 @@ async def youtube_transcript(
         return JSONResponse(content=jsonable_encoder(transcript))
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"Error getting YouTube transcript: {e}")
-
+
 import requests
 @app.get("/weather/json/{location}")
 def get_weather_json(location: str):
@@ -550,4 +547,26 @@ def get_ascii_weather(location: str):
 # Run the API server if this script is executed
 if __name__ == "__main__":
     import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=8083)
+    uvicorn.run(app, host="0.0.0.0", port=8083)
+
+def main():
+    # Retrieve the space ID and token from environment variables
+    space_id = os.getenv("SPACE_ID")
+    token = os.getenv("HF_TOKEN")
+
+    # Initialize the HfApi with the retrieved token
+    api = HfApi(token=token)
+
+    while True:
+        try:
+            # Restart the space
+            api.restart_space(space_id, factory_reboot=False)
+            print(f"Successfully restarted the space: {space_id}")
+        except Exception as e:
+            print(f"Error restarting the space: {e}")
+
+        # Wait for 10 minutes before restarting again
+        time.sleep(600)  # Sleep for 600 seconds (10 minutes)
+
+if __name__ == "__main__":
+    main()
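For reference, a minimal client-side sketch for exercising two of the endpoints touched by this commit. It assumes app.py is running locally on the port configured at the bottom of the file; the query values are illustrative placeholders, not part of the commit.

import requests

BASE = "http://localhost:8083"  # assumption: local run with the port from app.py

# /api/adv_web_search: web search, concurrent page extraction, then an LLM answer
resp = requests.get(f"{BASE}/api/adv_web_search",
                    params={"q": "What is FastAPI?", "max_results": 3, "max_chars": 6000})
print(resp.json())

# /api/website_summarizer: summarize a single page
resp = requests.get(f"{BASE}/api/website_summarizer", params={"url": "https://example.com"})
print(resp.json())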
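The /api/fastAI route replies with server-sent events via StreamingResponse, so a client has to read it incrementally. A sketch with requests, using the parameter names from the function signature (the values are placeholders):

import requests

with requests.get("http://localhost:8083/api/fastAI",
                  params={"user": "Hello!", "model": "llama3-70b"}, stream=True) as r:
    for line in r.iter_lines(decode_unicode=True):
        if line:  # skip the blank separator lines between SSE events
            print(line)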
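One note on the new keep-alive block: uvicorn.run() blocks until the server exits, so as written the main() restart loop only starts once the server has stopped. If the intent is to run both at once (an assumption, not something this commit does), the loop could be moved to a daemon thread using the threading module app.py already imports:

import threading

if __name__ == "__main__":
    import uvicorn
    # Run the restart loop in the background; daemon=True lets the process
    # exit cleanly when the server shuts down.
    threading.Thread(target=main, daemon=True).start()
    uvicorn.run(app, host="0.0.0.0", port=8083)  # blocks here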
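A caveat on the /api/sd3 handler: in current huggingface_hub releases, InferenceClient.text_to_image returns a PIL.Image.Image rather than raw bytes, in which case Image.open(io.BytesIO(image)) would raise, and FastAPI cannot serialize a PIL image on its own. A sketch of a variant (hypothetical endpoint name, assuming the client_sd3 and app objects from app.py are in scope) that encodes the image to PNG before returning it:

import io
from fastapi import Response

@app.get("/api/sd3_png")  # hypothetical variant, not part of this commit
def sd3_png(prompt: str = "", steps: int = 20, width: int = 1000, height: int = 1000):
    # Assumes text_to_image returns a PIL.Image.Image
    image = client_sd3.text_to_image(f"{prompt}, hd, high quality, 4k, masterpiece",
                                     num_inference_steps=steps, width=width, height=height)
    buf = io.BytesIO()
    image.save(buf, format="PNG")  # encode the PIL image as PNG bytes
    return Response(content=buf.getvalue(), media_type="image/png")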