DeMaking committed on
Commit
1a7b925
verified
1 Parent(s): 338214b

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +121 -0
app.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import logging
3
+ from fastapi import FastAPI, Request
4
+ from huggingface_hub import InferenceClient, login
5
+ import langid
6
+
7
+
8
+ # Configure logging
9
+ logging.basicConfig(
10
+ format="%(asctime)s - %(levelname)s - %(message)s",
11
+ level=logging.INFO
12
+ )
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
+ # Get Hugging Face API token from environment variable
17
+ HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
18
+ if not HF_HUB_TOKEN:
19
+ raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN.")
20
+
21
+
22
+ # Login and initialize the InferenceClient
23
+ login(token=HF_HUB_TOKEN)
24
+ client = InferenceClient(api_key=HF_HUB_TOKEN)
25
+
26
+
27
+ # Create FastAPI app instance
28
+ app = FastAPI()
29
+
30
+
31
+ def detect_language(user_input: str) -> str:
32
+ """
33
+ Detect the language of the input text.
34
+ Returns "hebrew" if Hebrew, "english" if English, or "unsupported" otherwise.
35
+ """
36
+ try:
37
+ lang, _ = langid.classify(user_input)
38
+ if lang == "he":
39
+ return "hebrew"
40
+ elif lang == "en":
41
+ return "english"
42
+ else:
43
+ return "unsupported"
44
+ except Exception as e:
45
+ logger.error(f"Language detection error: {e}")
46
+ return "unsupported"
47
+
48
+
49
+ def generate_response(text: str) -> str:
50
+ """
51
+ Generate a response based on the input text.
52
+ Selects a prompt and model according to the detected language,
53
+ and calls the Hugging Face chat completion API.
54
+ """
55
+ language = detect_language(text)
56
+ if language == "hebrew":
57
+ # Hebrew prompt: answer shortly but explain your decision-making process
58
+ content = "转砖诪讜专 注诇 转砖讜讘讛 拽爪专讛, 讗讘诇 转住驻专 讗讬讱 拽讬讘诇转 讗转 讛讛讞诇讟讛, " + text
59
+ model = "yam-peleg/Hebrew-Gemma-11B-V2" # You can change this model as needed.
60
+ elif language == "english":
61
+ content = "keep it short but tell your decision making process, " + text
62
+ model = "mistralai/Mistral-Nemo-Instruct-2407"
63
+ else:
64
+ return "Sorry, I only support Hebrew and English."
65
+
66
+ messages = [{"role": "user", "content": content}]
67
+
68
+ try:
69
+ completion = client.chat.completions.create(
70
+ model=model,
71
+ messages=messages,
72
+ max_tokens=2048,
73
+ temperature=0.5,
74
+ top_p=0.7
75
+ )
76
+ return completion.choices[0].message.content
77
+ except Exception as e:
78
+ logger.error(f"Error generating response: {e}")
79
+ return "Error: Could not generate response."
80
+
81
+
82
+ @app.post("/generate_response")
83
+ async def generate_text(request: Request):
84
+ """
85
+ API endpoint that accepts a JSON payload with a "text" field,
86
+ and returns the generated response from the chat model.
87
+ """
88
+ try:
89
+ data = await request.json()
90
+ text = data.get("text", "").strip()
91
+ if not text:
92
+ return {"error": "No text provided"}
93
+ response = generate_response(text)
94
+ return {"response": response}
95
+ except Exception as e:
96
+ logger.error(f"Error processing request: {e}")
97
+ return {"error": "An unexpected error occurred."}
98
+
99
+
100
+ @app.get("/")
101
+ async def root():
102
+ """
103
+ Root endpoint for checking if the API is running.
104
+ """
105
+ return {"message": "Decision Helper API is running!"}
106
+
107
+
108
+ # Function to run the Telegram bot
109
+ def run_bot():
110
+ logger.info("Starting Telegram bot...")
111
+ # Use subprocess to run bot.py in parallel
112
+ import subprocess
113
+ subprocess.Popen(["python3", "bot.py"])
114
+
115
+
116
+ if __name__ == "__main__":
117
+ # When running app.py directly, start the bot as well.
118
+ run_bot()
119
+ # Uncomment the next lines to run the FastAPI server standalone.
120
+ # import uvicorn
121
+ # uvicorn.run(app, host="0.0.0.0", port=7860)