kevinkal committed
Commit b17b476 · verified · 1 Parent(s): 164dc5b

Update app.py with multimodal OpenRouter endpoint

Files changed (1):
  1. app.py +50 -5
app.py CHANGED
@@ -89,15 +89,12 @@ class ModelName(str, Enum):
     llama_3_3 = "meta-llama/llama-3.3-70b-instruct:free"
     mistral_small_3 = "mistralai/mistral-small-24b-instruct-2501:free"
 
-@app.post("/open-router")
-async def open_router(
+@app.post("/open-router/text")
+async def open_router_text(
     token: Annotated[str, Depends(verify_token)],
     model: ModelName = Query(..., description="Select a model"),
     prompt: str = Query(..., description="Enter your prompt")
 ):
-    print(str(open_router_key))
-    print(model)
-    print(prompt)
     async with httpx.AsyncClient() as client:
         response = await client.post(
             url="https://openrouter.ai/api/v1/chat/completions",
@@ -118,5 +115,53 @@ async def open_router(
             }
         )
 
+    response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)
+    return response.json()
+
+class MultiModelName(str, Enum):
+    qwen_vl_plus = "qwen/qwen-vl-plus:free"
+    qwen_vl_72b = "qwen/qwen2.5-vl-72b-instruct:free"
+    gemini_2_flash_lite = "google/gemini-2.0-flash-lite-preview-02-05:free"
+    gemini_2_pro = "google/gemini-2.0-pro-exp-02-05:free"
+    llama_3_2_vision = "meta-llama/llama-3.2-11b-vision-instruct:free"
+
+@app.post("/open-router/multimodal")
+async def open_router_multimodal(
+    token: Annotated[str, Depends(verify_token)],
+    model: MultiModelName = Query(..., description="Select a model"),
+    prompt: str = Query(..., description="Enter your prompt (ex: What is in this image?)"),
+    image_url: str = Query(..., description="Enter the image URL"),
+):
+    async with httpx.AsyncClient() as client:
+        response = await client.post(
+            url="https://openrouter.ai/api/v1/chat/completions",
+            headers={
+                "Authorization": f"Bearer {str(open_router_key)}",
+                "Content-Type": "application/json",
+                "HTTP-Referer": "<YOUR_SITE_URL>",  # Optional
+                "X-Title": "<YOUR_SITE_NAME>",  # Optional
+            },
+            json={
+                "model": model,
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": [
+                            {
+                                "type": "text",
+                                "text": prompt,
+                            },
+                            {
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": image_url,
+                                }
+                            }
+                        ]
+                    }
+                ],
+            }
+        )
+
     response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)
     return response.json()
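
For a quick smoke test of the new endpoint, a client call along the following lines should work. This is a minimal sketch, not part of the commit: it assumes the app is served at http://localhost:7860 (the usual Hugging Face Spaces port) and that verify_token reads a bearer token from the Authorization header; the host, token, and image URL are placeholders.

import httpx

# Hypothetical client for POST /open-router/multimodal; the host, token,
# and image URL below are placeholders, not values from the commit.
resp = httpx.post(
    "http://localhost:7860/open-router/multimodal",
    headers={"Authorization": "Bearer <YOUR_APP_TOKEN>"},
    params={
        "model": "qwen/qwen2.5-vl-72b-instruct:free",  # any MultiModelName value
        "prompt": "What is in this image?",
        "image_url": "https://example.com/photo.jpg",
    },
    timeout=60.0,  # free-tier vision models can respond slowly
)
resp.raise_for_status()
# The handler returns OpenRouter's chat-completions JSON unchanged, so the
# reply text sits at the standard choices[0].message.content path.
print(resp.json()["choices"][0]["message"]["content"])

Swapping the model query parameter for any other MultiModelName value exercises the remaining vision models the same way.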