# NOTE(review): removed non-Python artifact lines (file-size header, git-blame
# hashes, and a line-number gutter) left over from file extraction — they were
# not part of the module and would be syntax errors.
import argparse
import os
import sys
import time
import uvicorn
import requests
import asyncio
import logging
from pathlib import Path
from fastapi import FastAPI, Depends, HTTPException
from fastapi.responses import HTMLResponse
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from pydantic import BaseModel, Field
from typing import Union, List, Dict, Any
from sse_starlette.sse import EventSourceResponse, ServerSentEvent
from utils.logger import logger
from networks.message_streamer import MessageStreamer
from messagers.message_composer import MessageComposer
from mocks.stream_chat_mocker import stream_chat_mock
from fastapi.middleware.cors import CORSMiddleware
class EmbeddingResponseItem(BaseModel):
    """One embedding entry in an OpenAI-style `/embeddings` response.

    NOTE(review): the endpoint flattens the HF feature-extraction result
    before building these items, so each `embedding` is usually a flat
    vector (List[float]); the original `List[List[float]]` annotation would
    reject that. The Union accepts both shapes and is backward-compatible.
    """

    object: str = "embedding"
    index: int  # position of this embedding in the request's input order
    embedding: Union[List[float], List[List[float]]]
class EmbeddingResponse(BaseModel):
    """OpenAI-style `/embeddings` response envelope.

    Mirrors https://platform.openai.com/docs/api-reference/embeddings:
    a list container holding the embedding items, the model id that
    produced them, and a usage block.
    """

    object: str = "list"
    data: List[EmbeddingResponseItem]  # one item per embedding vector
    model: str  # model id the embeddings were generated with
    usage: Dict[str, Any]  # e.g. {"prompt_tokens": ..., "total_tokens": ...}
class ChatAPIApp:
    """FastAPI application exposing OpenAI-compatible endpoints
    (`/models`, `/chat/completions`, `/embeddings`) that proxy the
    HuggingFace Inference API."""

    def __init__(self):
        self.app = FastAPI(
            docs_url="/",
            title="HuggingFace LLM API",
            swagger_ui_parameters={"defaultModelsExpandDepth": -1},
            version="1.0",
        )
        self.setup_routes()
        # Allow browser clients from any origin to call the API.
        self.app.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],  # You can specify specific origins here
            allow_credentials=True,
            allow_methods=["*"],  # Or specify just the methods you need: ["GET", "POST"]
            allow_headers=["*"],  # Or specify headers you need
        )

    def get_available_models(self):
        """Return the served model catalog in the OpenAI `/models` list format.

        https://platform.openai.com/docs/api-reference/models/list
        ANCHOR[id=available-models]: Available models
        """
        current_time = int(time.time())
        # (model id, huggingface repo, object type, owner) — the description
        # is derived from the repo, so each model is declared exactly once.
        model_specs = [
            ("mixtral-8x7b", "mistralai/Mixtral-8x7B-Instruct-v0.1", "model", "mistralai"),
            ("mistral-7b", "mistralai/Mistral-7B-Instruct-v0.2", "model", "mistralai"),
            ("nous-mixtral-8x7b", "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", "model", "NousResearch"),
            ("gemma-7b", "google/gemma-7b-it", "model", "Google"),
            ("codellama-7b", "codellama/CodeLlama-7b-hf", "model", "codellama"),
            ("bert-base-uncased", "google-bert/bert-base-uncased", "embedding", "google"),
        ]
        self.available_models = {
            "object": "list",
            "data": [
                {
                    "id": model_id,
                    "description": f"[{repo}]: https://huggingface.co/{repo}",
                    "object": obj,
                    "created": current_time,
                    "owned_by": owner,
                }
                for model_id, repo, obj, owner in model_specs
            ],
        }
        return self.available_models

    def extract_api_key(
        credentials: HTTPAuthorizationCredentials = Depends(HTTPBearer(auto_error=False)),
    ):
        """FastAPI dependency that resolves the HF API token.

        Deliberately has no `self`: it is referenced as
        `Depends(extract_api_key)` from the class body, so FastAPI invokes
        it as a plain function.

        Precedence: Bearer token from the request, else the HF_TOKEN
        environment variable. Raises HTTP 403 when no token is available
        or the request token lacks the `hf_` prefix.
        """
        api_key = None
        if credentials:
            api_key = credentials.credentials
            if not api_key.startswith("hf_"):
                logger.error("Invalid HF Token format!")
                raise HTTPException(status_code=403, detail="Invalid API Token format.")
        else:
            api_key = os.getenv("HF_TOKEN")
        if not api_key:
            logger.error("HF Token not provided in request or environment.")
            raise HTTPException(status_code=403, detail="API token not provided.")
        return api_key

    class QueryRequest(BaseModel):
        # Request body for the embeddings endpoint.
        input: str
        model: str = Field(default="bert-base-uncased")
        encoding_format: str

    class ChatCompletionsPostItem(BaseModel):
        # Request body for the chat-completions endpoint.
        model: str = Field(
            default="mixtral-8x7b",
            description="(str) `mixtral-8x7b`",
        )
        messages: list = Field(
            default=[{"role": "user", "content": "Hello, who are you?"}],
            description="(list) Messages",
        )
        temperature: Union[float, None] = Field(
            default=0.5,
            description="(float) Temperature",
        )
        top_p: Union[float, None] = Field(
            default=0.95,
            description="(float) top p",
        )
        max_tokens: Union[int, None] = Field(
            default=-1,
            description="(int) Max tokens",
        )
        use_cache: bool = Field(
            default=False,
            description="(bool) Use cache",
        )
        stream: bool = Field(
            default=False,
            description="(bool) Stream",
        )

    def chat_completions(
        self, item: ChatCompletionsPostItem, api_key: str = Depends(extract_api_key)
    ):
        """Proxy a chat-completion request to the HF Inference API.

        Merges the OpenAI-style message list into a single prompt for the
        selected model, then returns either an SSE stream (item.stream) or
        a single completion dict.
        """
        streamer = MessageStreamer(model=item.model)
        composer = MessageComposer(model=item.model)
        composer.merge(messages=item.messages)
        # streamer.chat = stream_chat_mock
        stream_response = streamer.chat_response(
            prompt=composer.merged_str,
            temperature=item.temperature,
            top_p=item.top_p,
            max_new_tokens=item.max_tokens,
            api_key=api_key,
            use_cache=item.use_cache,
        )
        if item.stream:
            # Periodic comment pings keep intermediaries from closing the
            # SSE connection while the model is generating.
            event_source_response = EventSourceResponse(
                streamer.chat_return_generator(stream_response),
                media_type="text/event-stream",
                ping=2000,
                ping_message_factory=lambda: ServerSentEvent(**{"comment": ""}),
            )
            return event_source_response
        else:
            data_response = streamer.chat_return_dict(stream_response)
            return data_response

    async def embedding(self, request: QueryRequest, api_key: str = Depends(extract_api_key)):
        """Proxy a feature-extraction request to the HF Inference API and
        wrap the result as an OpenAI-style embeddings response.

        Raises 503 when HF reports an error (typically "model loading"),
        500 on an unrecognized response shape.
        """
        api_url = f"https://api-inference.huggingface.co/pipeline/feature-extraction/{request.model}"
        headers = {"Authorization": f"Bearer {api_key}"}
        # `requests` is blocking; run it in a worker thread so the event
        # loop is not stalled for the duration of the HF call. A timeout
        # keeps a wedged upstream from hanging the request forever.
        loop = asyncio.get_running_loop()
        response = await loop.run_in_executor(
            None,
            lambda: requests.post(
                api_url, headers=headers, json={"inputs": request.input}, timeout=60
            ),
        )
        result = response.json()
        # HF signals errors as a dict {"error": ...}; success is a list,
        # so only probe for "error" when we actually got a dict.
        if isinstance(result, dict) and "error" in result:
            error_detail = result.get('error', 'No detailed error message provided.')
            logging.error(f"Error from Hugging Face API: {error_detail}")
            raise HTTPException(status_code=503, detail=f"The model is currently loading, please re-run the query. Detail: {error_detail}")
        if isinstance(result, list) and len(result) > 0 and isinstance(result[0], list):
            # NOTE(review): this flattens one nesting level of the HF
            # result; whether each flattened item is a full vector or a
            # scalar depends on the model's output shape — confirm against
            # the feature-extraction pipeline being used.
            flattened_embeddings = [item for sublist in result for item in sublist]  # Flatten list of lists
            data = [{"object": "embedding", "index": i, "embedding": embedding} for i, embedding in enumerate(flattened_embeddings)]
            return EmbeddingResponse(
                object="list",
                data=data,
                model=request.model,
                # NOTE(review): counts characters, not tokens — kept as-is
                # to preserve the existing response contract.
                usage={"prompt_tokens": len(request.input), "total_tokens": len(request.input)}
            )
        else:
            logging.error(f"Unexpected response format: {result}")
            raise HTTPException(status_code=500, detail="Unexpected response format.")

    def setup_routes(self):
        """Register every endpoint under "", "/v1", "/api" and "/api/v1".

        All prefixes serve traffic; only the "/api/v1" copies are shown in
        the OpenAPI schema/docs to avoid duplicate entries.
        """
        for prefix in ["", "/v1", "/api", "/api/v1"]:
            include_in_schema = prefix == "/api/v1"
            self.app.get(
                prefix + "/models",
                summary="Get available models",
                include_in_schema=include_in_schema,
            )(self.get_available_models)
            self.app.post(
                prefix + "/chat/completions",
                summary="Chat completions in conversation session",
                include_in_schema=include_in_schema,
            )(self.chat_completions)
            self.app.post(
                prefix + "/embeddings",  # Use the specific prefix for this route
                summary="Generate embeddings for the given texts",
                include_in_schema=include_in_schema,
                response_model=EmbeddingResponse  # Adapt based on your actual response model
            )(self.embedding)
class ArgParser(argparse.ArgumentParser):
    """Command-line parser for the HF LLM Chat API server.

    Declares the server options and parses `sys.argv` immediately, so
    callers can read the result from `.args` right after construction.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.add_argument(
            "-s", "--server",
            type=str, default="0.0.0.0",
            help="Server IP for HF LLM Chat API",
        )
        self.add_argument(
            "-p", "--port",
            type=int, default=23333,
            help="Server Port for HF LLM Chat API",
        )
        self.add_argument(
            "-d", "--dev",
            action="store_true", default=False,
            help="Run in dev mode",
        )
        # Parse eagerly; the parsed namespace is exposed as an attribute.
        self.args = self.parse_args(sys.argv[1:])
# Module-level ASGI app so `uvicorn "__main__:app"` (and external runners)
# can import it.
app = ChatAPIApp().app

if __name__ == "__main__":
    args = ArgParser().args
    # Hot-reload only in dev mode; the import-string form is required for
    # uvicorn's reloader to work.
    uvicorn.run("__main__:app", host=args.server, port=args.port, reload=args.dev)

# python -m apis.chat_api  # [Docker] on product mode
# python -m apis.chat_api -d  # [Dev] on develop mode