wilson989 committed
Commit 7fe38b5
1 Parent(s): ed5ef60

Update app.py

Files changed (1):
  1. app.py (+108, -89)

app.py CHANGED
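In brief: this commit rewrites app.py. The catch-all POST route and the substring-based retry (`'curl_cffi.requests.errors.RequestsError' in response`) are gone; generation moves into a gen_resp helper that retries on any exception, capped by a MAX_ATTEMPTS environment variable (default 10). The endpoint now forwards temperature, top_p, and max_tokens to g4f, logs via loguru instead of the stdlib logger, defaults the model to gpt-3.5-turbo instead of gpt-4-32k, and the stream no longer emits a data: [DONE] sentinel.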
@@ -1,117 +1,136 @@
-import os
-import time
 import json
 import random
-#import requests
-#from gevent import pywsgi

 import g4f
-from fastapi import FastAPI, Request, Response
-from fastapi.responses import JSONResponse, StreamingResponse
-from fastapi.middleware.cors import CORSMiddleware
-import logging
-import uvicorn

 from g4f import ChatCompletion
-#from contextlib import closing

-# Import the nest_asyncio module
 import nest_asyncio
-g4f.debug.logging = True

 nest_asyncio.apply()

 app = FastAPI()
 app.add_middleware(
     CORSMiddleware,
     allow_origins=["*"],
-    allow_credentials=True,
     allow_methods=["*"],
     allow_headers=["*"],
 )
-LOG = logging.getLogger(__name__)


 @app.post("/chat/completions")
 @app.post("/v1/chat/completions")
-@app.post("/")
 async def chat_completions(request: Request):
-    data = await request.json()
-    streaming = data.get('stream', False)
-    model = data.get('model', 'gpt-4-32k')
-    messages = data.get('messages')
-
-    response = ChatCompletion.create(model=model, stream=streaming,
-                                     messages=messages, provider=g4f.Provider.Bing)
-
-    if not streaming:
-        while 'curl_cffi.requests.errors.RequestsError' in response:
-            response = ChatCompletion.create(model=model, stream=streaming,
-                                             messages=messages, provider=g4f.Provider.Bing)
-
-        completion_timestamp = int(time.time())
-        completion_id = ''.join(random.choices(
-            'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
-
-        return JSONResponse({
-            'id': 'chatcmpl-%s' % completion_id,
-            'object': 'chat.completion',
-            'created': completion_timestamp,
-            'model': model,
-            'usage': {
-                'prompt_tokens': 0,
-                'completion_tokens': 0,
-                'total_tokens': 0
-            },
-            'choices': [{
-                'message': {
-                    'role': 'assistant',
-                    'content': response
-                },
-                'finish_reason': 'stop',
-                'index': 0
-            }]
-        })
-
-    def stream():
-        completion_data = {
-            'id': '',
-            'object': 'chat.completion.chunk',
-            'created': 0,
-            'model': 'gpt-4-32k',
-            'choices': [
                 {
-                    'delta': {
-                        'content': ""
                     },
-                    'index': 0,
-                    'finish_reason': None
                 }
-            ]
         }

-        for token in response:
-            completion_id = ''.join(
-                random.choices('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
-            completion_timestamp = int(time.time())
-            completion_data['id'] = f'chatcmpl-{completion_id}'
-            completion_data['created'] = completion_timestamp
-            completion_data['choices'][0]['delta']['content'] = token
-            if token.startswith("an error occured"):
-                completion_data['choices'][0]['delta']['content'] = "Server Response Error, please try again.\n"
-                completion_data['choices'][0]['delta']['stop'] = "error"
-                yield 'data: %s\n\ndata: [DONE]\n\n' % json.dumps(completion_data, separators=(',', ':'))
-                return
-            yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',', ':'))
-            time.sleep(0.1)
-
-        completion_data['choices'][0]['finish_reason'] = "stop"
-        completion_data['choices'][0]['delta']['content'] = ""
-        yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',', ':'))
-        yield 'data: [DONE]\n\n'
-
-    return StreamingResponse(stream(), media_type='text/event-stream')


-# if __name__ == '__main__':
-#     uvicorn.run("app:app", host=site_config['host'], port=site_config['port'])
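The launcher removed here could never have run as written: `site_config` is not defined anywhere in this file. For reference, a minimal working sketch of the usual way to serve this app; "0.0.0.0" and port 7860 (the conventional Hugging Face Spaces port) are assumptions, not values taken from this repository:

import uvicorn

if __name__ == "__main__":
    # Hypothetical host/port; adjust for your deployment.
    uvicorn.run("app:app", host="0.0.0.0", port=7860)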
 
 
 
 import json
 import random
+import string
+import time
+from typing import Any
+
 import g4f
+from fastapi import FastAPI, Request
+from fastapi.responses import StreamingResponse

 from g4f import ChatCompletion
+from loguru import logger
+from starlette.middleware.cors import CORSMiddleware

 import nest_asyncio
+import os

 nest_asyncio.apply()

 app = FastAPI()
+
 app.add_middleware(
     CORSMiddleware,
     allow_origins=["*"],
     allow_methods=["*"],
     allow_headers=["*"],
 )


 @app.post("/chat/completions")
 @app.post("/v1/chat/completions")
 async def chat_completions(request: Request):
+    req_data = await request.json()
+    stream = req_data.get("stream", False)
+    model = req_data.get("model", "gpt-3.5-turbo")
+    messages = req_data.get("messages")
+    temperature = req_data.get("temperature", 1.0)
+    top_p = req_data.get("top_p", 1.0)
+    max_tokens = req_data.get("max_tokens", 0)
+
+    logger.info(
+        f"chat_completions: stream: {stream}, model: {model}, temperature: {temperature}, top_p: {top_p}, max_tokens: {max_tokens}"
+    )
+
+    response = await gen_resp(max_tokens, messages, model, stream, temperature, top_p)
+
+    completion_id = "".join(random.choices(string.ascii_letters + string.digits, k=28))
+    completion_timestamp = int(time.time())
+
+    if not stream:
+        logger.info(f"chat_completions: response: {response}")
+        return {
+            "id": f"chatcmpl-{completion_id}",
+            "object": "chat.completion",
+            "created": completion_timestamp,
+            "model": model,
+            "choices": [
                 {
+                    "index": 0,
+                    "message": {
+                        "role": "assistant",
+                        "content": response,
                     },
+                    "finish_reason": "stop",
                 }
+            ],
+            "usage": {
+                "prompt_tokens": None,
+                "completion_tokens": None,
+                "total_tokens": None,
+            },
         }

+    def streaming():
+        for chunk in response:
+            completion_data = {
+                "id": f"chatcmpl-{completion_id}",
+                "object": "chat.completion.chunk",
+                "created": completion_timestamp,
+                "model": model,
+                "choices": [
+                    {
+                        "index": 0,
+                        "delta": {
+                            "content": chunk,
+                        },
+                        "finish_reason": None,
+                    }
+                ],
+            }
+
+            content = json.dumps(completion_data, separators=(",", ":"))
+            yield f"data: {content}\n\n"
+            time.sleep(0)
+
+        end_completion_data: dict[str, Any] = {
+            "id": f"chatcmpl-{completion_id}",
+            "object": "chat.completion.chunk",
+            "created": completion_timestamp,
+            "model": model,
+            "choices": [
+                {
+                    "index": 0,
+                    "delta": {},
+                    "finish_reason": "stop",
+                }
+            ],
+        }
+        content = json.dumps(end_completion_data, separators=(",", ":"))
+        yield f"data: {content}\n\n"
+
+    return StreamingResponse(streaming(), media_type="text/event-stream")
+
+
+async def gen_resp(max_tokens, messages, model, stream, temperature, top_p):
+    MAX_ATTEMPTS = int(os.getenv("MAX_ATTEMPTS", 10))
+    attempts = 0
+    while True:
+        try:
+            response = ChatCompletion.create(
+                model=model,
+                stream=stream,
+                messages=messages,
+                temperature=temperature,
+                top_p=top_p,
+                max_tokens=max_tokens,
+                system_prompt="",
+                provider=g4f.Provider.Bing,
+            )
+            return response
+        except Exception as e:
+            logger.error(f"gen_resp: Exception: {e}")
+            attempts += 1
+            if attempts >= MAX_ATTEMPTS:
+                return "Sorry, I could not generate a chat response. Please check your Internet connection and API settings and try again."
136