wilson989 committed on
Commit
4570124
·
1 Parent(s): 47b1a59

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +83 -114
app.py CHANGED
@@ -1,143 +1,112 @@
1
- # Importamos varios módulos y librerías necesarios para el funcionamiento del script, como json, random, string, time, g4f, fastapi, starlette, requests y anyio.
 
2
  import json
3
  import random
4
- import string
5
- import time
6
- # from typing import Any
7
-
8
  import g4f
9
- from fastapi import FastAPI, Request
10
- from fastapi.responses import StreamingResponse
 
 
 
11
 
12
- from g4f import ChatCompletion
13
- from loguru import logger
14
- from starlette.middleware.cors import CORSMiddleware
15
 
 
16
  import nest_asyncio
17
- import os # Importo el módulo os para usar la variable de entorno
18
- import requests # Importo la librería requests para hacer peticiones HTTP
19
 
 
20
  nest_asyncio.apply()
21
 
22
  app = FastAPI()
23
-
24
  app.add_middleware(
25
  CORSMiddleware,
26
  allow_origins=["*"],
 
27
  allow_methods=["*"],
28
  allow_headers=["*"],
29
  )
 
30
 
31
 
32
@app.post("/chat/completions")
@app.post("/v1/chat/completions")
async def chat_completions(request: Request):
    """Serve an OpenAI-compatible chat-completions endpoint.

    Reads the JSON request body, forwards the parameters to ``gen_resp`` and
    returns either a single ``chat.completion`` object or, when ``"stream"``
    is true, a ``text/event-stream`` of ``chat.completion.chunk`` events.
    """
    req_data = await request.json()
    stream = req_data.get("stream", False)
    model = req_data.get("model", "gpt-4-32k")
    messages = req_data.get("messages")
    temperature = req_data.get("temperature", 1.0)
    top_p = req_data.get("top_p", 1.0)
    max_tokens = req_data.get("max_tokens", 0)

    logger.info(
        f"chat_completions: stream: {stream}, model: {model}, temperature: {temperature}, top_p: {top_p}, max_tokens: {max_tokens}"
    )

    response = await gen_resp(max_tokens, messages, model, stream, temperature, top_p)

    # Random 28-char id mimicking OpenAI's "chatcmpl-..." identifiers.
    completion_id = "".join(random.choices(string.ascii_letters + string.digits, k=28))
    completion_timestamp = int(time.time())

    if not stream:
        logger.info(f"chat_completions: response: {response}")
        return {
            "id": f"chatcmpl-{completion_id}",
            "object": "chat.completion",
            "created": completion_timestamp,
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": response,
                    },
                    "finish_reason": "stop",
                }
            ],
            # Token accounting is not available from the provider.
            "usage": {
                "prompt_tokens": None,
                "completion_tokens": None,
                "total_tokens": None,
            },
        }

    def streaming():
        # Relay each provider chunk as one SSE "chat.completion.chunk" event.
        for chunk in response:
            completion_data = {
                "id": f"chatcmpl-{completion_id}",
                "object": "chat.completion.chunk",
                "created": completion_timestamp,
                "model": model,
                "choices": [
                    {
                        "index": 0,
                        "delta": {
                            "content": chunk,
                        },
                        "finish_reason": None,
                    }
                ],
            }

            content = json.dumps(completion_data, separators=(",", ":"))
            yield f"data: {content}\n\n"
            time.sleep(0)  # cooperative yield point for the event loop

        # FIX: the original annotated this as dict[str, Any], but the
        # "from typing import Any" import was commented out, leaving an
        # undefined name in the annotation. The annotation added nothing,
        # so it is dropped.
        end_completion_data = {
            "id": f"chatcmpl-{completion_id}",
            "object": "chat.completion.chunk",
            "created": completion_timestamp,
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "delta": {},
                    "finish_reason": "stop",
                }
            ],
        }
        content = json.dumps(end_completion_data, separators=(",", ":"))
        yield f"data: {content}\n\n"

    return StreamingResponse(streaming(), media_type="text/event-stream")
115
-
116
 
117
async def gen_resp(max_tokens, messages, model, stream, temperature, top_p):
    """Call g4f's Bing provider, retrying on failure.

    Retries up to MAX_ATTEMPTS times (read from the environment, default 10)
    and returns either the provider response or a user-facing error string.
    """
    MAX_ATTEMPTS = int(os.getenv("MAX_ATTEMPTS", 10))
    attempts = 0
    while True:
        try:
            response = ChatCompletion.create(
                model=model,
                stream=stream,
                messages=messages,
                temperature=temperature,
                top_p=top_p,
                max_tokens=max_tokens,
                system_prompt="",
                provider=g4f.Provider.Bing,
            )
            return response
        # FIX: the original caught requests.exceptions.ContentTypeError,
        # which does not exist in the requests library (it is an aiohttp
        # exception). Evaluating that name in the except clause raised
        # AttributeError and masked the real error. RequestException is the
        # actual base class for requests' network/HTTP errors.
        except requests.exceptions.RequestException as e:
            logger.error(f"gen_resp: Exception: {e}")
            attempts += 1
            if attempts >= MAX_ATTEMPTS:
                return "Lo siento, no he podido generar una respuesta de chat. La API de Bing ha devuelto un tipo de contenido inesperado. Por favor, revisa la documentación de la API y vuelve a intentarlo."
        except Exception as e:
            logger.error(f"gen_resp: Exception: {e}")
            attempts += 1
            if attempts >= MAX_ATTEMPTS:
                return "Lo siento, no he podido generar una respuesta de chat. Por favor, revisa tu conexión a Internet y la configuración de la API y vuelve a intentarlo."
 
1
+ import os
2
+ import time
3
  import json
4
  import random
5
+ #import requests
6
+ #from gevent import pywsgi
 
 
7
  import g4f
8
+ from fastapi import FastAPI, Request, Response
9
+ from fastapi.responses import JSONResponse, StreamingResponse
10
+ from fastapi.middleware.cors import CORSMiddleware
11
+ import logging
12
+ import uvicorn
13
 
14
+ from g4f import ChatCompletion, Provider
15
+ #from contextlib import closing
 
16
 
17
# Allow re-entrant asyncio event loops (needed in hosts that already run
# a loop, e.g. notebooks / Spaces).
import nest_asyncio

nest_asyncio.apply()

app = FastAPI()
# Fully permissive CORS: any origin, method and header, with credentials.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
LOG = logging.getLogger(__name__)
32
 
33
 
34
@app.post("/chat/completions")
@app.post("/v1/chat/completions")
@app.post("/")
async def chat_completions(request: Request):
    """OpenAI-compatible chat-completions endpoint backed by g4f's Bing provider.

    Returns a ``chat.completion`` JSON object, or a ``text/event-stream`` of
    ``chat.completion.chunk`` events when the request sets ``"stream": true``.
    """
    data = await request.json()
    streaming = data.get('stream', False)
    model = data.get('model', 'gpt-4-32k')
    messages = data.get('messages')

    response = ChatCompletion.create(model=model, stream=streaming,
                                     messages=messages, provider=g4f.Provider.Bing)

    if not streaming:
        # The provider sometimes returns its own error text instead of
        # raising. Retry, but give up after a bounded number of attempts —
        # the original `while` loop had no upper bound and could spin forever.
        retries = 0
        while 'curl_cffi.requests.errors.RequestsError' in response and retries < 5:
            retries += 1
            response = ChatCompletion.create(model=model, stream=streaming,
                                             messages=messages, provider=g4f.Provider.Bing)

        completion_timestamp = int(time.time())
        # Random 28-char id mimicking OpenAI's "chatcmpl-..." identifiers.
        completion_id = ''.join(random.choices(
            'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))

        return JSONResponse({
            'id': 'chatcmpl-%s' % completion_id,
            'object': 'chat.completion',
            'created': completion_timestamp,
            'model': model,
            # Token accounting is not available from the provider.
            'usage': {
                'prompt_tokens': 0,
                'completion_tokens': 0,
                'total_tokens': 0
            },
            'choices': [{
                'message': {
                    'role': 'assistant',
                    'content': response
                },
                'finish_reason': 'stop',
                'index': 0
            }]
        })

    def stream():
        # Template chunk, mutated in place for every token.
        completion_data = {
            'id': '',
            'object': 'chat.completion.chunk',
            'created': 0,
            'model': 'gpt-4-32k',
            'choices': [
                {
                    'delta': {
                        'content': ""
                    },
                    'index': 0,
                    'finish_reason': None
                }
            ]
        }

        # BUG FIX (all three dumps below): the original wrote
        # separators=(',' ':'), which implicit string concatenation turns
        # into the 1-tuple (",:",). json.dumps unpacks `separators` into
        # (item_separator, key_separator) and raises ValueError on a
        # 1-tuple, so every streamed chunk crashed. It must be (',', ':').
        for token in response:
            completion_id = ''.join(
                random.choices('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
            completion_timestamp = int(time.time())
            completion_data['id'] = f'chatcmpl-{completion_id}'
            completion_data['created'] = completion_timestamp
            completion_data['choices'][0]['delta']['content'] = token
            if token.startswith("an error occured"):
                # Provider-reported failure: surface a readable message and
                # terminate the SSE stream early.
                completion_data['choices'][0]['delta']['content'] = "Server Response Error, please try again.\n"
                completion_data['choices'][0]['delta']['stop'] = "error"
                yield 'data: %s\n\ndata: [DONE]\n\n' % json.dumps(completion_data, separators=(',', ':'))
                return
            yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',', ':'))
            time.sleep(0)  # cooperative yield point

        # Final chunk: empty delta with finish_reason "stop", then [DONE].
        completion_data['choices'][0]['finish_reason'] = "stop"
        completion_data['choices'][0]['delta']['content'] = ""
        yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',', ':'))
        yield 'data: [DONE]\n\n'

    return StreamingResponse(stream(), media_type='text/event-stream')