juanpablosanchez committed
Commit e8fb1d3 · verified · 1 Parent(s): f689a80

Update app.py

Files changed (1)
  1. app.py +11 -6
app.py CHANGED
@@ -1,9 +1,11 @@
-import gradio as gr
+import gradio as gr
 from fastapi import FastAPI
 from pydantic import BaseModel
 from transformers import AutoTokenizer, AutoModelForTokenClassification
 import torch
+from threading import Thread
 import uvicorn
+import requests
 
 # Set up FastAPI
 app = FastAPI()
@@ -38,6 +40,13 @@ async def predict(input: TextInput):
 
     return {"entities": entities}
 
+# Start the FastAPI server in a separate thread
+def start_api():
+    uvicorn.run(app, host="0.0.0.0", port=8000)
+
+api_thread = Thread(target=start_api, daemon=True)
+api_thread.start()
+
 # Set up Gradio
 def predict_gradio(text):
     response = requests.post("http://localhost:8000/predict", json={"text": text})
@@ -45,8 +54,4 @@ def predict_gradio(text):
     return entities
 
 demo = gr.Interface(fn=predict_gradio, inputs="text", outputs="json")
-demo.launch(share=True)
-
-# Start the FastAPI server
-if __name__ == "__main__":
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+demo.launch(share=True)
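
For context, a minimal sketch of how the updated app.py fits together after this commit: uvicorn now serves the FastAPI app from a daemon thread, and the Gradio callback reaches it over HTTP on localhost. The TextInput schema, the body of the /predict endpoint, and the short startup wait below are assumptions added for illustration; only the threaded startup and the Gradio client come from the diff itself.

# A minimal sketch, assuming the parts of app.py not shown in this diff.
import time
from threading import Thread

import gradio as gr
import requests
import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class TextInput(BaseModel):
    text: str  # assumed request schema; the diff only shows json={"text": text}

@app.post("/predict")
async def predict(input: TextInput):
    # Placeholder for the token-classification logic elided from the diff.
    entities = [{"word": w, "entity": "O"} for w in input.text.split()]
    return {"entities": entities}

# Start the FastAPI server in a daemon thread (as the commit does) so it
# does not block the Gradio interface.
def start_api():
    uvicorn.run(app, host="0.0.0.0", port=8000)

api_thread = Thread(target=start_api, daemon=True)
api_thread.start()
time.sleep(1)  # crude wait so the API is up before the first request (assumption)

# Gradio front end that forwards text to the local FastAPI endpoint.
def predict_gradio(text):
    response = requests.post("http://localhost:8000/predict", json={"text": text})
    return response.json().get("entities", [])

demo = gr.Interface(fn=predict_gradio, inputs="text", outputs="json")
demo.launch(share=True)

The point of the change is that a single process can expose both the REST endpoint and the Gradio UI: the old trailing `if __name__ == "__main__": uvicorn.run(...)` never ran once demo.launch() blocked, whereas the daemon thread starts the API before demo.launch(share=True) takes over the main thread.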