rest_api / app.py
ldhldh's picture
Update app.py
c5667e0
raw
history blame
1.52 kB
import asyncio
import datetime
import inspect
import json
import math
import os
import queue
import re
import time
from typing import List, Type

import gradio as gr
import requests
from gradio import routes
from gradio_client import Client as GrClient
# NOTE(review): `loop` is never used anywhere in this file, and
# asyncio.get_event_loop() outside a running loop is deprecated since
# Python 3.10 — confirm no external caller needs it before removing.
loop = asyncio.get_event_loop()
# Monkey patch: replace gradio.routes.get_types (applied at L35 below)
def get_types(cls_set: List[Type], component: str):
    """Parse "(type): description" metadata out of component docstrings.

    Replacement for ``gradio.routes.get_types``.  For ``component ==
    "input"`` the relevant docstring line is the second one; for outputs
    it is the last one.  Each line is expected to look like
    ``name (type): description``.

    Args:
        cls_set: component classes whose docstrings are inspected.
        component: ``"input"`` selects doc line 1, anything else line -1.

    Returns:
        Tuple ``(docset, types)`` of parallel lists — the text after the
        first ``:`` and the text inside the ``(...)`` of each line.
    """
    # Bug fix: the original called inspect.getdoc without importing
    # `inspect` anywhere, raising NameError on first use.
    # Both original branches were identical except for the line index,
    # so the duplication is collapsed here.
    line_index = 1 if component == "input" else -1
    docset: List[str] = []
    types: List[str] = []
    for cls in cls_set:
        doc_line = inspect.getdoc(cls).split("\n")[line_index]
        docset.append(doc_line.split(":")[-1])
        types.append(doc_line.split(")")[0].split("(")[-1])
    return docset, types
# Install the patched parser over gradio's own — presumably so the
# auto-generated API docs pick up these parsed types; verify against
# the installed gradio version, whose internals this depends on.
routes.get_types = get_types
# Per-user watch history: user_id -> list of movies added via `add`.
history = {}
def predict(user_id, gender, age):
    """Placeholder recommendation endpoint: echo the inputs back.

    Args:
        user_id: requesting user's identifier.
        gender: user's gender (free text from the Gradio textbox).
        age: user's age (free text from the Gradio textbox).

    Returns:
        A Korean status string echoing the inputs.
    """
    # Bug fix: the original f-string referenced an undefined name `x`
    # (NameError on every call) and ignored all three parameters —
    # echo the actual arguments instead.
    # TODO: replace with the real recommendation logic.
    return f"api 응답입니다. 입력:{user_id}, {gender}, {age}"
def add(user_id, movie):
    """Append *movie* to the module-level ``history`` for *user_id*.

    Args:
        user_id: key into the global ``history`` dict.
        movie: value appended to that user's list.

    Returns:
        The literal string ``"ok"`` so the Gradio text output shows a
        confirmation.
    """
    # setdefault replaces the original check-then-insert pair and its
    # non-idiomatic `not user_id in history` test.
    history.setdefault(user_id, []).append(movie)
    return "ok"
with gr.Blocks() as demo:
    count = 0
    # Bug fix: the original passed fn=chat, but no `chat` exists in this
    # module (NameError at import) — `predict` is the three-text-input
    # endpoint this Interface describes.
    aa = gr.Interface(
        fn=predict,
        inputs=["text", "text", "text"],
        outputs="text",
        description="chat",
    )
    bb = gr.Interface(
        fn=add,
        inputs=["text", "text"],
        outputs="text",
        description="chat",
    )
# NOTE(review): `enable_queue` was removed from launch() in gradio 4.x —
# confirm the pinned gradio version before upgrading.
demo.queue(max_size=32).launch(enable_queue=True)