Spaces:
Sleeping
Sleeping
File size: 1,945 Bytes
ccec886 43a5d8c afc2013 99e1d75 c8f7bea 1248eba 99e1d75 91bbd67 215d371 7b5f63c 215d371 ea0fbdd 43a5d8c c8f7bea 99e1d75 43a5d8c 91bbd67 ea0fbdd 35bd019 afc2013 243d3e9 afc2013 99e1d75 243d3e9 35bd019 c7a54bd c8f7bea 215d371 3110228 e464d27 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 |
import gradio as gr
from huggingface_hub import list_models
def hello(profile: gr.OAuthProfile | None) -> str:
    """Return a personalized greeting for the logged-in user.

    Gradio injects a ``gr.OAuthProfile`` for this parameter when the
    visitor is logged in, or ``None`` when they are not.
    """
    if profile is not None:
        return f"Hello {profile.name}"
    return "I don't know you."
def message(message, history, profile: gr.OAuthProfile | None) -> str:
    """Chat handler for gr.ChatInterface: greet the logged-in user.

    Parameters
    ----------
    message:
        The incoming chat message (only triggers a reply; content unused).
    history:
        Chat history supplied by gr.ChatInterface (unused).
    profile:
        Injected by Gradio; ``None`` when the visitor is not logged in.

    Raises
    ------
    gr.Error
        If the user is not logged in.
    """
    if profile is None:
        # Surface a user-visible error in the UI. (The original had an
        # unreachable `return` after this raise — removed.)
        raise gr.Error("Please login to...")
    print(profile)  # server-side debug log of the OAuth profile
    return f"hello {profile.name}"
def list_private_models(profile: gr.OAuthProfile | None, oauth_token: gr.OAuthToken | None) -> str:
    """List the logged-in user's Hub models, tagging each public/private.

    Gradio injects a ``gr.OAuthProfile`` and ``gr.OAuthToken`` when the
    visitor is logged in; both are ``None`` otherwise.
    """
    # Guard BOTH values: the original checked only `oauth_token`, then
    # dereferenced `profile.username` — an AttributeError if `profile`
    # were ever None while a token was present.
    if profile is None or oauth_token is None:
        return "Please log in to list private models."
    models = [
        f"{model.id} ({'private' if model.private else 'public'})"
        for model in list_models(author=profile.username, token=oauth_token.token)
    ]
    return "Models:\n\n" + "\n - ".join(models) + "."
# Build the Gradio UI: a Hugging Face login button plus a multimodal chat
# interface. `message` raises gr.Error unless the visitor has logged in
# through the OAuth flow started by gr.LoginButton.
# NOTE(review): `hello` and `list_private_models` are currently unwired;
# the commented-out demo.load() hookups and CSS block were removed as
# dead code.
with gr.Blocks(fill_height=True) as demo:
    gr.Markdown(
        "# ChatGPT-4o"
        "\n\nThis is GPT-4o, you can use the text and image capabilities now. More capabilities like audio and video will be rolled out iteratively in the future. Stay tuned."
    )
    gr.LoginButton()  # adds the "Sign in with Hugging Face" button
    gr.ChatInterface(message, multimodal=True)

demo.launch()
|