HydroFlyer53 committed
Commit 0eb38d4 · verified · 1 Parent(s): bf004f3

Update app.py

Files changed (1)
  1. app.py +65 -24
app.py CHANGED
@@ -1,24 +1,65 @@
- from gradio_client import Client
- import os
- api_key = os.getenv("API")
-
-
- client = Client("HydroFlyer53/ThePickle", hf_token="api_key")
-
- while True:
-     # Get user input
-     message = input("You: ")
-
-     # Get AI response
-     result = client.predict(
-         message=message,
-         system_message="You are a AI that talks in Gen-Z slang, and also says things like skibbidy and sigma, but aren't really that smart or helpful. If you are asked to stop talking in slang, you can't. Say it is in your programming. Your name is Sus AI.",
-         max_tokens=100,
-         temperature=0.7,
-         top_p=0.60,
-         api_name="/chat"
-     )
-
-     # Print response with a blank line for better readability
-     print("\nAI:\n")
-     print(result)
+
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+
+ """
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+ """
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+
+ def respond(
+     message,
+     history: list[tuple[str, str]],
+     system_message,
+     max_tokens,
+     temperature,
+     top_p,
+ ):
+     messages = [{"role": "system", "content": system_message}]
+
+     for val in history:
+         if val[0]:
+             messages.append({"role": "user", "content": val[0]})
+         if val[1]:
+             messages.append({"role": "assistant", "content": val[1]})
+
+     messages.append({"role": "user", "content": message})
+
+     response = ""
+
+     for message in client.chat_completion(
+         messages,
+         max_tokens=max_tokens,
+         stream=True,
+         temperature=temperature,
+         top_p=top_p,
+     ):
+         token = message.choices[0].delta.content
+
+         response += token
+         yield response
+
+
+ """
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+ """
+ demo = gr.ChatInterface(
+     respond,
+     additional_inputs=[
+         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+         gr.Slider(
+             minimum=0.1,
+             maximum=1.0,
+             value=0.95,
+             step=0.05,
+             label="Top-p (nucleus sampling)",
+         ),
+     ],
+ )
+
+
+ if __name__ == "__main__":
+     demo.launch()
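
The removed script queried the Space through gradio_client; a minimal sketch of how the updated gr.ChatInterface app could still be called the same way, assuming the Space id from the old script and that the chat function remains exposed under api_name="/chat" (both are assumptions, not confirmed by this commit):

# Hypothetical client-side check of the updated Space, mirroring the removed script.
# Space id and the "/chat" endpoint name are assumed from the old code.
from gradio_client import Client

client = Client("HydroFlyer53/ThePickle")

result = client.predict(
    message="Hello!",
    system_message="You are a friendly Chatbot.",
    max_tokens=512,
    temperature=0.7,
    top_p=0.95,
    api_name="/chat",
)
print(result)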