Akirami committed on
Commit f12b0d7
1 Parent(s): eb4b8b8

Create app.py

Files changed (1)
  1. app.py +92 -0
app.py ADDED
@@ -0,0 +1,92 @@
+ import os
+ import gradio as gr
+ from functools import partial
+ from huggingface_hub import InferenceClient
+
+ css = """
+ .generate_button {
+     transition: background-color 1s ease-out, color 1s ease-out, border-color 1s ease-out;
+ }
+ """
+
+
+ def generate(prompt: str, hf_token: str, model: str):
+     messages = [{"role": "user", "content": prompt}]
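+     # fall back to a server-side token (assumed to be set as the
+     # HUGGINGFACE_API_KEY environment variable) when the field is left empty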
+     if hf_token is None or not hf_token.strip():
+         hf_token = os.getenv("HUGGINGFACE_API_KEY")
+     client = InferenceClient(model, token=hf_token)
+     model_name = model.split("/")[1]
+     response = f"**{model_name}**\n\n"
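+     # stream the chat completion and yield the accumulated text so the
+     # Markdown output updates token by token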
+     for msg in client.chat_completion(messages, max_tokens=600, stream=True):
+         token = msg.choices[0].delta.content
+         if token:  # the delta of a streamed chunk can carry no content
+             response += token
+         yield response
+
+
+ def clear_token():
+     # returns a Textbox with visibility set to False;
+     # this updates the hf_token widget, thus hiding it
+     return gr.Textbox(visible=False)
+
+
+ with gr.Blocks(css=css, theme="gradio/soft") as demo:
+     gr.Markdown("<center><h1>Code LLM Explorer</h1></center>")
+
+     prompt = gr.Textbox(
+         label="Prompt",
+         lines=2,  # default height of two lines
+         max_lines=5,  # the Textbox extends up to 5 lines
+         info="Type your Prompt here",
+         show_label=False,
+         value="Write Bubble Sort in Python",
+     )
+
+     hf_token = gr.Textbox(
+         label="HuggingFace Token",
+         type="password",
+         placeholder="Your Hugging Face Token",
+         show_label=False,
+     )
+
+     # gr.Group() groups the two buttons together
+     # so there is no gap between them
+     with gr.Group():
+         with gr.Row() as button_row:
+             # variant: 'primary' for the main call-to-action,
+             # 'secondary' for a more subdued style, 'stop' for a stop button
+             generate_btn = gr.Button(
+                 "Run", elem_classes="generate_button", variant="primary", size="sm"
+             )
+             view_code = gr.Button(
+                 "View Code", elem_classes="generate_button", variant="secondary", size="sm"
+             )
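+             # NOTE: view_code is not yet wired to an event handler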
+
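+     # one Markdown panel per model; the placeholder text below is replaced
+     # by the streamed response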
+     with gr.Row() as output_row:
+         codellama_output = gr.Markdown("codellama/CodeLlama-34b-Instruct-hf")
+         stablecode_output = gr.Markdown("stabilityai/stable-code-instruct-3b")
+         deepseek_output = gr.Markdown("deepseek-ai/deepseek-coder-33b-instruct")
+
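+     # gr.on() binds the same callback to several triggers (Enter in the
+     # prompt box or a click on Run); .then() chains the generation after
+     # the token textbox has been hidden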
+     gr.on(
+         [prompt.submit, generate_btn.click], clear_token, inputs=None, outputs=hf_token
+     ).then(
+         fn=partial(generate, model="codellama/CodeLlama-34b-Instruct-hf"),
+         inputs=[prompt, hf_token],
+         outputs=codellama_output,
+     )
+
+     gr.on(
+         [prompt.submit, generate_btn.click], clear_token, inputs=None, outputs=hf_token
+     ).then(
+         fn=partial(generate, model="stabilityai/stable-code-instruct-3b"),
+         inputs=[prompt, hf_token],
+         outputs=stablecode_output,
+     )
+
+     gr.on(
+         [prompt.submit, generate_btn.click], clear_token, inputs=None, outputs=hf_token
+     ).then(
+         fn=partial(generate, model="deepseek-ai/deepseek-coder-33b-instruct"),
+         inputs=[prompt, hf_token],
+         outputs=deepseek_output,
+     )
+
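+ # launch() starts the Gradio server; on a Hugging Face Space, app.py is the
+ # entry point, so the demo is served automatically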
+ demo.launch()