hackergeek98 committed
Commit 12001da · verified · 1 Parent(s): b2ce226

Create app.py

Files changed (1)
  1. app.py +56 -0
app.py ADDED
@@ -0,0 +1,56 @@
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+ import os
+ from huggingface_hub import login
+
+ # Fetch the token from the environment (loaded from the Space's secrets)
+ hf_token = os.getenv("gemma3")
+ login(hf_token)
+
+ # Initialize the client with the fine-tuned model
+ client = InferenceClient("hackergeek98/gemma-finetuned")
+
+ def respond(
+     message: str,
+     history: list[tuple[str, str]],
+     system_message: str,
+     max_tokens: int,
+     temperature: float,
+     top_p: float,
+ ):
+     # Build a prompt from the system message and conversation history
+     prompt = f"{system_message}\n"
+     for user_msg, assistant_msg in history:
+         if user_msg:
+             prompt += f"User: {user_msg}\n"
+         if assistant_msg:
+             prompt += f"Assistant: {assistant_msg}\n"
+     prompt += f"User: {message}\nAssistant: "
+
+     # Call the text-generation endpoint with the assembled prompt
+     response = client.text_generation(
+         prompt,
+         model="hackergeek98/gemma-finetuned",
+         max_new_tokens=max_tokens,
+         temperature=temperature,
+         top_p=top_p,
+     )
+
+     return response  # text_generation returns the generated string when details=False
+
+ # Set up the Gradio chat interface
+ demo = gr.ChatInterface(
+     respond,
+     additional_inputs=[
+         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+         gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
+     ],
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
+
+
+
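
For a quick smoke test outside the Gradio UI, the respond function can be called directly. The snippet below is a minimal sketch and not part of this commit: it assumes app.py is importable from the working directory, that the "gemma3" environment variable (the Space secret) holds a valid Hugging Face token, and that hackergeek98/gemma-finetuned is reachable through the Inference API.

# smoke_test.py -- hypothetical helper, not part of this commit.
# Importing app also runs login() and builds the InferenceClient,
# so the "gemma3" environment variable must be set beforehand.
from app import respond

reply = respond(
    message="Hello, who are you?",
    history=[],                                    # no prior turns
    system_message="You are a friendly Chatbot.",  # matches the UI default
    max_tokens=128,
    temperature=0.7,
    top_p=0.95,
)
print(reply)

The sampling values mirror the defaults exposed by the UI sliders, so the test exercises the same code path as the chat interface without launching a server.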