hackergeek98 committed on
Commit
a3290f0
·
verified ·
1 Parent(s): b0f97a1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -21
app.py CHANGED
@@ -6,40 +6,38 @@ from huggingface_hub import login
6
  # Fetch token from environment (automatically loaded from secrets)
7
  hf_token = os.getenv("gemma3")
8
  login(hf_token)
9
-
10
  client = InferenceClient("hackergeek98/gemma-finetuned")
11
 
12
  def respond(
13
- message,
14
  history: list[tuple[str, str]],
15
- system_message,
16
- max_tokens,
17
- temperature,
18
- top_p,
19
  ):
 
20
  prompt = f"{system_message}\n"
21
-
22
- # Add conversation history if needed
23
- for val in history:
24
- if val[0]:
25
- prompt += f"User: {val[0]}\n"
26
- if val[1]:
27
- prompt += f"Assistant: {val[1]}\n"
28
-
29
- prompt += f"User: {message}\nAssistant:"
30
-
31
- # Request generation from Hugging Face Inference API
32
  response = client.text_generation(
33
  model="hackergeek98/gemma-finetuned",
34
- inputs=prompt,
35
  max_tokens=max_tokens,
36
  temperature=temperature,
37
  top_p=top_p,
38
  )
39
 
40
- return response['generated_text']
41
 
42
- # Gradio interface setup
43
  demo = gr.ChatInterface(
44
  respond,
45
  additional_inputs=[
@@ -50,7 +48,7 @@ demo = gr.ChatInterface(
50
  ],
51
  )
52
 
53
- # Run the app
54
  if __name__ == "__main__":
55
  demo.launch()
56
 
 
 
6
# Read the Hugging Face access token from the environment (the Space
# secret is named "gemma3") and authenticate with the Hub.
hf_token = os.getenv("gemma3")
login(hf_token)

# Inference client bound to the fine-tuned Gemma model.
client = InferenceClient("hackergeek98/gemma-finetuned")
12
def respond(
    message: str,
    history: list[tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    """Generate the assistant's reply for a Gradio ChatInterface turn.

    Builds a plain-text prompt from the system message, the prior
    (user, assistant) turns in *history*, and the new *message*, then
    asks the Inference API for a completion.

    Returns the generated text as a string.
    """
    # Build the prompt: system message first, then the conversation so far.
    prompt = f"{system_message}\n"
    for user_msg, assistant_msg in history:
        if user_msg:
            prompt += f"User: {user_msg}\n"
        if assistant_msg:
            prompt += f"Assistant: {assistant_msg}\n"
    prompt += f"User: {message}\nAssistant: "

    # NOTE(fix): `text_generation` takes the prompt as its first argument
    # and the length cap is `max_new_tokens` — there is no `max_tokens`
    # keyword, so the previous call raised a TypeError.
    response = client.text_generation(
        prompt,
        model="hackergeek98/gemma-finetuned",
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )

    # NOTE(fix): with the default `details=False`, `text_generation`
    # returns a plain `str`, not a dict — indexing it with
    # `["generated_text"]` raised `TypeError: string indices must be
    # integers`. Return the string directly.
    return response
39
 
40
+ # Set up the Gradio Chat Interface
41
  demo = gr.ChatInterface(
42
  respond,
43
  additional_inputs=[
 
48
  ],
49
  )
50
 
 
51
# Start the Gradio server only when executed as a script,
# not when the module is imported.
if __name__ == "__main__":
    demo.launch()