hackergeek98 committed on
Commit 9f2bf2d · verified · 1 Parent(s): a3290f0

Update app.py

Files changed (1)
  1. app.py +4 -2
app.py CHANGED
@@ -6,6 +6,7 @@ from huggingface_hub import login
 # Fetch token from environment (automatically loaded from secrets)
 hf_token = os.getenv("gemma3")
 login(hf_token)
+
 # Initialize the client with your model
 client = InferenceClient("hackergeek98/gemma-finetuned")
 
@@ -26,11 +27,11 @@ def respond(
     prompt += f"Assistant: {assistant_msg}\n"
     prompt += f"User: {message}\nAssistant: "
 
-    # Call the text generation API with the correct 'prompt' parameter
+    # Call the text generation API with updated parameter name
     response = client.text_generation(
         model="hackergeek98/gemma-finetuned",
         prompt=prompt,
-        max_tokens=max_tokens,
+        max_new_tokens=max_tokens,
         temperature=temperature,
         top_p=top_p,
     )
@@ -52,3 +53,4 @@ if __name__ == "__main__":
     demo.launch()
 
 
+
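
For reference, a minimal sketch of the corrected call in context. It assumes huggingface_hub's InferenceClient and a Gradio-style history of (user, assistant) pairs; the respond() signature, defaults, and prompt-building loop below are abbreviated stand-ins for the rest of app.py, not the committed file.

# Sketch only: mirrors the committed change (max_tokens -> max_new_tokens)
# around an abbreviated respond(); the default argument values are illustrative.
import os

from huggingface_hub import InferenceClient, login

login(os.getenv("gemma3"))  # token read from the secret named "gemma3"
client = InferenceClient("hackergeek98/gemma-finetuned")

def respond(message, history, max_tokens=256, temperature=0.7, top_p=0.95):
    # Build a flat chat transcript from prior (user, assistant) turns
    prompt = ""
    for user_msg, assistant_msg in history:
        prompt += f"User: {user_msg}\n"
        prompt += f"Assistant: {assistant_msg}\n"
    prompt += f"User: {message}\nAssistant: "

    # text_generation takes max_new_tokens, not max_tokens
    return client.text_generation(
        prompt=prompt,
        model="hackergeek98/gemma-finetuned",
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )

With a valid token in the "gemma3" secret, respond("Hello", []) should return the generated completion as a plain string, which the Gradio demo can display directly.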