davnas committed on
Commit be6d20e · verified · 1 Parent(s): 2726e2a

Update app.py

Files changed (1)
  1. app.py +75 -45
app.py CHANGED
@@ -1,64 +1,94 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("davnas/lora_model")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
 
     response = ""
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(
             minimum=0.1,
             maximum=1.0,
             value=0.95,
             step=0.05,
-            label="Top-p (nucleus sampling)",
         ),
     ],
 )
 
-
 if __name__ == "__main__":
-    demo.launch()
 
 import gradio as gr
 from huggingface_hub import InferenceClient
+import json
 
+# Initialize the Inference API client for the fine-tuned model
+client = InferenceClient(
+    model="davnas/Italian_Cousine_2.1",
+    headers={"Content-Type": "application/json"}
+)
 
+def respond(message, history, system_message, max_tokens, temperature, top_p):
+    # Format the prompt including history and system message
+    conversation = []
+
+    # Add system message if provided
+    if system_message:
+        conversation.append({"role": "system", "content": system_message})
+
+    # Add conversation history
+    for user_msg, assistant_msg in history:
+        conversation.append({"role": "user", "content": user_msg})
+        conversation.append({"role": "assistant", "content": assistant_msg})
+
+    # Add current message
+    conversation.append({"role": "user", "content": message})
+
+    # Convert conversation to prompt format
+    prompt = ""
+    for msg in conversation:
+        if msg["role"] == "system":
+            prompt += f"{msg['content']}\n"
+        elif msg["role"] == "user":
+            prompt += f"User: {msg['content']}\n"
+        elif msg["role"] == "assistant":
+            prompt += f"Assistant: {msg['content']}\n"
+
+    prompt += "Assistant:"
+
+    # Prepare parameters for text generation
+    parameters = {
+        "max_new_tokens": max_tokens,
+        "temperature": temperature,
+        "top_p": top_p,
+        "return_full_text": False
+    }
+
     response = ""
+    try:
+        # Stream tokens from text_generation with the prepared parameters
+        for token in client.text_generation(
+            prompt,
+            stream=True,
+            **parameters
+        ):
+            response += token
+            yield response
+    except Exception as e:
+        yield f"Error: {str(e)}"
 
+# Create the interface with message type
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
+        gr.Textbox(
+            value="You are a helpful assistant knowledgeable about Italian cuisine.",
+            label="System message"
+        ),
+        gr.Slider(
+            minimum=1,
+            maximum=2048,
+            value=512,
+            step=1,
+            label="Max new tokens"
+        ),
+        gr.Slider(
+            minimum=0.1,
+            maximum=4.0,
+            value=0.7,
+            step=0.1,
+            label="Temperature"
+        ),
         gr.Slider(
             minimum=0.1,
             maximum=1.0,
             value=0.95,
             step=0.05,
+            label="Top-p (nucleus sampling)"
         ),
     ],
+    chatbot=gr.Chatbot(type="messages")  # Use messages format instead of tuples
 )
 
 if __name__ == "__main__":
+    demo.launch(server_name="0.0.0.0", server_port=7860)
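
For a quick sanity check of the new code path, a minimal, hypothetical driver for the streaming respond() generator outside of Gradio might look like the sketch below. It assumes app.py is importable and that the Inference API endpoint for davnas/Italian_Cousine_2.1 is reachable; the prompt text and sampling values are placeholders, not part of this commit.

# Hypothetical smoke test for the streaming respond() generator; not part of the commit.
from app import respond

final = ""
for final in respond(
    message="Suggest a quick weeknight pasta dish.",
    history=[],  # no previous turns
    system_message="You are a helpful assistant knowledgeable about Italian cuisine.",
    max_tokens=256,
    temperature=0.7,
    top_p=0.95,
):
    pass  # each yielded value is the accumulated response so far

print(final)  # final text, or an "Error: ..." string if the request failed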