suayptalha committed
Commit 8f1cf32 · verified · 1 Parent(s): 7b6d332

Update app.py

Files changed (1)
  1. app.py +73 -54
app.py CHANGED
@@ -2,72 +2,91 @@ import gradio as gr
 from gradio_client import Client, handle_file
 from huggingface_hub import InferenceClient
 
-# Initialize the InferenceClient for FastLlama model
-client = InferenceClient("Qwen/QwQ-32B-Preview")
-
-# Initialize the Moondream Client for image description
+# Use a gradio_client Client for Moondream2
 moondream_client = Client("vikhyatk/moondream2")
 
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-    image_input
-):
-    # Step 1: Handle the image and get its description using Moondream API
-    image_file = handle_file(image_input)
-    image_description = moondream_client.predict(
-        img=image_file,
+# Use an InferenceClient for Qwen/QwQ-32B-Preview
+llama_client = InferenceClient("Qwen/QwQ-32B-Preview")
+
+# Chat history
+history = []
+
+# Image description function
+def describe_image(image, user_message, history):
+    # Check whether an image was provided
+    if image is None:
+        return "No image provided", history  # Return an error message
+    # Send the image to the Moondream2 API
+    result = moondream_client.predict(
+        img=handle_file(image),
         prompt="Describe this image.",
         api_name="/answer_question"
     )
 
-    # Step 2: Create the messages for the chat model
-    messages = [{"role": "system", "content": system_message}]
-
-    # Add history to the messages
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    # Add the image description to the user message
-    messages.append({"role": "user", "content": f"Here is the description of the image: {image_description}. Can you comment on it?"})
-
-    # Step 3: Get the response from the assistant
-    response = ""
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-        response += token
-        yield response
-
-# Set up Gradio interface
+    description = result  # The description returned by Moondream2
+    history.append({"role": "user", "content": user_message})  # as a string
+    history.append({"role": "assistant", "content": description})  # as a string
+
+    return description, history
+
+# Chat function for text plus history
+def chat_with_text(user_message, history, max_new_tokens=250):
+    # Append the user message to the history
+    history.append({"role": "user", "content": user_message})  # as a string
+
+    # Send the full history to Qwen/QwQ-32B-Preview
+    texts = [{"role": msg["role"], "content": msg["content"]} for msg in history]
+    llama_result = llama_client.chat_completion(
+        messages=texts,
+        max_tokens=max_new_tokens,
+        temperature=0.7,
+        top_p=0.95
+    )
+
+    # Get the assistant reply and append it to the history
+    assistant_reply = llama_result["choices"][0]["message"]["content"]
+    history.append({"role": "assistant", "content": assistant_reply})  # as a string
+
+    return assistant_reply, history
+
+# Image- and/or text-based chat function
+def bot_streaming(message, history=None, max_new_tokens=250):
+    if history is None:  # Use an empty list if no `history` was passed in
+        history = []
+
+    user_message = message.get("text", "")
+    image = message.get("image", None)
+
+    if image:  # An image was provided
+        response, history = describe_image(image, user_message, history)
+    else:  # Text-only message
+        response, history = chat_with_text(user_message, history, max_new_tokens)
+
+    # Only the text should be returned, not the history
+    return response, history
+
+# Gradio interface
 demo = gr.ChatInterface(
-    respond,
+    fn=bot_streaming,
+    title="Multimodal Chat Assistant",
     additional_inputs=[
-        gr.Textbox(value="You are a friendly assistant named FastLlama.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-        gr.Image(type="pil", label="Upload Image for Description")  # Image input
+            minimum=10,
+            maximum=500,
+            value=250,
+            step=10,
+            label="Maximum number of new tokens to generate",
+        )
     ],
+    description=(
+        "This demo combines text and image understanding using Moondream2 for visual "
+        "tasks and Qwen/QwQ-32B-Preview for conversational AI. Upload an image, ask questions, "
+        "or just chat!"
+    ),
+    stop_btn="Stop Generation",
+    fill_height=True,
+    multimodal=True,
 )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(debug=True)
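
A minimal sketch of how the new handlers could be exercised outside the Gradio UI. It assumes this app.py is importable from the working directory, that the placeholder file cat.png exists locally, and that network access to the vikhyatk/moondream2 Space and the Qwen/QwQ-32B-Preview inference endpoint is available; the prompts and the file path are illustrative, not part of the commit.

# Hypothetical smoke test for the handlers defined in app.py above.
# Importing app builds both clients at module level, so network access is required.
from app import bot_streaming

history = []

# Text-only turn: bot_streaming() routes this to chat_with_text().
reply, history = bot_streaming({"text": "Hello, who are you?"}, history)
print(reply)

# Image turn: routed to describe_image(); "cat.png" is a placeholder local file.
reply, history = bot_streaming(
    {"text": "What is in this picture?", "image": "cat.png"},
    history,
)
print(reply)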