sanbo commited on
Commit
398b781
·
1 Parent(s): c6414ce

update sth. at 2024-11-15 17:13:49

Browse files
Files changed (2) hide show
  1. README.md +1 -1
  2. app.py +37 -15
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: ⚡
4
  colorFrom: purple
5
  colorTo: indigo
6
  sdk: gradio
7
- sdk_version: 4.37.1
8
  app_file: app.py
9
  pinned: true
10
  short_description: Chatbot with Unlimited Vision, Image generation and WebSearch
 
4
  colorFrom: purple
5
  colorTo: indigo
6
  sdk: gradio
7
+ sdk_version: 5.5.0
8
  app_file: app.py
9
  pinned: true
10
  short_description: Chatbot with Unlimited Vision, Image generation and WebSearch
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  from PIL import Image
4
  import requests
 
5
  import json
6
  import uuid
7
 
@@ -21,13 +22,23 @@ def check_service_status():
21
  检查各个服务的可用状态,返回服务状态字典。
22
  """
23
  services = {
24
- "Gemma": client_gemma.is_available(),
25
- "Mixtral": client_mixtral.is_available(),
26
- "Llama": client_llama.is_available(),
27
- "Yi": client_yi.is_available(),
28
  }
29
  return services
30
 
 
 
 
 
 
 
 
 
 
 
31
 
32
  def get_service_status_markdown():
33
  """
@@ -44,8 +55,9 @@ def image_gen(prompt):
44
  """
45
  调用图像生成模型生成图像,返回生成的图像路径。
46
  """
47
- client = Client("KingNish/Image-Gen-Pro")
48
- image = client.predict("Image Generation", None, prompt, api_name="/image_gen_pro")
 
49
  return image
50
 
51
 
@@ -72,10 +84,11 @@ def process_llava_input(message, history, processor):
72
 
73
  def llava_answer(inputs, model):
74
  """
75
- 调用 LLaVA 模型回答图像问答请求。
76
  """
77
- # 这里调用模型生成回答的逻辑,返回回答结果(省略实现细节)
78
- answer = model.generate_answer(inputs)
 
79
  return answer
80
 
81
 
@@ -83,13 +96,21 @@ def llava_answer(inputs, model):
83
 
84
  def search(query):
85
  """
86
- 执行网络搜索,返回搜索结果文本。
87
  """
88
  search_results = []
89
  with requests.Session() as session:
90
  resp = session.get("https://www.google.com/search", params={"q": query, "num": 3})
91
- # TODO: 使用 BeautifulSoup 提取返回的搜索结果标题和链接
92
- # search_results = [(title, link), ...]
 
 
 
 
 
 
 
 
93
  return search_results
94
 
95
 
@@ -99,9 +120,10 @@ def respond(message, history, client):
99
  """
100
  根据输入的消息和历史记录,选择合适的模型生成回答。
101
  """
102
- # 根据输入的模型 client 来决定使用哪个模型生成回答
103
- response = client.predict(message)
104
- return response
 
105
 
106
 
107
  # ===================== Gradio 界面构建 =====================
 
2
  from huggingface_hub import InferenceClient
3
  from PIL import Image
4
  import requests
5
+ from bs4 import BeautifulSoup
6
  import json
7
  import uuid
8
 
 
22
  检查各个服务的可用状态,返回服务状态字典。
23
  """
24
  services = {
25
+ "Gemma": check_inference_client(client_gemma),
26
+ "Mixtral": check_inference_client(client_mixtral),
27
+ "Llama": check_inference_client(client_llama),
28
+ "Yi": check_inference_client(client_yi),
29
  }
30
  return services
31
 
32
def check_inference_client(client):
    """Probe an inference client with a minimal request to check availability.

    Args:
        client: An inference-client object exposing a ``predict`` method.

    Returns:
        bool: True when the probe call succeeds and returns a truthy
        response, False on any failure or empty response.
    """
    # NOTE(review): huggingface_hub.InferenceClient does not expose a
    # `predict` method; if `client` is such an object this always raises
    # and the service is reported unavailable — confirm the intended API.
    try:
        # Empty-input probe: we only care whether the call round-trips.
        response = client.predict({"inputs": ""})
        return bool(response)
    except Exception:
        # Deliberate best-effort health check: any error means "unavailable".
        return False
42
 
43
  def get_service_status_markdown():
44
  """
 
55
  """
56
  调用图像生成模型生成图像,返回生成的图像路径。
57
  """
58
+ client = InferenceClient("KingNish/Image-Gen-Pro")
59
+ response = client.predict("Image Generation", None, prompt, api_name="/image_gen_pro")
60
+ image = response.get("image") # 假设返回的结果包含图像
61
  return image
62
 
63
 
 
84
 
85
def llava_answer(inputs, model):
    """Run the LLaVA model on preprocessed inputs and return its text answer.

    Args:
        inputs: Mapping of keyword arguments expected by ``model.generate``.
        model: Model whose ``generate`` output is indexable, with the first
            item carrying a ``generated_text`` field.

    Returns:
        The generated answer text.
    """
    generation = model.generate(**inputs)
    # The first candidate's `generated_text` field holds the answer text.
    return generation[0]["generated_text"]
93
 
94
 
 
96
 
97
def search(query):
    """Run a Google web search and scrape result titles and links.

    Args:
        query: Search query string.

    Returns:
        list[tuple[str, str]]: (title, href) pairs; empty when nothing
        could be parsed (e.g. Google returned an unexpected page).
    """
    search_results = []
    # A browser-like User-Agent avoids Google's no-JS fallback page, which
    # contains none of the `div.g` result containers parsed below.
    headers = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/119.0 Safari/537.36"
        )
    }
    with requests.Session() as session:
        resp = session.get(
            "https://www.google.com/search",
            params={"q": query, "num": 3},
            headers=headers,
            timeout=10,  # never hang the chatbot on a stalled connection
        )
        soup = BeautifulSoup(resp.text, "html.parser")
        # NOTE(review): `div.g` is an undocumented Google class name and
        # breaks whenever the result markup changes — inherently fragile.
        for item in soup.select("div.g"):
            title_element = item.select_one("h3")
            link_element = item.select_one("a")
            if title_element and link_element:
                search_results.append(
                    (title_element.get_text(), link_element["href"])
                )
    return search_results
115
 
116
 
 
120
  """
121
  根据输入的消息和历史记录,选择合适的模型生成回答。
122
  """
123
+ # 使用指定的模型 client 来生成回答
124
+ response = client.predict({"inputs": message})
125
+ answer = response.get("generated_text") # 假设返回结果包含生成的文本
126
+ return answer
127
 
128
 
129
  # ===================== Gradio 界面构建 =====================