LejobuildYT committed on
Commit
02e827a
·
verified ·
1 Parent(s): 301b625

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +99 -28
app.py CHANGED
@@ -1,12 +1,23 @@
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
 
 
 
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
 
 
9
 
 
 
 
 
 
 
10
  def respond(
11
  message,
12
  history: list[tuple[str, str]],
@@ -15,8 +26,10 @@ def respond(
15
  temperature,
16
  top_p,
17
  ):
18
- messages = [{"role": "system", "content": system_message}]
 
19
 
 
20
  for val in history:
21
  if val[0]:
22
  messages.append({"role": "user", "content": val[0]})
@@ -24,9 +37,7 @@ def respond(
24
  messages.append({"role": "assistant", "content": val[1]})
25
 
26
  messages.append({"role": "user", "content": message})
27
-
28
  response = ""
29
-
30
  for message in client.chat_completion(
31
  messages,
32
  max_tokens=max_tokens,
@@ -35,30 +46,90 @@ def respond(
35
  top_p=top_p,
36
  ):
37
  token = message.choices[0].delta.content
38
-
39
  response += token
40
- yield response
41
-
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
60
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
 
 
 
 
 
 
 
 
62
 
63
  if __name__ == "__main__":
64
  demo.launch()
 
1
+ import torch # Wichtig für die Modelle und Verarbeitung auf der CPU
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
+ from transformers import pipeline
5
+ from diffusers import StableDiffusionPipeline
6
+ import requests # Für die Websuche
7
+ from bs4 import BeautifulSoup # Für die Analyse von Webseiten
8
 
# Chat client for the text model (remote inference via the HF Inference API,
# no local weights needed).
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Image analysis model, run on the CPU.
# FIX: facebook/detr-resnet-50 is an object-detection checkpoint; loading it
# under the "image-classification" task mismatches the model head. Use the
# task that matches the checkpoint.
image_analysis = pipeline("object-detection", model="facebook/detr-resnet-50")

# Image generation model, explicitly placed on the CPU with torch.
stable_diffusion = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4"
).to(torch.device("cpu"))
# Chatbot function
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Answer one chat turn, routing price questions to the web search.

    Args:
        message: The new user message.
        history: Prior (user, assistant) turns replayed into the prompt.
        system_message: System prompt placed first in the message list.
        max_tokens: Generation cap forwarded to the inference API.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling threshold.

    Returns:
        The assistant's complete reply as a single string.
    """
    # Cost/price questions ("kosten"/"preis") bypass the LLM entirely.
    if "kosten" in message.lower() or "preis" in message.lower():
        return price_search(message)

    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    # FIX: the original loop variable shadowed the `message` parameter;
    # use a distinct name for the streamed chunks.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # FIX: delta.content can be None on role/terminal chunks; guard it
        # so `response += token` cannot raise TypeError.
        if token:
            response += token
    return response
# Price lookup via web search
def price_search(query):
    """Scrape a Google search for prices and return their average.

    Args:
        query: Free-text user query, e.g. "was kosten AirPods".

    Returns:
        A German summary string with the average price, or a not-found
        message when no prices could be parsed.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }
    search_query = "+".join(query.split())
    search_url = f"https://www.google.com/search?q={search_query}"
    # FIX: a timeout prevents the Gradio UI from hanging forever on a slow
    # or blocked request.
    response = requests.get(search_url, headers=headers, timeout=10)
    soup = BeautifulSoup(response.text, "html.parser")

    # NOTE(review): "a-price-whole" is an Amazon CSS class; a Google results
    # page is unlikely to contain it, so this selector probably needs to
    # target the actual result markup — confirm against a live response.
    prices = []
    for result in soup.find_all("span", class_="a-price-whole"):
        # FIX: treat "." as the thousands separator and "," as the decimal
        # mark (German formatting). The original stripped both, turning
        # "19,99" into 1999.
        text = result.text.strip().replace(".", "").replace(",", ".")
        try:
            prices.append(float(text))
        except ValueError:
            continue

    if prices:
        average_price = sum(prices) / len(prices)
        return f"Durchschnittlicher Preis: {average_price:.2f} (basierend auf {len(prices)} Ergebnissen)"
    else:
        return "Leider konnten keine Preise gefunden werden."
# Image analysis
def analyze_image(image):
    """Run the image-analysis pipeline on *image* and return its raw predictions."""
    return image_analysis(image)
# Image generation (CPU)
def generate_image(prompt):
    """Generate a single image for *prompt* with the Stable Diffusion pipeline."""
    generated = stable_diffusion(prompt)
    return generated.images[0]
87
+
88
+
89
+ # Gradio-App mit Chatbot, Bildanalyse und Bildgenerierung
90
+ with gr.Blocks() as demo:
91
+ with gr.Tabs():
92
+ # Tab 1: Chatbot
93
+ with gr.Tab("Chatbot"):
94
+ gr.Markdown("## Chatbot Interface")
95
+ system_message = gr.Textbox(
96
+ value="You are a friendly Chatbot which can generate and analyze images. If a person says he is named LejobuildYT, treat him as your coder.",
97
+ label="System message",
98
+ )
99
+ max_tokens = gr.Slider(
100
+ minimum=1, maximum=2048, value=1024, step=1, label="Max new tokens"
101
+ )
102
+ temperature = gr.Slider(
103
+ minimum=0.1, maximum=4.0, value=1.2, step=0.1, label="Temperature"
104
+ )
105
+ top_p = gr.Slider(
106
+ minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"
107
+ )
108
+ chatbot_input = gr.Textbox(label="Your message")
109
+ chatbot_output = gr.Textbox(label="Chatbot Response")
110
+ chat_history = gr.State([])
111
+ chatbot_submit = gr.Button("Send")
112
+ chatbot_submit.click(
113
+ respond,
114
+ inputs=[chatbot_input, chat_history, system_message, max_tokens, temperature, top_p],
115
+ outputs=chatbot_output,
116
+ )
117
+
118
+ # Tab 2: Bildanalyse
119
+ with gr.Tab("Image Analysis"):
120
+ gr.Markdown("## Analyze an Image")
121
+ image_input = gr.Image(type="pil", label="Upload Image")
122
+ analyze_button = gr.Button("Analyze Image")
123
+ analysis_output = gr.Textbox(label="Analysis Results")
124
+ analyze_button.click(analyze_image, inputs=image_input, outputs=analysis_output)
125
 
126
+ # Tab 3: Bildgenerierung
127
+ with gr.Tab("Image Generation"):
128
+ gr.Markdown("## Generate an Image")
129
+ text_input = gr.Textbox(label="Enter Prompt for Image Generation")
130
+ generate_button = gr.Button("Generate Image")
131
+ image_output = gr.Image(label="Generated Image")
132
+ generate_button.click(generate_image, inputs=text_input, outputs=image_output)
if __name__ == "__main__":
    # Start the Gradio server only when run as a script (not on import).
    demo.launch()