muhammedkaan committed on
Commit 56fe62c · verified · 1 Parent(s): 9bfbe26

Update app.py

Files changed (1): app.py (+57 -31)
app.py CHANGED
@@ -1,11 +1,24 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
+import json
+import datetime
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
+# Hugging Face Model
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
+# For saving user data (an optional JSON file is used)
+LOG_FILE = "user_interactions.json"
+
+def load_logs():
+    try:
+        with open(LOG_FILE, "r") as f:
+            return json.load(f)
+    except FileNotFoundError:
+        return {}
+
+def save_logs(data):
+    with open(LOG_FILE, "w") as f:
+        json.dump(data, f, indent=4)
 
 def respond(
     message,
@@ -14,19 +27,31 @@ def respond(
     max_tokens,
     temperature,
     top_p,
+    user_role
 ):
     messages = [{"role": "system", "content": system_message}]
-
+    logs = load_logs()
+    user_id = "user_123"  # This should be the user ID obtained from the platform
+
+    # Create a log entry for this user if one does not exist yet
+    if user_id not in logs:
+        logs[user_id] = []
+
     for val in history:
         if val[0]:
             messages.append({"role": "user", "content": val[0]})
         if val[1]:
             messages.append({"role": "assistant", "content": val[1]})
 
     messages.append({"role": "user", "content": message})
 
+    # Adjust the system message according to the user role
+    if user_role == "Öğrenci":
+        messages.append({"role": "system", "content": "Lütfen eğitim seviyesine uygun yanıt ver."})
+    elif user_role == "Eğitmen":
+        messages.append({"role": "system", "content": "Eğitmenlere yönelik daha derinlemesine açıklamalar ver."})
+
     response = ""
-
-    for message in client.chat_completion(
+    for chunk in client.chat_completion(
         messages,
         max_tokens=max_tokens,
@@ -35,30 +60,31 @@ def respond(
         top_p=top_p,
     ):
-        token = message.choices[0].delta.content
-
+        token = chunk.choices[0].delta.content
         response += token
         yield response
+
+    # Record the user interaction
+    logs[user_id].append({
+        "timestamp": str(datetime.datetime.now()),
+        "message": message,
+        "response": response,
+        "role": user_role
+    })
+    save_logs(logs)
 
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
-if __name__ == "__main__":
-    demo.launch()
+# Interface
+with gr.Blocks() as demo:
+    gr.Markdown("## Educted AI Chatbot \nEğitim için özelleştirilmiş yapay zeka destekli sohbet botu.")
+
+    chat = gr.ChatInterface(
+        respond,
+        additional_inputs=[
+            gr.Textbox(value="You are an AI tutor.", label="Sistem Mesajı"),
+            gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Maksimum Token"),
+            gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Sıcaklık (Temperature)"),
+            gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
+            gr.Radio(["Öğrenci", "Eğitmen"], value="Öğrenci", label="Kullanıcı Rolü")
+        ],
+    )
+
+demo.launch()
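
The new respond() hard-codes user_id = "user_123", and the commit's own comment notes it should come from the platform. Below is a minimal sketch of one way to do that; it assumes Gradio's usual behaviour of filling in a function parameter annotated with gr.Request, and that request.username and request.session_hash are available on that object. get_user_id is a hypothetical helper, not part of this commit.

from typing import Optional

import gradio as gr


def get_user_id(request: Optional[gr.Request]) -> str:
    """Return a stable identifier to key the interaction log on."""
    if request is None:
        # e.g. called outside a Gradio event handler (tests, scripts)
        return "anonymous"
    # Logged-in Hugging Face users expose a username; otherwise fall back to
    # the per-browser session hash.
    return request.username or request.session_hash

respond() would then take an extra trailing parameter such as request: gr.Request = None and use user_id = get_user_id(request) in place of the fixed string. Note also that load_logs()/save_logs() rewrite the whole JSON file on every turn, so with several concurrent users some form of locking or an append-only log would be the safer design.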