joermd committed
Commit ccccb88 · verified · 1 Parent(s): c8a9083

Update app.py

Files changed (1)
  1. app.py +83 -114
app.py CHANGED
@@ -1,143 +1,112 @@
 import numpy as np
 import streamlit as st
-import os
-import sys
-
-
-# Create supported models
-model_links = {
-    "Mistral-Nemo-Base-2407": "joermd/speedy-llama2",
-}
-
-# Random dog images for error message
-random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
-              "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
-              "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
-              "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
-              "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
-              "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
-              "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
-              "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
-              "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
-              "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
-              "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
-              "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
-              "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]
-
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+# Check whether a GPU is available
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+@st.cache_resource
+def load_model():
+    """
+    Load the model and tokenizer, with caching
+    """
+    model = AutoModelForCausalLM.from_pretrained(
+        "joermd/speedy-llama2",
+        torch_dtype=torch.float16,
+        device_map=device
+    )
+    tokenizer = AutoTokenizer.from_pretrained("joermd/speedy-llama2")
+    return model, tokenizer
+
+# Random dog pictures for the error message
+random_dog = [
+    "0f476473-2d8b-415e-b944-483768418a95.jpg",
+    "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
+    "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
+    "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
+    "42a98d03-5ed7-4b3b-af89-7c4376cb14c3.jpg"
+]
 
 def reset_conversation():
     '''
-    Resets Conversation
+    Reset the conversation
     '''
     st.session_state.conversation = []
     st.session_state.messages = []
     return None
 
-
-# Define the available models
-models = [key for key in model_links.keys()]
-
-# Create the sidebar with the dropdown for model selection
-selected_model = st.sidebar.selectbox("Select Model", models)
-
-# Create a temperature slider
-temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
-
-# Create a max_token slider
-max_token_value = st.sidebar.slider('Select a max_token value', 1000, 9000, (5000))
-
-# Add reset button to clear conversation
-st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
-
-# Create model description
-st.sidebar.write(f"You're now chatting with **{selected_model}**")
-st.sidebar.markdown("*Generated content may be inaccurate or false.*")
-# st.sidebar.markdown("\n[TypeGPT](https://typegpt.net).")
-
-if "prev_option" not in st.session_state:
-    st.session_state.prev_option = selected_model
-
-if st.session_state.prev_option != selected_model:
-    st.session_state.messages = []
-    # st.write(f"Changed to {selected_model}")
-    st.session_state.prev_option = selected_model
-    reset_conversation()
-
-# Pull in the model we want to use
-repo_id = model_links[selected_model]
-
-st.subheader(f'{selected_model}')
-# # st.title(f'ChatBot Using {selected_model}')
-
-# Set a default model
-if selected_model not in st.session_state:
-    st.session_state[selected_model] = model_links[selected_model]
-
-# Initialize chat history
+def generate_response(prompt, temperature, max_length):
+    """
+    Generate a response from the model
+    """
+    try:
+        inputs = tokenizer.encode(prompt, return_tensors="pt").to(device)
+
+        with torch.no_grad():
+            outputs = model.generate(
+                inputs,
+                max_length=max_length,
+                temperature=temperature,
+                do_sample=True,
+                pad_token_id=tokenizer.eos_token_id
+            )
+
+        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        return response
+    except Exception as e:
+        return str(e)
+
+# Load the model and tokenizer
+try:
+    with st.spinner('Loading the model... this may take a few minutes...'):
+        model, tokenizer = load_model()
+except Exception as e:
+    st.error(f"An error occurred while loading the model: {str(e)}")
+    st.stop()
+
+# Set up the Streamlit interface
+st.subheader('Mistral Chat')
+
+# Add control widgets to the sidebar
+temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
+max_token_value = st.sidebar.slider('Select a max token value', 100, 2000, 500)
+st.sidebar.button('Reset Chat', on_click=reset_conversation)
+
+# Initialize chat history
 if "messages" not in st.session_state:
     st.session_state.messages = []
 
-# Display chat messages from history on app rerun
+# Display previous chat messages
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
 
-# Accept user input
-if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
-    # Display user message in chat message container
+# Handle user input
+if prompt := st.chat_input("Ask me a question"):
+    # Display the user message
     with st.chat_message("user"):
         st.markdown(prompt)
-    # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})
 
-    # Display assistant response in chat message container
+    # Display the assistant response
     with st.chat_message("assistant"):
         try:
-            stream = client.chat.completions.create(
-                model=model_links[selected_model],
-                messages=[
-                    {"role": m["role"], "content": m["content"]}
-                    for m in st.session_state.messages
-                ],
-                temperature=temp_values,  # 0.5
-                stream=True,
-                max_tokens=max_token_value,
-            )
-            response = st.write_stream(stream)
+            # Generate the response
+            with st.spinner('Thinking...'):
+                response = generate_response(
+                    prompt,
+                    temperature=temp_values,
+                    max_length=max_token_value
+                )
+            st.write(response)
 
         except Exception as e:
-            # st.empty()
-            response = "😵‍💫 Looks like someone unplugged something!\
-                \n Either the model space is being updated or something is down.\
-                \n\
-                \n Try again later. \
-                \n\
-                \n Here's a random pic of a 🐶:"
+            response = "😵‍💫 It looks like something went wrong!\n Try again later.\n\n Here's a random picture of a dog 🐶:"
             st.write(response)
-            random_dog_pick = 'https://random.dog/'+ random_dog[np.random.randint(len(random_dog))]
+            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
            st.image(random_dog_pick)
-            st.write("This was the error message:")
+            st.write("The error message:")
             st.write(e)
 
     st.session_state.messages.append({"role": "assistant", "content": response})
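Since the updated app loads joermd/speedy-llama2 locally instead of calling a hosted endpoint, the load-and-generate path can be sanity-checked outside Streamlit. Below is a minimal standalone sketch of that path, not part of the commit: it assumes the checkpoint downloads without authentication and that the accelerate package is installed (needed when passing device_map to from_pretrained). It also swaps max_length for max_new_tokens, since max_length counts prompt tokens plus generated tokens, so a long prompt can leave no budget for the reply, whereas max_new_tokens bounds only the reply.

# sanity_check.py -- hypothetical helper script, not part of this commit
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Mirror the app's device selection.
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained("joermd/speedy-llama2")
model = AutoModelForCausalLM.from_pretrained(
    "joermd/speedy-llama2",
    # float16 halves memory on GPU; plain float32 is safer on CPU.
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    device_map=device,  # requires the accelerate package
)

prompt = "Write one sentence about the sea."
inputs = tokenizer(prompt, return_tensors="pt").to(device)

with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,  # bounds the reply only, unlike max_length
        temperature=0.5,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )

print(tokenizer.decode(outputs[0], skip_special_tokens=True))

If this script prints a completion, any remaining failures in the Space are in the Streamlit layer rather than in model loading or generation.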
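One behavioral difference worth noting: the removed code sent the full st.session_state.messages history to the endpoint, while the new generate_response encodes only the latest prompt, so earlier turns never reach the model. A hedged sketch of one way to restore multi-turn context, assuming the checkpoint ships a chat template (tokenizer.apply_chat_template raises an error when none is defined); the function name is illustrative, not the committed implementation:

# history-aware variant of generate_response -- an illustrative sketch
def generate_response_with_history(messages, temperature, max_new_tokens):
    # messages is the app's existing [{"role": ..., "content": ...}] list,
    # which is the structure apply_chat_template expects.
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,  # append the assistant prefix
        return_tensors="pt",
    ).to(device)

    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens, not the echoed conversation.
    return tokenizer.decode(
        output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True
    )

Inside the app this would be called as generate_response_with_history(st.session_state.messages, temp_values, max_token_value) after the user message has been appended to the history.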