AmmarFahmy commited on
Commit
d54ea9f
·
verified ·
1 Parent(s): bc3f6a2

uploaded some files

Browse files
Files changed (4) hide show
  1. HuggingChatAPI.py +212 -0
  2. exportchat.py +55 -0
  3. promptTemplate.py +91 -0
  4. requirements.txt +20 -0
HuggingChatAPI.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from hugchat import hugchat
2
+
3
+ import time
4
+
5
+ from typing import Any, List, Mapping, Optional
6
+
7
+ from langchain.callbacks.manager import CallbackManagerForLLMRun
8
+ from langchain.llms.base import LLM
9
+
10
+
11
+ # THIS IS A CUSTOM LLM WRAPPER Based on hugchat library
12
+ # Reference :
13
+ # - Langchain custom LLM wrapper : https://python.langchain.com/docs/modules/model_io/models/llms/how_to/custom_llm
14
+ # - HugChat library : https://github.com/Soulter/hugging-chat-api
15
+ # - I am Alessandro Ciciarelli the owner of IntelligenzaArtificialeItalia.net , my dream is to democratize AI and make it accessible to everyone.
16
+
17
class HuggingChat(LLM):

    """HuggingChat LLM wrapper.

    Custom LangChain LLM built on top of the hugchat library. Authenticate
    either with ``email``/``psw`` credentials or with a saved ``cookie_path``,
    then route LangChain calls through a lazily created ``hugchat.ChatBot``.
    """

    # Lazily created hugchat client; built on first use by create_chatbot().
    chatbot : Optional[hugchat.ChatBot] = None

    # Credentials: either email+psw OR cookie_path must be provided.
    email: Optional[str] = None
    psw: Optional[str] = None
    cookie_path : Optional[str] = None

    # Conversation id to reuse; a fresh one is created when None.
    conversation : Optional[str] = None
    model: Optional[int] = 0 # 0 = OpenAssistant/oasst-sft-6-llama-30b-xor , 1 = meta-llama/Llama-2-70b-chat-hf

    # Generation parameters forwarded verbatim to hugchat.ChatBot.chat().
    temperature: Optional[float] = 0.9
    top_p: Optional[float] = 0.95
    repetition_penalty: Optional[float] = 1.2
    top_k: Optional[int] = 50
    truncate: Optional[int] = 1024
    watermark: Optional[bool] = False
    max_new_tokens: Optional[int] = 1024
    stop: Optional[list] = ["</s>"]
    return_full_text: Optional[bool] = False
    stream_resp: Optional[bool] = True
    use_cache: Optional[bool] = False
    is_retry: Optional[bool] = False
    retry_count: Optional[int] = 5

    # Running average of per-call latency in seconds (see _call for the formula).
    avg_response_time: float = 0.0
    log : Optional[bool] = False


    @property
    def _llm_type(self) -> str:
        return "🤗CUSTOM LLM WRAPPER Based on hugging-chat-api library"


    def create_chatbot(self) -> None:
        """Authenticate and build the hugchat.ChatBot client.

        Raises:
            ValueError: when no credential source is configured, or when
                login / client construction fails for any reason.
        """
        if not any([self.email, self.psw, self.cookie_path]):
            raise ValueError("email, psw, or cookie_path is required.")

        try:
            if self.email and self.psw:
                # Create a ChatBot using email and psw
                from hugchat.login import Login
                start_time = time.time()
                sign = Login(self.email, self.psw)
                cookies = sign.login()
                end_time = time.time()
                if self.log : print(f"\n[LOG] Login successfull in {round(end_time - start_time)} seconds")
                self.chatbot = hugchat.ChatBot(cookies=cookies.get_dict())
            else:
                # Create a ChatBot using cookie_path.
                # BUGFIX: the original assigned a ChatBot instance to `cookies`
                # and then called cookies.get_dict(), which ChatBot does not
                # provide — the cookie-file path always failed. Build the bot
                # directly from the cookie file instead.
                self.chatbot = hugchat.ChatBot(cookie_path=self.cookie_path)

            if self.log : print(f"[LOG] LLM WRAPPER created successfully")

        except Exception as e:
            raise ValueError("LogIn failed. Please check your credentials or cookie_path. " + str(e))

        # Setup ChatBot info
        self.chatbot.switch_llm(self.model)
        if self.log : print(f"[LOG] LLM WRAPPER switched to model { 'OpenAssistant/oasst-sft-6-llama-30b-xor' if self.model == 0 else 'meta-llama/Llama-2-70b-chat-hf'}")

        self.conversation = self.conversation or self.chatbot.new_conversation()
        self.chatbot.change_conversation(self.conversation)
        if self.log : print(f"[LOG] LLM WRAPPER changed conversation to {self.conversation}\n")



    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Send `prompt` to HuggingChat and return the reply as a string.

        Args:
            prompt: the user message to send.
            stop: not supported — per-instance ``self.stop`` is used instead;
                passing a value raises ValueError.
            run_manager: LangChain callback manager (unused here).

        Raises:
            ValueError: when `stop` is supplied or the chat call fails.
        """
        if stop:
            raise ValueError("stop kwargs are not permitted.")

        # Lazily create the client on first use.
        if not self.chatbot:
            self.create_chatbot()

        try:
            if self.log : print(f"[LOG] LLM WRAPPER called with prompt: {prompt}")
            start_time = time.time()
            resp = self.chatbot.chat(
                prompt,
                temperature=self.temperature,
                top_p=self.top_p,
                repetition_penalty=self.repetition_penalty,
                top_k=self.top_k,
                truncate=self.truncate,
                watermark=self.watermark,
                max_new_tokens=self.max_new_tokens,
                stop=self.stop,
                return_full_text=self.return_full_text,
                stream=self.stream_resp,
                use_cache=self.use_cache,
                is_retry=self.is_retry,
                retry_count=self.retry_count,
            )

            end_time = time.time()

            # Exponential-style running average: mean of the previous average
            # and the latest latency (NOT an arithmetic mean over all calls).
            self.avg_response_time = (self.avg_response_time + (end_time - start_time)) / 2 if self.avg_response_time else end_time - start_time

            if self.log : print(f"[LOG] LLM WRAPPER response time: {round(end_time - start_time)} seconds")
            if self.log : print(f"[LOG] LLM WRAPPER avg response time: {round(self.avg_response_time)} seconds")
            if self.log : print(f"[LOG] LLM WRAPPER response: {resp}\n\n")

            return str(resp)

        except Exception as e:
            raise ValueError("ChatBot failed, please check your parameters. " + str(e))

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        params = {
            "model": "HuggingChat",
            "temperature": self.temperature,
            "top_p": self.top_p,
            "repetition_penalty": self.repetition_penalty,
            "top_k": self.top_k,
            "truncate": self.truncate,
            "watermark": self.watermark,
            "max_new_tokens": self.max_new_tokens,
            "stop": self.stop,
            "return_full_text": self.return_full_text,
            "stream": self.stream_resp,
            "use_cache": self.use_cache,
            "is_retry": self.is_retry,
            "retry_count": self.retry_count,
            "avg_response_time": self.avg_response_time,
        }
        return params

    @property
    def _get_avg_response_time(self) -> float:
        """Get the average response time."""
        return self.avg_response_time
158
+
159
+
160
+
161
+ #HOW TO USE IT
162
+ # 1) Install the library : pip install -U hugchat langchain
163
+ # 2) Get your HuggingFace credentials : https://huggingface.co/
164
+ # 3) Import the library and enjoy it : from HCA import HCA
165
+
166
+ # EXAMPLE 1 : Using email and psw
167
+ # llm = HCA(email="YOUR_EMAIL", psw="YOUR_psw")
168
+
169
+ # EXAMPLE 2 : Using cookie file
170
+ # llm = HCA(cookie_path="YOUR_COOKIE_PATH")
171
+
172
+ # EXAMPLE 3 : Modify the default parameters
173
+ # llm = HCA(email="YOUR_EMAIL", psw="YOUR_psw", log=True , model=1, temperature=0.9, top_p=0.95, repetition_penalty=1.2, top_k=50, truncate=1024, watermark=False, max_new_tokens=1024, stop=["</s>"], return_full_text=False, stream=True, use_cache=False, is_retry=False, retry_count=5)
174
+
175
+ # EXAMPLE 4 : Using the LLM
176
+ # print(llm("Hello, how are you?"))
177
+
178
+
179
+ # EXAMPLE 5 : simple use
180
+ # from HCA import HCA
181
+ #llm = HCA(email="YOUR_EMAIL", psw="YOUR_psw" , log=True, model=1)
182
+ #txt = input("\n\nYou (write 'exit' for stop): ")
183
+ #while txt != "exit":
184
+ #print("Bot : " + llm(txt) + "\n")
185
+ #print("Avg response time : " + str(llm._get_avg_response_time))
186
+ #txt = input("You : ")
187
+
188
+
189
+
190
+ #from hugchat import hugchat
191
+ #from hugchat.login import Login
192
+ #from langchain.llms.base import LLM
193
+ #from typing import Optional, List, Mapping, Any
194
+ #from time import sleep
195
+
196
+
197
+ # THIS IS A CUSTOM LLM WRAPPER Based on hugchat library
198
+ # Reference :
199
+ # - Langchain custom LLM wrapper : https://python.langchain.com/docs/modules/model_io/models/llms/how_to/custom_llm
200
+ # - HugChat library : https://github.com/Soulter/hugging-chat-api
201
+
202
+
203
+
204
+
205
+
206
+ #llm = HuggingChat(email = "YOUR-EMAIL" , psw = "YOUR-PSW" ) #to start a new chat
207
+
208
+
209
+ #print(llm("Hello, how are you?"))
210
+ #print(llm("what is AI?"))
211
+ #print(llm("Can you resume your previus answer?")) #now memory work well
212
+
exportchat.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ PYTHON FILE FOR EXPORT CHAT FUNCTION
3
+ """
4
+
5
+ import streamlit as st
6
+ from datetime import datetime
7
+
8
+
9
def export_chat():
    """Render the chat history kept in st.session_state as a standalone HTML
    page and expose it through a Streamlit download button.

    Messages are emitted in reverse order so the first bubble shown is
    always the bot's. Also persists a local copy as ``chat.html``.
    """
    if 'generated' in st.session_state:
        parts = []
        parts.append('<html><head><title>ChatBOT Intelligenza Artificiale Italia 🧠🤖🇮🇹</title>')
        # Two minimal WhatsApp-like CSS bubbles: grey/left for bot, green/right for user.
        parts.append('<style> .bot { background-color: #e5e5ea; padding: 10px; border-radius: 10px; margin: 10px; width: 50%; float: left; } .user { background-color: #dcf8c6; padding: 10px; border-radius: 10px; margin: 10px; width: 50%; float: right; } </style>')
        parts.append('</head><body>')
        # Page header.
        parts.append('<center><h1>ChatBOT Intelligenza Artificiale Italia 🧠🤖🇮🇹</h1>')
        # Donation link.
        parts.append('<h3>🤗 Support the project with a donation for the development of new features 🤗</h3>')
        parts.append('<br><a href="https://rebrand.ly/SupportAUTOGPTfree"><img src="https://www.paypalobjects.com/en_US/i/btn/btn_donateCC_LG.gif" alt="PayPal donate button" /></a>')
        # Subheader carrying the export timestamp.
        parts.append('<br><br><h5>' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + '</h5></center><br><br>')
        # Transcript wrapped in a plain full-width container.
        parts.append('<div style="padding: 10px; border-radius: 10px; margin: 10px; width: 100%; float: left;">')
        for i in range(len(st.session_state['generated']) - 1, -1, -1):
            parts.append('<div class="bot">' + st.session_state["generated"][i] + '</div><br>')
            parts.append('<div class="user">' + st.session_state['past'][i] + '</div><br>')
        parts.append('</div>')
        # Footer with a second donation link.
        parts.append('<br><br><center><small>Thanks you for using our ChatBOT 🧠🤖🇮🇹</small>')
        parts.append('<h6>🤗 Support the project with a donation for the development of new features 🤗</h6>')
        parts.append('<br><a href="https://rebrand.ly/SupportAUTOGPTfree"><img src="https://www.paypalobjects.com/en_US/i/btn/btn_donateCC_LG.gif" alt="PayPal donate button" /></a><center>')
        parts.append('</body></html>')
        html_chat = ''.join(parts)

        # Persist a local copy, then offer the same markup for download.
        with open('chat.html', 'wb') as f:
            f.write(html_chat.encode('utf-8'))
        st.download_button(
            label="📚 Download chat",
            data=html_chat,
            file_name='chat.html',
            mime='text/html'
        )
promptTemplate.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file contains the template for the prompt to be used for injecting the context into the model.
3
+
4
+ With this technique we can use different plugin for different type of question and answer.
5
+ Like :
6
+ - Internet
7
+ - Data
8
+ - Code
9
+ - PDF
10
+ - Audio
11
+ - Video
12
+
13
+ """
14
+
15
+ from datetime import datetime
16
+ now = datetime.now()
17
+
18
+ # def prompt4conversation(prompt,context):
19
+ # final_prompt = f""" GENERAL INFORMATION : ( today is {now.strftime("%d/%m/%Y %H:%M:%S")} , You are a friendly helpful assistant.
20
+ # ISTRUCTION : IN YOUR ANSWER NEVER INCLUDE THE USER QUESTION or MESSAGE , WRITE ALWAYS ONLY YOUR ACCURATE ANSWER!
21
+ # PREVIUS MESSAGE : ({context})
22
+ # NOW THE USER ASK : {prompt} .
23
+ # WRITE THE ANSWER :"""
24
+ # return final_prompt
25
+
26
def prompt4conversation(prompt,context):
    """Build the plain-conversation prompt: prior context plus the new
    user message, prefixed with today's date/time."""
    return f""" GENERAL INFORMATION : ( today is {now.strftime("%d/%m/%Y %H:%M:%S")} ,
    PREVIUS MESSAGE : ({context})
    NOW THE USER ASK : {prompt} .
    WRITE THE ANSWER :"""
32
+
33
def prompt4conversationInternet(prompt,context, internet, resume):
    """Build the internet-augmented prompt: context, raw search results,
    and their summary, around the repeated user question."""
    return f""" GENERAL INFORMATION : ( today is {now.strftime("%d/%m/%Y %H:%M:%S")} ,
    PREVIUS MESSAGE : ({context})
    NOW THE USER ASK : {prompt}.
    INTERNET RESULT TO USE TO ANSWER : ({internet})
    INTERNET RESUME : ({resume})
    NOW THE USER ASK : {prompt}.
    WRITE THE FRIENDLY ANSWER BASED ON INTERNET INFORMATION :"""
42
+
43
def prompt4Data(prompt, context, solution):
    """Build the data-plugin prompt: ask the model to expand on a
    precomputed correct answer without altering it."""
    return f""" ISTRUCTION : IN YOUR ANSWER NEVER INCLUDE THE USER QUESTION or MESSAGE , YOU MUST MAKE THE CORRECT ANSWER MORE ARGUMENTED ! IF THE CORRECT ANSWER CONTAINS CODE YOU ARE OBLIGED TO INSERT IT IN YOUR NEW ANSWER!
    PREVIUS MESSAGE : ({context})
    NOW THE USER ASK : {prompt}
    THIS IS THE CORRECT ANSWER : ({solution})
    MAKE THE ANSWER MORE ARGUMENTED, WITHOUT CHANGING ANYTHING OF THE CORRECT ANSWER :"""
50
+
51
def prompt4Code(prompt, context, solution):
    """Build the code-plugin prompt: the model must keep the supplied
    code verbatim while elaborating around it."""
    return f""" ISTRUCTION : IN YOUR ANSWER NEVER INCLUDE THE USER QUESTION or MESSAGE , THE CORRECT ANSWER CONTAINS CODE YOU ARE OBLIGED TO INSERT IT IN YOUR NEW ANSWER!
    PREVIUS MESSAGE : ({context})
    NOW THE USER ASK : {prompt}
    THIS IS THE CODE FOR THE ANSWER : ({solution})
    WITHOUT CHANGING ANYTHING OF THE CODE of CORRECT ANSWER , MAKE THE ANSWER MORE DETALIED INCLUDING THE CORRECT CODE :"""
58
+
59
+
60
def prompt4Context(prompt, context, solution):
    """Build the generic context-injection prompt: expand a known correct
    answer without changing it."""
    return f""" ISTRUCTION : IN YOUR ANSWER NEVER INCLUDE THE USER QUESTION or MESSAGE ,WRITE ALWAYS ONLY YOUR ACCURATE ANSWER!
    PREVIUS MESSAGE : ({context})
    NOW THE USER ASK : {prompt}
    THIS IS THE CORRECT ANSWER : ({solution})
    WITHOUT CHANGING ANYTHING OF CORRECT ANSWER , MAKE THE ANSWER MORE DETALIED:"""
67
+
68
+
69
def prompt4Audio(prompt, context, solution):
    """Build the audio-plugin prompt: elaborate on an answer derived from
    transcribed audio without changing it."""
    return f""" ISTRUCTION : IN YOUR ANSWER NEVER INCLUDE THE USER QUESTION or MESSAGE ,WRITE ALWAYS ONLY YOUR ACCURATE ANSWER!
    PREVIUS MESSAGE : ({context})
    NOW THE USER ASK : {prompt}
    THIS IS THE CORRECT ANSWER based on Audio text gived in input : ({solution})
    WITHOUT CHANGING ANYTHING OF CORRECT ANSWER , MAKE THE ANSWER MORE DETALIED:"""
76
+
77
def prompt4YT(prompt, context, solution):
    """Build the YouTube-plugin prompt: elaborate on an answer derived
    from a video transcript without changing it."""
    return f""" ISTRUCTION : IN YOUR ANSWER NEVER INCLUDE THE USER QUESTION or MESSAGE ,WRITE ALWAYS ONLY YOUR ACCURATE ANSWER!
    PREVIUS MESSAGE : ({context})
    NOW THE USER ASK : {prompt}
    THIS IS THE CORRECT ANSWER based on Youtube video gived in input : ({solution})
    WITHOUT CHANGING ANYTHING OF CORRECT ANSWER , MAKE THE ANSWER MORE DETALIED:"""
84
+
85
+
86
+ #HOW TO ADD YOUR OWN PROMPT :
87
+ # 1) ADD YOUR FUNCTION HERE, for example : def prompt4Me(prompt, context):
88
+ # 2) WRITE THE PROMPT TEMPLATE FOR YOUR FUNCTION, for example : template = f"YOU IS : {context} , NOW THE USER ASK : {prompt} . WRITE THE ANSWER :"
89
+ # 3) RETURN THE TEMPLATE, for example : return template
90
+ # 4) IMPORT YOUR FUNCTION IN THE MAIN FILE (streamlit_app.py) , for example : from promptTemplate import prompt4Me
91
+ # 5) FOLLOW OTHER SPTEP IN THE MAIN FILE (streamlit_app.py)
requirements.txt ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ beautifulsoup4==4.12.2
2
+ docx2txt==0.8
3
+ sketch==0.4.2
4
+ hugchat
5
+ langchain
6
+ streamlit==1.24.0
7
+ streamlit_extras==0.2.7
8
+ pandas==2.0.1
9
+ pdfplumber==0.9.0
10
+ pydub==0.25.1
11
+ requests
12
+ urllib3
13
+ duckduckgo_search
14
+ SpeechRecognition==3.8.1
15
+ youtube_search_python==1.6.6
16
+ youtube_transcript_api==0.6.1
17
+ chromadb==0.4.24
18
+ ffmpeg-python
19
+ ffprobe
20
+ huggingface_hub