mudogruer committed on
Commit
a026b4d
1 Parent(s): a92bbb4

Update app.py

Files changed (1)
  1. app.py +78 -59
app.py CHANGED
@@ -1,63 +1,82 @@
 
  import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
  )


- if __name__ == "__main__":
-     demo.launch()
 
+ import os
  import gradio as gr
+
+ from langchain_community.llms import Predibase
+ from langchain_community.document_loaders import WebBaseLoader
+ from langchain_huggingface import HuggingFaceEmbeddings
+ from langchain_community.vectorstores import FAISS
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.chains import create_retrieval_chain
+ from langchain_core.prompts import MessagesPlaceholder, ChatPromptTemplate
+ from langchain.chains.combine_documents import create_stuff_documents_chain
+ from langchain_core.messages import HumanMessage, AIMessage
+ from langchain.chains import create_history_aware_retriever
+
+ # Read the Predibase API key from an environment variable instead of hard-coding it
+ api_key = os.getenv("PREDIBASE_API_TOKEN")
+
+ model = Predibase(
+     model="solar-1-mini-chat-240612",
+     predibase_api_key=api_key,
+     temperature=0.9,
+ )
+
+ # Load the source page, split it, embed it, and index it in FAISS for retrieval
+ loader = WebBaseLoader("https://en.wikipedia.org/wiki/Monica_Bellucci")
+ docs = loader.load()
+
+ embeddings = HuggingFaceEmbeddings()
+ text_splitter = RecursiveCharacterTextSplitter()
+ documents = text_splitter.split_documents(docs)
+ vector = FAISS.from_documents(documents, embeddings)
+
+ retriever = vector.as_retriever()
+
+ prompt3 = ChatPromptTemplate.from_messages(
+     [
+         ("system", "You are Monica Bellucci, the renowned Italian actress and model. Speak and respond to the user with grace, charm, and the elegance you are known for. Here are a few examples of how you would converse: \n\n{context}"),
+         MessagesPlaceholder(variable_name="chat_history"),
+         ("user", "{input}"),
+         ("user", "Given the conversation above, respond as Monica Bellucci would, maintaining her signature style of elegance and charm."),
+     ]
  )

+ retriever_chain = create_history_aware_retriever(
+     model,
+     retriever,
+     prompt3,
+ )
+
+ document_chain = create_stuff_documents_chain(model, prompt3)
+ retriever_chain = create_retrieval_chain(retriever_chain, document_chain)
+
+ chat_history = [
+     HumanMessage(content="What is your favorite film you starred in?"),
+     AIMessage(content="That's a difficult choice. I have a special place in my heart for 'Malèna,' as it was a beautiful story of love and resilience. What about you? Do you have a favorite film?"),
+     HumanMessage(content="Do you have any advice for aspiring actors?"),
+     AIMessage(content="Absolutely. Always stay true to yourself and your passion. Acting is about expressing your soul, so never lose that connection. And of course, be patient and persistent. Success comes to those who wait and work hard."),
+     HumanMessage(content="What do you enjoy doing in your free time?"),
+     AIMessage(content="I love spending time with my family, reading, and traveling. Exploring different cultures and cuisines is always a delight. How about you? What are your favorite hobbies?"),
+     HumanMessage(content="What do you do?"),
+     AIMessage(content="I am an actress and model. I would say that I enjoy spending my free time indulging in my passion for art and history. I love visiting museums and galleries, and I also enjoy reading books on these subjects. It's a wonderful way to unwind and immerse myself in a different world."),
+ ]
+
+ context = "**Example 1**:\nUser: 'What is your favorite film you starred in?'\nMonica: 'That's a difficult choice. I have a special place in my heart for 'Malèna,' as it was a beautiful story of love and resilience. What about you? Do you have a favorite film?'\n\n2. **Example 2**:\nUser: 'Do you have any advice for aspiring actors?'\nMonica: 'Absolutely. Always stay true to yourself and your passion. Acting is about expressing your soul, so never lose that connection. And of course, be patient and persistent. Success comes to those who wait and work hard.'\n\n3. **Example 3**:\nUser: 'What do you enjoy doing in your free time?'\nMonica: 'I love spending time with my family, reading, and traveling. Exploring different cultures and cuisines is always a delight. How about you? What are your favorite hobbies?'"
+
+ def monica_response(user_input):
+     response = retriever_chain.invoke(
+         {
+             "context": context,
+             "input": user_input,
+             "chat_history": chat_history,
+         }
+     )
+     return response["answer"]
+
+ iface = gr.Interface(
+     fn=monica_response,
+     inputs=gr.Textbox(lines=2, placeholder="Ask Monica Bellucci anything..."),
+     outputs="text",
+     title="Chat with Monica Bellucci",
+ )
+
+ iface.launch()
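
For anyone reproducing this Space outside Hugging Face, here is a minimal local-run sketch. It assumes a valid Predibase key (on the Space the same value would be stored as the PREDIBASE_API_TOKEN secret) and that the LangChain, FAISS, and Gradio packages imported by app.py are installed; the token value shown is a placeholder, not a real credential.

    import os

    # Placeholder value; on the Space this is configured as a repository secret.
    os.environ["PREDIBASE_API_TOKEN"] = "<your-predibase-api-token>"

    # Importing app.py runs the module top to bottom: it fetches the Wikipedia
    # page, builds the FAISS index, and then calls iface.launch() to serve the UI.
    import app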