JamalAG committed on
Commit
41fe4ff
·
1 Parent(s): f6b5434

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -25
app.py CHANGED
@@ -1,31 +1,23 @@
1
  import streamlit as st
2
- from langchain.llms import HuggingFaceHub
3
 
4
- # Function to return the response
5
def generate_answer(query):
    """Return a model-generated answer to a patient's question.

    Sends *query* to the zephyr-7b-alpha chat model on the Hugging Face
    Hub, wrapped in a doctor-assistant system prompt, and returns the
    raw generated text.

    Args:
        query: The patient's free-text question.

    Returns:
        The model's generated reply as a string.
    """
    llm = HuggingFaceHub(
        # NOTE(review): Hub repo ids are canonically "HuggingFaceH4/zephyr-7b-alpha";
        # confirm the all-lowercase form still resolves.
        repo_id="huggingfaceh4/zephyr-7b-alpha",
        # FIX: the original passed both max_length=64 and max_new_tokens=512,
        # which contradict each other and could truncate answers to ~64 tokens.
        # max_new_tokens alone bounds the generation length.
        model_kwargs={"temperature": 0.5, "max_new_tokens": 512},
    )
    # FIX: zephyr-7b-alpha is trained on the <|system|>/<|user|>/<|assistant|>
    # chat template; the original prompt used bare </s> separators only, so the
    # model never saw a proper assistant turn to complete.
    prompt = (
        "<|system|>\n"
        "You are helpful doctor assistant that gives patients advices. "
        "Please answer the patients in a kind and helpful way.</s>\n"
        f"<|user|>\n{query}</s>\n"
        "<|assistant|>\n"
    )
    return llm.predict(prompt)
17
 
18
# --- Streamlit UI ---
st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
st.header("LangChain Demo")

# Collect the user's question.
user_input = st.text_input("You: ", key="input")

# Only query the model once the user explicitly asks for an answer.
if st.button("Generate"):
    st.subheader("Answer:")
    st.write(generate_answer(user_input))
 
1
  import streamlit as st
2
+ from transformers import pipeline
3
 
4
# Conversation is needed because ConversationalPipeline consumes/returns
# Conversation objects, not raw lists of message dicts. (Re-importing
# pipeline here is harmless; transformers is already imported at the top.)
from transformers import Conversation, pipeline

# Load the conversational pipeline once at startup (downloads the default
# model on first run).
conversational_pipeline = pipeline("conversational")

# Streamlit app header
st.set_page_config(page_title="Conversational Model Demo", page_icon="🤖")
st.header("Conversational Model Demo")

# Input for user message
user_message = st.text_input("You:", "")

if st.button("Send"):
    # Robustness: don't invoke the model on an empty prompt.
    if not user_message.strip():
        st.warning("Please enter a message first.")
    else:
        # FIX: the pipeline was previously called with a bare list of
        # {"role", "content"} dicts and the result indexed as
        # [0]['generated_responses'][0]; ConversationalPipeline expects a
        # Conversation object and returns it with the model's replies
        # appended to .generated_responses.
        conversation = Conversation(
            messages=[
                {"role": "system", "content": "You are an AI assistant."},
                {"role": "user", "content": user_message},
            ]
        )

        # The pipeline mutates and returns the same Conversation.
        result = conversational_pipeline(conversation)
        model_response = result.generated_responses[-1]

        # Display the model's response
        st.text_area("Model:", model_response, height=100)