JamalAG committed on
Commit 0778b80 · 1 Parent(s): a5a0bc7

Update app.py

Files changed (1)
  1. app.py +25 -16
app.py CHANGED
@@ -1,22 +1,31 @@
  import streamlit as st
- from transformers import pipeline
-
- # Load the conversational pipeline
- conversational_pipeline = pipeline("conversational")
-
- # Streamlit app header
- st.title("Hugging Face Conversational Model Demo")
-
- # Input for user message
- user_message = st.text_input("You:", "")
-
- if st.button("Send"):
-     # Format the conversation for the conversational pipeline
-     conversation_history = [{"role": "system", "content": "You are a helpful assistant."},
-                             {"role": "user", "content": user_message}]
-
-     # Get the model's response
-     model_response = conversational_pipeline(conversation_history)[0]['generated_responses'][0]
-
-     # Display the model's response
-     st.text_area("Model:", model_response, height=100)
+ from langchain.llms import HuggingFaceHub
+
+ # Function to return the response
+ def generate_answer(query):
+     llm = HuggingFaceHub(
+         repo_id="huggingfaceh4/zephyr-7b-alpha",
+         model_kwargs={"temperature": 0.5, "max_length": 64, "max_new_tokens": 512}
+     )
+     prompt = f"""
+     You are a helpful doctor assistant that gives patients advice. Please answer the patients in a kind and helpful way.
+     </s>
+     {query}</s>
+     """
+     result = llm.predict(prompt)
+     return result
+
+ # App UI starts here
+ st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
+ st.header("LangChain Demo")
+
+ # Get user input
+ user_input = st.text_input("You: ", key="input")
+
+ submit = st.button("Generate")
+
+ # If the button is clicked
+ if submit:
+     st.subheader("Answer:")
+     response = generate_answer(user_input)
+     st.write(response)
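
The updated app.py calls the Hugging Face Inference API through LangChain's HuggingFaceHub wrapper, which needs a Hub token (read from the HUGGINGFACEHUB_API_TOKEN environment variable or passed as huggingfacehub_api_token). A minimal sketch of exercising the same call outside Streamlit, assuming langchain and huggingface_hub are installed and a token is set; the sample question is a placeholder:

    # Sketch only: same model and generation settings as generate_answer() above.
    # Assumes HUGGINGFACEHUB_API_TOKEN is set in the environment.
    from langchain.llms import HuggingFaceHub

    llm = HuggingFaceHub(
        repo_id="huggingfaceh4/zephyr-7b-alpha",
        model_kwargs={"temperature": 0.5, "max_length": 64, "max_new_tokens": 512},
    )
    print(llm.predict("I have a mild headache. What should I do?"))  # placeholder question

The full app is launched as usual with: streamlit run app.py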