HassanDataSci committed
Commit 5931998 · verified · 1 Parent(s): d02e430

Update app.py

Files changed (1):
  app.py +29 -25
app.py CHANGED
@@ -1,34 +1,38 @@
 import streamlit as st
 from transformers import pipeline
+from io import BytesIO
 
-# Load a text generation model
+# Load Hugging Face models for question-answering and text-to-speech
 @st.cache_resource
-def load_model():
-    return pipeline("text-generation", model="EleutherAI/gpt-neo-125M")
+def load_qa_pipeline():
+    return pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")
 
-# Initialize the model
-generator = load_model()
+@st.cache_resource
+def load_tts_pipeline():
+    return pipeline("text-to-speech", model="espnet/kan-bayashi-ljspeech-vits")
 
-st.title("Trump-Inspired Text Generator")
-st.write("Enter a prompt to receive a response.")
+qa_pipeline = load_qa_pipeline()
+tts_pipeline = load_tts_pipeline()
 
-# User input
-user_input = st.text_input("Enter your prompt:")
+# Streamlit interface
+st.title("Virtual Assistant")
+st.write("Ask me anything!")
 
-if user_input:
-    # Hidden style prompt for model guidance, but not shown to user
-    trump_prompt = f"In the style of Donald Trump: {user_input}"
-
-    # Generate the response with adjusted parameters
-    response = generator(
-        trump_prompt,
-        max_length=30,
-        num_return_sequences=1,
-        do_sample=True,
-        temperature=0.9,
-        top_p=0.85
-    )
-
-    # Extract and display the generated text
-    generated_text = response[0]["generated_text"]
-    st.write("Response:", generated_text)
+# User query
+user_query = st.text_input("Type your question here:")
+
+if user_query:
+    # Generate answer using the QA model
+    context = "This is the context of the assistant. The assistant will answer general knowledge questions."  # Customize context for better QA accuracy
+    qa_result = qa_pipeline({"question": user_query, "context": context})
+    answer = qa_result['answer']
+
+    # Display answer as text
+    st.write(f"Answer: {answer}")
+
+    # Convert answer to audio using TTS model
+    tts_audio = tts_pipeline(answer, return_tensors="pt").audio
+
+    # Streamlit audio player for TTS output
+    audio_bytes = BytesIO(tts_audio)
+    st.audio(audio_bytes, format="audio/wav")
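
A note on the new text-to-speech handling: as committed, tts_pipeline(answer, return_tensors="pt").audio assumes the pipeline accepts a return_tensors argument and returns an object with an .audio attribute, and that the raw waveform can be handed to st.audio as WAV. In current transformers, the text-to-speech pipeline returns a dict containing a NumPy waveform and its sampling rate, and espnet/kan-bayashi-ljspeech-vits is an ESPnet checkpoint that may not load through transformers.pipeline at all. Below is a minimal sketch of one way to produce playable audio, assuming a pipeline-compatible checkpoint (facebook/mms-tts-eng here, chosen only for illustration) and the soundfile package to write the WAV header; the model name and helper function are assumptions, not part of this commit.

# Sketch only: assumes a transformers-compatible TTS checkpoint and the soundfile package.
from io import BytesIO

import soundfile as sf
from transformers import pipeline

tts = pipeline("text-to-speech", model="facebook/mms-tts-eng")  # assumed checkpoint, not the one committed

def synthesize_wav_bytes(text: str) -> bytes:
    """Run TTS and wrap the waveform in a WAV container that st.audio can play."""
    result = tts(text)  # dict with "audio" (NumPy waveform) and "sampling_rate"
    buffer = BytesIO()
    sf.write(buffer, result["audio"].squeeze(), result["sampling_rate"], format="WAV")
    return buffer.getvalue()

# In app.py this would replace the BytesIO(tts_audio) / st.audio(...) lines:
# st.audio(synthesize_wav_bytes(answer), format="audio/wav")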