sachitksh123 committed
Commit 8fce208 · verified · 1 Parent(s): 8f76f02

Update app.py

Files changed (1)
  1. app.py +4 -12
app.py CHANGED
@@ -1,22 +1,15 @@
  import streamlit as st
  import requests
  from transformers import pipeline
- import urllib3
  import os

- # Disable warnings
- urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
-
- # Create a custom session
- session = requests.Session()
- session.verify = False  # Disable SSL verification
-
  # Load the token from Hugging Face secrets
- HUGGINGFACE_TOKEN = os.environ.get("hf_token")  # Set the default to empty if not found
+ HUGGINGFACE_TOKEN = os.environ.get("hf_token")  # Ensure this token is correctly set
+ print("Hugging Face Token:", HUGGINGFACE_TOKEN)  # Debugging line

  # Set up the text generation pipeline with the token
  pipe = pipeline("text-generation", model="mistralai/Mistral-7B-v0.1",
-                 use_auth_token=HUGGINGFACE_TOKEN, request_session=session)
+                 use_auth_token=HUGGINGFACE_TOKEN)

  # Streamlit application
  st.title("Text Generation with Hugging Face")
@@ -26,8 +19,7 @@ user_input = st.text_input("You: ", "Who are you?")

  if st.button("Generate Response"):
      if user_input:
-         messages = [{"role": "user", "content": user_input}]
-         response = pipe(messages)
+         response = pipe(user_input)
          generated_text = response[0]['generated_text']  # Adjust according to the response format
          st.text_area("Bot:", generated_text, height=200)
      else:
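
For context, a minimal sketch of the call path this commit switches to, outside Streamlit, assuming the standard transformers text-generation pipeline return format (a list of dicts, each with a 'generated_text' key). The "hf_token" variable name and the use_auth_token argument are taken from the diff above; max_new_tokens is an illustrative generation setting that is not part of the commit.

import os
from transformers import pipeline

# The Space secret is exposed as an environment variable; the name
# "hf_token" matches the secret used in this commit.
HUGGINGFACE_TOKEN = os.environ.get("hf_token")

# Gated models such as Mistral-7B-v0.1 need an authenticated token.
pipe = pipeline(
    "text-generation",
    model="mistralai/Mistral-7B-v0.1",
    use_auth_token=HUGGINGFACE_TOKEN,
)

# Calling the pipeline with a plain string (as pipe(user_input) now does)
# returns a list with one dict per generated sequence; the prompt plus its
# continuation sits under the 'generated_text' key.
response = pipe("Who are you?", max_new_tokens=50)
generated_text = response[0]["generated_text"]
print(generated_text)

Note that recent transformers releases prefer token= over the deprecated use_auth_token=; the sketch keeps the argument name used in the commit.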