joermd committed on
Commit
7e0b3c1
·
verified ·
1 Parent(s): 1fbf9eb

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -0
app.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import numpy as np
import streamlit as st
import os
from dotenv import load_dotenv
import requests

# Load environment variables from a local .env file (if present) into
# os.environ, so HUGGINGFACEHUB_API_TOKEN can be read below.
load_dotenv()
9
+
10
# Hosted inference endpoint for the model, plus the bearer token used to
# authenticate requests against it.
HUGGINGFACE_API_URL = "https://api-inference.huggingface.co/models/joermd/llma-speedy"
HUGGINGFACE_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN')

# Image filenames hosted on https://random.dog, shown as a friendly
# fallback when the model call fails.
random_dog = [
    "0f476473-2d8b-415e-b944-483768418a95.jpg",
    "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
    # Add more images as needed
]
20
+
21
def reset_conversation():
    """Wipe the stored chat transcript so the app starts a fresh conversation."""
    for state_key in ("conversation", "messages"):
        st.session_state[state_key] = []
26
+
27
# Create sidebar controls
# Sampling temperature for the model (0.0 = deterministic, 1.0 = most random).
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
# Upper bound passed as max_new_tokens to the inference call below.
max_token_value = st.sidebar.slider('Select a max_token value', 1000, 9000, 5000)
st.sidebar.button('Reset Chat', on_click=reset_conversation)

# Set the model and display its name
# NOTE(review): kept in sync manually with HUGGINGFACE_API_URL above.
model_name = "joermd/llma-speedy"
st.sidebar.write(f"You're now chatting with **{model_name}**")
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
36
+
37
# Ensure the transcript exists in session state, then replay it so the
# conversation survives Streamlit's rerun-on-interaction model.
if "messages" not in st.session_state:
    st.session_state["messages"] = []

for entry in st.session_state["messages"]:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])
45
+
46
# Accept user input, echo it, call the hosted model, and render the reply.
if prompt := st.chat_input(f"Hi, I'm {model_name}, ask me a question"):
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display assistant response
    with st.chat_message("assistant"):
        try:
            headers = {"Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}"}
            payload = {
                "inputs": prompt,
                "parameters": {"temperature": temp_values, "max_new_tokens": max_token_value}
            }
            # timeout keeps the app from hanging forever if the endpoint stalls.
            response = requests.post(HUGGINGFACE_API_URL, headers=headers, json=payload, timeout=60)

            if response.status_code == 200:
                result = response.json()
                # The HF Inference API returns a list for text-generation:
                # [{"generated_text": "..."}]. The previous code called
                # .get() on that list, which raised AttributeError and was
                # misreported as a connection problem. Handle both shapes.
                if isinstance(result, list) and result:
                    assistant_response = result[0].get("generated_text", "No response generated.")
                elif isinstance(result, dict):
                    assistant_response = result.get("generated_text", "No response generated.")
                else:
                    assistant_response = "No response generated."
            else:
                assistant_response = "Error: Unable to reach the model."
                st.write(f"Status Code: {response.status_code}")

        except Exception as e:
            # Boundary handler: show a friendly fallback instead of crashing the app.
            assistant_response = "😵‍💫 Connection issue! Try again later. Here's a 🐶:"
            st.image(f'https://random.dog/{random_dog[np.random.randint(len(random_dog))]}')
            st.write("Error message:")
            st.write(e)

        st.markdown(assistant_response)
        st.session_state.messages.append({"role": "assistant", "content": assistant_response})