Yapp99 committed on
Commit
3cd1270
·
1 Parent(s): 5a7fb2a

Proof of concept

Browse files
Files changed (5) hide show
  1. .gitignore +2 -0
  2. README.md +0 -1
  3. app.py +42 -0
  4. init.py +42 -0
  5. pages/Settings.py +12 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ test**.py
2
+ __pycache__
README.md CHANGED
@@ -9,5 +9,4 @@ app_file: app.py
9
  pinned: false
10
  short_description: Ui wrapper for my other LLM Api endpoint for demo purpose
11
  ---
12
-
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
9
  pinned: false
10
  short_description: Ui wrapper for my other LLM Api endpoint for demo purpose
11
  ---
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import requests
from init import init

# st.set_page_config must be the first Streamlit command executed in the
# script, so it runs before init() touches st.session_state.
st.set_page_config(page_title="Chat")

init()

# Base URL of the backing LLM API service.
# url = "http://127.0.0.1:5723"  # local development endpoint
url = "https://yapp99-llm-endpoint.hf.space"
13
def send_chat_request():
    """POST the current conversation to the LLM endpoint and return its reply.

    The request body contains the system prompt, the full message history
    from session state, and the sampling parameters dict.

    Returns:
        str: the assistant reply text from the endpoint's JSON response.

    Raises:
        requests.HTTPError: on a non-2xx response (instead of a confusing
            KeyError when the error body lacks a "response" field).
        requests.Timeout: if the endpoint does not answer within the timeout.
    """
    data = dict(
        chat_history=[
            {
                "role": "system",
                "content": st.session_state.system_prompt,
            },
            *st.session_state.messages,
        ],
        **st.session_state.params,
    )

    with requests.Session() as session:
        # Explicit timeout so the UI cannot hang forever on a stalled endpoint;
        # generation can be slow, hence the generous budget.
        response = session.post(f"{url}/chat", json=data, timeout=120)

    response.raise_for_status()
    return response.json()["response"]
30
+
31
# Replay the conversation recorded so far.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])

# Handle a new user turn: echo it, persist it, then fetch and persist the
# assistant's reply.
if prompt := st.chat_input("What is up?"):
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    reply = send_chat_request()
    with st.chat_message("assistant"):
        st.markdown(reply)
    st.session_state.messages.append({"role": "assistant", "content": reply})
init.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+
3
+
4
def init():
    """Seed st.session_state with defaults on first run (idempotent).

    Safe to call at the top of every page: keys that already exist are
    left untouched, so user edits survive page switches.
    """
    state = st.session_state

    # Default system prompt shown/edited on the Settings page.
    if "system_prompt" not in state:
        state.system_prompt = "You are a helpful and knowledgeable assistant, but is willing to bend the facts to play along with unrealistic requests"

    # Chat history as a list of {"role": ..., "content": ...} dicts.
    if "messages" not in state:
        state.messages = []

    # Sampling parameters forwarded verbatim to the LLM endpoint.
    if "params" not in state:
        state.params = {
            "model": "llama3.2",
            "max_tokens": 65536,
            "temperature": 0.8,
            "top_p": 0.95,
            "min_p": 0.05,
            "typical_p": 1.0,
            "frequency_penalty": 0.0,
            "presence_penalty": 0.0,
            "repeat_penalty": 1.0,
            "top_k": 40,
            "tfs_z": 1.0,
            "mirostat_mode": 0,
            "mirostat_tau": 5.0,
            "mirostat_eta": 0.1,
        }
28
+
29
+
30
+ # max_tokens: int = 65536
31
+ # temperature: float = 0.8
32
+ # top_p: float = 0.95
33
+ # min_p: float = 0.05
34
+ # typical_p: float = 1.0
35
+ # frequency_penalty: float = 0.0
36
+ # presence_penalty: float = 0.0
37
+ # repeat_penalty: float = 1.0
38
+ # top_k: int = 40
39
+ # tfs_z: float = 1.0
40
+ # mirostat_mode: int = 0
41
+ # mirostat_tau: float = 5.0
42
+ # mirostat_eta: float = 0.1
pages/Settings.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
from init import init

init()

st.header("Settings")

# Available model identifiers on the backing endpoint.
_models = ["llama3.2", "falcon-mamba", "mistral-nemo"]

# Without an explicit index the selectbox resets to the first option every
# time this page is remounted (e.g. after visiting the Chat page), silently
# discarding the saved choice. Fall back to 0 if the saved value is unknown.
_current = st.session_state.params["model"]
st.session_state.params["model"] = st.selectbox(
    "Select Model",
    _models,
    index=_models.index(_current) if _current in _models else 0,
)

st.session_state.system_prompt = st.text_input("Input", st.session_state.system_prompt)