JuliaTsk committed
Commit eadad30 · verified · 1 Parent(s): a1f679e

Create app.py

Files changed (1)
app.py +64 -0
app.py ADDED
import streamlit as st
from typing import Generator
from transformers import AutoTokenizer, AutoModelForCausalLM

st.set_page_config(
    page_icon="💬",
    page_title="Chat App",
    layout="wide",
)

model_name = "JuliaTsk/SuccinctLabs-chat-finetuned"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
st.title("ChatGPT-like clone 🎈")


def generate_chat_responses(chat_completion) -> Generator[str, None, None]:
    """Yield text deltas from an OpenAI-style streaming completion.

    Currently unused: the app calls the local transformers model directly.
    """
    for chunk in chat_completion:
        if chunk.choices[0].delta.content:
            yield chunk.choices[0].delta.content


left, right = st.columns([2, 6], vertical_alignment="top")
max_tokens_range = 32768
max_tokens = left.slider(
    label="Max Tokens:",
    min_value=128,
    max_value=max_tokens_range,
    # Default value, capped at the maximum allowed
    value=min(1024, max_tokens_range),
    step=128,
    help="Adjust the maximum number of tokens the model may generate for a response.",
)
temperature = left.slider(
    label="Temperature:",
    min_value=0.0,
    max_value=1.0,
    value=0.7,
    step=0.01,
    help="Controls randomness: lower values give more deterministic responses.",
)

if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far; elements created inside the `with`
# block attach to that chat message.
for message in st.session_state.messages:
    avatar = '🤖' if message["role"] == "assistant" else '👨‍💻'
    with right.chat_message(message["role"], avatar=avatar):
        st.markdown(message["content"])


prompt = st.chat_input("Say something")
if prompt:
    with right.chat_message("user", avatar='👨‍💻'):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})
    with right.chat_message("assistant", avatar='🤖'):
        inputs = tokenizer(prompt, return_tensors="pt")
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_tokens,
            temperature=temperature,
            do_sample=True,
            num_return_sequences=1,
        )
        # Decode only the newly generated tokens, not the echoed prompt.
        generated_text = tokenizer.decode(
            outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
        )
        st.markdown(generated_text)

    st.session_state.messages.append({"role": "assistant", "content": generated_text})
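Note: the `generate_chat_responses` helper is written for an OpenAI-style streaming client but is never called; the transformers path above blocks until the whole response is generated. A minimal sketch of a streaming variant, using transformers' TextIteratorStreamer and Streamlit's st.write_stream (both existing APIs; the wiring shown here is an illustrative assumption, not part of this commit, and reuses the `tokenizer`, `model`, `right`, `prompt`, `max_tokens`, and `temperature` names defined in app.py):

# Sketch only: a streaming replacement for the assistant block above.
from threading import Thread
from transformers import TextIteratorStreamer

with right.chat_message("assistant", avatar='🤖'):
    inputs = tokenizer(prompt, return_tensors="pt")
    # skip_prompt=True drops the echoed prompt from the stream;
    # skip_special_tokens is forwarded to tokenizer.decode().
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        **inputs,
        streamer=streamer,
        max_new_tokens=max_tokens,
        temperature=temperature,
        do_sample=True,
    )
    # generate() blocks, so it runs on a worker thread while the main
    # thread drains the streamer and renders chunks as they arrive.
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    generated_text = st.write_stream(streamer)

st.session_state.messages.append({"role": "assistant", "content": generated_text})

With this in place, `generate_chat_responses` would only be needed if the local model were swapped for an OpenAI-compatible chat-completions backend.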