Grey01 committed on
Commit
b19ea6f
·
verified ·
1 Parent(s): 9b65a20

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -50
app.py CHANGED
@@ -1,58 +1,56 @@
1
  import streamlit as st
2
- from transformers import pipeline
3
-
4
- # configuring streamlit page settings
5
- st.set_page_config(
6
- page_title='Digital Ink',
7
- layout = 'centered'
 
 
8
  )
9
-
10
- generation_args = {
11
- "max_new_tokens": 1000,
12
- "return_full_text": False,
13
- "num_beams": 5,
14
- "do_sample": True,
15
- "top_k": 60,
16
- }
17
-
18
- # Initialize the model pipeline
19
- chat_pipeline = pipeline("text-generation", model="microsoft/Phi-3-mini-128k-instruct")
20
-
21
-
22
- # Streamlit app
23
- st.title("Digital Ink")
24
-
25
- # Initialize the chat history
26
- if 'chat_history' not in st.session_state:
27
- st.session_state.chat_history = []
28
-
29
- #display chat history
30
- for message in st.session_state.chat_history:
31
  with st.chat_message(message["role"]):
32
  st.markdown(message["content"])
33
 
34
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
 
36
  # User input
37
- user_input = st.chat_input("Ask Digital Ink..")
38
-
39
- if user_input:
40
- # Add user message to chat history
41
- st.session_state.message.append({"role": "system", "content": "You are a helpful assistant named Digital Ink. Your purpose is to provide creative engaging and effective marketing content.You can introduce your self as follows: I'm Digital Ink, a marketing content generation model. I'm designed to assist you in creating engaging and effective marketing content, such as blog posts, social media posts, and product descriptions"})
42
- st.session_state.message.append({"role": "user", "content": user_input})
43
- st.chat_state.chat_message("user").markdown(user_input)
44
-
45
- # Generate response from chatbot
46
- context = [msg['content'] for msg in st.session_state.messages]
47
- message = [
48
- {"role": "system", "content": "You are a helpful assistant named Digital Ink. Your purpose is to provide creative engaging and effective marketing content.You can introduce your self as follows: I'm Digital Ink, a marketing content generation model. I'm designed to assist you in creating engaging and effective marketing content, such as blog posts, social media posts, and product descriptions"},
49
- {"role": "user", "content": user_input},
50
- {"role": "assistant", "content": ""},
51
- {"role": "user", "content": ""},
52
- ]
53
- response = chat_pipeline(message, **generation_args)[0]['generated_text']
54
-
55
- # Add assistant response to chat history
56
- st.session_state.messages.append({"role": "assistant", "content": response})
57
  with st.chat_message("assistant"):
58
- st.markdown(response)
 
 
1
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Model/tokenizer identifiers for the Digital Ink marketing assistant.
# NOTE(review): the weights come from marketeam/PhiMarketing while the
# tokenizer comes from the base Phi-3 checkpoint — presumably PhiMarketing
# is a Phi-3 finetune with an unchanged vocabulary; confirm they match.
_MODEL_ID = "marketeam/PhiMarketing"
_TOKENIZER_ID = "microsoft/Phi-3-mini-128k-instruct"

# Load the causal-LM onto the GPU once at import time.
# NOTE(review): trust_remote_code=True executes Python shipped with the
# checkpoint — only keep this for repositories you trust.
model = AutoModelForCausalLM.from_pretrained(
    _MODEL_ID,
    device_map="cuda",
    torch_dtype="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(_TOKENIZER_ID)
12
+
13
# System prompt that defines the assistant's persona (sent once per session).
_SYSTEM_PROMPT = "You are a helpful assistant named Digital Ink. Your purpose is to provide creative engaging and effective marketing content.You can introduce your self as follows: I'm Digital Ink, a marketing content generation model. I'm designed to assist you in creating engaging and effective marketing content, such as blog posts, social media posts, and product descriptions"

# Seed the conversation with the system prompt on the first run only —
# st.session_state persists across Streamlit reruns within a session.
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "system", "content": _SYSTEM_PROMPT}]

# Replay the stored conversation so earlier turns survive each rerun.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])
26
 
27
# Function for generating responses
def generate_response(message, model, tokenizer):
    """Append *message* to the chat history, run the model, and return the reply.

    Parameters
    ----------
    message : str
        The user's new chat message.
    model, tokenizer :
        The causal-LM and tokenizer loaded at module import time.

    Returns
    -------
    str
        The assistant's generated reply.

    Side effects: appends both the user message and the generated assistant
    reply to ``st.session_state.messages``.
    """
    # Build the text-generation pipeline once and reuse it across turns —
    # the original rebuilt it on every call, re-wrapping the model for each
    # user message.
    if "chat_pipe" not in st.session_state:
        st.session_state.chat_pipe = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
        )
    pipe = st.session_state.chat_pipe

    generation_args = {
        "max_new_tokens": 1100,
        "return_full_text": False,  # return only the newly generated text
        "num_beams": 5,
        "do_sample": True,
        "top_k": 60,
    }

    # Record the user turn, then run the full chat history through the model.
    st.session_state.messages.append({"role": "user", "content": message})
    output = pipe(st.session_state.messages, **generation_args)
    reply = output[0]["generated_text"]

    # Persist the assistant turn so the next rerun replays it.
    st.session_state.messages.append({"role": "assistant", "content": reply})
    return reply
47
 
48
# User input
user_text = st.chat_input("What is up?")
if user_text:
    # Echo the new user message immediately in its own chat bubble.
    with st.chat_message("user"):
        st.markdown(user_text)
    # Run the model and render its reply inside the assistant bubble.
    with st.chat_message("assistant"):
        st.markdown(generate_response(user_text, model, tokenizer))