DJOMGA TOUKO Peter Charles committed on
Commit be22e6d · 1 Parent(s): 7602e2f

Simple conversation implementation

Files changed (4)
  1. .streamlit/config.toml +4 -0
  2. README.md +2 -0
  3. app.py +81 -2
  4. requirements.txt +3 -0
.streamlit/config.toml ADDED
@@ -0,0 +1,4 @@
+ [server]
+ runOnSave = true
+ headless = true
+ maxUploadSize = 2000
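These [server] settings make Streamlit rerun the app whenever a source file is saved (runOnSave), start the server without trying to open a browser window (headless), and raise the upload limit for st.file_uploader to 2000 MB (maxUploadSize).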
README.md CHANGED
@@ -10,3 +10,5 @@ pinned: false
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+ To use this project, you will need to get an API key from OpenAI.
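For reference, a minimal sketch of how such a key is used with the 1.x openai client, mirroring the chat.completions.create call that app.py below makes; the placeholder key, model name, and test prompt here are illustrative only.

```python
# Minimal sketch (not part of this commit): using an OpenAI API key with the 1.x client,
# the same call pattern that app.py below relies on.
from openai import OpenAI

client = OpenAI(api_key="sk-...")  # paste the key from your OpenAI account
completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
)
print(completion.choices[0].message.content)
```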
app.py CHANGED
@@ -1,4 +1,83 @@
  import streamlit as st
-
- x = st.slider('Select a value')
- st.write(x, 'squared is', x * x)
+ from openai import OpenAI
+
+
+ model = "gpt-3.5-turbo"
+
+
+ st.sidebar.title('OpenAI Simple Conversation')
+ st.sidebar.write('Please provide your OpenAI key to be used for this conversation.')
+
+
+ def onchange_openai_key():
+     print(st.session_state.openai_key)
+
+
+ openai_key = st.sidebar.text_input('OpenAI key', on_change=onchange_openai_key, key='openai_key')
+
+ st.title('OpenAI Simple Conversation')
+ st.write(f'Ask any question that can be answered by the LLM {model}.')
+
+
+ def submit_openai_key(model=model):
+     # Verify the key by sending a fixed test prompt and showing the answer in the sidebar.
+     if not openai_key:
+         st.sidebar.write('Please provide the key first')
+         return
+     client = OpenAI(api_key=openai_key)
+     completion = client.chat.completions.create(
+         model=model,
+         messages=[
+             {"role": "system", "content": "You are an assistant giving simple and short answers to a child's questions"},
+             {"role": "user", "content": "count from 0 to 10"}
+         ]
+     )
+     st.sidebar.write(f'Simple count: {completion.choices[0].message.content}')
+
+
+ def askQuestion(model=model, question=''):
+     # Send a single question to the model and return the answer text.
+     if not openai_key:
+         print('Please provide the key first')
+         return 'LLM API is not defined. Please provide the key first'
+     client = OpenAI(api_key=openai_key)
+     completion = client.chat.completions.create(
+         model=model,
+         messages=[
+             {"role": "system", "content": "You are an assistant giving simple and short answers to a child's questions"},
+             {"role": "user", "content": question}
+         ]
+     )
+     return completion.choices[0].message.content
+
+
+ # Initialize chat history
+ if "messages" not in st.session_state:
+     st.session_state.messages = []
+
+ # Display chat messages from history on app rerun
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])
+
+ # React to user input
+ if prompt := st.chat_input("What is up?"):
+     with st.status('Running', expanded=True) as status:
+         # Display user message in chat message container
+         st.chat_message("user").markdown(prompt)
+         # Add user message to chat history
+         st.session_state.messages.append({"role": "user", "content": prompt})
+
+         response = askQuestion(question=prompt)
+         # Display assistant response in chat message container
+         with st.chat_message("assistant"):
+             st.markdown(response)
+
+         # Add assistant response to chat history
+         st.session_state.messages.append({"role": "assistant", "content": response})
+         status.update(label='Response to last question', state="complete", expanded=True)
+
+
+ submit_key = st.sidebar.button(label='Submit', on_click=submit_openai_key)
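Worth noting about this first implementation: askQuestion sends only the latest prompt to the model, so the history kept in st.session_state.messages is redisplayed on each rerun but does not influence the next answer. A sketch of how that stored history could be passed along as context follows; the helper name askQuestionWithHistory is hypothetical and not part of this commit.

```python
# Hypothetical helper (not part of this commit): reuse the chat history stored in
# st.session_state as conversation context for the next completion. Assumes the
# same module-level openai_key and model as app.py.
def askQuestionWithHistory(question, model=model):
    if not openai_key:
        return 'LLM API is not defined. Please provide the key first'
    client = OpenAI(api_key=openai_key)
    completion = client.chat.completions.create(
        model=model,
        messages=(
            [{"role": "system", "content": "You are an assistant giving simple and short answers to a child's questions"}]
            + st.session_state.messages  # prior user/assistant turns
            + [{"role": "user", "content": question}]
        ),
    )
    return completion.choices[0].message.content
```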
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ streamlit
+ openai
+ watchdog
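watchdog is not imported anywhere in app.py; presumably it is listed so that Streamlit's file watcher, which the runOnSave option in .streamlit/config.toml relies on, can use native file-system events instead of polling. Locally, the app can be started with pip install -r requirements.txt followed by streamlit run app.py.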