Wedyan2023 committed
Commit dc808bb · verified · 1 Parent(s): cd136f2

Update app.py

Files changed (1)
  1. app.py +42 -36
app.py CHANGED
@@ -7,7 +7,6 @@ import numpy as np
 import streamlit as st
 from openai import OpenAI
 import os
-import sys
 from dotenv import load_dotenv
 
 load_dotenv()
@@ -23,25 +22,14 @@ model_links = {
     "Meta-Llama-3-8B": "meta-llama/Meta-Llama-3-8B-Instruct"
 }
 
-# Random dog images for error messages
-random_dog = [
-    "0f476473-2d8b-415e-b944-483768418a95.jpg",
-    "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
-    "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
-    "1326984c-39b0-492c-a773-f120d747a7e2.jpg"
-]
-
 # Reset conversation
 def reset_conversation():
     st.session_state.conversation = []
     st.session_state.messages = []
     return None
 
-# Define the available models
-models = [key for key in model_links.keys()]
-
 # Sidebar for model selection
-selected_model = st.sidebar.selectbox("Select Model", models)
+selected_model = st.sidebar.selectbox("Select Model", list(model_links.keys()))
 
 # Temperature slider
 temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
@@ -115,29 +103,48 @@ if task_choice == "Data Generation":
     st.code(system_prompt)
 
     if st.button("Generate Examples"):
-        # Generate examples by concatenating all inputs and sending it to the model
+        # Break generation into smaller chunks if needed to ensure enough examples
+        all_generated_examples = []
+        remaining_examples = num_to_generate
+
         with st.spinner("Generating..."):
-            st.session_state.messages.append({"role": "system", "content": system_prompt})
-
-            try:
-                stream = client.chat.completions.create(
-                    model=model_links[selected_model],
-                    messages=[
-                        {"role": m["role"], "content": m["content"]}
-                        for m in st.session_state.messages
-                    ],
-                    temperature=temp_values,
-                    stream=True,
-                    max_tokens=3000,
-                )
-                response = st.write_stream(stream)
-            except Exception as e:
-                response = "Error during generation."
-                random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
-                st.image(random_dog_pick)
-                st.write(e)
-
-            st.session_state.messages.append({"role": "assistant", "content": response})
+            while remaining_examples > 0:
+                # Generating examples in chunks to avoid token limits
+                chunk_size = min(remaining_examples, 5)
+                try:
+                    # Append the new chunk system prompt
+                    st.session_state.messages.append({"role": "system", "content": system_prompt})
+
+                    stream = client.chat.completions.create(
+                        model=model_links[selected_model],
+                        messages=[
+                            {"role": m["role"], "content": m["content"]}
+                            for m in st.session_state.messages
+                        ],
+                        temperature=temp_values,
+                        stream=True,
+                        max_tokens=3000,
+                    )
+
+                    # Parse the response and break it into individual examples
+                    response = st.write_stream(stream)
+                    generated_examples = response.split("\n")[:chunk_size]  # Assuming each example is on a new line
+
+                    # Store the new examples
+                    all_generated_examples.extend(generated_examples)
+                    remaining_examples -= chunk_size
+
+                except Exception as e:
+                    st.error("Error during generation.")
+                    st.write(e)
+                    break
+
+            # Display all generated examples
+            for idx, example in enumerate(all_generated_examples):
+                st.write(f"Example {idx+1}: {example}")
+
+            # Update session state to prevent repetition of old prompts
+            st.session_state.messages = []  # Clear messages after each generation
 
 else:
     # Data labeling workflow (for future implementation based on classification)
@@ -145,4 +152,3 @@ else:
 
 
 
-
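A note on the new loop: it decrements remaining_examples by chunk_size whether or not the model actually produced chunk_size usable lines, so a short reply still counts as a full chunk. Below is a minimal standalone sketch of the same chunked-generation pattern with that bookkeeping tightened. It assumes an OpenAI-compatible client; generate_in_chunks, client, and model_id are illustrative names, not parts of app.py.

# Sketch only: same chunking idea as the commit, but the counter advances by
# the number of examples actually parsed, not by the requested chunk size.
from openai import OpenAI

def generate_in_chunks(client: OpenAI, model_id: str, system_prompt: str,
                       total: int, chunk_size: int = 5,
                       temperature: float = 0.5) -> list[str]:
    """Collect `total` examples by requesting at most `chunk_size` per call."""
    examples: list[str] = []
    while len(examples) < total:
        want = min(total - len(examples), chunk_size)
        completion = client.chat.completions.create(
            model=model_id,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": f"Generate {want} examples, one per line."},
            ],
            temperature=temperature,
            max_tokens=3000,
        )
        text = completion.choices[0].message.content or ""
        # Same assumption as the diff: one example per line.
        lines = [ln.strip() for ln in text.split("\n") if ln.strip()]
        if not lines:
            break  # an empty reply would otherwise loop forever
        examples.extend(lines[:want])
    return examples

# Hypothetical usage; the endpoint and key are placeholders:
# client = OpenAI(base_url="https://api-inference.huggingface.co/v1", api_key="hf_...")
# rows = generate_in_chunks(client, "meta-llama/Meta-Llama-3-8B-Instruct",
#                           system_prompt, num_to_generate)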
 
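Two details of the new version worth recording: st.write_stream both renders the stream and returns the full concatenated text, which is why the code can call response.split("\n") on its result; and clearing st.session_state.messages after each run keeps the per-chunk system prompts from accumulating across Streamlit reruns, at the cost of discarding conversation history between generations.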