Wedyan2023 committed
Commit 8e6b434 · verified · 1 Parent(s): a618573

Create app10.py

Files changed (1)
  1. app10.py +186 -0
app10.py ADDED
@@ -0,0 +1,186 @@
+ ## Update of app7.py
+
+ import os
+ import streamlit as st
+ from openai import OpenAI
+ from dotenv import load_dotenv
+ from langchain_core.prompts import PromptTemplate
+
+ # Load environment variables
+ load_dotenv()
+ ##openai_api_key = os.getenv("OPENAI_API_KEY")
+
+ # Initialize the client
+ client = OpenAI(
+     base_url="https://api-inference.huggingface.co/v1",
+     api_key=os.environ.get('TOKEN2')  # Hugging Face token, read from the TOKEN2 environment variable
+ )
+
+
+ # Initialize the OpenAI client
+ ##client = OpenAI(
+ ##    base_url="https://api-inference.huggingface.co/v1",
+ ##    api_key=openai_api_key
+ ##)
+
+ # Define reset function for the conversation
+ def reset_conversation():
+     st.session_state.conversation = []
+     st.session_state.messages = []
+
+ # Streamlit interface setup
+ st.title("🤖 Text Data Generation & Labeling App")
+ st.sidebar.title("Settings")
+
+ # Sidebar settings
+ selected_model = st.sidebar.selectbox("Select Model", ["meta-llama/Meta-Llama-3-8B-Instruct"])
+ temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.5)
+ st.sidebar.button("Reset Conversation", on_click=reset_conversation)
+ st.sidebar.write(f"You're now chatting with **{selected_model}**")
+ st.sidebar.markdown("*Note: Generated content may be inaccurate or false.*")
+
+ # Initialize conversation state
+ if "messages" not in st.session_state:
+     st.session_state.messages = []
+
+ # Display conversation
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])
+
+ # Main logic: choose between Data Generation and Data Labeling
+ task_choice = st.selectbox("Choose Task", ["Data Generation", "Data Labeling"])
+
+ if task_choice == "Data Generation":
+     classification_type = st.selectbox(
+         "Choose Classification Type",
+         ["Sentiment Analysis", "Binary Classification", "Multi-Class Classification"]
+     )
+
+     if classification_type == "Sentiment Analysis":
+         labels = ["Positive", "Negative", "Neutral"]
+     elif classification_type == "Binary Classification":
+         label_1 = st.text_input("Enter first class")
+         label_2 = st.text_input("Enter second class")
+         labels = [label_1, label_2]
+     else:  # Multi-Class Classification
+         num_classes = st.slider("How many classes?", 3, 10, 3)
+         labels = [st.text_input(f"Class {i+1}") for i in range(num_classes)]
+
+     domain = st.selectbox("Choose Domain", ["Restaurant reviews", "E-commerce reviews", "Custom"])
+     if domain == "Custom":
+         domain = st.text_input("Specify custom domain")
+
+     min_words = st.number_input("Minimum words per example", min_value=10, max_value=90, value=10)
+     max_words = st.number_input("Maximum words per example", min_value=10, max_value=90, value=90)
+
+     use_few_shot = st.radio("Use few-shot examples?", ["Yes", "No"])
+     few_shot_examples = []
+     if use_few_shot == "Yes":
+         num_examples = st.slider("Number of few-shot examples", 1, 5, 1)
+         for i in range(num_examples):
+             content = st.text_area(f"Example {i+1} Content")
+             label = st.selectbox(f"Example {i+1} Label", labels)
+             few_shot_examples.append({"content": content, "label": label})
+
+     num_to_generate = st.number_input("Number of examples to generate", 1, 100, 10)
+     user_prompt = st.text_area("Enter additional instructions", "")
+
+     # Construct the LangChain prompt
+     prompt_template = PromptTemplate(
+         input_variables=["classification_type", "domain", "num_examples", "min_words", "max_words", "labels", "user_prompt"],
+         template=(
+             "You are a professional {classification_type} expert tasked with generating examples for {domain}.\n"
+             "Use the following parameters:\n"
+             "- Number of examples: {num_examples}\n"
+             "- Word range: {min_words}-{max_words}\n"
+             "- Labels: {labels}\n"
+             "{user_prompt}"
+         )
+     )
+     system_prompt = prompt_template.format(
+         classification_type=classification_type,
+         domain=domain,
+         num_examples=num_to_generate,
+         min_words=min_words,
+         max_words=max_words,
+         labels=", ".join(labels),
+         user_prompt=user_prompt
+     )
+
+     st.write("System Prompt:")
+     st.code(system_prompt)
+
+     if st.button("Generate Examples"):
+         with st.spinner("Generating..."):
+             st.session_state.messages.append({"role": "system", "content": system_prompt})
+             try:
+                 stream = client.chat.completions.create(
+                     model=selected_model,
+                     messages=[{"role": "system", "content": system_prompt}],
+                     temperature=temperature,
+                     stream=True,
+                     max_tokens=3000,
+                 )
+                 response = st.write_stream(stream)
+                 st.session_state.messages.append({"role": "assistant", "content": response})
+             except Exception as e:
+                 st.error("An error occurred during generation.")
+                 st.error(f"Details: {e}")
+
+ elif task_choice == "Data Labeling":
+     # Labeling logic
+     labeling_type = st.selectbox(
+         "Classification Type for Labeling",
+         ["Sentiment Analysis", "Binary Classification", "Multi-Class Classification"]
+     )
+
+     if labeling_type == "Sentiment Analysis":
+         labels = ["Positive", "Negative", "Neutral"]
+     elif labeling_type == "Binary Classification":
+         label_1 = st.text_input("First label for classification")
+         label_2 = st.text_input("Second label for classification")
+         labels = [label_1, label_2]
+     else:  # Multi-Class Classification
+         num_classes = st.slider("Number of labels", 3, 10, 3)
+         labels = [st.text_input(f"Label {i+1}") for i in range(num_classes)]
+
+     use_few_shot_labeling = st.radio("Add few-shot examples for labeling?", ["Yes", "No"])
+     few_shot_labeling_examples = []
+     if use_few_shot_labeling == "Yes":
+         num_labeling_examples = st.slider("Number of few-shot labeling examples", 1, 5, 1)
+         for i in range(num_labeling_examples):
+             content = st.text_area(f"Labeling Example {i+1} Content")
+             label = st.selectbox(f"Label for Example {i+1}", labels)
+             few_shot_labeling_examples.append({"content": content, "label": label})
+
+     text_to_classify = st.text_area("Enter text to classify")
+
+     if st.button("Classify Text"):
+         if text_to_classify:
+             labeling_prompt = (
+                 f"You are an expert in {labeling_type.lower()} classification. Classify this text using: {', '.join(labels)}.\n\n"
+             )
+             if few_shot_labeling_examples:
+                 labeling_prompt += "Example classifications:\n"
+                 for ex in few_shot_labeling_examples:
+                     labeling_prompt += f"Text: {ex['content']} - Label: {ex['label']}\n"
+             labeling_prompt += f"\nClassify this: {text_to_classify}"
+
+             with st.spinner("Classifying..."):
+                 st.session_state.messages.append({"role": "system", "content": labeling_prompt})
+                 try:
+                     stream = client.chat.completions.create(
+                         model=selected_model,
+                         messages=[{"role": "system", "content": labeling_prompt}],
+                         temperature=temperature,
+                         stream=True,
+                         max_tokens=3000,
+                     )
+                     labeling_response = st.write_stream(stream)
+                     st.write("Label:", labeling_response)
+                 except Exception as e:
+                     st.error("An error occurred during classification.")
+                     st.error(f"Details: {e}")
+         else:
+             st.warning("Please enter text to classify.")
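
For a quick sanity check outside Streamlit, a small standalone script can confirm that the Hugging Face Inference endpoint and the TOKEN2 credential respond before the app is launched. The sketch below is not part of the commit; it assumes the openai and python-dotenv packages are installed and that TOKEN2 is set in the environment or a local .env file, mirroring the client configuration in app10.py. The filename sanity_check.py is only illustrative.

# sanity_check.py — minimal sketch (hypothetical filename, not part of this commit).
# Confirms that the endpoint used by app10.py answers with the TOKEN2 credential.
import os

from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()  # pick up TOKEN2 from a local .env file, if present

client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get("TOKEN2"),
)

completion = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3-8B-Instruct",
    messages=[{"role": "user", "content": "Reply with the single word: ready"}],
    max_tokens=10,
)
print(completion.choices[0].message.content)

If this prints a short reply, the same credentials should work for the app itself, which would typically be started with streamlit run app10.py.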