Wedyan2023 committed
Commit 8654ff1 · verified · 1 Parent(s): 3d079ce

Create app7.py

Files changed (1):
  1. app7.py +184 -0
app7.py ADDED
@@ -0,0 +1,184 @@
+ import os
+ import streamlit as st
+ from openai import OpenAI
+ from dotenv import load_dotenv
+ from langchain_core.prompts import PromptTemplate
+
+ # Load environment variables
+ load_dotenv()
+ ##openai_api_key = os.getenv("OPENAI_API_KEY")
+
+ # Initialize the client
+ client = OpenAI(
+     base_url="https://api-inference.huggingface.co/v1",
+     api_key=os.environ.get('GP_WED')  # Add your Huggingface token here
+ )
+
+
+ # Initialize the OpenAI client
+ ##client = OpenAI(
+ ##base_url="https://api-inference.huggingface.co/v1",
+ ##api_key=openai_api_key
+ ##)
+
+ # Define reset function for the conversation
+ def reset_conversation():
+     st.session_state.conversation = []
+     st.session_state.messages = []
+
+ # Streamlit interface setup
+ st.title("LangChain-Based Data Interaction App")
+ st.sidebar.title("Settings")
+
+ # Sidebar settings
+ selected_model = st.sidebar.selectbox("Select Model", ["meta-llama/Meta-Llama-3-8B-Instruct"])
+ temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.5)
+ st.sidebar.button("Reset Conversation", on_click=reset_conversation)
+ st.sidebar.write(f"You're now chatting with **{selected_model}**")
+ st.sidebar.markdown("*Note: Generated content may be inaccurate or false.*")
+
+ # Initialize conversation state
+ if "messages" not in st.session_state:
+     st.session_state.messages = []
+
+ # Display conversation
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])
+
+ # Main logic: choose between Data Generation and Data Labeling
+ task_choice = st.selectbox("Choose Task", ["Data Generation", "Data Labeling"])
+
+ if task_choice == "Data Generation":
+     classification_type = st.selectbox(
+         "Choose Classification Type",
+         ["Sentiment Analysis", "Binary Classification", "Multi-Class Classification"]
+     )
+
+     if classification_type == "Sentiment Analysis":
+         labels = ["Positive", "Negative", "Neutral"]
+     elif classification_type == "Binary Classification":
+         label_1 = st.text_input("Enter first class")
+         label_2 = st.text_input("Enter second class")
+         labels = [label_1, label_2]
+     else:  # Multi-Class Classification
+         num_classes = st.slider("How many classes?", 3, 10, 3)
+         labels = [st.text_input(f"Class {i+1}") for i in range(num_classes)]
+
+     domain = st.selectbox("Choose Domain", ["Restaurant reviews", "E-commerce reviews", "Custom"])
+     if domain == "Custom":
+         domain = st.text_input("Specify custom domain")
+
+     min_words = st.number_input("Minimum words per example", min_value=10, max_value=90, value=10)
+     max_words = st.number_input("Maximum words per example", min_value=10, max_value=90, value=90)
+
+     use_few_shot = st.radio("Use few-shot examples?", ["Yes", "No"])
+     few_shot_examples = []
+     if use_few_shot == "Yes":
+         num_examples = st.slider("Number of few-shot examples", 1, 5, 1)
+         for i in range(num_examples):
+             content = st.text_area(f"Example {i+1} Content")
+             label = st.selectbox(f"Example {i+1} Label", labels)
+             few_shot_examples.append({"content": content, "label": label})
+
+     num_to_generate = st.number_input("Number of examples to generate", 1, 100, 10)
+     user_prompt = st.text_area("Enter additional instructions", "")
+
+     # Construct the LangChain prompt
+     prompt_template = PromptTemplate(
+         input_variables=["classification_type", "domain", "num_examples", "min_words", "max_words", "labels", "user_prompt"],
+         template=(
+             "You are a professional {classification_type} expert tasked with generating examples for {domain}.\n"
+             "Use the following parameters:\n"
+             "- Number of examples: {num_examples}\n"
+             "- Word range: {min_words}-{max_words}\n"
+             "- Labels: {labels}\n"
+             "{user_prompt}"
+         )
+     )
+     system_prompt = prompt_template.format(
+         classification_type=classification_type,
+         domain=domain,
+         num_examples=num_to_generate,
+         min_words=min_words,
+         max_words=max_words,
+         labels=", ".join(labels),
+         user_prompt=user_prompt
+     )
+
+     st.write("System Prompt:")
+     st.code(system_prompt)
+
+     if st.button("Generate Examples"):
+         with st.spinner("Generating..."):
+             st.session_state.messages.append({"role": "system", "content": system_prompt})
+             try:
+                 stream = client.chat.completions.create(
+                     model=selected_model,
+                     messages=[{"role": "system", "content": system_prompt}],
+                     temperature=temperature,
+                     stream=True,
+                     max_tokens=3000,
+                 )
+                 response = st.write_stream(stream)
+                 st.session_state.messages.append({"role": "assistant", "content": response})
+             except Exception as e:
+                 st.error("An error occurred during generation.")
+                 st.error(f"Details: {e}")
+
+ elif task_choice == "Data Labeling":
+     # Labeling logic
+     labeling_type = st.selectbox(
+         "Classification Type for Labeling",
+         ["Sentiment Analysis", "Binary Classification", "Multi-Class Classification"]
+     )
+
+     if labeling_type == "Sentiment Analysis":
+         labels = ["Positive", "Negative", "Neutral"]
+     elif labeling_type == "Binary Classification":
+         label_1 = st.text_input("First label for classification")
+         label_2 = st.text_input("Second label for classification")
+         labels = [label_1, label_2]
+     else:  # Multi-Class Classification
+         num_classes = st.slider("Number of labels", 3, 10, 3)
+         labels = [st.text_input(f"Label {i+1}") for i in range(num_classes)]
+
+     use_few_shot_labeling = st.radio("Add few-shot examples for labeling?", ["Yes", "No"])
+     few_shot_labeling_examples = []
+     if use_few_shot_labeling == "Yes":
+         num_labeling_examples = st.slider("Number of few-shot labeling examples", 1, 5, 1)
+         for i in range(num_labeling_examples):
+             content = st.text_area(f"Labeling Example {i+1} Content")
+             label = st.selectbox(f"Label for Example {i+1}", labels)
+             few_shot_labeling_examples.append({"content": content, "label": label})
+
+     text_to_classify = st.text_area("Enter text to classify")
+
+     if st.button("Classify Text"):
+         if text_to_classify:
+             labeling_prompt = (
+                 f"You are an expert in {labeling_type.lower()} classification. Classify this text using: {', '.join(labels)}.\n\n"
+             )
+             if few_shot_labeling_examples:
+                 labeling_prompt += "Example classifications:\n"
+                 for ex in few_shot_labeling_examples:
+                     labeling_prompt += f"Text: {ex['content']} - Label: {ex['label']}\n"
+             labeling_prompt += f"\nClassify this: {text_to_classify}"
+
+             with st.spinner("Classifying..."):
+                 st.session_state.messages.append({"role": "system", "content": labeling_prompt})
+                 try:
+                     stream = client.chat.completions.create(
+                         model=selected_model,
+                         messages=[{"role": "system", "content": labeling_prompt}],
+                         temperature=temperature,
+                         stream=True,
+                         max_tokens=3000,
+                     )
+                     labeling_response = st.write_stream(stream)
+                     st.write("Label:", labeling_response)
+                 except Exception as e:
+                     st.error("An error occurred during classification.")
+                     st.error(f"Details: {e}")
+         else:
+             st.warning("Please enter text to classify.")
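
Not part of the commit: a minimal sketch of how the same Hugging Face OpenAI-compatible endpoint that app7.py targets could be exercised outside Streamlit, e.g. to sanity-check the token before running the app. It assumes the access token is exported in the GP_WED environment variable, since app7.py reads it via os.environ.get('GP_WED'); the prompt text is only an illustrative placeholder.

# Illustrative sketch (assumption: GP_WED holds a valid Hugging Face token).
import os
from openai import OpenAI

client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get("GP_WED"),
)

# Stream a short completion from the same model app7.py offers in its sidebar.
stream = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3-8B-Instruct",
    messages=[{"role": "system", "content": "Generate 3 restaurant reviews labeled Positive, Negative, or Neutral."}],
    temperature=0.5,
    max_tokens=300,
    stream=True,
)

# Print streamed chunks as they arrive.
for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)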