Wedyan2023 committed
Commit b7f505b · verified · 1 Parent(s): 7d415c5

Update app.py

Files changed (1):
app.py +106 -71
app.py CHANGED
@@ -17,80 +17,115 @@ client = OpenAI(
     api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN') # Replace with your token
 )
 
-# Define Llama 3 model
-model_link = "meta-llama/Meta-Llama-3-8B-Instruct"
-model_info = {
-    'description': """The Llama (3) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n
-    It was created by the [**Meta's AI**](https://llama.meta.com/) team and has over **8 billion parameters.** \n""",
-    'logo': 'Llama_logo.png'
-}
-
-# Random dog images for error message
-random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
-              "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
-              "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
-              "1326984c-39b0-492c-a773-f120d747a7e2.jpg"]
-
+# Function to reset conversation
 def reset_conversation():
-    '''Resets Conversation'''
     st.session_state.conversation = []
     st.session_state.messages = []
     return None
 
-# Create a temperature slider
-temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
-
-# Add reset button to clear conversation
-st.sidebar.button('Reset Chat', on_click=reset_conversation) # Reset button
-
-# Create model description
-st.sidebar.write(f"You're now chatting with **Llama 3**")
-st.sidebar.markdown(model_info['description'])
-st.sidebar.image(model_info['logo'])
-st.sidebar.markdown("*Generated content may be inaccurate or false.*")
-st.sidebar.markdown("\nRun into issues? \nTry again later as GPU access might be limited.")
-
-# Initialize chat history
-if "messages" not in st.session_state:
-    st.session_state.messages = []
+# Define classification options
+classification_types = ["Sentiment Analysis", "Binary Classification", "Multi-Class Classification"]
+
+# Start with a selection between data generation or labeling
+st.sidebar.write("Choose Task:")
+task = st.sidebar.radio("Do you want to generate data or label data?", ("Data Generation", "Data Labeling"))
+
+# If the user selects Data Labeling
+if task == "Data Labeling":
+    st.sidebar.write("Choose Classification Type:")
+    classification_type = st.sidebar.radio("Select a classification type:", classification_types)
+
+    # Handle Sentiment Analysis
+    if classification_type == "Sentiment Analysis":
+        st.sidebar.write("Classes: Positive, Negative, Neutral (fixed)")
+        class_labels = ["Positive", "Negative", "Neutral"]
+
+    # Handle Binary Classification
+    elif classification_type == "Binary Classification":
+        class_1 = st.sidebar.text_input("Enter Class 1:")
+        class_2 = st.sidebar.text_input("Enter Class 2:")
+        class_labels = [class_1, class_2]
+
+    # Handle Multi-Class Classification
+    elif classification_type == "Multi-Class Classification":
+        class_labels = []
+        for i in range(1, 11): # Allow up to 10 classes
+            label = st.sidebar.text_input(f"Enter Class {i} (leave blank to stop):")
+            if label:
+                class_labels.append(label)
+            else:
+                break
+
+    # Domain selection
+    st.sidebar.write("Specify the Domain:")
+    domain = st.sidebar.radio("Choose a domain:", ("Restaurant Reviews", "E-commerce Reviews", "Custom"))
+    if domain == "Custom":
+        domain = st.sidebar.text_input("Enter Custom Domain:")
+
+    # Specify example length
+    st.sidebar.write("Specify the Length of Examples:")
+    min_words = st.sidebar.number_input("Minimum word count (10 to 90):", 10, 90, 10)
+    max_words = st.sidebar.number_input("Maximum word count (10 to 90):", min_words, 90, 50)
+
+    # Few-shot examples option
+    use_few_shot = st.sidebar.radio("Do you want to use few-shot examples?", ("Yes", "No"))
+    few_shot_examples = []
+    if use_few_shot == "Yes":
+        num_examples = st.sidebar.number_input("How many few-shot examples? (1 to 5)", 1, 5, 1)
+        for i in range(num_examples):
+            example_text = st.text_area(f"Enter example {i+1}:")
+            example_label = st.selectbox(f"Select the label for example {i+1}:", class_labels)
+            few_shot_examples.append({"text": example_text, "label": example_label})
+
+    # Generate the system prompt based on classification type
+    if classification_type == "Sentiment Analysis":
+        system_prompt = f"You are a propositional sentiment analysis expert. Your role is to generate sentiment analysis reviews based on the data entered and few-shot examples provided, if any, for the domain '{domain}'."
+    elif classification_type == "Binary Classification":
+        system_prompt = f"You are an expert in binary classification. Your task is to label examples for the domain '{domain}' with either '{class_1}' or '{class_2}', based on the data provided."
+    else: # Multi-Class Classification
+        system_prompt = f"You are an expert in multi-class classification. Your role is to label examples for the domain '{domain}' using the provided class labels."
+
+    st.sidebar.write("System Prompt:")
+    st.sidebar.write(system_prompt)
+
+    # Step-by-step thinking
+    st.sidebar.write("Generated Data:")
+    st.sidebar.write("Think step by step to ensure accuracy in classification.")
+
+    # Accept user input for generating or labeling data
+    if prompt := st.chat_input(f"Hi, I'm ready to help with {classification_type} for {domain}. Ask me a question or provide data to classify."):
+
+        # Display user message in chat message container
+        with st.chat_message("user"):
+            st.markdown(prompt)
+        # Add user message to chat history
+        st.session_state.messages.append({"role": "user", "content": prompt})
+
+        # Display assistant response in chat message container
+        with st.chat_message("assistant"):
+
+            try:
+                # Stream the response from the model
+                stream = client.chat.completions.create(
+                    model="meta-llama/Meta-Llama-3-8B-Instruct",
+                    messages=[
+                        {"role": m["role"], "content": m["content"]}
+                        for m in st.session_state.messages
+                    ],
+                    temperature=0.5,
+                    stream=True,
+                    max_tokens=3000,
+                )
+
+                response = st.write_stream(stream)
+
+            except Exception as e:
+                response = "😵‍💫 Something went wrong. Try again later."
+                st.write(response)
+
+        st.session_state.messages.append({"role": "assistant", "content": response})
+
+# If the user selects Data Generation
+else:
+    st.sidebar.write("This feature will allow you to generate new data. Coming soon!")
 
-# Display chat messages from history on app rerun
-for message in st.session_state.messages:
-    with st.chat_message(message["role"]):
-        st.markdown(message["content"])
-
-# Accept user input
-if prompt := st.chat_input(f"Hi, I'm Llama 3, ask me a question"):
-
-    # Display user message in chat message container
-    with st.chat_message("user"):
-        st.markdown(prompt)
-    # Add user message to chat history
-    st.session_state.messages.append({"role": "user", "content": prompt})
-
-    # Display assistant response in chat message container
-    with st.chat_message("assistant"):
-
-        try:
-            stream = client.chat.completions.create(
-                model=model_link,
-                messages=[
-                    {"role": m["role"], "content": m["content"]}
-                    for m in st.session_state.messages
-                ],
-                temperature=temp_values,
-                stream=True,
-                max_tokens=3000,
-            )
-
-            response = st.write_stream(stream)
-
-        except Exception as e:
-            response = "😵‍💫 Looks like something went wrong! Try again later.\nHere's a random pic of a 🐶:"
-            st.write(response)
-            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
-            st.image(random_dog_pick)
-            st.write("This was the error message:")
-            st.write(e)
-
-        st.session_state.messages.append({"role": "assistant", "content": response})
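For reference, below is a minimal, self-contained sketch of the streaming chat-completions pattern the updated app.py relies on, runnable outside Streamlit. The base_url, the sample system prompt, the few-shot example, and the way those pieces are assembled into messages are illustrative assumptions rather than part of this commit; only the client.chat.completions.create(..., stream=True) call and the model name mirror the code above.

import os

from openai import OpenAI

# Assumed OpenAI-compatible Hugging Face endpoint; the real base_url is set
# outside the hunk shown above and may differ.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
)

# Hypothetical prompt pieces standing in for what the sidebar collects.
system_prompt = "You are an expert in binary classification. Label each review as 'Positive' or 'Negative'."
few_shot_examples = [{"text": "Great food, slow service.", "label": "Negative"}]

# One illustrative way to fold the system prompt and few-shot pairs into the message list.
messages = [{"role": "system", "content": system_prompt}]
for ex in few_shot_examples:
    messages.append({"role": "user", "content": ex["text"]})
    messages.append({"role": "assistant", "content": ex["label"]})
messages.append({"role": "user", "content": "Label this review: 'Fast delivery and fair prices.'"})

stream = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3-8B-Instruct",
    messages=messages,
    temperature=0.5,
    max_tokens=3000,
    stream=True,
)

# Print tokens as they arrive; st.write_stream(stream) is the Streamlit equivalent.
for chunk in stream:
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)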