rocioadlc committed
Commit 75467e7 · verified · 1 Parent(s): 25ed819

Create app.py

Files changed (1)
  1. app.py +216 -0
app.py ADDED
@@ -0,0 +1,216 @@
+ import gradio as gr
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+ import torch
+ import theme
+
+ theme = theme.Theme()
+
+ import os
+ import sys
+ sys.path.append('../..')
+
+ # LangChain
+ from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.prompts import (
+     PromptTemplate,
+     ChatPromptTemplate,
+     SystemMessagePromptTemplate,
+     HumanMessagePromptTemplate,
+     MessagesPlaceholder,
+ )
+ from langchain.chains import RetrievalQA, LLMChain, ConversationalRetrievalChain
+ from langchain.schema import StrOutputParser
+ from langchain.schema.runnable import Runnable
+ from langchain.schema.runnable.config import RunnableConfig
+ from langchain.vectorstores import Chroma
+ from langchain.memory import ConversationBufferMemory
+ from langchain.document_loaders import PyPDFDirectoryLoader
+ from langchain.output_parsers import PydanticOutputParser
+ from langchain_community.llms import HuggingFaceHub
+ from langchain_community.document_loaders import WebBaseLoader
+
+ from pydantic import BaseModel, Field
+ import shutil
+
+ custom_title = "<span style='color: #92b96a;'>Green Greta</span>"
+
+
+ from huggingface_hub import from_pretrained_keras
+
+ import tensorflow as tf
+ from tensorflow import keras
+ from PIL import Image
+
+ # Image classification model
+ model1 = from_pretrained_keras("rocioadlc/EfficientNetV2L")
+
+ # Define class labels
+ class_labels = [
+     'battery',
+     'biological',
+     'brown-glass',
+     'cardboard',
+     'clothes',
+     'green-glass',
+     'metal',
+     'paper',
+     'plastic',
+     'shoes',
+     'trash',
+     'white-glass',
+ ]
+
+ # Function to predict image label and score
+ def predict_image(img):
+     # Resize to the model's expected input size (224x224 assumed here)
+     image = img.resize((224, 224))
+     # Convert the image to a NumPy array
+     image_array = tf.keras.preprocessing.image.img_to_array(image)
+     # Normalize the image
+     image_array /= 255.0
+     # Expand the dimensions to create a batch of one
+     image_array = tf.expand_dims(image_array, 0)
+     # Predict using the model
+     predictions = model1.predict(image_array)
+
+     # Get the predicted class label
+     predicted_class_index = tf.argmax(predictions, axis=1).numpy()[0]
+     predicted_class_label = class_labels[predicted_class_index]
+
+     # Get the confidence score of the predicted class
+     confidence_score = predictions[0][predicted_class_index]
+
+     # Return predicted class label and confidence score
+     return {predicted_class_label: float(confidence_score)}
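+ # gr.Label accepts a {label: confidence} mapping, so the single top prediction is
+ # returned in that form for display.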
+
+
+ image_gradio_app = gr.Interface(
+     fn=predict_image,
+     inputs=gr.Image(label="Image", sources=['upload', 'webcam'], type="pil"),
+     outputs=[gr.Label(label="Result")],
+     title=custom_title,
+     theme=theme
+ )
+
+ loader = WebBaseLoader([
+     "https://www.epa.gov/recycle/frequent-questions-recycling",
+     "https://www.whitehorsedc.gov.uk/vale-of-white-horse-district-council/recycling-rubbish-and-waste/lets-get-real-about-recycling/",
+     "https://www.teimas.com/blog/13-preguntas-y-respuestas-sobre-la-ley-de-residuos-07-2022",
+     "https://www.molok.com/es/blog/gestion-de-residuos-solidos-urbanos-rsu-10-dudas-comunes"
+ ])
+ data = loader.load()
+
+ # Split documents
+ text_splitter = RecursiveCharacterTextSplitter(
+     chunk_size=1024,
+     chunk_overlap=150,
+     length_function=len
+ )
+ docs = text_splitter.split_documents(data)
+
+ # Define embedding
+ embeddings = HuggingFaceEmbeddings(model_name='thenlper/gte-small')
+
+ # Create vector database from data
+ persist_directory = 'docs/chroma/'
+
+ # Remove old database files if any
+ shutil.rmtree(persist_directory, ignore_errors=True)
+ vectordb = Chroma.from_documents(
+     documents=docs,
+     embedding=embeddings,
+     persist_directory=persist_directory
+ )
+
+ # Define retriever
+ retriever = vectordb.as_retriever(search_kwargs={"k": 2}, search_type="mmr")
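+ # The retriever above uses MMR (maximal marginal relevance) to re-rank matches for
+ # diversity, and k=2 keeps the context passed into the prompt short.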
+
+ class FinalAnswer(BaseModel):
+     question: str = Field(description="the original question")
+     answer: str = Field(description="the extracted answer")
+
+ # Parser for the FinalAnswer schema
+ parser = PydanticOutputParser(pydantic_object=FinalAnswer)
+
+ template = """
+ Your name is Greta and you are a recycling chatbot whose objective is to answer user questions in English or Spanish /
+ Use the following pieces of context to answer the question /
+ If the question is in English, answer in English /
+ If the question is in Spanish, answer in Spanish /
+ Do not mention the word context when you answer a question /
+ Answer the question fully and provide as much relevant detail as possible. Do not cut your response short /
+ Context: {context}
+ User: {question}
+ {format_instructions}
+ """
+
+ # Create the chat prompt templates
+ sys_prompt = SystemMessagePromptTemplate.from_template(template)
+ qa_prompt = ChatPromptTemplate(
+     messages=[
+         sys_prompt,
+         HumanMessagePromptTemplate.from_template("{question}")
+     ],
+     partial_variables={"format_instructions": parser.get_format_instructions()}
+ )
+
+ llm = HuggingFaceHub(
+     repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
+     task="text-generation",
+     model_kwargs={
+         "max_new_tokens": 2000,
+         "top_k": 30,
+         "temperature": 0.1,
+         "repetition_penalty": 1.03
+     },
+ )
+
+ qa_chain = ConversationalRetrievalChain.from_llm(
+     llm=llm,
+     memory=ConversationBufferMemory(llm=llm, memory_key="chat_history", input_key='question', output_key='output'),
+     retriever=retriever,
+     verbose=True,
+     combine_docs_chain_kwargs={'prompt': qa_prompt},
+     get_chat_history=lambda h: h,
+     rephrase_question=False,
+     output_key='output',
+ )
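+ # The chain returns a dict whose 'output' field holds the raw model text, which is
+ # expected to contain the FinalAnswer JSON requested via format_instructions.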
+
+ def chat_interface(question, history):
+     result = qa_chain.invoke({'question': question})
+     output_string = result['output']
+
+     # Find the index of the last occurrence of '"answer":' in the string
+     answer_index = output_string.rfind('"answer":')
+
+     # Extract the substring starting right after '"answer":'
+     answer_part = output_string[answer_index + len('"answer":'):].strip()
+
+     # Find the next double quote, which opens the answer value
+     quote_index = answer_part.find('"')
+
+     # Extract the answer value between the double quotes
+     answer_value = answer_part[quote_index + 1:answer_part.find('"', quote_index + 1)]
+
+     return answer_value
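+ # The slicing above assumes the model echoes the requested FinalAnswer JSON; when it
+ # does, parser.parse(result['output']).answer would be an equivalent, stricter way to
+ # pull out the answer field.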
+
+
+ chatbot_gradio_app = gr.ChatInterface(
+     fn=chat_interface,
+     title=custom_title
+ )
+
+ banner_tab_content = """
+ <div style="background-color: #d3e3c3; text-align: center; padding: 20px; display: flex; flex-direction: column; align-items: center;">
+     <img src="https://huggingface.co/spaces/rocioadlc/test_4/resolve/main/front_4.jpg" alt="Banner Image" style="width: 50%; max-width: 500px; margin: 0 auto;">
+     <h1 style="font-size: 24px; color: #92b96a; margin-top: 20px;">¡Bienvenido a nuestro clasificador de imágenes y chatbot para un reciclaje más inteligente!♻️</h1>
+     <p style="font-size: 16px; color: #92b96a; text-align: justify;">¿Alguna vez te has preguntado si puedes reciclar un objeto en particular? ¿O te has sentido abrumado por la cantidad de residuos que generas y no sabes cómo manejarlos de manera más sostenible? ¡Estás en el lugar correcto!</p>
+     <p style="font-size: 16px; color: #92b96a; text-align: justify;">Nuestra plataforma combina la potencia de la inteligencia artificial con la comodidad de un chatbot para brindarte respuestas rápidas y precisas sobre qué objetos son reciclables y cómo hacerlo de la manera más eficiente.</p>
+     <p style="font-size: 16px; text-align: center;"><strong><span style="color: #92b96a;">¿Cómo usarlo?</span></strong></p>
+     <ul style="list-style-type: disc; text-align: justify; margin-top: 20px; padding-left: 20px;">
+         <li style="font-size: 16px; color: #92b96a;"><strong><span style="color: #92b96a;">Green Greta Image Classification:</span></strong> Ve a la pestaña Green Greta Image Classification y simplemente carga una foto del objeto que quieras reciclar; nuestro modelo identificará de qué se trata🕵️‍♂️ para que puedas desecharlo adecuadamente.</li>
+         <li style="font-size: 16px; color: #92b96a;"><strong><span style="color: #92b96a;">Green Greta Chat:</span></strong> ¿Tienes preguntas sobre reciclaje, materiales específicos o prácticas sostenibles? ¡Pregunta a nuestro chatbot en la pestaña Green Greta Chat!📝 Está aquí para responder todas tus preguntas y ayudarte a tomar decisiones más informadas sobre tu reciclaje.</li>
+     </ul>
+ </div>
+ """
+ banner_tab = gr.Markdown(banner_tab_content)
+
+ # Combine the three interfaces into a single tabbed app
+ app = gr.TabbedInterface(
+     [banner_tab, image_gradio_app, chatbot_gradio_app],
+     tab_names=["Welcome to Green Greta", "Green Greta Image Classification", "Green Greta Chat"],
+     theme=theme
+ )
+
+ app.queue()
+ app.launch()