Sasiraj01 committed
Commit f83b708 · verified · 1 Parent(s): 5c19b81

Upload app-5.py

Files changed (1)
  1. app-5.py +73 -0
app-5.py ADDED
@@ -0,0 +1,73 @@
+ import base64
+ import io
+ import os
+
+ import gradio as gr
+ from groq import Groq
+ from llama_index.core import Settings, StorageContext, load_index_from_storage
+ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+ from llama_index.vector_stores.faiss import FaissVectorStore
+ from PIL import Image
+
+ # Initialize the Groq client (reads the API key from the GROQ_API_KEY environment variable)
+ groq_client = Groq(api_key=os.environ["GROQ_API_KEY"])
+ VISION_MODEL = "meta-llama/llama-4-scout-17b-16e-instruct"
+
+ # Configure the embedding model before loading the index so queries use the same embeddings
+ Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en")
+
+ # Load the FAISS-backed index from the persisted directory
+ faiss_index_path = "faiss_store"
+ vector_store = FaissVectorStore.from_persist_dir(faiss_index_path)
+ storage_context = StorageContext.from_defaults(
+     vector_store=vector_store, persist_dir=faiss_index_path
+ )
+ index = load_index_from_storage(storage_context)
+
+ # Set up the query engine for retrieval
+ query_engine = index.as_query_engine()
+
+ def pil_to_data_url(image):
+     """Encode a PIL image as a base64 data URL for the Groq vision API."""
+     buffer = io.BytesIO()
+     image.save(buffer, format="PNG")
+     encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
+     return f"data:image/png;base64,{encoded}"
+
+ def multimodal_skin_rag(image, question):
+     # Retrieve relevant skincare context from the FAISS index
+     context = query_engine.query(question)
+     user_prompt = f"""Based on the following skincare context, answer the user's question with reference to the image (if relevant):
+
+ Context:
+ {context}
+
+ Question:
+ {question}
+ """
+     messages = [
+         {"role": "system", "content": "You are a skincare advisor who understands image-based inputs and medical-grade text."},
+         {
+             "role": "user",
+             "content": [
+                 {"type": "text", "text": user_prompt},
+                 {"type": "image_url", "image_url": {"url": pil_to_data_url(image)}},
+             ],
+         },
+     ]
+     response = groq_client.chat.completions.create(model=VISION_MODEL, messages=messages)
+     return response.choices[0].message.content
+
+ demo = gr.Interface(
+     fn=multimodal_skin_rag,
+     inputs=[
+         gr.Image(type="pil", label="Upload Skin Image"),
+         gr.Textbox(label="Describe your skin concern or ask a question"),
+     ],
+     outputs="text",
+     title="SkinCare Assistant: FAISS + Groq LLM",
+     description="Upload a skin image and ask any skincare-related question. This system retrieves relevant content using FAISS and answers using Groq's Vision LLM.",
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
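
Note: app-5.py assumes a FAISS index has already been persisted to the "faiss_store" directory; that build step is not part of this commit. The sketch below shows one way such an index could be created with llama_index's FAISS integration. It is illustrative only: the "skincare_docs" source folder is a hypothetical name, and it assumes the faiss-cpu, llama-index-vector-stores-faiss, and llama-index-embeddings-huggingface packages are installed.

import faiss
from llama_index.core import Settings, SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.vector_stores.faiss import FaissVectorStore

# Use the same embedding model as the app so query and document vectors are compatible
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en")

# BAAI/bge-small-en produces 384-dimensional embeddings
faiss_index = faiss.IndexFlatL2(384)
vector_store = FaissVectorStore(faiss_index=faiss_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

# "skincare_docs" is a hypothetical folder of source documents, not part of this repo
documents = SimpleDirectoryReader("skincare_docs").load_data()
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)

# Persist the docstore, index store, and FAISS index to the directory the app loads
index.storage_context.persist(persist_dir="faiss_store")

The persist_dir used here must match the faiss_index_path that app-5.py passes to FaissVectorStore.from_persist_dir and StorageContext.from_defaults; otherwise load_index_from_storage will not find the stored index.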