HEHEBOIOG committed
Commit 46186d0 · verified · 1 Parent(s): 815ec5a

Create app.py

Files changed (1):
  1. app.py +83 -0
app.py ADDED
@@ -0,0 +1,83 @@
+ import os
+ from groq import Groq
+ import torch
+ import transformers
+ from transformers import pipeline
+ from langchain_groq import ChatGroq
+ from langchain.vectorstores import Chroma
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.chains import RetrievalQA
+ from langchain.embeddings import HuggingFaceBgeEmbeddings
+ from langchain.prompts import PromptTemplate
+ import streamlit as st
+
+ # Groq API Configuration (read from the environment; never hardcode secrets)
+ GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
+
+ # Initialize Groq Client
+ groq_client = Groq(api_key=GROQ_API_KEY)
+
+ # Configure the Llama 3 70B LLM served by Groq
+ def configure_groq_llm(
+     model_name="llama3-70b-8192",
+     temperature=0.7,
+     max_tokens=2048
+ ):
+     return ChatGroq(
+         groq_api_key=GROQ_API_KEY,
+         model_name=model_name,
+         temperature=temperature,
+         max_tokens=max_tokens
+     )
+
+ # Embedding Configuration
+ def get_embeddings(model_name="BAAI/bge-base-en"):
+     encode_kwargs = {'normalize_embeddings': True}
+     return HuggingFaceBgeEmbeddings(
+         model_name=model_name,
+         encode_kwargs=encode_kwargs
+     )
+
+ # Prompt Template
+ def create_llama_prompt():
+     template = """
+     Use the following context to answer the question:
+     Context: {context}
+     Question: {question}
+     Helpful Answer:"""
+
+     return PromptTemplate(
+         template=template,
+         input_variables=["context", "question"]
+     )
+
+ # Initialize Components
+ embeddings = get_embeddings()
+ llm = configure_groq_llm()
+ vectordb = Chroma(persist_directory='db', embedding_function=embeddings)
+ retriever = vectordb.as_retriever(search_kwargs={"k": 5})
+ prompt = create_llama_prompt()
+
+ # QA Chain Configuration
+ qa_chain = RetrievalQA.from_chain_type(
+     llm=llm,
+     chain_type="stuff",
+     retriever=retriever,
+     chain_type_kwargs={"prompt": prompt},
+     return_source_documents=True
+ )
+
+ # Streamlit Interface
+ def groq_nlp_chatbot():
+     st.title("Groq Llama 3 Chatbot")
+
+     user_input = st.text_input("Your Question:")
+     if user_input:
+         try:
+             response = qa_chain(user_input)
+             st.text_area("Bot's Response:", response['result'])
+         except Exception as e:
+             st.error(f"Error processing request: {e}")
+
+ if __name__ == "__main__":
+     groq_nlp_chatbot()
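
A note on prerequisites (not part of this commit): app.py reopens an existing Chroma store at persist_directory='db' but never writes to it, and it imports RecursiveCharacterTextSplitter without calling it. Below is a minimal ingestion sketch that would produce that 'db' directory; the ingest.py name and the docs/notes.txt path are hypothetical placeholders, and it assumes the same classic LangChain APIs that app.py already imports.

# ingest.py (hypothetical) — build the Chroma store that app.py reopens.
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceBgeEmbeddings

# Load and chunk a source document (path is a placeholder).
docs = TextLoader("docs/notes.txt").load()
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = splitter.split_documents(docs)

# Use the same normalized BGE embeddings as app.py so query vectors match.
embeddings = HuggingFaceBgeEmbeddings(
    model_name="BAAI/bge-base-en",
    encode_kwargs={"normalize_embeddings": True},
)

# Write the vector store to the 'db' directory that app.py expects.
vectordb = Chroma.from_documents(chunks, embeddings, persist_directory="db")
vectordb.persist()

Once 'db' exists, set GROQ_API_KEY in the environment and launch the app with `streamlit run app.py`.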