Update app.py
app.py CHANGED
@@ -1,16 +1,16 @@
 import os
 import json
 from langchain_groq import ChatGroq
-from
+from langchain_core.prompts import PromptTemplate
 from qdrant_client import QdrantClient
 from langchain.chains import RetrievalQA
-from
+from langchain_community.vectorstores import Qdrant
 from fastapi.responses import HTMLResponse
 from fastapi.staticfiles import StaticFiles
 from fastapi.encoders import jsonable_encoder
 from fastapi.templating import Jinja2Templates
 from fastapi import FastAPI, Request, Form, Response
-from
+from langchain_community.embeddings import SentenceTransformerEmbeddings
 
 os.environ["TRANSFORMERS_FORCE_CPU"] = "true"
 
@@ -33,16 +33,14 @@ api_key = os.environ.get("API_KEY")
 llm = ChatGroq(
     model="mixtral-8x7b-32768",
     api_key=api_key,
-
+)
 
 print("LLM Initialized....")
 
 prompt_template = """Use the following pieces of information to answer the user's question.
 If you don't know the answer, just say that you don't know, don't try to make up an answer.
-
 Context: {context}
 Question: {question}
-
 Only return the helpful answer below and nothing else.
 Helpful answer:
 """
@@ -77,4 +75,4 @@ async def get_response(query: str = Form(...)):
     response_data = jsonable_encoder(json.dumps({"answer": answer, "source_document": source_document, "doc": doc}))
 
     res = Response(response_data)
-    return res
+    return res