fix input textbox bug and endpoint management
- app.py +6 -2
- assets/style.css +9 -0
- spinoza_project/source/backend/llm_utils.py +16 -0
app.py
CHANGED
@@ -3,7 +3,11 @@ import time
 import yaml
 from langchain.prompts.chat import ChatPromptTemplate
 from huggingface_hub import hf_hub_download
-from spinoza_project.source.backend.llm_utils import
+from spinoza_project.source.backend.llm_utils import (
+    get_llm,
+    get_llm_api,
+    get_vectorstore,
+)
 from spinoza_project.source.backend.document_store import pickle_to_document_store
 from spinoza_project.source.backend.get_prompts import get_qa_prompts
 from spinoza_project.source.frontend.utils import (
@@ -38,7 +42,7 @@ for source in config["prompt_naming"]:
 ## Building LLM
 print("Building LLM")
 model = "gpt35turbo"
-llm =
+llm = get_llm_api()
 
 ## Loading_tools
 print("Loading Databases")
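A usage note on the new wiring: `get_llm_api()` reads its Azure endpoint configuration with `os.getenv`, which returns None for anything unset, so a fail-fast check at startup is a sensible companion. A minimal sketch (the variable names come from the llm_utils.py hunk below; the check itself is an assumption, not part of this commit):

    import os

    # Fail fast if the Azure endpoint configuration is incomplete;
    # get_llm_api() would otherwise pass None values to AzureChatOpenAI.
    required = ["DEPLOYMENT_NAME", "OPENAI_API_KEY", "OPENAI_API_BASE", "OPENAI_API_VERSION"]
    missing = [name for name in required if not os.getenv(name)]
    if missing:
        raise RuntimeError(f"Missing environment variables: {', '.join(missing)}")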
assets/style.css
CHANGED
@@ -118,6 +118,11 @@ a {
     height: calc(-100px + 100vh) !important;
 }
 
+#accordion-spinoza {
+    height: 15cm;
+}
+
+
 #accordion-spinoza>open>span:nth-child(1) {
     color: #000000;
     font-size: large;
@@ -157,6 +162,10 @@ a {
 
 }
 
+textarea.scroll-hide {
+    max-height: 42px;
+}
+
 footer {
     position: fixed;
     left: 0;
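These two rules are presumably the visible half of the commit title: `#accordion-spinoza` gets a fixed 15cm height, and `textarea.scroll-hide` (the class Gradio puts on its textbox inputs) is capped at 42px so the input textbox no longer stretches, matching the "input textbox bug". A minimal sketch of how such a stylesheet is usually attached in a Gradio Space (assuming this app uses gr.Blocks; the accordion label and textbox are hypothetical, only the elem_id comes from the CSS above):

    import gradio as gr

    # Load the project stylesheet into the Blocks app so the selectors apply.
    with gr.Blocks(css=open("assets/style.css").read()) as demo:
        # elem_id must match the CSS selector #accordion-spinoza
        with gr.Accordion("Sources", elem_id="accordion-spinoza"):
            gr.Markdown("accordion content goes here")
        chat_input = gr.Textbox(label="Question")  # renders a textarea.scroll-hide

    demo.launch()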
spinoza_project/source/backend/llm_utils.py
CHANGED
@@ -1,3 +1,4 @@
+from tracemalloc import stop
 from langchain_openai import AzureChatOpenAI
 from msal import ConfidentialClientApplication
 from langchain_openai import AzureOpenAIEmbeddings
@@ -62,6 +63,21 @@ def get_llm():
     return LLM(AzureChatOpenAI(temperature=0))
 
 
+def get_llm_api():
+    return LLM(
+        AzureChatOpenAI(
+            deployment_name=os.getenv("DEPLOYMENT_NAME"),
+            openai_api_key=os.getenv("OPENAI_API_KEY"),
+            azure_endpoint=os.getenv("OPENAI_API_BASE"),
+            openai_api_version=os.getenv("OPENAI_API_VERSION"),
+            streaming=True,
+            temperature=0,
+            max_tokens=2048,  # 1024,
+            stop=["<|im_end|>"],
+        )
+    )
+
+
 def get_vectorstore(index_name, model="text-embedding-ada-002"):
     os.environ["AZURE_OPENAI_ENDPOINT"] = (
         f"{os.getenv('OPENAI_API_ENDPOINT')}{os.getenv('DEPLOYMENT_EMB_ID')}/embeddings?api-version={os.getenv('OPENAI_API_VERSION')}"
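Two review notes on this hunk. The new import `from tracemalloc import stop` is unused and looks like an accidental IDE auto-import triggered by the `stop=` keyword argument below; it can be dropped safely. For reference, a minimal sketch of the environment `get_llm_api()` now expects (all values are placeholders; only the variable names appear in the diff):

    import os

    os.environ["DEPLOYMENT_NAME"] = "gpt35turbo"  # chat deployment; matches `model` in app.py
    os.environ["OPENAI_API_KEY"] = "<azure-openai-key>"  # secret, configured on the Space
    os.environ["OPENAI_API_BASE"] = "https://<resource>.openai.azure.com/"  # hypothetical endpoint
    os.environ["OPENAI_API_VERSION"] = "2023-05-15"  # assumed version string

    from spinoza_project.source.backend.llm_utils import get_llm_api

    llm = get_llm_api()  # streaming AzureChatOpenAI wrapped in the project's LLM helper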