Upload app.py with huggingface_hub
app.py
CHANGED
@@ -85,14 +85,14 @@ embedding_model = OpenAIEmbeddings(
 )
 # This initializes the Azure OpenAI embeddings model using the specified endpoint, API key, and model name.
 
-
-# Initialize the Azure Chat OpenAI model
+# Initialize the Chat OpenAI model
 llm = ChatOpenAI(
-    openai_api_base=endpoint,
-    openai_api_key=api_key,
-    model="gpt-4o-mini",
-    streaming=False
+    openai_api_base=endpoint,  # Complete the code to define the endpoint
+    openai_api_key=api_key,  # Complete the code to provide the API key
+    model="gpt-4o-mini",  # Complete the code to define the Azure deployment name
+    streaming=False  # Complete the code to set the temperature for response variability
 )
+
 # This initializes the Chat OpenAI model with the provided endpoint, API key, deployment name, and a temperature setting of 0 (to control response variability).
 
 # set the LLM and embedding model in the LlamaIndex settings.
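For context, here is a minimal, self-contained sketch of what the completed block around this hunk might look like. It assumes the app uses LangChain's ChatOpenAI and OpenAIEmbeddings against an OpenAI-compatible (Azure-hosted) endpoint and registers them in LlamaIndex via the llama-index-llms-langchain and llama-index-embeddings-langchain wrapper packages; the environment-variable names and the embedding model name are placeholders, not taken from the commit.

```python
import os

from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from llama_index.core import Settings
from llama_index.embeddings.langchain import LangchainEmbedding
from llama_index.llms.langchain import LangChainLLM

# Assumed environment variables; the actual app may obtain these differently.
endpoint = os.environ["OPENAI_API_BASE"]  # OpenAI-compatible (Azure-hosted) endpoint URL
api_key = os.environ["OPENAI_API_KEY"]    # API key for that endpoint

# Embedding model (the model name here is a placeholder deployment).
embedding_model = OpenAIEmbeddings(
    openai_api_base=endpoint,
    openai_api_key=api_key,
    model="text-embedding-3-small",
)

# Chat model, mirroring the added lines in the diff above.
llm = ChatOpenAI(
    openai_api_base=endpoint,
    openai_api_key=api_key,
    model="gpt-4o-mini",
    streaming=False,
    temperature=0,  # the in-file comment mentions a temperature of 0; the diff itself only sets streaming=False
)

# Register both models in LlamaIndex's global settings, as the trailing
# comment in the hunk suggests, using the LangChain wrappers.
Settings.llm = LangChainLLM(llm=llm)
Settings.embed_model = LangchainEmbedding(embedding_model)
```

Note that the in-file comment refers to "a temperature setting of 0" while the committed line only sets streaming=False, so a temperature=0 argument may also be intended at that point in the scaffold.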