Update app.py
app.py CHANGED
@@ -117,7 +117,7 @@ def update_vectors(files, parser):
         label="Select documents to query"
     )
 
-def generate_chunked_response(prompt, model, max_tokens=
+def generate_chunked_response(prompt, model, max_tokens=10000, num_calls=3, temperature=0.2, should_stop=False):
     print(f"Starting generate_chunked_response with {num_calls} calls")
     full_response = ""
     messages = [{"role": "user", "content": prompt}]
@@ -377,7 +377,7 @@ After writing the document, please provide a list of sources used in your respon
     for i in range(num_calls):
         for message in client.chat_completion(
             messages=[{"role": "user", "content": prompt}],
-            max_tokens=
+            max_tokens=10000,
             temperature=temperature,
             stream=True,
         ):
@@ -438,7 +438,7 @@ Write a detailed and complete response that answers the following user question:
         logging.info(f"API call {i+1}/{num_calls}")
         for message in client.chat_completion(
             messages=[{"role": "user", "content": prompt}],
-            max_tokens=
+            max_tokens=10000,
             temperature=temperature,
             stream=True,
         ):
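For context, the pattern this commit touches is a loop that issues num_calls streamed completions and concatenates their text, with max_tokens raised to 10000 in each call. Below is a minimal sketch of that pattern, assuming huggingface_hub's InferenceClient; the should_stop parameter from the real function is omitted, and the return value and stream-handling details are assumptions, not taken from the commit:

from huggingface_hub import InferenceClient

def generate_chunked_response(prompt, model, max_tokens=10000, num_calls=3, temperature=0.2):
    # Hypothetical reconstruction of the function this commit modifies:
    # make num_calls streamed chat completions and join their output.
    client = InferenceClient(model)
    full_response = ""
    messages = [{"role": "user", "content": prompt}]
    for i in range(num_calls):
        print(f"API call {i+1}/{num_calls}")
        for message in client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,  # the value this commit raises to 10000
            temperature=temperature,
            stream=True,
        ):
            # Each streamed event carries an incremental text delta.
            chunk = message.choices[0].delta.content
            if chunk:
                full_response += chunk
    return full_response

Raising max_tokens only lifts the per-call output ceiling; the multi-call loop remains the mechanism for assembling a response longer than a single completion.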