make sys prompt optional
app.py CHANGED
@@ -33,6 +33,10 @@ def bot(message, history, aws_access, aws_secret, aws_token, system_prompt, temp
     try:
         llm = LLM.create_llm(model)
         messages = llm.generate_body(message, history)
+        if system_prompt:
+            sys_prompt = [{"text": system_prompt}]
+        else:
+            sys_prompt = []
 
         config = Config(
             read_timeout = 600,
@@ -53,9 +57,9 @@ def bot(message, history, aws_access, aws_secret, aws_token, system_prompt, temp
         response = br.converse_stream(
             modelId = model,
             messages = messages,
-            system =
+            system = sys_prompt,
             inferenceConfig = {
-                "temperature":
+                "temperature": 1,
                 "maxTokens": max_tokens,
             }
        )
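For context, a minimal sketch of the pattern this commit lands on. The Bedrock Converse API's `system` parameter takes a list of system content blocks, so passing an empty list is how the diff sends no system prompt at all. Everything outside the `converse_stream` call here (the `stream_reply` wrapper, the hardcoded model ID, credentials via the default chain, a single-turn message list instead of `llm.generate_body`) is a hypothetical stand-in for illustration, not code from app.py:

import boto3
from botocore.config import Config

def stream_reply(message, system_prompt="", max_tokens=1024):
    # Empty or missing system prompt -> empty list, so no system block is sent.
    sys_prompt = [{"text": system_prompt}] if system_prompt else []

    # Long read timeout for slow streaming responses, mirroring app.py.
    br = boto3.client("bedrock-runtime", config=Config(read_timeout=600))

    response = br.converse_stream(
        modelId="anthropic.claude-3-haiku-20240307-v1:0",  # placeholder model ID
        messages=[{"role": "user", "content": [{"text": message}]}],
        system=sys_prompt,
        inferenceConfig={"temperature": 1, "maxTokens": max_tokens},
    )
    # converse_stream returns an event stream; text deltas arrive in
    # contentBlockDelta events.
    for event in response["stream"]:
        if "contentBlockDelta" in event:
            yield event["contentBlockDelta"]["delta"]["text"]

An alternative to `sys_prompt = []` would be to build the keyword arguments as a dict and drop `system` entirely when no prompt is given; the empty-list form keeps the call site unconditional, which is what the diff opts for.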