Do0rMaMu committed on
Commit
5cffeec
·
verified ·
1 Parent(s): b5fdeae

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +3 -4
main.py CHANGED
@@ -25,13 +25,12 @@ app = FastAPI()
25
@app.post("/generate_response")
async def generate_response(item: Validation):
    """Generate a completion for the supplied system/user prompts via the Llama model.

    Expects `item` to carry `system_prompt`, `user_prompt`, `max_tokens`
    and `temperature` (defined on the Validation model elsewhere in this file).
    Returns the raw text of the model's first choice.
    """
    # Llama-3 chat template: special tokens must be contiguous, with "\n\n"
    # immediately after each <|end_header_id|>. The previous version injected
    # stray literal " \n " runs inside the token stream (e.g.
    # "<|eot_id|> \n <|start_header_id|>"), corrupting the expected format.
    prompt = (
        "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
        f"{item.system_prompt}<|eot_id|>"
        "<|start_header_id|>user<|end_header_id|>\n\n"
        f"{item.user_prompt}<|eot_id|>"
        "<|start_header_id|>assistant<|end_header_id|>\n\n"
    )

    # Call the Llama model to generate a response.
    output = llm(prompt, max_tokens=item.max_tokens, temperature=item.temperature, echo=True)

    # Extract and return the text of the first (and only) choice.
    return output['choices'][0]['text']
 
@app.post("/generate_response")
async def generate_response(item: Validation):
    """Generate a completion for the supplied system/user prompts via the Llama model.

    Expects `item` to carry `system_prompt`, `user_prompt`, `max_tokens`
    and `temperature` (defined on the Validation model elsewhere in this file).
    Returns only the newly generated text of the model's first choice.
    """
    # Construct the complete prompt using the given system and user prompts.
    prompt = f"""\nSystem\n
{ item.system_prompt } \nQuestion\n
{ item.user_prompt }"""

    # echo=False: with echo=True the model output includes the prompt itself,
    # so the endpoint would return the caller's own prompt prepended to the
    # completion instead of just the generated response.
    output = llm(prompt, max_tokens=item.max_tokens, temperature=item.temperature, echo=False)

    # Extract and return the text of the first (and only) choice.
    return output['choices'][0]['text']