reply-konhein committed on
Commit ec6c645 · 1 Parent(s): 6f8b8b8

Set temperature to eliminate random responses

Files changed (1)
document_qa_engine.py +3 -2
document_qa_engine.py CHANGED
@@ -76,12 +76,13 @@ def create_inference_pipeline(document_store, model_name, api_key):
         generator = OpenAIChatGenerator(api_key=Secret.from_token("<local LLM doesn't need an API key>"),
                                         model=model_name,
                                         api_base_url="http://localhost:1234/v1",
-                                        generation_kwargs={"max_tokens": MAX_TOKENS}
+                                        generation_kwargs={"max_tokens": MAX_TOKENS},
                                         )
     elif "gpt" in model_name:
         generator = OpenAIChatGenerator(api_key=Secret.from_token(api_key), model=model_name,
-                                        generation_kwargs={"max_tokens": MAX_TOKENS},
+                                        generation_kwargs={"max_tokens": MAX_TOKENS, "temperature": 0},
                                         streaming_callback=lambda chunk: print(chunk.content, end="", flush=True),
+
                                         )
     else:
         generator = HuggingFaceTGIChatGenerator(token=Secret.from_token(api_key), model=model_name,
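
For context, `temperature` controls sampling randomness in the OpenAI chat completions API: at 0 the model always takes the highest-probability token, so repeated runs on the same prompt return (near-)identical answers, which is what the commit message means by eliminating random responses. Below is a minimal sketch of the patched "gpt" branch in isolation, assuming Haystack 2.x (`haystack-ai`) and an OPENAI_API_KEY environment variable; the model name, the MAX_TOKENS value, and the two-run comparison are illustrative, not part of the commit.

from haystack.components.generators.chat import OpenAIChatGenerator
from haystack.dataclasses import ChatMessage
from haystack.utils import Secret

MAX_TOKENS = 512  # illustrative; the repo defines its own constant

# Same generation_kwargs as the patched "gpt" branch: temperature=0 pins
# sampling to the most likely token at every step.
generator = OpenAIChatGenerator(
    api_key=Secret.from_env_var("OPENAI_API_KEY"),  # env var instead of a raw token
    model="gpt-3.5-turbo",                          # illustrative model name
    generation_kwargs={"max_tokens": MAX_TOKENS, "temperature": 0},
)

# Ask the same question twice; with temperature=0 the replies should match.
question = [ChatMessage.from_user("Summarize the uploaded document in one sentence.")]
first = generator.run(messages=question)["replies"][0]
second = generator.run(messages=question)["replies"][0]
print(first.text == second.text)  # expected True; use .content on older Haystack 2.x

Note that the commit adds temperature only to the hosted "gpt" branch; the local-LLM branch above it keeps the server's default sampling behavior.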