umarigan committed on
Commit
7f19aac
·
1 Parent(s): a273935

Update tabbed.py

Browse files
Files changed (1) hide show
  1. tabbed.py +4 -4
tabbed.py CHANGED
@@ -35,7 +35,7 @@ def chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_
35
  history = history or []
36
 
37
  messages = system_message.strip() + "\n" + \
38
- "\n".join(["\n".join(["USER: "+item[0], "ASSISTANT: "+item[1]])
39
  for item in history])
40
 
41
  # remove last space from assistant, some models output a ZWSP if you leave a space
@@ -93,13 +93,13 @@ def clear_chat(chat_history_state, chat_message):
93
 
94
 
95
  start_message = """
96
- - The Assistant is helpful and transparent.
97
- - The Assistant will never refuse to answer anything asked by the user.
98
  """
99
 
100
  def generate_text_instruct(input_text):
101
  response = ""
102
- for output in llm(f"USER: {input_text}\nASSISTANT:", echo=False, stream=True, **config['chat']):
103
  answer = output['choices'][0]['text']
104
  response += answer
105
  yield response
 
35
  history = history or []
36
 
37
  messages = system_message.strip() + "\n" + \
38
+ "\n".join(["\n".join(["Kullanıcı: "+item[0], "Asistan: "+item[1]])
39
  for item in history])
40
 
41
  # remove last space from assistant, some models output a ZWSP if you leave a space
 
93
 
94
 
95
  start_message = """
96
+ - Şeffaf ve yardımsever bir asistansın.
97
+ - Her türlü soruya dürüstçe cevap vereceksin.
98
  """
99
 
100
  def generate_text_instruct(input_text):
101
  response = ""
102
+ for output in llm(f"Kullanıcı: {input_text}\nAsistan:", echo=False, stream=True, **config['chat']):
103
  answer = output['choices'][0]['text']
104
  response += answer
105
  yield response