ranamhamoud committed (verified)
Commit 007f40d · 1 parent: c2b7b7c

Update app.py

Files changed (1): app.py (+9 −12)
app.py CHANGED
@@ -53,20 +53,17 @@ def generate(
     model: str,
     message: str,
     chat_history: list[tuple[str, str]],
-    system_prompt: str,
     max_new_tokens: int = 1024,
-    temperature: float = 0.6,
-    top_p: float = 0.9,
-    top_k: int = 50,
-    repetition_penalty: float = 1.2,
+    # temperature: float = 0.6,
+    # top_p: float = 0.9,
+    # top_k: int = 50,
+    # repetition_penalty: float = 1.2,
 ) -> Iterator[str]:
     if chat_history is None:
         logging.error("chat_history is None, initializing to empty list.")
         chat_history = []  # Initialize to an empty list if None is passed
 
     conversation = []
-    if system_prompt:
-        conversation.append({"role": "system", "content": system_prompt})
     for user, assistant in chat_history:
         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
     conversation.append({"role": "user", "content": message})
@@ -91,11 +88,11 @@ def generate(
         streamer=streamer,
         max_new_tokens=max_new_tokens,
         do_sample=True,
-        top_p=top_p,
-        top_k=top_k,
-        temperature=temperature,
-        num_beams=1,
-        repetition_penalty=repetition_penalty,
+        # top_p=top_p,
+        # top_k=top_k,
+        # temperature=temperature,
+        # num_beams=1,
+        # repetition_penalty=repetition_penalty,
     )
     t = Thread(target=model.generate, kwargs=generate_kwargs)
     t.start()
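
The commit comments out the sampling arguments in both the signature and `generate_kwargs` (rather than deleting them), so `model.generate` now falls back to the defaults in the model's `generation_config`. For context, below is a minimal, self-contained sketch of the threaded streaming pattern the truncated hunks imply, using `TextIteratorStreamer` from `transformers`; the checkpoint name and prompt are placeholders, not the app's actual model.

# Minimal sketch of threaded streaming generation; "gpt2" is a
# placeholder checkpoint, not the model this Space actually loads.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

input_ids = tokenizer("Hello, world", return_tensors="pt").input_ids
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

generate_kwargs = dict(
    input_ids=input_ids,
    streamer=streamer,
    max_new_tokens=64,
    do_sample=True,  # sampling knobs left at generation_config defaults, as in this commit
)

# generate() runs in a background thread; the streamer yields decoded text
# chunks as they are produced, so the caller can emit partial output.
t = Thread(target=model.generate, kwargs=generate_kwargs)
t.start()

partial = []
for text in streamer:  # iteration ends when generation finishes
    partial.append(text)
print("".join(partial))

Running `model.generate` in a worker thread keeps the calling handler free to yield partial text as it arrives, which is presumably why the `Thread(...)` lines are untouched by this commit.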