ranamhamoud committed · verified
Commit 3856850 · 1 Parent(s): 0f4b183

Update app.py

Files changed (1): app.py (+4 -3)
app.py CHANGED
@@ -42,7 +42,9 @@ if torch.cuda.is_available():
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     tokenizer.use_default_system_prompt = False
 
-
+def make_prompt(entry):
+    return f"### Human: YOUR INSTRUCTION HERE: {entry} ### Assistant:"
+
 @spaces.GPU
 def generate(
     message: str,
@@ -56,7 +58,7 @@ def generate(
     conversation = []
     for user, assistant in chat_history:
         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
-    conversation.append({"role": "user", "content": message})
+    conversation.append({"role": "user", "content": make_prompt(message)})
 
     input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
@@ -99,7 +101,6 @@ chat_interface = gr.ChatInterface(
 
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
     chat_interface.render()
     gr.Markdown(LICENSE)
 
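In short, this commit adds a make_prompt helper and routes every new user message through it before the conversation is tokenized, and it drops the Duplicate Space button from the Blocks layout. The sketch below isolates the changed prompt-building path so the effect is easy to see; it is illustrative only. build_conversation is a hypothetical name for logic that actually lives inline in generate(), the sample history is invented, and in app.py the resulting list is passed to tokenizer.apply_chat_template and truncated against MAX_INPUT_TOKEN_LENGTH exactly as shown in the diff.

# Minimal sketch of the prompt path after this commit (not the full app.py).

def make_prompt(entry: str) -> str:
    # Added in this commit: wrap the raw user message in an instruction template.
    return f"### Human: YOUR INSTRUCTION HERE: {entry} ### Assistant:"

def build_conversation(message: str, chat_history: list) -> list:
    # Mirrors the loop inside generate(): earlier turns are appended unchanged,
    # and only the newest message is run through make_prompt().
    conversation = []
    for user, assistant in chat_history:
        conversation.extend([
            {"role": "user", "content": user},
            {"role": "assistant", "content": assistant},
        ])
    conversation.append({"role": "user", "content": make_prompt(message)})
    return conversation

if __name__ == "__main__":
    history = [("hi", "Hello! How can I help?")]   # hypothetical prior turn
    for turn in build_conversation("What does this Space do?", history):
        print(turn)
    # In app.py the list built this way is then tokenized with
    #   input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
    # and trimmed when input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH.

Note that only the newest message is wrapped; earlier turns taken from chat_history keep the plain text the user originally typed. The literal YOUR INSTRUCTION HERE string is copied from the committed code and reads like a placeholder to be replaced with the Space's actual instruction.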