Tonic committed on
Commit
f465778
·
1 Parent(s): 968fe0b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -2
app.py CHANGED
@@ -1,6 +1,5 @@
1
  import transformers
2
  from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM
3
- from peft import PeftModel, PeftConfig
4
  import torch
5
  import gradio as gr
6
  import json
@@ -59,7 +58,7 @@ class FalconChatBot:
59
  # Encode the conversation using the tokenizer
60
  input_ids = tokenizer.encode(conversation, return_tensors="pt", add_special_tokens=False)
61
  # Generate a response using the Falcon model
62
- response = model.generate(input_ids=input_ids, max_length=max_length, use_cache=False, early_stopping=False, bos_token_id=model.config.bos_token_id, eos_token_id=model.config.eos_token_id, pad_token_id=peft_model.config.eos_token_id, temperature=0.4, do_sample=True)
63
  # Decode the generated response to text
64
  response_text = tokenizer.decode(response[0], skip_special_tokens=True)
65
  # Append the Falcon-like conversation to the history
 
1
  import transformers
2
  from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM
 
3
  import torch
4
  import gradio as gr
5
  import json
 
58
  # Encode the conversation using the tokenizer
59
  input_ids = tokenizer.encode(conversation, return_tensors="pt", add_special_tokens=False)
60
  # Generate a response using the Falcon model
61
+ response = model.generate(input_ids=input_ids, max_length=max_length, use_cache=False, early_stopping=False, bos_token_id=model.config.bos_token_id, eos_token_id=model.config.eos_token_id, pad_token_id=model.config.eos_token_id, temperature=0.4, do_sample=True)
62
  # Decode the generated response to text
63
  response_text = tokenizer.decode(response[0], skip_special_tokens=True)
64
  # Append the Falcon-like conversation to the history