RakeshUtekar committed on
Commit d8690ff · verified · 1 Parent(s): 598de1d

Update qwen_model.py

Files changed (1)
  1. qwen_model.py +2 -2
qwen_model.py CHANGED
@@ -1,7 +1,7 @@
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
 # Replace with your target Qwen model on Hugging Face
-MODEL_NAME = "Qwen/Qwen2.5-7B-Instruct-1M"
+MODEL_NAME = "Qwen/Qwen2.5-7B-Instruct"
 
 # Initialize tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
@@ -18,7 +18,7 @@ qwen_pipeline = pipeline(
     tokenizer=tokenizer
 )
 
-def generate_response(retrieved_texts, query, max_new_tokens=200):
+def generate_response(retrieved_texts, query, max_new_tokens=500):
     """
     Generates a response based on the retrieved texts and query using Qwen.
     Args:
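
The commit only changes the model name and the max_new_tokens default; the body of generate_response is not included in this diff. Below is a minimal sketch of how the updated function might use the qwen_pipeline defined earlier in the file, assuming a prompt that simply concatenates the retrieved passages with the query. The prompt format and the do_sample / return_full_text settings are assumptions for illustration, not part of this commit.

def generate_response(retrieved_texts, query, max_new_tokens=500):
    # Assumed prompt layout: retrieved context first, then the user question.
    context = "\n\n".join(retrieved_texts)
    prompt = f"Context:\n{context}\n\nQuestion: {query}\nAnswer:"

    # qwen_pipeline is the text-generation pipeline built above from
    # MODEL_NAME; generation settings here are illustrative defaults.
    outputs = qwen_pipeline(
        prompt,
        max_new_tokens=max_new_tokens,  # default raised from 200 to 500 in this commit
        do_sample=False,
        return_full_text=False,
    )
    return outputs[0]["generated_text"]

# Hypothetical usage with retrieved passages from a retriever:
# answer = generate_response(["Qwen2.5 is a family of large language models."],
#                            "What is Qwen2.5?")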