import os
from dotenv import load_dotenv
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# Load environment variables if needed
load_dotenv()
# Use the Qwen2.5-7B-Instruct model from Hugging Face
MODEL_NAME = "Qwen/Qwen2.5-7B-Instruct"
# Initialize tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map="auto",  # or "cpu", "cuda", etc. as appropriate
    trust_remote_code=True,
)
# Create pipeline
qwen_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
def generate_response(retrieved_texts, query, max_new_tokens=512):
    """
    Generates a response based on the retrieved texts and query using the Qwen pipeline.

    Args:
        retrieved_texts (list): List of retrieved text strings.
        query (str): The user's query string.
        max_new_tokens (int): Maximum number of tokens for the generated answer.

    Returns:
        str: Generated response.
    """
    # Construct a simple prompt from the retrieved context
    context = "\n".join(retrieved_texts)
    prompt = f"Context:\n{context}\n\nQuestion: {query}\nAnswer:"

    # Generate the text
    result = qwen_pipeline(
        prompt,
        max_new_tokens=max_new_tokens,
        do_sample=True,   # or False if you prefer deterministic output
        temperature=0.7,  # adjust as needed
    )

    # Extract the generated text from the pipeline's output
    generated_text = result[0]["generated_text"]

    # Optional: clean up the output to isolate the answer portion
    if "Answer:" in generated_text:
        answer_part = generated_text.split("Answer:")[-1].strip()
    else:
        answer_part = generated_text

    return answer_part
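

# Illustrative usage sketch (not part of the original file): the passages and
# query below are made-up placeholders; in the actual RAG pipeline they would
# come from the retrieval step.
if __name__ == "__main__":
    example_passages = [
        "Qwen2.5 is a family of instruction-tuned language models.",
        "The 7B-Instruct variant can be loaded with the Hugging Face transformers library.",
    ]
    example_query = "Which library can load the 7B-Instruct model?"
    answer = generate_response(example_passages, example_query, max_new_tokens=128)
    print(answer)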