from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

class CodeAssistant:
    def __init__(self):
        # Load the Qwen2.5-Coder instruct checkpoint in bfloat16 and let
        # device_map="auto" place it across the available devices.
        self.model_name = "Qwen/Qwen2.5-Coder-32B-Instruct"
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_name,
            torch_dtype=torch.bfloat16,
            device_map="auto"
        )
    
    def generate_response(self, query):
        # Wrap the raw query in the model's chat template, since this is an
        # instruction-tuned checkpoint.
        messages = [{"role": "user", "content": query}]
        prompt = self.tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
        outputs = self.model.generate(
            **inputs,
            max_new_tokens=2048,  # cap generated tokens rather than total sequence length
            temperature=0.7,
            top_p=0.95,
            do_sample=True
        )
        # Strip the prompt tokens so only the newly generated reply is returned.
        new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
        return self.tokenizer.decode(new_tokens, skip_special_tokens=True)
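

# Minimal usage sketch: assumes enough GPU memory to host the 32B checkpoint;
# the example query is invented for illustration only.
if __name__ == "__main__":
    assistant = CodeAssistant()
    reply = assistant.generate_response("Write a Python function that reverses a string.")
    print(reply)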