# BUG FIX: `pipeline` is the transformers factory function, not
# `sklearn.pipeline` (which is a module and is not callable).
# AutoProcessor / AutoModelForImageTextToText are kept in case other parts
# of the file (not visible here) use them.
from transformers import AutoProcessor, AutoModelForImageTextToText, pipeline


def get_chat_transformers(messages: list):
    """Generate an assistant reply for *messages* with a chat LLM pipeline.

    Parameters
    ----------
    messages : list
        Chat messages in the transformers chat format, i.e. a list of
        ``{"role": ..., "content": ...}`` dicts.

    Returns
    -------
    The last entry of ``generated_text`` — the newly generated
    (assistant) turn of the conversation.
    """
    # BUG FIX: the original loaded "Xkev/Llama-3.2V-11B-cot" via
    # AutoModelForImageTextToText, built a pipeline around it, and then
    # immediately shadowed that pipeline with a second one — so an 11B
    # model was downloaded and discarded on every call.  The effective
    # model was always "meta-llama/Llama-3.1-8B-Instruct"; the dead load
    # is removed.
    # NOTE(review): if the cot model was actually the intended one, swap
    # the model id here instead.
    pipe = pipeline(
        "text-generation",
        model="meta-llama/Llama-3.1-8B-Instruct",
        # carried over from the original (dead) pipeline call, where it
        # was clearly intended: lets accelerate place the model on the
        # available device(s).
        device_map="auto",
    )
    outputs = pipe(messages)
    # The pipeline returns a list of dicts; "generated_text" holds the
    # whole conversation and [-1] is the model's new reply.
    return outputs[0]["generated_text"][-1]