import logging

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)  # the default level is WARNING, so info logs would otherwise be silent


class LocalModel:
    def __init__(self, model_name: str, max_tokens: int, temperature: float):
        self.max_tokens = max_tokens
        self.temperature = temperature
        # Load the model locally. For a demo, you may choose a lighter model if needed.
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype=torch.float16
        )
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.pipeline = pipeline(
            "text-generation",
            model=self.model,
            tokenizer=self.tokenizer,
        )

    def __call__(self, prompt: str, **kwargs) -> str:
        # Adjust the call signature as needed by your agent.
        result = self.pipeline(
            prompt,
            max_new_tokens=self.max_tokens,
            temperature=self.temperature,
            do_sample=True,  # sampling must be enabled, otherwise temperature is ignored
            **kwargs,
        )
        # The result is a list with one dict containing the generated text.
        # Note: "generated_text" includes the prompt; pass return_full_text=False
        # to get only the completion.
        output = result[0]["generated_text"]
        logger.info(f"Model output: {output}")
        return output


if __name__ == "__main__":
    local_model = LocalModel("Qwen/Qwen2.5-1.5B", max_tokens=100, temperature=0.5)
    output = local_model("A big foot")
    print(output)
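
    # Extra generation settings pass straight through **kwargs. A hypothetical
    # example (top_p and repetition_penalty are standard transformers generate()
    # parameters; the values here are illustrative, not tuned):
    # output = local_model("A big foot", top_p=0.9, repetition_penalty=1.1)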