# textgeneration/question_paper.py
import torch
from fastapi import FastAPI
from transformers import AutoModelForCausalLM, AutoTokenizer

app = FastAPI()

# Load the tokenizer and model once at import time instead of on every
# request, so the 10.7B-parameter weights are not re-read per call.
tokenizer = AutoTokenizer.from_pretrained("Upstage/SOLAR-10.7B-v1.0")
model = AutoModelForCausalLM.from_pretrained(
    "Upstage/SOLAR-10.7B-v1.0",
    device_map="auto",
    torch_dtype=torch.float16,
)

@app.get("/")
def solar():
    text = "Hi, my name is "
    # Move the input tensors to the model's device: with device_map="auto"
    # the model may sit on GPU while the tokenizer returns CPU tensors.
    inputs = tokenizer(text, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=64)
    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    print(result)
    return result
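
# A minimal way to serve and query this app locally (a sketch, assuming
# uvicorn is installed and this file is saved as question_paper.py):
#
#   uvicorn question_paper:app --host 0.0.0.0 --port 8000
#   curl http://localhost:8000/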