# Hugging Face Space: Gradio demo for the "calculator-8m" model.
# (Page-status residue "Spaces: Sleeping" removed from the scrape.)
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Small (~8M-parameter) seq2seq-style causal LM trained to emit arithmetic
# solution steps. Loaded once at startup; downloads weights on first run.
MODEL_ID = "georgiyozhegov/calculator-8m"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
def solve(problem):
    """Generate a step-by-step solution for an arithmetic problem.

    Args:
        problem: The expression to solve, e.g. ``"2 + 3"``.

    Returns:
        The decoded model output, truncated just before the second
        occurrence of token id 6 (presumably a step/newline separator in
        this model's vocabulary -- TODO confirm against the tokenizer).
    """
    prompt = f"find {problem}\nstep"
    inputs = tokenizer(prompt, return_tensors="pt", return_token_type_ids=False)

    # Sampling decode; no gradients needed at inference time.
    with torch.no_grad():
        outputs = model.generate(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_length=32,
            do_sample=True,
            top_k=50,
            top_p=0.98,
        )

    # Find the cut point: the position of the second token id 6.
    # NOTE(review): if fewer than two such tokens occur, `index` ends at the
    # last position, so the final token is dropped -- looks like an
    # off-by-one, but preserved here to keep the original behavior.
    count = 0
    for index, token in enumerate(outputs[0]):
        if token == 6:
            count += 1
        if count >= 2:
            break

    return tokenizer.decode(outputs[0][:index])
# Example problems pre-filled in the Gradio UI (one-element rows: the
# interface has a single input component).
examples = [
    ["2 + 3"],
    ["10 / 0.5"],
]
# Simple one-input/one-output web UI around `solve`.
# `launch()` stays at module top level: Hugging Face Spaces executes this
# file directly and expects the server to start on import/run.
demo = gr.Interface(
    fn=solve,
    inputs=gr.Textbox(lines=5, label="Problem"),
    outputs=gr.Textbox(label="Solution"),
    examples=examples,
)
demo.launch()