import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
model_id = "WizardLM/WizardMath-7B-V1.1"

# Configuration
runtimeFlag = "cuda:0"   # target device if placing the model manually; device_map="auto" below handles placement
cache_dir = None         # optional directory for caching downloaded weights (None uses the default HF cache)
scaling_factor = 1.0     # RoPE scaling factor; values > 1.0 extend the usable context window (only used if rope_scaling is enabled below)
tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir=cache_dir)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    offload_folder="offload",
    pad_token_id=tokenizer.eos_token_id,
    offload_state_dict=True,
    torch_dtype=torch.float16,
    cache_dir=cache_dir,
    # rope_scaling={"type": "dynamic", "factor": scaling_factor},
)
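
# Note: Streamlit re-runs this whole script on every widget interaction, which
# would reload the 7B model each time. A minimal sketch of the usual fix, using
# Streamlit's st.cache_resource so the weights stay in memory across reruns
# (shown as an optional alternative to the module-level load above):
@st.cache_resource
def load_model(name: str = model_id):
    # Load the tokenizer and model once per process; later reruns reuse them.
    tok = AutoTokenizer.from_pretrained(name, cache_dir=cache_dir)
    mdl = AutoModelForCausalLM.from_pretrained(
        name,
        device_map="auto",
        torch_dtype=torch.float16,
        cache_dir=cache_dir,
    )
    return tok, mdl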
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,
    do_sample=True,  # enable sampling so temperature/top_p actually take effect
    temperature=0.7,
    top_p=0.95,
    repetition_penalty=1.15,
)
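
# WizardMath-style checkpoints are usually prompted with an Alpaca-style
# instruction template rather than the bare question. The exact template below
# is an assumption (check the model card); this is just a sketch of the idea.
# Passing format_prompt(question) to pipe() instead of the raw question
# typically improves answer quality.
def format_prompt(instruction: str) -> str:
    # Wrap the user's question in an instruction/response prompt.
    return (
        "Below is an instruction that describes a task. "
        "Write a response that appropriately completes the request.\n\n"
        f"### Instruction:\n{instruction}\n\n### Response:"
    )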
question = st.text_area("Enter question")
if question:
    out = pipe(question)[0]["generated_text"]
    st.write(out)
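
# To launch the app locally (assuming this file is saved as app.py):
#   streamlit run app.py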