# NOTE(review): the original paste began with "Spaces:" / "Runtime error" /
# "Runtime error" — residue from a Hugging Face Spaces error log, not code.
# Converted to this comment so the file parses.
from fastapi import FastAPI
from transformers import AutoModelForCausalLM

# Wrap imports with heavy native dependencies in try/except so a missing
# or broken install logs a clear message before the process dies.
try:
    from peft import PeftModel, PeftConfig
except ImportError as e:
    print(f"Error importing from peft: {e}")
    raise
try:
    from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
except ImportError as e:
    print(f"Error importing from mistral_common: {e}")
    raise
# Initialize the FastAPI app.
app = FastAPI()

# Load the PEFT adapter config, the base model, and the adapter weights.
try:
    config = PeftConfig.from_pretrained("frankmorales2020/Mistral-7B-text-to-sql-flash-attention-2-dataeval")
    base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
    model = PeftModel.from_pretrained(base_model, "frankmorales2020/Mistral-7B-text-to-sql-flash-attention-2-dataeval")

    from transformers import AutoTokenizer, pipeline

    # NOTE(review): transformers.pipeline requires a transformers
    # PreTrainedTokenizer; mistral_common's MistralTokenizer does not
    # implement that interface and fails at pipeline construction, so the
    # pipeline tokenizer is loaded via AutoTokenizer instead.
    tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")

    # Mistral is a causal (decoder-only) LM, so the correct pipeline task is
    # "text-generation" — "text2text-generation" is for encoder-decoder models.
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
except Exception as e:
    print(f"Error loading model or creating pipeline: {e}")
    raise
# NOTE(review): the original paste had no route decorator (likely stripped by
# the copy); without one FastAPI never serves this handler.
@app.get("/")
def home():
    """Landing / health-check route; returns a static greeting."""
    return {"message": "Hello World"}
# NOTE(review): route decorator was missing in the original paste (likely
# stripped by the copy); without one this handler is unreachable.
@app.post("/generate")
def generate(text: str):
    """Run the module-level generation pipeline on `text`.

    Returns {"output": <generated text>} on success, or {"error": <message>}
    on failure rather than letting the exception surface as a 500.
    """
    try:
        output = pipe(text)
        # The pipeline returns a list of dicts keyed by 'generated_text'.
        return {"output": output[0]['generated_text']}
    except Exception as e:
        return {"error": str(e)}