from fastapi import FastAPI, UploadFile, File
from transformers import pipeline
from fastai.vision.all import *
from PIL import Image
import io
import json
import torch

# NOTE - we configure docs_url to serve the interactive docs at the root path
# of the app. This way, we can use the docs as a landing page for the app on Spaces.
app = FastAPI(docs_url="/")

pipe = pipeline("text2text-generation", model="google/flan-t5-small")

categories = ('Heart', 'Oblong', 'Oval', 'Round', 'Square')
learn = load_learner('model.pkl')
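# 'model.pkl' is assumed to be a fastai learner exported with learn.export() and
# uploaded alongside this file in the Space repository; its labels are expected to
# match the `categories` tuple above.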

@app.get("/generate")
def generate(text: str):
    """
    Using the text2text-generation pipeline from `transformers`, generate text
    from the given input text. The model used is `google/flan-t5-small`, which
    can be found [here](https://huggingface.co/google/flan-t5-small).
    """
    output = pipe(text)
    return {"output": output[0]["generated_text"]}
@app.post("/face-analyse")
async def face_analyse(file: UploadFile = File(...)):
# Read the uploaded file content
request_object_content = await file.read()
try:
# Attempt to open the image
img = Image.open(io.BytesIO(request_object_content))
except Exception as e:
return {"error": "Failed to open the image file. Make sure it is a valid image file."}
# Check if img is None or not
if img is None:
return {"error": "Failed to open the image file."}
try:
# Resize the image to 300x300 pixels
img = img.resize((300, 300))
except Exception as e:
return {"error": "Failed to resize the image."}
try:
# Assuming 'learn' is your image classifier model
pred, idx, probs = learn.predict(img)
except Exception as e:
return {"error": "Failed to make predictions."}
# Assuming categories is a list of category labels
return dict(zip(categories, map(float, probs)))
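
# Illustrative usage (assumes a local run and an image file on disk; the response
# maps each face-shape category to its predicted probability):
#   curl -X POST -F "file=@face.jpg" http://localhost:8000/face-analyse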

# Initialize the Meta-Llama-3-70B-Instruct pipeline
llama_model_id = "meta-llama/Meta-Llama-3-70B-Instruct"
llama_pipeline = pipeline(
    "text-generation",
    model=llama_model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)
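# Note: Meta-Llama-3-70B-Instruct is a gated model (access must be granted on the
# Hub), and in bfloat16 it needs on the order of 140 GB of accelerator memory;
# device_map="auto" shards it across whatever devices are available.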
@app.post("/frame-details")
def frame_details(text: str):
"""
Extract structured information from a given text about frames using the
Meta-Llama-3-70B-Instruct model. The model will output details like price, color, etc.
"""
messages = [
{"role": "system", "content": "You are an api chatbot for frames and glasses who always responds with only a json. Extract the infomation given into a structured json for frame details"},
{"role": "user", "content": text},
]
    # Stop generation at either the generic EOS token or Llama 3's end-of-turn token
    terminators = [
        llama_pipeline.tokenizer.eos_token_id,
        llama_pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
    ]
    outputs = llama_pipeline(
        messages,
        max_new_tokens=256,
        eos_token_id=terminators,
        do_sample=True,
        temperature=0.6,
        top_p=0.9,
    )
    # When the pipeline is given a list of chat messages, "generated_text" holds the
    # whole conversation; the assistant's reply is the last message's content.
    generated_text = outputs[0]["generated_text"][-1]["content"]

    # The system prompt asks the model to respond with JSON only, so parse the reply
    # with json.loads rather than eval (which would execute arbitrary expressions).
    try:
        extracted_info = json.loads(generated_text)
    except Exception as e:
        return {"error": "Failed to parse the generated text."}

    return extracted_info
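
# Illustrative usage (the exact keys in the returned JSON depend on what the model
# extracts from the free-form description):
#   curl -X POST "http://localhost:8000/frame-details?text=Black%20round%20acetate%20frames,%20%2499"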

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)