from fastapi import FastAPI, UploadFile, File, HTTPException
from transformers import pipeline, AutoTokenizer
from fastai.vision.all import *
from PIL import Image
import torch
import os
import io

access_token = os.getenv("HF_TOKEN")

# Fail fast if the access token is missing; it is needed to download gated models such as Code Llama.
if access_token is None:
    raise ValueError("Access token is missing. Make sure it is set as the HF_TOKEN environment variable.")

# NOTE - we configure docs_url to serve the interactive Docs at the root path
# of the app. This way, we can use the docs as a landing page for the app on Spaces.
app = FastAPI(docs_url="/")

pipe = pipeline("text2text-generation", model="google/flan-t5-small")
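
# Face-shape classifier: a fastai learner exported to 'model.pkl', predicting one of the five categories below.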
categories = ('Heart', 'Oblong', 'Oval', 'Round', 'Square')
learn = load_learner('model.pkl')

@app.get("/generate")
def generate(text: str):
    """
    Using the text2text-generation pipeline from `transformers`, generate text
    from the given input text. The model used is `google/flan-t5-small`, which
    can be found [here](https://huggingface.co/google/flan-t5-small).
    """
    output = pipe(text)
    return {"output": output[0]["generated_text"]}

@app.post("/face-analyse")
async def face_analyse(file: UploadFile = File(...)):
    # Read the uploaded file content
    request_object_content = await file.read()
    
    try:
        # Attempt to open the image
        img = Image.open(io.BytesIO(request_object_content))
    except Exception as e:
        return {"error": f"Failed to open the image file: {e}. Make sure it is a valid image file."}

    try:
        # Resize the image to 300x300 pixels
        img = img.resize((300, 300))
    except Exception as e:
        return {"error": f"Failed to resize the image: {e}"}

    try:
        # Run the fastai classifier; predict returns (label, index, probabilities)
        pred, idx, probs = learn.predict(img)
    except Exception as e:
        return {"error": f"Failed to make predictions: {e}"}

    # Map each label to its predicted probability (assumes the order of `categories`
    # matches the learner's vocab)
    return dict(zip(categories, map(float, probs)))
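
# Example call (assuming the app runs locally on port 8000, with 'face.jpg' a local image file):
# curl -X 'POST' 'http://localhost:8000/face-analyse' \
#   -H 'accept: application/json' \
#   -H 'Content-Type: multipart/form-data' \
#   -F 'file=@face.jpg;type=image/jpeg'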

# Initialize the Code Llama model and tokenizer. The repository is gated,
# so we pass the access token explicitly.
model = "meta-llama/CodeLlama-7b-hf"
tokenizer = AutoTokenizer.from_pretrained(model, token=access_token)
llama_pipeline = pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
    token=access_token,
)

@app.get("/generate_json")
def generate_code(text: str):
    """
    Using the Code Llama pipeline from `transformers`, generate code
    from the given input text. The model used is `meta-llama/CodeLlama-7b-hf`.
    """
    try:
        sequences = llama_pipeline(
            text,
            do_sample=True,
            top_k=10,
            temperature=0.1,
            top_p=0.95,
            num_return_sequences=1,
            eos_token_id=tokenizer.eos_token_id,
            max_length=200,
        )

        generated_text = sequences[0]["generated_text"]
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

    return {"generated_text": generated_text}

# Example call with curl:
# curl -X 'GET' \
#   'http://localhost:8000/generate_code?text=import%20socket%0A%0Adef%20ping_exponential_backoff(host%3A%20str)%3A' \
#   -H 'accept: application/json'
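
# To run the app locally (assuming this file is saved as app.py):
# uvicorn app:app --host 0.0.0.0 --port 8000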