from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel

import tensorflow as tf
from keras.models import load_model
from numpy.random import randn

# Local wrappers around the individual models
import chatbot
import textgen
import imagegen
app = FastAPI()

# Only one model is kept in memory at a time; each endpoint lazily
# (re)loads its own model when a different one is currently resident.
model = None

# Add CORS middleware to allow any origin (fine for a demo; restrict in production)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],      # Allows all origins
    allow_credentials=True,
    allow_methods=["*"],      # Allows all methods (GET, POST, etc.)
    allow_headers=["*"],      # Allows all headers
)

@app.get("/")
def root():
    return "Hello World"

# Request body schema for /chatbot: parallel lists of past user and AI turns
class HistoryRequest(BaseModel):
    user: list[str]
    ai: list[str]
@app.post("/chatbot")
async def generate_response(history: HistoryRequest):
print("Chatbot request")
global model
try:
# check if model is already loaded
if not isinstance(model, chatbot.ChatBot):
model = chatbot.ChatBot(
'models/fine-tuned-gpt2',
GPT2Tokenizer.from_pretrained('models/fine-tuned-gpt2'),
GPT2LMHeadModel.from_pretrained('models/fine-tuned-gpt2'),
torch.device("cuda" if torch.cuda.is_available() else "cpu")
)
except NameError:
# if model is not defined, load
model = chatbot.ChatBot(
'models/fine-tuned-gpt2',
GPT2Tokenizer.from_pretrained('models/fine-tuned-gpt2'),
GPT2LMHeadModel.from_pretrained('models/fine-tuned-gpt2'),
torch.device("cuda" if torch.cuda.is_available() else "cpu")
)
response = model.generate_response(history)
return response
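
# Example request, with a hypothetical one-turn history (assuming port 8000):
# curl -X POST http://localhost:8000/chatbot \
#      -H "Content-Type: application/json" \
#      -d '{"user": ["Hi there"], "ai": ["Hello! How can I help?"]}'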

# Request body schema for /text-gen: a single prompt string
class TextGenInput(BaseModel):
    user: str
@app.post("/text-gen")
async def generate_text(input: TextGenInput):
print("Generating text request")
global model
directory = 'models/fine-tuned-gpt2-textgen'
try:
# check if model is already loaded
if not isinstance(model, textgen.TextGen):
model = textgen.TextGen(
GPT2Tokenizer.from_pretrained(directory),
GPT2LMHeadModel.from_pretrained(directory),
torch.device("cuda" if torch.cuda.is_available() else "cpu")
)
except NameError:
# if model is not defined, load
model = textgen.TextGen(
GPT2Tokenizer.from_pretrained(directory),
GPT2LMHeadModel.from_pretrained(directory),
torch.device("cuda" if torch.cuda.is_available() else "cpu")
)
response = model.generate_text(input.user)
return response
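
# Example request with a hypothetical prompt (assuming port 8000):
# curl -X POST http://localhost:8000/text-gen \
#      -H "Content-Type: application/json" \
#      -d '{"user": "Once upon a time"}'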
@app.get("/face-gen")
async def generate_face():
print("Generating face image request")
global model
try:
# check if model is already loaded
if not isinstance(model, imagegen.FaceGen):
model = imagegen.FaceGen(
model = load_model('models/face-gen-gan/generator_model_100.h5'),
device = '/CPU:0'
)
except NameError:
# if model is not defined, load
model = imagegen.FaceGen(
model = load_model('models/face-gen-gan/generator_model_100.h5'),
device = '/GPU:0' if tf.config.list_physical_devices('GPU') else '/CPU:0'
)
response = model.generate_image(latent_dim=100,n_samples=128,seed=randn(100*128))
return response |
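
# Example request (assuming port 8000):
# curl http://localhost:8000/face-gen

# A minimal entry point, assuming the Space is started with `python app.py`;
# the hosting environment may instead launch the server itself, in which
# case this guard is simply never triggered.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)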