from transformers import GPT2Tokenizer, GPT2LMHeadModel
import torch
from datasets import load_dataset
import pandas as pd
import re
from numpy.random import randn
from keras.models import load_model
from matplotlib import pyplot
import matplotlib
import tensorflow as tf
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

import chatbot
import textgen
import imagegen
app = FastAPI()

# Add CORS middleware to allow any origin
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],      # allows all origins
    allow_credentials=True,
    allow_methods=["*"],      # allows all methods (GET, POST, etc.)
    allow_headers=["*"],      # allows all headers
)
# NOTE: the route decorators below were missing from the source; the
# paths ("/", "/chat", "/textgen", "/facegen") are assumed.
@app.get("/")
def root():
    return "Hello World"
# Define the Pydantic model to parse JSON input
from pydantic import BaseModel

class HistoryRequest(BaseModel):
    user: list[str]
    ai: list[str]
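# Illustrative request body for HistoryRequest (the values are
# hypothetical): parallel lists of past user and AI turns, e.g.
#   {"user": ["Hi there"], "ai": ["Hello! How can I help?"]}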
# A single global slot holds whichever model was loaded last, so
# switching between endpoints triggers a reload.
@app.post("/chat")  # path assumed, see note above
async def generate_response(history: HistoryRequest):
    print("Chatbot request")
    global model
    directory = 'models/fine-tuned-gpt2'
    try:
        # check whether a different model is currently loaded
        if not isinstance(model, chatbot.ChatBot):
            model = chatbot.ChatBot(
                directory,
                GPT2Tokenizer.from_pretrained(directory),
                GPT2LMHeadModel.from_pretrained(directory),
                torch.device("cuda" if torch.cuda.is_available() else "cpu")
            )
    except NameError:
        # model has never been defined; load it
        model = chatbot.ChatBot(
            directory,
            GPT2Tokenizer.from_pretrained(directory),
            GPT2LMHeadModel.from_pretrained(directory),
            torch.device("cuda" if torch.cuda.is_available() else "cpu")
        )
    response = model.generate_response(history)
    return response
class TextGenInput(BaseModel):
    user: str
@app.post("/textgen")  # path assumed
async def generate_text(request: TextGenInput):
    print("Generating text request")
    global model
    directory = 'models/fine-tuned-gpt2-textgen'
    try:
        # check whether a different model is currently loaded
        if not isinstance(model, textgen.TextGen):
            model = textgen.TextGen(
                GPT2Tokenizer.from_pretrained(directory),
                GPT2LMHeadModel.from_pretrained(directory),
                torch.device("cuda" if torch.cuda.is_available() else "cpu")
            )
    except NameError:
        # model has never been defined; load it
        model = textgen.TextGen(
            GPT2Tokenizer.from_pretrained(directory),
            GPT2LMHeadModel.from_pretrained(directory),
            torch.device("cuda" if torch.cuda.is_available() else "cpu")
        )
    response = model.generate_text(request.user)
    return response
@app.get("/facegen")  # path assumed
async def generate_face():
    print("Generating face image request")
    global model
    # run on the GPU when TensorFlow can see one, otherwise fall back to CPU
    device = '/GPU:0' if tf.config.list_physical_devices('GPU') else '/CPU:0'
    try:
        # check whether a different model is currently loaded
        if not isinstance(model, imagegen.FaceGen):
            model = imagegen.FaceGen(
                model=load_model('models/face-gen-gan/generator_model_100.h5'),
                device=device
            )
    except NameError:
        # model has never been defined; load it
        model = imagegen.FaceGen(
            model=load_model('models/face-gen-gan/generator_model_100.h5'),
            device=device
        )
    response = model.generate_image(latent_dim=100, n_samples=128, seed=randn(100 * 128))
    return response
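
# A minimal sketch for running the API locally, assuming uvicorn is
# installed; the host and port values are illustrative. Each endpoint
# can then be exercised with, e.g.:
#   curl -X POST localhost:8000/chat -H "Content-Type: application/json" \
#        -d '{"user": ["Hi there"], "ai": []}'
# (the paths match the assumed decorators above).
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)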