Spaces: Runtime error
Upload 9 files
Browse files
- .env +1 -0
- .gitignore +2 -0
- Dockerfile +16 -0
- README.md +6 -5
- main.py +33 -0
- mainHistory.py +46 -0
- models.md +7 -0
- pyproject.toml +17 -0
- requirements.txt +4 -0
.env
ADDED
@@ -0,0 +1 @@
+HF_HOME=./models
.gitignore
ADDED
@@ -0,0 +1,2 @@
+__pycache__
+node_modules
Dockerfile
ADDED
@@ -0,0 +1,16 @@
+FROM python:3.11
+
+WORKDIR /api
+
+COPY ./ /api/
+
+RUN pip install --upgrade pip
+RUN pip install poetry
+RUN poetry install --no-root
+
+RUN mkdir /api/cache
+RUN chmod a+rwx /api/cache
+
+EXPOSE 7860
+
+CMD ["python", "main.py"]
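Note: `poetry install` creates its own virtual environment by default, so the final `CMD ["python", "main.py"]` may not see the dependencies Poetry installed. Assuming default Poetry settings, either disabling the venv (`RUN poetry config virtualenvs.create false` before the install step) or running through Poetry (`CMD ["poetry", "run", "python", "main.py"]`) would keep main.py and its imports in the same environment.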
README.md
CHANGED
@@ -1,10 +1,11 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Micapi
+emoji: 🚀
+colorFrom: red
+colorTo: red
 sdk: docker
 pinned: false
+app_file: main.py
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
main.py
ADDED
@@ -0,0 +1,33 @@
+import os
+from transformers import CLIPProcessor, CLIPModel
+import torch
+from PIL import Image
+
+# Get the directory of the script
+script_directory = os.path.dirname(os.path.realpath(__file__))
+# Specify the directory where the cache will be stored (same folder as the script)
+cache_directory = os.path.join(script_directory, "cache")
+# Create the cache directory if it doesn't exist
+os.makedirs(cache_directory, exist_ok=True)
+
+# Load the CLIP processor and model
+clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32", cache_dir=cache_directory)
+clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32", cache_dir=cache_directory)
+
+# Text description to generate image
+text = "a cat sitting on a table"
+
+# Tokenize text and get features
+inputs = clip_processor(text, return_tensors="pt", padding=True)
+
+with torch.no_grad():
+    image_features = clip_model.get_image_features(**inputs)
+
+# Generate image from features
+generated_image = clip_model.generate_images(image_features)
+
+# Save the generated image
+output_image_path = "generated_image.png"
+Image.fromarray(generated_image).save(output_image_path)
+
+print("Image generated and saved as:", output_image_path)
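Note: as written, main.py cannot run to completion. `CLIPModel` is a contrastive text/image encoder and exposes no `generate_images` method, and `get_image_features` expects pixel values rather than tokenized text, so either call is a likely source of the Space's runtime error. A minimal sketch of what this checkpoint can actually do, scoring candidate captions against an image (the file name example.jpg is only a placeholder):

import torch
from PIL import Image
from transformers import CLIPProcessor, CLIPModel

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")

image = Image.open("example.jpg")  # placeholder input image
captions = ["a cat sitting on a table", "a dog running in a park"]

# CLIP scores how well each caption matches the image; it does not generate images.
inputs = processor(text=captions, images=image, return_tensors="pt", padding=True)
with torch.no_grad():
    outputs = model(**inputs)
probs = outputs.logits_per_image.softmax(dim=-1)
print(dict(zip(captions, probs[0].tolist())))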
mainHistory.py
ADDED
@@ -0,0 +1,46 @@
+from fastapi.staticfiles import StaticFiles
+from fastapi.responses import FileResponse
+from pydantic import BaseModel
+from fastapi import FastAPI
+
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+
+model_name = "facebook/blenderbot-1B-distill"
+
+# https://huggingface.co/models?sort=trending&search=facebook%2Fblenderbot
+# facebook/blenderbot-3B
+# facebook/blenderbot-1B-distill
+# facebook/blenderbot-400M-distill
+# facebook/blenderbot-90M
+# facebook/blenderbot_small-90M
+
+# https://www.youtube.com/watch?v=irjYqV6EebU
+
+app = FastAPI()
+model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+class req(BaseModel):
+    prompt: str
+
+@app.get("/")
+def read_root():
+    return FileResponse(path="templates/index.html", media_type="text/html")
+
+@app.post("/api")
+def read_root(data: req):
+    print("Prompt:", data.prompt)
+
+    input_text = data.prompt
+
+    # Tokenize the input text
+    input_ids = tokenizer.encode(input_text, return_tensors="pt")
+
+    # Generate output using the model
+    output_ids = model.generate(input_ids, num_beams=5, no_repeat_ngram_size=2)
+    generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
+
+    answer_data = { "answer": generated_text }
+    print("Answer:", generated_text)
+
+    return answer_data
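For reference, a sketch of calling the /api route above, assuming the app is served locally on port 7860 (for example with `uvicorn mainHistory:app --host 0.0.0.0 --port 7860`; note that fastapi and uvicorn are not listed in the project's dependencies):

import requests  # assumed to be available; not listed in requirements.txt or pyproject.toml

resp = requests.post("http://localhost:7860/api", json={"prompt": "Hello there!"})
print(resp.json()["answer"])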
models.md
ADDED
@@ -0,0 +1,7 @@
+# Model list
+* [microsoft/DialoGPT-small](https://huggingface.co/microsoft/DialoGPT-small)
+* [microsoft/DialoGPT-medium](https://huggingface.co/microsoft/DialoGPT-medium)
+* [microsoft/DialoGPT-large](https://huggingface.co/microsoft/DialoGPT-large)
+
+# Download locations
+* Github Codespaces: /home/codespace/.local/lib/python3.10/site-packages/transformers/models/
pyproject.toml
ADDED
@@ -0,0 +1,17 @@
+[tool.poetry]
+name = "img gen"
+version = "0.0.1"
+description = "A project to test image generation with AI models"
+authors = ["CubeBeveled <[email protected]>"]
+readme = "README.md"
+
+[tool.poetry.dependencies]
+python = "^3.11"
+transformers = "^4.39.1"
+torch = "^2.2.1"
+pillow = "^10.2.0"
+
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
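Note: the manifest covers the imports in main.py (transformers, torch, pillow), but fastapi and pydantic, which mainHistory.py imports, are not declared here, nor is an ASGI server. If that script is meant to run inside the container, they could be added with, for example, `poetry add fastapi uvicorn`.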
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+transformers
+torch
+poetry
+pillow