Upload pipeline.py with huggingface_hub
Browse files- pipeline.py +21 -0
pipeline.py
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
from transformers import T5Tokenizer, T5ForConditionalGeneration
|
3 |
+
from diffusers import StableDiffusionPipeline
|
4 |
+
import torch
|
5 |
+
|
6 |
+
# Load models.
# The T5 text model stays on CPU (it was never moved in the original code);
# only the Stable Diffusion pipeline goes to the accelerator, since image
# generation dominates runtime.
t5_model = T5ForConditionalGeneration.from_pretrained('t5_model')
t5_tokenizer = T5Tokenizer.from_pretrained('t5_tokenizer')

# Fall back to CPU when CUDA is unavailable: a hard-coded .to('cuda')
# raises on CPU-only machines. float16 is only used on GPU — half-precision
# ops are unsupported or extremely slow on CPU, so CPU gets float32.
_device = 'cuda' if torch.cuda.is_available() else 'cpu'
_dtype = torch.float16 if _device == 'cuda' else torch.float32
ArtifyAI_model = StableDiffusionPipeline.from_pretrained('ArtifyAI_model', torch_dtype=_dtype)
ArtifyAI_model = ArtifyAI_model.to(_device)
|
11 |
+
|
12 |
+
# Combined pipeline: text -> T5 rewrite -> Stable Diffusion image.
def t5_to_image_pipeline(input_text, max_length=50, num_beams=5):
    """Rewrite *input_text* with T5, then render the result with Stable Diffusion.

    Args:
        input_text: Free-form prompt text fed to the T5 model.
        max_length: Maximum token length of the T5-generated prompt.
            Defaults to 50, matching the previously hard-coded value.
        num_beams: Beam-search width for T5 generation. Defaults to 5,
            matching the previously hard-coded value.

    Returns:
        The first image (a PIL.Image) produced by the Stable Diffusion
        pipeline for the T5-generated prompt.
    """
    # T5 processing: tokenize (truncated to the model's max input length),
    # generate with beam search, then decode back to a plain-text prompt.
    t5_inputs = t5_tokenizer.encode(input_text, return_tensors='pt', truncation=True)
    summary_ids = t5_model.generate(
        t5_inputs,
        max_length=max_length,
        num_beams=num_beams,
        early_stopping=True,
    )
    generated_text = t5_tokenizer.decode(summary_ids[0], skip_special_tokens=True)

    # Generate the image from the T5 output using Stable Diffusion.
    # NOTE(review): this uses the module-level ArtifyAI_model pipeline;
    # the T5 prompt is passed as the sole positional argument.
    image = ArtifyAI_model(generated_text).images[0]
    return image
|