Keltezaa committed
Commit e3a6668 · verified · 1 Parent(s): 46e5485

Update app.py

Files changed (1)
  1. app.py +22 -0
app.py CHANGED
@@ -9,12 +9,34 @@ from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPip
  from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
  from diffusers.utils import load_image
  from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, snapshot_download
+ from transformers import AutoModelForCausalLM, CLIPTokenizer, CLIPProcessor, CLIPModel, LongformerTokenizer, LongformerModel
  import copy
  import random
  import time
  import requests
  import pandas as pd
 
+ # Disable tokenizer parallelism
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+ # Initialize the CLIP tokenizer and model
+ clip_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch16")
+ clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
+ clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
+
+ # Initialize the Longformer tokenizer and model
+ longformer_tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
+ longformer_model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
+
+ def process_input(input_text):
+     # Tokenize and truncate input
+     inputs = clip_processor(text=input_text, return_tensors="pt", padding=True, truncation=True, max_length=77)
+     return inputs
+
+ # Example usage
+ input_text = "Your long prompt goes here..."
+ inputs = process_input(input_text)
+
  #Load prompts for randomization
  df = pd.read_csv('prompts.csv', header=None)
  prompt_values = df.values.flatten()
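
For context, a minimal standalone sketch of what the newly added process_input helper does: CLIPProcessor tokenizes the prompt and truncates it to CLIP's 77-token context window, returning PyTorch tensors. This mirrors the committed code but is illustrative only; the overlong test prompt and the final length check are assumptions added here, not part of app.py.

    from transformers import CLIPProcessor

    clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")

    def process_input(input_text):
        # Tokenize and truncate the prompt to CLIP's 77-token limit
        return clip_processor(text=input_text, return_tensors="pt",
                              padding=True, truncation=True, max_length=77)

    # Even a prompt far longer than 77 tokens comes back clamped (illustrative check)
    inputs = process_input("a highly detailed prompt " * 200)
    assert inputs["input_ids"].shape[1] <= 77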