Update app.py
app.py CHANGED
@@ -27,7 +27,7 @@ longformer_tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-b
 longformer_model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
 
 # Load prompts for randomization
-
+df = pd.read_csv('prompts.csv', header=None)
 prompt_values = df.values.flatten()
 
 # Load LoRAs from JSON file
@@ -45,7 +45,7 @@ pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef
 
 MAX_SEED = 2**32 - 1
 
-
+def process_input(input_text):
     # Tokenize and truncate input
     inputs = clip_processor(text=input_text, return_tensors="pt", padding=True, truncation=True, max_length=77)
     return inputs
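For context, a minimal sketch of how the two lines added in this commit fit into app.py: the first hunk defines the `df` that `prompt_values = df.values.flatten()` was already referencing, and the second wraps the previously orphaned tokenization block in a `process_input` function. The CLIP checkpoint name and the `random.choice` usage at the end are assumptions for illustration only; the diff shows just `clip_processor` and the "Load prompts for randomization" comment.

import random

import pandas as pd
from transformers import CLIPProcessor

# Prompts live one per cell in prompts.csv (no header); flattening gives a 1-D
# array that a random prompt can be drawn from.
df = pd.read_csv('prompts.csv', header=None)
prompt_values = df.values.flatten()

# Checkpoint name is an assumption for illustration; the diff only shows `clip_processor`.
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

def process_input(input_text):
    # Tokenize and truncate input: CLIP's text encoder accepts at most 77 tokens,
    # so longer prompts are cut off here rather than failing downstream.
    inputs = clip_processor(text=input_text, return_tensors="pt", padding=True, truncation=True, max_length=77)
    return inputs

# Assumed usage: pick a random prompt and tokenize it.
inputs = process_input(str(random.choice(prompt_values)))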