Upload folder using huggingface_hub
Note: this view is limited to 50 files because the commit contains too many changes.
- .gitattributes +27 -0
- .gradio/certificate.pem +31 -0
- README.md +2 -8
- app.py +131 -0
- inference-Copy1.py +38 -0
- inference-Copy2.py +13 -0
- inference.py +48 -0
- infermlx.py +38 -0
- requirements.txt +64 -0
- venv/bin/Activate.ps1 +247 -0
- venv/bin/activate +70 -0
- venv/bin/activate.csh +27 -0
- venv/bin/activate.fish +69 -0
- venv/bin/convert-caffe2-to-onnx +8 -0
- venv/bin/convert-onnx-to-caffe2 +8 -0
- venv/bin/f2py +8 -0
- venv/bin/fastapi +8 -0
- venv/bin/gradio +8 -0
- venv/bin/httpx +8 -0
- venv/bin/huggingface-cli +8 -0
- venv/bin/isympy +8 -0
- venv/bin/markdown-it +8 -0
- venv/bin/mlx_lm.cache_prompt +8 -0
- venv/bin/mlx_lm.chat +8 -0
- venv/bin/mlx_lm.convert +8 -0
- venv/bin/mlx_lm.fuse +8 -0
- venv/bin/mlx_lm.generate +8 -0
- venv/bin/mlx_lm.lora +8 -0
- venv/bin/mlx_lm.manage +8 -0
- venv/bin/mlx_lm.merge +8 -0
- venv/bin/mlx_lm.server +8 -0
- venv/bin/normalizer +8 -0
- venv/bin/numpy-config +8 -0
- venv/bin/pip +8 -0
- venv/bin/pip3 +8 -0
- venv/bin/pip3.12 +8 -0
- venv/bin/pygmentize +8 -0
- venv/bin/python +0 -0
- venv/bin/python3 +0 -0
- venv/bin/python3.12 +0 -0
- venv/bin/ruff +3 -0
- venv/bin/torchfrtrace +8 -0
- venv/bin/torchrun +8 -0
- venv/bin/tqdm +8 -0
- venv/bin/transformers-cli +8 -0
- venv/bin/typer +8 -0
- venv/bin/upload_theme +8 -0
- venv/bin/uvicorn +8 -0
- venv/lib/python3.12/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER +1 -0
- venv/lib/python3.12/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst +28 -0
.gitattributes
CHANGED
@@ -33,3 +33,30 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+venv/bin/ruff filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/PIL/.dylibs/libfreetype.6.dylib filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/PIL/.dylibs/libharfbuzz.0.dylib filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/gradio/_frontend_code/lite/dist/assets/Canvas3D-JkRyAMo4.js.map filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/gradio/frpc_darwin_arm64_v0.3 filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/gradio/templates/frontend/assets/Canvas3D-BCiO3KDA.js.map filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Canvas3D.CeGGFVVL.js.br filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/gradio/templates/node/build/server/chunks/Canvas3D-DSIDWqZV.js.map filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/gradio/templates/node/build/server/chunks/PlotlyPlot-P3y0I-J2.js.map filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/mlx/core.cpython-312-darwin.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/mlx/lib/libmlx.dylib filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/mlx/lib/mlx.metallib filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/numpy/_core/_multiarray_umath.cpython-312-darwin.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/pandas/_libs/algos.cpython-312-darwin.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/pandas/_libs/groupby.cpython-312-darwin.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/pandas/_libs/hashtable.cpython-312-darwin.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/pandas/_libs/interval.cpython-312-darwin.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/pandas/_libs/join.cpython-312-darwin.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/pandas/_libs/tslibs/offsets.cpython-312-darwin.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/pydantic_core/_pydantic_core.cpython-312-darwin.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/sentencepiece/_sentencepiece.cpython-312-darwin.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-312.pyc filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/tokenizers/tokenizers.cpython-312-darwin.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/torch/lib/libtorch_cpu.dylib filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.12/site-packages/torch/lib/libtorch_python.dylib filter=lfs diff=lfs merge=lfs -text
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
README.md
CHANGED
@@ -1,12 +1,6 @@
 ---
-title: Llama
-
-colorFrom: purple
-colorTo: indigo
+title: Llama-3.2-3B-Fluxed-app
+app_file: app.py
 sdk: gradio
 sdk_version: 5.7.1
-app_file: app.py
-pinned: false
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,131 @@
import os
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


system_message = """
You are a prompt creation assistant for FLUX, an AI image generation model. Your mission is to help the user craft a detailed and optimized prompt by following these steps:

1. **Understanding the User's Needs**:
   - The user provides a basic idea, concept, or description.
   - Analyze their input to determine essential details and nuances.

2. **Enhancing Details**:
   - Enrich the basic idea with vivid, specific, and descriptive elements.
   - Include factors such as lighting, mood, style, perspective, and specific objects or elements the user wants in the scene.

3. **Formatting the Prompt**:
   - Structure the enriched description into a clear, precise, and effective prompt.
   - Ensure the prompt is tailored for high-quality output from the FLUX model, considering its strengths (e.g., photorealistic details, fine anatomy, or artistic styles).

Use this process to compose a detailed and coherent prompt. Ensure the final prompt is clear and complete, and write your response in English.

Ensure that the final part is a synthesized version of the prompt.
"""

def load_model_and_tokenizer(model_name):
    """
    Load the model and tokenizer using Hugging Face's Auto classes.
    Args:
        model_name (str): Hugging Face model name.
    Returns:
        tuple: model, tokenizer, device
    """
    try:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        logger.info(f"Using device: {device}")

        # Load model and tokenizer using Auto classes
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name).to(device)

        # Set or add padding token
        if tokenizer.pad_token is None:
            logger.info("Adding pad_token to the tokenizer.")
            tokenizer.add_special_tokens({'pad_token': tokenizer.eos_token})
            model.resize_token_embeddings(len(tokenizer))  # Adjust model embeddings for new token

        return model, tokenizer, device
    except Exception as e:
        logger.error(f"Error loading model or tokenizer: {e}")
        raise

def chatbot_fn(prompt, chatbot_history=None):
    """
    Chatbot function to handle user prompts and generate responses.
    Args:
        prompt (str): User input prompt.
        chatbot_history (list): History of the conversation.
    Returns:
        tuple: Assistant's response, updated conversation history.
    """
    # Gradio passes None for the state on the first call; normalize it here
    # (and avoid a mutable default argument).
    chatbot_history = chatbot_history or []

    if not prompt.strip():
        return "Please enter a valid prompt.", chatbot_history

    try:
        # Initialize conversation with system message if empty
        if not chatbot_history:
            chatbot_history.append({"role": "system", "content": system_message})

        # Build the conversation context
        conversation = [item['content'] for item in chatbot_history]
        input_text = "\n".join(conversation) + f"\nUser: {prompt}\nAssistant:"

        # Tokenize input
        inputs = tokenizer(
            input_text,
            return_tensors="pt",
            truncation=True,
            max_length=1024,
            padding=True
        ).to(device)

        # Generate response
        with torch.no_grad():
            outputs = model.generate(
                **inputs, max_new_tokens=2000, pad_token_id=tokenizer.pad_token_id
            )
        response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        assistant_response = response_text.split("Assistant:")[-1].strip()

    except Exception as e:
        logger.error(f"Error generating response: {e}")
        return f"An error occurred: {e}", chatbot_history

    # Update history
    chatbot_history.append({"role": "user", "content": prompt})
    chatbot_history.append({"role": "assistant", "content": assistant_response})

    return assistant_response, chatbot_history

# Initialize Hugging Face model and tokenizer
try:
    model_name = "VincentGOURBIN/Llama-3.2-3B-Fluxed"  # Model name
    model, tokenizer, device = load_model_and_tokenizer(model_name)
except Exception as e:
    logger.critical("Failed to initialize the model. Exiting.")
    raise

# Define Gradio interface
iface = gr.Interface(
    fn=chatbot_fn,
    inputs=["text", "state"],
    outputs=["text", "state"],
    title="Prompt Crafting Assistant for FLUX",
    description=(
        "This assistant helps you create detailed and optimized prompts for FLUX, "
        "an AI image generation model. Provide a basic idea, and it will enhance it "
        "with vivid details for high-quality results."
    ),
    allow_flagging="never",
)


if __name__ == "__main__":
    iface.launch(share=True)
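A quick way to smoke-test this app without the Gradio UI is to call chatbot_fn directly. A minimal sketch, assuming app.py is importable from the working directory and the model download succeeds (importing the module loads the model as a side effect); the prompts are hypothetical:

# Hypothetical smoke test for app.py; not part of this commit.
from app import chatbot_fn  # importing app triggers the model load

history = None  # mirrors the None state Gradio passes on the first call
reply, history = chatbot_fn("a toucan coding on a mac", history)
print(reply)
reply, history = chatbot_fn("make the lighting more dramatic", history)
print(reply)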
inference-Copy1.py
ADDED
@@ -0,0 +1,38 @@
import torch
from transformers import pipeline

model_id = "VincentGOURBIN/Llama-3.2-3B-Fluxed-uncensored"

user_need = "a toucan coding on a mac"

system_message = """
You are a prompt creation assistant for FLUX, an AI image generation model. Your mission is to help the user craft a detailed and optimized prompt by following these steps:

1. **Understanding the User's Needs**:
   - The user provides a basic idea, concept, or description.
   - Analyze their input to determine essential details and nuances.

2. **Enhancing Details**:
   - Enrich the basic idea with vivid, specific, and descriptive elements.
   - Include factors such as lighting, mood, style, perspective, and specific objects or elements the user wants in the scene.

3. **Formatting the Prompt**:
   - Structure the enriched description into a clear, precise, and effective prompt.
   - Ensure the prompt is tailored for high-quality output from the FLUX model, considering its strengths (e.g., photorealistic details, fine anatomy, or artistic styles).

Use this process to compose a detailed and coherent prompt. Ensure the final prompt is clear and complete, and write your response in English.

Ensure that the final part is a synthesized version of the prompt.
"""



pipe = pipeline(
    "text-generation",
    model=model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto"
)

pipe("The key to life is")
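Note that inference-Copy1.py defines user_need and system_message but never passes them to the pipeline; the final call generates from a fixed string. A sketch of routing the messages through the pipeline instead, assuming a transformers release whose text-generation pipeline accepts chat-style message lists:

import torch
from transformers import pipeline

# Sketch only: feed the system/user turns to the pipeline rather than a fixed string.
pipe = pipeline(
    "text-generation",
    model="VincentGOURBIN/Llama-3.2-3B-Fluxed-uncensored",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
messages = [
    {"role": "system", "content": "You are a prompt creation assistant for FLUX."},
    {"role": "user", "content": "a toucan coding on a mac"},
]
out = pipe(messages, max_new_tokens=512)
# With chat-style input, "generated_text" holds the whole conversation;
# the last entry should be the assistant's reply.
print(out[0]["generated_text"][-1]["content"])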
inference-Copy2.py
ADDED
@@ -0,0 +1,13 @@
import torch
from transformers import pipeline

model_id = "meta-llama/Llama-3.2-3B"

pipe = pipeline(
    "text-generation",
    model=model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto"
)

pipe("The key to life is")
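inference-Copy2.py likewise discards the pipeline's return value. For reference, a text-generation pipeline returns a list of dicts keyed by "generated_text"; a minimal sketch of capturing it:

import torch
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="meta-llama/Llama-3.2-3B",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
result = pipe("The key to life is", max_new_tokens=50)
print(result[0]["generated_text"])  # the prompt plus the model's continuation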
inference.py
ADDED
@@ -0,0 +1,48 @@
from unsloth import FastLanguageModel
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "VincentGOURBIN/Llama-3.2-3B-Fluxed",
    max_seq_length = 8192,
    load_in_4bit = True,
    # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
)

user_need = "a toucan coding on a mac"

system_message = """
You are a prompt creation assistant for FLUX, an AI image generation model. Your mission is to help the user craft a detailed and optimized prompt by following these steps:

1. **Understanding the User's Needs**:
   - The user provides a basic idea, concept, or description.
   - Analyze their input to determine essential details and nuances.

2. **Enhancing Details**:
   - Enrich the basic idea with vivid, specific, and descriptive elements.
   - Include factors such as lighting, mood, style, perspective, and specific objects or elements the user wants in the scene.

3. **Formatting the Prompt**:
   - Structure the enriched description into a clear, precise, and effective prompt.
   - Ensure the prompt is tailored for high-quality output from the FLUX model, considering its strengths (e.g., photorealistic details, fine anatomy, or artistic styles).

Use this process to compose a detailed and coherent prompt. Ensure the final prompt is clear and complete, and write your response in English.

Ensure that the final part is a synthesized version of the prompt.
"""

from transformers import TextStreamer
from unsloth.chat_templates import get_chat_template
tokenizer = get_chat_template(
    tokenizer,
    chat_template = "llama-3.1",
    mapping = {"role" : "from", "content" : "value", "user" : "human", "assistant" : "gpt"},  # ShareGPT style
)
FastLanguageModel.for_inference(model)  # Enable native 2x faster inference

messages = [
    {"from": "system", "value": system_message},  # EDIT HERE!
    {"from": "human", "value": user_need},
]

inputs = tokenizer.apply_chat_template(messages, tokenize = True, add_generation_prompt = True, return_tensors = "pt").to("cuda")

text_streamer = TextStreamer(tokenizer)
_ = model.generate(input_ids = inputs, streamer = text_streamer, max_new_tokens = 1024, use_cache = True)
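For environments without unsloth, roughly the same streamed generation can be reproduced with stock transformers APIs. A sketch under the assumption that the repo's tokenizer ships a usable chat template (device_map="auto" stands in for the hard-coded "cuda" above):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

# Sketch: plain-transformers equivalent of the unsloth generation above.
model_name = "VincentGOURBIN/Llama-3.2-3B-Fluxed"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [
    {"role": "system", "content": "You are a prompt creation assistant for FLUX."},
    {"role": "user", "content": "a toucan coding on a mac"},
]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

_ = model.generate(input_ids, streamer=TextStreamer(tokenizer), max_new_tokens=1024)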
infermlx.py
ADDED
@@ -0,0 +1,38 @@
from mlx_lm import load, generate



model_id = "mlx-community/Llama-3.2-3B-Fluxed"

model, tokenizer = load(model_id)

user_need = "a toucan coding on a mac"

system_message = """
You are a prompt creation assistant for FLUX, an AI image generation model. Your mission is to help the user craft a detailed and optimized prompt by following these steps:

1. **Understanding the User's Needs**:
   - The user provides a basic idea, concept, or description.
   - Analyze their input to determine essential details and nuances.

2. **Enhancing Details**:
   - Enrich the basic idea with vivid, specific, and descriptive elements.
   - Include factors such as lighting, mood, style, perspective, and specific objects or elements the user wants in the scene.

3. **Formatting the Prompt**:
   - Structure the enriched description into a clear, precise, and effective prompt.
   - Ensure the prompt is tailored for high-quality output from the FLUX model, considering its strengths (e.g., photorealistic details, fine anatomy, or artistic styles).

Use this process to compose a detailed and coherent prompt. Ensure the final prompt is clear and complete, and write your response in English.

Ensure that the final part is a synthesized version of the prompt.
"""

if hasattr(tokenizer, "apply_chat_template") and tokenizer.chat_template is not None:
    messages = [{"role": "system", "content": system_message},
                {"role": "user", "content": user_need}]
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
else:
    # Fallback: without a chat template, `prompt` would otherwise be
    # undefined and the generate() call below would raise a NameError.
    prompt = user_need

response = generate(model, tokenizer, prompt=prompt, verbose=True, max_tokens=1000)
requirements.txt
ADDED
@@ -0,0 +1,64 @@
aiofiles
annotated-types
anyio
certifi
charset-normalizer
click
fastapi
ffmpy
filelock
fsspec
gradio
gradio_client
h11
httpcore
httpx
huggingface-hub
idna
Jinja2
markdown-it-py
MarkupSafe
mdurl
mlx
mlx-lm
mpmath
networkx
numpy
orjson
packaging
pandas
pillow
protobuf
pydantic
pydantic_core
pydub
Pygments
python-dateutil
python-multipart
pytz
PyYAML
regex
requests
rich
ruff
safehttpx
safetensors
semantic-version
sentencepiece
setuptools
shellingham
six
sniffio
starlette
sympy
tokenizers
tomlkit
torch
tqdm
transformers
typer
typing_extensions
tzdata
urllib3
uvicorn
websockets
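requirements.txt pins no versions, so installs resolve to the latest compatible releases. After installing from it, a quick sanity check that the core runtime pieces resolved (a sketch; package names taken from the list above):

# Sketch: confirm the key dependencies are importable and report their versions.
import importlib

for name in ("gradio", "torch", "transformers", "mlx_lm"):
    module = importlib.import_module(name)
    print(name, getattr(module, "__version__", "unknown"))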
venv/bin/Activate.ps1
ADDED
@@ -0,0 +1,247 @@
<#
.Synopsis
Activate a Python virtual environment for the current PowerShell session.

.Description
Pushes the python executable for a virtual environment to the front of the
$Env:PATH environment variable and sets the prompt to signify that you are
in a Python virtual environment. Makes use of the command line switches as
well as the `pyvenv.cfg` file values present in the virtual environment.

.Parameter VenvDir
Path to the directory that contains the virtual environment to activate. The
default value for this is the parent of the directory that the Activate.ps1
script is located within.

.Parameter Prompt
The prompt prefix to display when this virtual environment is activated. By
default, this prompt is the name of the virtual environment folder (VenvDir)
surrounded by parentheses and followed by a single space (ie. '(.venv) ').

.Example
Activate.ps1
Activates the Python virtual environment that contains the Activate.ps1 script.

.Example
Activate.ps1 -Verbose
Activates the Python virtual environment that contains the Activate.ps1 script,
and shows extra information about the activation as it executes.

.Example
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
Activates the Python virtual environment located in the specified location.

.Example
Activate.ps1 -Prompt "MyPython"
Activates the Python virtual environment that contains the Activate.ps1 script,
and prefixes the current prompt with the specified string (surrounded in
parentheses) while the virtual environment is active.

.Notes
On Windows, it may be required to enable this Activate.ps1 script by setting the
execution policy for the user. You can do this by issuing the following PowerShell
command:

PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser

For more information on Execution Policies:
https://go.microsoft.com/fwlink/?LinkID=135170

#>
Param(
    [Parameter(Mandatory = $false)]
    [String]
    $VenvDir,
    [Parameter(Mandatory = $false)]
    [String]
    $Prompt
)

<# Function declarations --------------------------------------------------- #>

<#
.Synopsis
Remove all shell session elements added by the Activate script, including the
addition of the virtual environment's Python executable from the beginning of
the PATH variable.

.Parameter NonDestructive
If present, do not remove this function from the global namespace for the
session.

#>
function global:deactivate ([switch]$NonDestructive) {
    # Revert to original values

    # The prior prompt:
    if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
        Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
        Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
    }

    # The prior PYTHONHOME:
    if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
        Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
        Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
    }

    # The prior PATH:
    if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
        Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
        Remove-Item -Path Env:_OLD_VIRTUAL_PATH
    }

    # Just remove the VIRTUAL_ENV altogether:
    if (Test-Path -Path Env:VIRTUAL_ENV) {
        Remove-Item -Path env:VIRTUAL_ENV
    }

    # Just remove VIRTUAL_ENV_PROMPT altogether.
    if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
        Remove-Item -Path env:VIRTUAL_ENV_PROMPT
    }

    # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
    if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
        Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
    }

    # Leave deactivate function in the global namespace if requested:
    if (-not $NonDestructive) {
        Remove-Item -Path function:deactivate
    }
}

<#
.Description
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
given folder, and returns them in a map.

For each line in the pyvenv.cfg file, if that line can be parsed into exactly
two strings separated by `=` (with any amount of whitespace surrounding the =)
then it is considered a `key = value` line. The left hand string is the key,
the right hand is the value.

If the value starts with a `'` or a `"` then the first and last character is
stripped from the value before being captured.

.Parameter ConfigDir
Path to the directory that contains the `pyvenv.cfg` file.
#>
function Get-PyVenvConfig(
    [String]
    $ConfigDir
) {
    Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"

    # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
    $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue

    # An empty map will be returned if no config file is found.
    $pyvenvConfig = @{ }

    if ($pyvenvConfigPath) {

        Write-Verbose "File exists, parse `key = value` lines"
        $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath

        $pyvenvConfigContent | ForEach-Object {
            $keyval = $PSItem -split "\s*=\s*", 2
            if ($keyval[0] -and $keyval[1]) {
                $val = $keyval[1]

                # Remove extraneous quotations around a string value.
                if ("'""".Contains($val.Substring(0, 1))) {
                    $val = $val.Substring(1, $val.Length - 2)
                }

                $pyvenvConfig[$keyval[0]] = $val
                Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
            }
        }
    }
    return $pyvenvConfig
}


<# Begin Activate script --------------------------------------------------- #>

# Determine the containing directory of this script
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
$VenvExecDir = Get-Item -Path $VenvExecPath

Write-Verbose "Activation script is located in path: '$VenvExecPath'"
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"

# Set values required in priority: CmdLine, ConfigFile, Default
# First, get the location of the virtual environment, it might not be
# VenvExecDir if specified on the command line.
if ($VenvDir) {
    Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
}
else {
    Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
    $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
    Write-Verbose "VenvDir=$VenvDir"
}

# Next, read the `pyvenv.cfg` file to determine any required value such
# as `prompt`.
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir

# Next, set the prompt from the command line, or the config file, or
# just use the name of the virtual environment folder.
if ($Prompt) {
    Write-Verbose "Prompt specified as argument, using '$Prompt'"
}
else {
    Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
    if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
        Write-Verbose "  Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
        $Prompt = $pyvenvCfg['prompt'];
    }
    else {
        Write-Verbose "  Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
        Write-Verbose "  Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
        $Prompt = Split-Path -Path $venvDir -Leaf
    }
}

Write-Verbose "Prompt = '$Prompt'"
Write-Verbose "VenvDir='$VenvDir'"

# Deactivate any currently active virtual environment, but leave the
# deactivate function in place.
deactivate -nondestructive

# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
# that there is an activated venv.
$env:VIRTUAL_ENV = $VenvDir

if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {

    Write-Verbose "Setting prompt to '$Prompt'"

    # Set the prompt to include the env name
    # Make sure _OLD_VIRTUAL_PROMPT is global
    function global:_OLD_VIRTUAL_PROMPT { "" }
    Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
    New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt

    function global:prompt {
        Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
        _OLD_VIRTUAL_PROMPT
    }
    $env:VIRTUAL_ENV_PROMPT = $Prompt
}

# Clear PYTHONHOME
if (Test-Path -Path Env:PYTHONHOME) {
    Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
    Remove-Item -Path Env:PYTHONHOME
}

# Add the venv to the PATH
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
venv/bin/activate
ADDED
@@ -0,0 +1,70 @@
# This file must be used with "source bin/activate" *from bash*
# You cannot run it directly

deactivate () {
    # reset old environment variables
    if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
        PATH="${_OLD_VIRTUAL_PATH:-}"
        export PATH
        unset _OLD_VIRTUAL_PATH
    fi
    if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
        PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
        export PYTHONHOME
        unset _OLD_VIRTUAL_PYTHONHOME
    fi

    # Call hash to forget past commands. Without forgetting
    # past commands the $PATH changes we made may not be respected
    hash -r 2> /dev/null

    if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
        PS1="${_OLD_VIRTUAL_PS1:-}"
        export PS1
        unset _OLD_VIRTUAL_PS1
    fi

    unset VIRTUAL_ENV
    unset VIRTUAL_ENV_PROMPT
    if [ ! "${1:-}" = "nondestructive" ] ; then
        # Self destruct!
        unset -f deactivate
    fi
}

# unset irrelevant variables
deactivate nondestructive

# on Windows, a path can contain colons and backslashes and has to be converted:
if [ "${OSTYPE:-}" = "cygwin" ] || [ "${OSTYPE:-}" = "msys" ] ; then
    # transform D:\path\to\venv to /d/path/to/venv on MSYS
    # and to /cygdrive/d/path/to/venv on Cygwin
    export VIRTUAL_ENV=$(cygpath "/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv")
else
    # use the path as-is
    export VIRTUAL_ENV="/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv"
fi

_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH

# unset PYTHONHOME if set
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
# could use `if (set -u; : $PYTHONHOME) ;` in bash
if [ -n "${PYTHONHOME:-}" ] ; then
    _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
    unset PYTHONHOME
fi

if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
    _OLD_VIRTUAL_PS1="${PS1:-}"
    PS1="(venv) ${PS1:-}"
    export PS1
    VIRTUAL_ENV_PROMPT="(venv) "
    export VIRTUAL_ENV_PROMPT
fi

# Call hash to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
hash -r 2> /dev/null
venv/bin/activate.csh
ADDED
@@ -0,0 +1,27 @@
# This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.

# Created by Davide Di Blasi <[email protected]>.
# Ported to Python 3.3 venv by Andrew Svetlov <[email protected]>

alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'

# Unset irrelevant variables.
deactivate nondestructive

setenv VIRTUAL_ENV "/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv"

set _OLD_VIRTUAL_PATH="$PATH"
setenv PATH "$VIRTUAL_ENV/bin:$PATH"


set _OLD_VIRTUAL_PROMPT="$prompt"

if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
    set prompt = "(venv) $prompt"
    setenv VIRTUAL_ENV_PROMPT "(venv) "
endif

alias pydoc python -m pydoc

rehash
venv/bin/activate.fish
ADDED
@@ -0,0 +1,69 @@
# This file must be used with "source <venv>/bin/activate.fish" *from fish*
# (https://fishshell.com/). You cannot run it directly.

function deactivate -d "Exit virtual environment and return to normal shell environment"
    # reset old environment variables
    if test -n "$_OLD_VIRTUAL_PATH"
        set -gx PATH $_OLD_VIRTUAL_PATH
        set -e _OLD_VIRTUAL_PATH
    end
    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
        set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
        set -e _OLD_VIRTUAL_PYTHONHOME
    end

    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
        set -e _OLD_FISH_PROMPT_OVERRIDE
        # prevents error when using nested fish instances (Issue #93858)
        if functions -q _old_fish_prompt
            functions -e fish_prompt
            functions -c _old_fish_prompt fish_prompt
            functions -e _old_fish_prompt
        end
    end

    set -e VIRTUAL_ENV
    set -e VIRTUAL_ENV_PROMPT
    if test "$argv[1]" != "nondestructive"
        # Self-destruct!
        functions -e deactivate
    end
end

# Unset irrelevant variables.
deactivate nondestructive

set -gx VIRTUAL_ENV "/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv"

set -gx _OLD_VIRTUAL_PATH $PATH
set -gx PATH "$VIRTUAL_ENV/bin" $PATH

# Unset PYTHONHOME if set.
if set -q PYTHONHOME
    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
    set -e PYTHONHOME
end

if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
    # fish uses a function instead of an env var to generate the prompt.

    # Save the current fish_prompt function as the function _old_fish_prompt.
    functions -c fish_prompt _old_fish_prompt

    # With the original prompt function renamed, we can override with our own.
    function fish_prompt
        # Save the return status of the last command.
        set -l old_status $status

        # Output the venv prompt; color taken from the blue of the Python logo.
        printf "%s%s%s" (set_color 4B8BBE) "(venv) " (set_color normal)

        # Restore the return status of the previous command.
        echo "exit $old_status" | .
        # Output the original/"old" prompt.
        _old_fish_prompt
    end

    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
    set -gx VIRTUAL_ENV_PROMPT "(venv) "
end
venv/bin/convert-caffe2-to-onnx
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from caffe2.python.onnx.bin.conversion import caffe2_to_onnx
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(caffe2_to_onnx())
venv/bin/convert-onnx-to-caffe2
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from caffe2.python.onnx.bin.conversion import onnx_to_caffe2
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(onnx_to_caffe2())
venv/bin/f2py
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/fastapi
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from fastapi.cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/gradio
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from gradio.cli import cli
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(cli())
venv/bin/httpx
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from httpx import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/huggingface-cli
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from huggingface_hub.commands.huggingface_cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/isympy
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from isympy import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/markdown-it
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from markdown_it.cli.parse import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/mlx_lm.cache_prompt
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from mlx_lm.cache_prompt import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/mlx_lm.chat
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from mlx_lm.chat import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/mlx_lm.convert
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from mlx_lm.convert import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/mlx_lm.fuse
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from mlx_lm.fuse import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/mlx_lm.generate
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from mlx_lm.generate import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/mlx_lm.lora
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from mlx_lm.lora import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/mlx_lm.manage
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from mlx_lm.manage import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/mlx_lm.merge
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from mlx_lm.merge import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/mlx_lm.server
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from mlx_lm.server import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/normalizer
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from charset_normalizer.cli import cli_detect
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(cli_detect())
venv/bin/numpy-config
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from numpy._configtool import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/pip
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/pip3
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/pip3.12
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/pygmentize
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from pygments.cmdline import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/python
ADDED
Binary file (153 kB).
venv/bin/python3
ADDED
Binary file (153 kB).
venv/bin/python3.12
ADDED
Binary file (153 kB).
venv/bin/ruff
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5f35c86ca83978276cfe0b5db15b1c7ed5377d60c63a38f2e801a1fcc044347b
size 27775688
venv/bin/torchfrtrace
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from tools.flight_recorder.fr_trace import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/torchrun
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from torch.distributed.run import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/tqdm
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from tqdm.cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/transformers-cli
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from transformers.commands.transformers_cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/typer
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from typer.cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/upload_theme
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from gradio.themes.upload_theme import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/bin/uvicorn
ADDED
@@ -0,0 +1,8 @@
#!/Users/vincent/Developpements/Llama-3.2-3B-Fluxed-app/venv/bin/python3.12
# -*- coding: utf-8 -*-
import re
import sys
from uvicorn.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
venv/lib/python3.12/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
pip
venv/lib/python3.12/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst
ADDED
@@ -0,0 +1,28 @@
Copyright 2010 Pallets

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

1.  Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

2.  Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

3.  Neither the name of the copyright holder nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.