Spaces:
Build error
Build error
himanshud2611
commited on
Upload folder using huggingface_hub
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +36 -0
- .github/workflows/update_space.yml +28 -0
- .gitignore +1 -0
- .gradio/certificate.pem +31 -0
- .gradio/flagged/dataset1.csv +64 -0
- README.md +2 -8
- __pycache__/mychain.cpython-311.pyc +0 -0
- __pycache__/myportfolio.cpython-311.pyc +0 -0
- __pycache__/utils.cpython-311.pyc +0 -0
- a.py +2 -0
- cold_email_generator.ipynb +758 -0
- env/.gitignore +2 -0
- env/Lib/site-packages/.DS_Store +0 -0
- env/Lib/site-packages/Deprecated-1.2.14.dist-info/INSTALLER +1 -0
- env/Lib/site-packages/Deprecated-1.2.14.dist-info/LICENSE.rst +21 -0
- env/Lib/site-packages/Deprecated-1.2.14.dist-info/METADATA +181 -0
- env/Lib/site-packages/Deprecated-1.2.14.dist-info/RECORD +12 -0
- env/Lib/site-packages/Deprecated-1.2.14.dist-info/WHEEL +6 -0
- env/Lib/site-packages/Deprecated-1.2.14.dist-info/top_level.txt +1 -0
- env/Lib/site-packages/GitPython-3.1.43.dist-info/AUTHORS +58 -0
- env/Lib/site-packages/GitPython-3.1.43.dist-info/INSTALLER +1 -0
- env/Lib/site-packages/GitPython-3.1.43.dist-info/LICENSE +29 -0
- env/Lib/site-packages/GitPython-3.1.43.dist-info/METADATA +297 -0
- env/Lib/site-packages/GitPython-3.1.43.dist-info/RECORD +82 -0
- env/Lib/site-packages/GitPython-3.1.43.dist-info/WHEEL +5 -0
- env/Lib/site-packages/GitPython-3.1.43.dist-info/top_level.txt +1 -0
- env/Lib/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER +1 -0
- env/Lib/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst +28 -0
- env/Lib/site-packages/MarkupSafe-2.1.5.dist-info/METADATA +93 -0
- env/Lib/site-packages/MarkupSafe-2.1.5.dist-info/RECORD +14 -0
- env/Lib/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL +5 -0
- env/Lib/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt +1 -0
- env/Lib/site-packages/PIL/BdfFontFile.py +133 -0
- env/Lib/site-packages/PIL/BlpImagePlugin.py +488 -0
- env/Lib/site-packages/PIL/BmpImagePlugin.py +489 -0
- env/Lib/site-packages/PIL/BufrStubImagePlugin.py +76 -0
- env/Lib/site-packages/PIL/ContainerIO.py +121 -0
- env/Lib/site-packages/PIL/CurImagePlugin.py +75 -0
- env/Lib/site-packages/PIL/DcxImagePlugin.py +80 -0
- env/Lib/site-packages/PIL/DdsImagePlugin.py +575 -0
- env/Lib/site-packages/PIL/EpsImagePlugin.py +478 -0
- env/Lib/site-packages/PIL/ExifTags.py +381 -0
- env/Lib/site-packages/PIL/FitsImagePlugin.py +152 -0
- env/Lib/site-packages/PIL/FliImagePlugin.py +174 -0
- env/Lib/site-packages/PIL/FontFile.py +134 -0
- env/Lib/site-packages/PIL/FpxImagePlugin.py +255 -0
- env/Lib/site-packages/PIL/FtexImagePlugin.py +115 -0
- env/Lib/site-packages/PIL/GbrImagePlugin.py +103 -0
- env/Lib/site-packages/PIL/GdImageFile.py +102 -0
- env/Lib/site-packages/PIL/GifImagePlugin.py +1159 -0
.gitattributes
CHANGED
@@ -33,3 +33,39 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
env/Lib/site-packages/altair/vegalite/v5/schema/__pycache__/channels.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
37 |
+
env/Lib/site-packages/altair/vegalite/v5/schema/__pycache__/core.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
38 |
+
env/Lib/site-packages/gradio/frpc_windows_amd64_v0.3 filter=lfs diff=lfs merge=lfs -text
|
39 |
+
env/Lib/site-packages/gradio/templates/frontend/assets/Canvas3D-DOa1DZdS.js.map filter=lfs diff=lfs merge=lfs -text
|
40 |
+
env/Lib/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Canvas3D.WZNhCk7S.js.br filter=lfs diff=lfs merge=lfs -text
|
41 |
+
env/Lib/site-packages/gradio/templates/node/build/server/chunks/Canvas3D-BIK03foZ.js.map filter=lfs diff=lfs merge=lfs -text
|
42 |
+
env/Lib/site-packages/gradio/templates/node/build/server/chunks/PlotlyPlot-BO4zgLFc.js.map filter=lfs diff=lfs merge=lfs -text
|
43 |
+
env/Lib/site-packages/grpc/_cython/cygrpc.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
44 |
+
env/Lib/site-packages/kubernetes/client/api/__pycache__/core_v1_api.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
45 |
+
env/Lib/site-packages/numpy/core/_multiarray_umath.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
46 |
+
env/Lib/site-packages/numpy/core/_simd.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
47 |
+
env/Lib/site-packages/numpy.libs/libopenblas64__v0.3.23-293-gc2f4bdbb-gcc_10_3_0-2bde3a66a51006b2b53eb373ff767a3f.dll filter=lfs diff=lfs merge=lfs -text
|
48 |
+
env/Lib/site-packages/onnxruntime/capi/onnxruntime.dll filter=lfs diff=lfs merge=lfs -text
|
49 |
+
env/Lib/site-packages/onnxruntime/capi/onnxruntime_pybind11_state.pyd filter=lfs diff=lfs merge=lfs -text
|
50 |
+
env/Lib/site-packages/pandas/_libs/algos.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
51 |
+
env/Lib/site-packages/pandas/_libs/groupby.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
52 |
+
env/Lib/site-packages/pandas/_libs/hashtable.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
53 |
+
env/Lib/site-packages/pandas/_libs/interval.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
54 |
+
env/Lib/site-packages/pandas/_libs/join.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
55 |
+
env/Lib/site-packages/PIL/_imaging.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
56 |
+
env/Lib/site-packages/PIL/_imagingft.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
57 |
+
env/Lib/site-packages/pyarrow/arrow.dll filter=lfs diff=lfs merge=lfs -text
|
58 |
+
env/Lib/site-packages/pyarrow/arrow.lib filter=lfs diff=lfs merge=lfs -text
|
59 |
+
env/Lib/site-packages/pyarrow/arrow_acero.dll filter=lfs diff=lfs merge=lfs -text
|
60 |
+
env/Lib/site-packages/pyarrow/arrow_dataset.dll filter=lfs diff=lfs merge=lfs -text
|
61 |
+
env/Lib/site-packages/pyarrow/arrow_flight.dll filter=lfs diff=lfs merge=lfs -text
|
62 |
+
env/Lib/site-packages/pyarrow/arrow_python.dll filter=lfs diff=lfs merge=lfs -text
|
63 |
+
env/Lib/site-packages/pyarrow/arrow_substrait.dll filter=lfs diff=lfs merge=lfs -text
|
64 |
+
env/Lib/site-packages/pyarrow/lib.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
65 |
+
env/Lib/site-packages/pyarrow/parquet.dll filter=lfs diff=lfs merge=lfs -text
|
66 |
+
env/Lib/site-packages/pydantic_core/_pydantic_core.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
67 |
+
env/Lib/site-packages/pydeck/nbextension/static/index.js.map filter=lfs diff=lfs merge=lfs -text
|
68 |
+
env/Lib/site-packages/sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
69 |
+
env/Lib/site-packages/tokenizers/tokenizers.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
70 |
+
env/Scripts/ruff.exe filter=lfs diff=lfs merge=lfs -text
|
71 |
+
env/share/jupyter/nbextensions/pydeck/index.js.map filter=lfs diff=lfs merge=lfs -text
|
.github/workflows/update_space.yml
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: Run Python script
|
2 |
+
|
3 |
+
on:
|
4 |
+
push:
|
5 |
+
branches:
|
6 |
+
- main
|
7 |
+
|
8 |
+
jobs:
|
9 |
+
build:
|
10 |
+
runs-on: ubuntu-latest
|
11 |
+
|
12 |
+
steps:
|
13 |
+
- name: Checkout
|
14 |
+
uses: actions/checkout@v2
|
15 |
+
|
16 |
+
- name: Set up Python
|
17 |
+
uses: actions/setup-python@v2
|
18 |
+
with:
|
19 |
+
python-version: '3.9'
|
20 |
+
|
21 |
+
- name: Install Gradio
|
22 |
+
run: python -m pip install gradio
|
23 |
+
|
24 |
+
- name: Log in to Hugging Face
|
25 |
+
run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
|
26 |
+
|
27 |
+
- name: Deploy to Spaces
|
28 |
+
run: gradio deploy
|
.gitignore
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
.env
|
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
-----BEGIN CERTIFICATE-----
|
2 |
+
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
|
3 |
+
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
4 |
+
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
|
5 |
+
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
|
6 |
+
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
|
7 |
+
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
|
8 |
+
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
|
9 |
+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
|
10 |
+
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
|
11 |
+
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
|
12 |
+
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
|
13 |
+
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
|
14 |
+
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
|
15 |
+
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
|
16 |
+
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
|
17 |
+
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
|
18 |
+
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
|
19 |
+
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
|
20 |
+
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
|
21 |
+
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
|
22 |
+
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
|
23 |
+
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
|
24 |
+
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
|
25 |
+
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
|
26 |
+
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
|
27 |
+
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
|
28 |
+
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
|
29 |
+
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
|
30 |
+
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
31 |
+
-----END CERTIFICATE-----
|
.gradio/flagged/dataset1.csv
ADDED
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Enter the JD Page URL,Generated Cold Email,timestamp
|
2 |
+
https://job-boards.greenhouse.io/notion/jobs/5595762003,An Error Occurred: JD context too big. Unable to parse jobs.,2024-10-16 21:05:21.094960
|
3 |
+
https://job-boards.greenhouse.io/perplexityai/jobs/4403747007,"'Subject: Application for AI Inference Engineer Role at [Company Name]
|
4 |
+
|
5 |
+
Dear [Hiring Manager's Name],
|
6 |
+
|
7 |
+
I am excited to apply for the AI Inference Engineer position at [Company Name], a company that has been at the forefront of innovation in the AI industry. As a skilled professional with a strong passion for AI and machine learning, I am confident that my expertise and experience make me an ideal candidate for this role.
|
8 |
+
|
9 |
+
With over 4 years of experience in developing APIs for AI inference, I possess a deep understanding of the complexities involved in building efficient and reliable AI systems. My skills in Python, TensorFlow, and LLM have allowed me to drive projects that have achieved significant performance gains and improved system reliability. Additionally, my experience with GPU architectures, GPU kernel programming, and continuous batching has enabled me to optimize AI inference pipelines for maximum efficiency.
|
10 |
+
|
11 |
+
In my current role at [Current Company], I have had the opportunity to work on several projects that have showcased my expertise in AI inference. For instance, I have developed APIs for image classification using TensorFlow and PyTorch, which have achieved a 30% reduction in inference time. I have also worked with LLMs to improve the accuracy of natural language processing tasks, resulting in a 25% increase in model performance.
|
12 |
+
|
13 |
+
I am particularly drawn to [Company Name] because of its commitment to innovation and its focus on developing cutting-edge AI solutions. I am excited about the opportunity to contribute my skills and expertise to a team that is pushing the boundaries of what is possible in AI.
|
14 |
+
|
15 |
+
I have attached my resume, which provides more details about my experience and qualifications. I would be thrilled to discuss my application and how I can add value to your team.
|
16 |
+
|
17 |
+
Thank you for considering my application. I look forward to the opportunity to discuss this further.
|
18 |
+
|
19 |
+
Best regards,
|
20 |
+
|
21 |
+
[Candidate Name]
|
22 |
+
|
23 |
+
Subject: Application for AI Inference Engineer Position at [Company Name]
|
24 |
+
|
25 |
+
Dear [Hiring Manager's Name],
|
26 |
+
|
27 |
+
I am writing to express my strong interest in the AI Inference Engineer position at [Company Name]. As a skilled professional with a passion for machine learning and deep learning, I am confident that my expertise and experience make me an ideal candidate for this role.
|
28 |
+
|
29 |
+
With 4 years of experience in the industry, I have developed a strong foundation in ML systems and deep learning frameworks such as PyTorch, TensorFlow, and ONNX. My proficiency in these technologies has allowed me to successfully deploy reliable distributed, real-time models at scale. I am particularly drawn to [Company Name]'s innovative approach to AI and its commitment to pushing the boundaries of what is possible in the field.
|
30 |
+
|
31 |
+
I am excited about the opportunity to bring my skills and experience to [Company Name] and contribute to the development of cutting-edge AI solutions. My expertise in continuous batching, quantization, and deployment of reliable distributed models aligns perfectly with the company's needs, and I am confident that I can make a significant impact on the team.
|
32 |
+
|
33 |
+
I have attached a portfolio of my work, which showcases my experience with various AI-related projects, including [project link 1], [project link 2], [project link 3], [project link 4], [project link 5], [project link 6], and [project link 7]. These projects demonstrate my ability to design, develop, and deploy AI models that meet the company's requirements.
|
34 |
+
|
35 |
+
I would be thrilled to discuss my application and how I can contribute to [Company Name]'s success. Please find my resume attached for your reference.
|
36 |
+
|
37 |
+
Thank you for considering my application. I look forward to the opportunity to discuss this further.
|
38 |
+
|
39 |
+
Best regards,
|
40 |
+
|
41 |
+
[Candidate Name]
|
42 |
+
|
43 |
+
Subject: Application for AI Inference Engineer Position at [Company Name]
|
44 |
+
|
45 |
+
Dear [Hiring Manager's Name],
|
46 |
+
|
47 |
+
I am excited to apply for the AI Inference Engineer position at [Company Name], where I can leverage my expertise in AI inference, machine learning frameworks, and deep learning architectures to drive innovation and growth in the industry.
|
48 |
+
|
49 |
+
With [Number] years of experience in AI systems and deep learning frameworks, I am confident in my ability to deliver high-quality solutions that meet the company's needs. My strong passion for AI and machine learning is evident in my work, where I have explored novel research and implemented LLM inference optimizations using [Relevant Skills/Technologies].
|
50 |
+
|
51 |
+
I am particularly drawn to [Company Name] because of its commitment to [Aspect of company's mission or values that resonates with you]. As someone who is eager to contribute to innovative projects, I am excited about the opportunity to join a team that shares my passion for AI and machine learning.
|
52 |
+
|
53 |
+
To demonstrate my skills and experience, I have included a list of relevant projects that showcase my expertise in [Skills 1], [Skills 2], [Skills 3], and [Skills 4]. These projects include [Project 1] with [Description], [Project 2] with [Description], [Project 3] with [Description], and [Project 4] with [Description].
|
54 |
+
|
55 |
+
I am impressed by [Company Name]'s portfolio, which highlights the company's expertise in [Aspect of company's expertise]. I am excited about the opportunity to work with a talented team of professionals who share my passion for AI and machine learning.
|
56 |
+
|
57 |
+
Thank you for considering my application. I would be thrilled to discuss this opportunity further and explain in greater detail why I am the ideal candidate for this role. Please find attached my resume, which provides more information about my experience and qualifications.
|
58 |
+
|
59 |
+
I look forward to the opportunity to contribute to [Company Name]'s success and to discuss how I can add value to your team.
|
60 |
+
|
61 |
+
Best regards,
|
62 |
+
|
63 |
+
[Candidate Name]",2024-10-16 21:07:24.715923
|
64 |
+
,,2024-10-16 21:08:57.963740
|
README.md
CHANGED
@@ -1,12 +1,6 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
-
|
4 |
-
colorFrom: purple
|
5 |
-
colorTo: indigo
|
6 |
sdk: gradio
|
7 |
sdk_version: 5.1.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
1 |
---
|
2 |
+
title: cold-email-generator
|
3 |
+
app_file: main2.py
|
|
|
|
|
4 |
sdk: gradio
|
5 |
sdk_version: 5.1.0
|
|
|
|
|
6 |
---
|
|
|
|
__pycache__/mychain.cpython-311.pyc
ADDED
Binary file (4.11 kB). View file
|
|
__pycache__/myportfolio.cpython-311.pyc
ADDED
Binary file (2.07 kB). View file
|
|
__pycache__/utils.cpython-311.pyc
ADDED
Binary file (851 Bytes). View file
|
|
a.py
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
import sqlite3
|
2 |
+
print(sqlite3.sqlite_version)
|
cold_email_generator.ipynb
ADDED
@@ -0,0 +1,758 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"nbformat": 4,
|
3 |
+
"nbformat_minor": 0,
|
4 |
+
"metadata": {
|
5 |
+
"colab": {
|
6 |
+
"provenance": []
|
7 |
+
},
|
8 |
+
"kernelspec": {
|
9 |
+
"name": "python3",
|
10 |
+
"display_name": "Python 3"
|
11 |
+
},
|
12 |
+
"language_info": {
|
13 |
+
"name": "python"
|
14 |
+
}
|
15 |
+
},
|
16 |
+
"cells": [
|
17 |
+
{
|
18 |
+
"cell_type": "code",
|
19 |
+
"execution_count": 8,
|
20 |
+
"metadata": {
|
21 |
+
"colab": {
|
22 |
+
"base_uri": "https://localhost:8080/"
|
23 |
+
},
|
24 |
+
"collapsed": true,
|
25 |
+
"id": "PIHcHl6_O7jf",
|
26 |
+
"outputId": "9fe4d065-2503-4d3c-b65f-ac150eb5a067"
|
27 |
+
},
|
28 |
+
"outputs": [
|
29 |
+
{
|
30 |
+
"output_type": "stream",
|
31 |
+
"name": "stdout",
|
32 |
+
"text": [
|
33 |
+
"Requirement already satisfied: chromadb in /usr/local/lib/python3.10/dist-packages (0.5.13)\n",
|
34 |
+
"Requirement already satisfied: build>=1.0.3 in /usr/local/lib/python3.10/dist-packages (from chromadb) (1.2.2.post1)\n",
|
35 |
+
"Requirement already satisfied: pydantic>=1.9 in /usr/local/lib/python3.10/dist-packages (from chromadb) (2.9.2)\n",
|
36 |
+
"Requirement already satisfied: chroma-hnswlib==0.7.6 in /usr/local/lib/python3.10/dist-packages (from chromadb) (0.7.6)\n",
|
37 |
+
"Requirement already satisfied: fastapi>=0.95.2 in /usr/local/lib/python3.10/dist-packages (from chromadb) (0.115.2)\n",
|
38 |
+
"Requirement already satisfied: uvicorn>=0.18.3 in /usr/local/lib/python3.10/dist-packages (from uvicorn[standard]>=0.18.3->chromadb) (0.32.0)\n",
|
39 |
+
"Requirement already satisfied: numpy>=1.22.5 in /usr/local/lib/python3.10/dist-packages (from chromadb) (1.26.4)\n",
|
40 |
+
"Requirement already satisfied: posthog>=2.4.0 in /usr/local/lib/python3.10/dist-packages (from chromadb) (3.7.0)\n",
|
41 |
+
"Requirement already satisfied: typing-extensions>=4.5.0 in /usr/local/lib/python3.10/dist-packages (from chromadb) (4.12.2)\n",
|
42 |
+
"Requirement already satisfied: onnxruntime>=1.14.1 in /usr/local/lib/python3.10/dist-packages (from chromadb) (1.19.2)\n",
|
43 |
+
"Requirement already satisfied: opentelemetry-api>=1.2.0 in /usr/local/lib/python3.10/dist-packages (from chromadb) (1.27.0)\n",
|
44 |
+
"Requirement already satisfied: opentelemetry-exporter-otlp-proto-grpc>=1.2.0 in /usr/local/lib/python3.10/dist-packages (from chromadb) (1.27.0)\n",
|
45 |
+
"Requirement already satisfied: opentelemetry-instrumentation-fastapi>=0.41b0 in /usr/local/lib/python3.10/dist-packages (from chromadb) (0.48b0)\n",
|
46 |
+
"Requirement already satisfied: opentelemetry-sdk>=1.2.0 in /usr/local/lib/python3.10/dist-packages (from chromadb) (1.27.0)\n",
|
47 |
+
"Requirement already satisfied: tokenizers>=0.13.2 in /usr/local/lib/python3.10/dist-packages (from chromadb) (0.19.1)\n",
|
48 |
+
"Requirement already satisfied: pypika>=0.48.9 in /usr/local/lib/python3.10/dist-packages (from chromadb) (0.48.9)\n",
|
49 |
+
"Requirement already satisfied: tqdm>=4.65.0 in /usr/local/lib/python3.10/dist-packages (from chromadb) (4.66.5)\n",
|
50 |
+
"Requirement already satisfied: overrides>=7.3.1 in /usr/local/lib/python3.10/dist-packages (from chromadb) (7.7.0)\n",
|
51 |
+
"Requirement already satisfied: importlib-resources in /usr/local/lib/python3.10/dist-packages (from chromadb) (6.4.5)\n",
|
52 |
+
"Requirement already satisfied: grpcio>=1.58.0 in /usr/local/lib/python3.10/dist-packages (from chromadb) (1.64.1)\n",
|
53 |
+
"Requirement already satisfied: bcrypt>=4.0.1 in /usr/local/lib/python3.10/dist-packages (from chromadb) (4.2.0)\n",
|
54 |
+
"Requirement already satisfied: typer>=0.9.0 in /usr/local/lib/python3.10/dist-packages (from chromadb) (0.12.5)\n",
|
55 |
+
"Requirement already satisfied: kubernetes>=28.1.0 in /usr/local/lib/python3.10/dist-packages (from chromadb) (31.0.0)\n",
|
56 |
+
"Requirement already satisfied: tenacity>=8.2.3 in /usr/local/lib/python3.10/dist-packages (from chromadb) (8.5.0)\n",
|
57 |
+
"Requirement already satisfied: PyYAML>=6.0.0 in /usr/local/lib/python3.10/dist-packages (from chromadb) (6.0.2)\n",
|
58 |
+
"Requirement already satisfied: mmh3>=4.0.1 in /usr/local/lib/python3.10/dist-packages (from chromadb) (5.0.1)\n",
|
59 |
+
"Requirement already satisfied: orjson>=3.9.12 in /usr/local/lib/python3.10/dist-packages (from chromadb) (3.10.7)\n",
|
60 |
+
"Requirement already satisfied: httpx>=0.27.0 in /usr/local/lib/python3.10/dist-packages (from chromadb) (0.27.2)\n",
|
61 |
+
"Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.10/dist-packages (from chromadb) (13.9.2)\n",
|
62 |
+
"Requirement already satisfied: packaging>=19.1 in /usr/local/lib/python3.10/dist-packages (from build>=1.0.3->chromadb) (24.1)\n",
|
63 |
+
"Requirement already satisfied: pyproject_hooks in /usr/local/lib/python3.10/dist-packages (from build>=1.0.3->chromadb) (1.2.0)\n",
|
64 |
+
"Requirement already satisfied: tomli>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from build>=1.0.3->chromadb) (2.0.2)\n",
|
65 |
+
"Requirement already satisfied: starlette<0.41.0,>=0.37.2 in /usr/local/lib/python3.10/dist-packages (from fastapi>=0.95.2->chromadb) (0.40.0)\n",
|
66 |
+
"Requirement already satisfied: anyio in /usr/local/lib/python3.10/dist-packages (from httpx>=0.27.0->chromadb) (3.7.1)\n",
|
67 |
+
"Requirement already satisfied: certifi in /usr/local/lib/python3.10/dist-packages (from httpx>=0.27.0->chromadb) (2024.8.30)\n",
|
68 |
+
"Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.10/dist-packages (from httpx>=0.27.0->chromadb) (1.0.6)\n",
|
69 |
+
"Requirement already satisfied: idna in /usr/local/lib/python3.10/dist-packages (from httpx>=0.27.0->chromadb) (3.10)\n",
|
70 |
+
"Requirement already satisfied: sniffio in /usr/local/lib/python3.10/dist-packages (from httpx>=0.27.0->chromadb) (1.3.1)\n",
|
71 |
+
"Requirement already satisfied: h11<0.15,>=0.13 in /usr/local/lib/python3.10/dist-packages (from httpcore==1.*->httpx>=0.27.0->chromadb) (0.14.0)\n",
|
72 |
+
"Requirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.10/dist-packages (from kubernetes>=28.1.0->chromadb) (1.16.0)\n",
|
73 |
+
"Requirement already satisfied: python-dateutil>=2.5.3 in /usr/local/lib/python3.10/dist-packages (from kubernetes>=28.1.0->chromadb) (2.8.2)\n",
|
74 |
+
"Requirement already satisfied: google-auth>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from kubernetes>=28.1.0->chromadb) (2.27.0)\n",
|
75 |
+
"Requirement already satisfied: websocket-client!=0.40.0,!=0.41.*,!=0.42.*,>=0.32.0 in /usr/local/lib/python3.10/dist-packages (from kubernetes>=28.1.0->chromadb) (1.8.0)\n",
|
76 |
+
"Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from kubernetes>=28.1.0->chromadb) (2.32.3)\n",
|
77 |
+
"Requirement already satisfied: requests-oauthlib in /usr/local/lib/python3.10/dist-packages (from kubernetes>=28.1.0->chromadb) (1.3.1)\n",
|
78 |
+
"Requirement already satisfied: oauthlib>=3.2.2 in /usr/local/lib/python3.10/dist-packages (from kubernetes>=28.1.0->chromadb) (3.2.2)\n",
|
79 |
+
"Requirement already satisfied: urllib3>=1.24.2 in /usr/local/lib/python3.10/dist-packages (from kubernetes>=28.1.0->chromadb) (2.2.3)\n",
|
80 |
+
"Requirement already satisfied: durationpy>=0.7 in /usr/local/lib/python3.10/dist-packages (from kubernetes>=28.1.0->chromadb) (0.9)\n",
|
81 |
+
"Requirement already satisfied: coloredlogs in /usr/local/lib/python3.10/dist-packages (from onnxruntime>=1.14.1->chromadb) (15.0.1)\n",
|
82 |
+
"Requirement already satisfied: flatbuffers in /usr/local/lib/python3.10/dist-packages (from onnxruntime>=1.14.1->chromadb) (24.3.25)\n",
|
83 |
+
"Requirement already satisfied: protobuf in /usr/local/lib/python3.10/dist-packages (from onnxruntime>=1.14.1->chromadb) (3.20.3)\n",
|
84 |
+
"Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from onnxruntime>=1.14.1->chromadb) (1.13.3)\n",
|
85 |
+
"Requirement already satisfied: deprecated>=1.2.6 in /usr/local/lib/python3.10/dist-packages (from opentelemetry-api>=1.2.0->chromadb) (1.2.14)\n",
|
86 |
+
"Requirement already satisfied: importlib-metadata<=8.4.0,>=6.0 in /usr/local/lib/python3.10/dist-packages (from opentelemetry-api>=1.2.0->chromadb) (8.4.0)\n",
|
87 |
+
"Requirement already satisfied: googleapis-common-protos~=1.52 in /usr/local/lib/python3.10/dist-packages (from opentelemetry-exporter-otlp-proto-grpc>=1.2.0->chromadb) (1.65.0)\n",
|
88 |
+
"Requirement already satisfied: opentelemetry-exporter-otlp-proto-common==1.27.0 in /usr/local/lib/python3.10/dist-packages (from opentelemetry-exporter-otlp-proto-grpc>=1.2.0->chromadb) (1.27.0)\n",
|
89 |
+
"Requirement already satisfied: opentelemetry-proto==1.27.0 in /usr/local/lib/python3.10/dist-packages (from opentelemetry-exporter-otlp-proto-grpc>=1.2.0->chromadb) (1.27.0)\n",
|
90 |
+
"Requirement already satisfied: opentelemetry-instrumentation-asgi==0.48b0 in /usr/local/lib/python3.10/dist-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb) (0.48b0)\n",
|
91 |
+
"Requirement already satisfied: opentelemetry-instrumentation==0.48b0 in /usr/local/lib/python3.10/dist-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb) (0.48b0)\n",
|
92 |
+
"Requirement already satisfied: opentelemetry-semantic-conventions==0.48b0 in /usr/local/lib/python3.10/dist-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb) (0.48b0)\n",
|
93 |
+
"Requirement already satisfied: opentelemetry-util-http==0.48b0 in /usr/local/lib/python3.10/dist-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb) (0.48b0)\n",
|
94 |
+
"Requirement already satisfied: setuptools>=16.0 in /usr/local/lib/python3.10/dist-packages (from opentelemetry-instrumentation==0.48b0->opentelemetry-instrumentation-fastapi>=0.41b0->chromadb) (71.0.4)\n",
|
95 |
+
"Requirement already satisfied: wrapt<2.0.0,>=1.0.0 in /usr/local/lib/python3.10/dist-packages (from opentelemetry-instrumentation==0.48b0->opentelemetry-instrumentation-fastapi>=0.41b0->chromadb) (1.16.0)\n",
|
96 |
+
"Requirement already satisfied: asgiref~=3.0 in /usr/local/lib/python3.10/dist-packages (from opentelemetry-instrumentation-asgi==0.48b0->opentelemetry-instrumentation-fastapi>=0.41b0->chromadb) (3.8.1)\n",
|
97 |
+
"Requirement already satisfied: monotonic>=1.5 in /usr/local/lib/python3.10/dist-packages (from posthog>=2.4.0->chromadb) (1.6)\n",
|
98 |
+
"Requirement already satisfied: backoff>=1.10.0 in /usr/local/lib/python3.10/dist-packages (from posthog>=2.4.0->chromadb) (2.2.1)\n",
|
99 |
+
"Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.10/dist-packages (from pydantic>=1.9->chromadb) (0.7.0)\n",
|
100 |
+
"Requirement already satisfied: pydantic-core==2.23.4 in /usr/local/lib/python3.10/dist-packages (from pydantic>=1.9->chromadb) (2.23.4)\n",
|
101 |
+
"Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich>=10.11.0->chromadb) (3.0.0)\n",
|
102 |
+
"Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich>=10.11.0->chromadb) (2.18.0)\n",
|
103 |
+
"Requirement already satisfied: huggingface-hub<1.0,>=0.16.4 in /usr/local/lib/python3.10/dist-packages (from tokenizers>=0.13.2->chromadb) (0.24.7)\n",
|
104 |
+
"Requirement already satisfied: click>=8.0.0 in /usr/local/lib/python3.10/dist-packages (from typer>=0.9.0->chromadb) (8.1.7)\n",
|
105 |
+
"Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.10/dist-packages (from typer>=0.9.0->chromadb) (1.5.4)\n",
|
106 |
+
"Requirement already satisfied: httptools>=0.5.0 in /usr/local/lib/python3.10/dist-packages (from uvicorn[standard]>=0.18.3->chromadb) (0.6.2)\n",
|
107 |
+
"Requirement already satisfied: python-dotenv>=0.13 in /usr/local/lib/python3.10/dist-packages (from uvicorn[standard]>=0.18.3->chromadb) (1.0.1)\n",
|
108 |
+
"Requirement already satisfied: uvloop!=0.15.0,!=0.15.1,>=0.14.0 in /usr/local/lib/python3.10/dist-packages (from uvicorn[standard]>=0.18.3->chromadb) (0.21.0)\n",
|
109 |
+
"Requirement already satisfied: watchfiles>=0.13 in /usr/local/lib/python3.10/dist-packages (from uvicorn[standard]>=0.18.3->chromadb) (0.24.0)\n",
|
110 |
+
"Requirement already satisfied: websockets>=10.4 in /usr/local/lib/python3.10/dist-packages (from uvicorn[standard]>=0.18.3->chromadb) (13.1)\n",
|
111 |
+
"Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from google-auth>=1.0.1->kubernetes>=28.1.0->chromadb) (5.5.0)\n",
|
112 |
+
"Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from google-auth>=1.0.1->kubernetes>=28.1.0->chromadb) (0.4.1)\n",
|
113 |
+
"Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.10/dist-packages (from google-auth>=1.0.1->kubernetes>=28.1.0->chromadb) (4.9)\n",
|
114 |
+
"Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers>=0.13.2->chromadb) (3.16.1)\n",
|
115 |
+
"Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.16.4->tokenizers>=0.13.2->chromadb) (2024.6.1)\n",
|
116 |
+
"Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.10/dist-packages (from importlib-metadata<=8.4.0,>=6.0->opentelemetry-api>=1.2.0->chromadb) (3.20.2)\n",
|
117 |
+
"Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->chromadb) (0.1.2)\n",
|
118 |
+
"Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->kubernetes>=28.1.0->chromadb) (3.4.0)\n",
|
119 |
+
"Requirement already satisfied: exceptiongroup in /usr/local/lib/python3.10/dist-packages (from anyio->httpx>=0.27.0->chromadb) (1.2.2)\n",
|
120 |
+
"Requirement already satisfied: humanfriendly>=9.1 in /usr/local/lib/python3.10/dist-packages (from coloredlogs->onnxruntime>=1.14.1->chromadb) (10.0)\n",
|
121 |
+
"Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from sympy->onnxruntime>=1.14.1->chromadb) (1.3.0)\n",
|
122 |
+
"Requirement already satisfied: pyasn1<0.7.0,>=0.4.6 in /usr/local/lib/python3.10/dist-packages (from pyasn1-modules>=0.2.1->google-auth>=1.0.1->kubernetes>=28.1.0->chromadb) (0.6.1)\n"
|
123 |
+
]
|
124 |
+
}
|
125 |
+
],
|
126 |
+
"source": [
|
127 |
+
"%pip install -qU langchain-groq\n",
|
128 |
+
"!pip install chromadb"
|
129 |
+
]
|
130 |
+
},
|
131 |
+
{
|
132 |
+
"cell_type": "code",
|
133 |
+
"source": [
|
134 |
+
"%pip install -qU langchain_community beautifulsoup4"
|
135 |
+
],
|
136 |
+
"metadata": {
|
137 |
+
"colab": {
|
138 |
+
"base_uri": "https://localhost:8080/"
|
139 |
+
},
|
140 |
+
"collapsed": true,
|
141 |
+
"id": "BcrwN2IJgTK-",
|
142 |
+
"outputId": "266f99f8-8f16-4a2f-c9b6-950dc14b13c0"
|
143 |
+
},
|
144 |
+
"execution_count": 12,
|
145 |
+
"outputs": [
|
146 |
+
{
|
147 |
+
"output_type": "stream",
|
148 |
+
"name": "stdout",
|
149 |
+
"text": [
|
150 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.4/2.4 MB\u001b[0m \u001b[31m27.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
151 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.0/1.0 MB\u001b[0m \u001b[31m48.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
152 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.3/49.3 kB\u001b[0m \u001b[31m3.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
153 |
+
"\u001b[?25h"
|
154 |
+
]
|
155 |
+
}
|
156 |
+
]
|
157 |
+
},
|
158 |
+
{
|
159 |
+
"cell_type": "code",
|
160 |
+
"source": [
|
161 |
+
"!pip install playwright"
|
162 |
+
],
|
163 |
+
"metadata": {
|
164 |
+
"collapsed": true,
|
165 |
+
"colab": {
|
166 |
+
"base_uri": "https://localhost:8080/"
|
167 |
+
},
|
168 |
+
"id": "xkni7hWljpWt",
|
169 |
+
"outputId": "84c9768f-cd49-435d-a5f8-089e38f6bdd9"
|
170 |
+
},
|
171 |
+
"execution_count": 26,
|
172 |
+
"outputs": [
|
173 |
+
{
|
174 |
+
"output_type": "stream",
|
175 |
+
"name": "stdout",
|
176 |
+
"text": [
|
177 |
+
"Collecting playwright\n",
|
178 |
+
" Downloading playwright-1.47.0-py3-none-manylinux1_x86_64.whl.metadata (3.5 kB)\n",
|
179 |
+
"Collecting greenlet==3.0.3 (from playwright)\n",
|
180 |
+
" Downloading greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl.metadata (3.8 kB)\n",
|
181 |
+
"Collecting pyee==12.0.0 (from playwright)\n",
|
182 |
+
" Downloading pyee-12.0.0-py3-none-any.whl.metadata (2.8 kB)\n",
|
183 |
+
"Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from pyee==12.0.0->playwright) (4.12.2)\n",
|
184 |
+
"Downloading playwright-1.47.0-py3-none-manylinux1_x86_64.whl (38.1 MB)\n",
|
185 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m38.1/38.1 MB\u001b[0m \u001b[31m34.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
186 |
+
"\u001b[?25hDownloading greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl (616 kB)\n",
|
187 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m616.0/616.0 kB\u001b[0m \u001b[31m36.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
188 |
+
"\u001b[?25hDownloading pyee-12.0.0-py3-none-any.whl (14 kB)\n",
|
189 |
+
"Installing collected packages: pyee, greenlet, playwright\n",
|
190 |
+
" Attempting uninstall: greenlet\n",
|
191 |
+
" Found existing installation: greenlet 3.1.1\n",
|
192 |
+
" Uninstalling greenlet-3.1.1:\n",
|
193 |
+
" Successfully uninstalled greenlet-3.1.1\n",
|
194 |
+
"Successfully installed greenlet-3.0.3 playwright-1.47.0 pyee-12.0.0\n"
|
195 |
+
]
|
196 |
+
}
|
197 |
+
]
|
198 |
+
},
|
199 |
+
{
|
200 |
+
"cell_type": "code",
|
201 |
+
"source": [
|
202 |
+
"from langchain_groq import ChatGroq\n",
|
203 |
+
"\n",
|
204 |
+
"llm = ChatGroq(\n",
|
205 |
+
" model=\"llama-3.2-1b-preview\",\n",
|
206 |
+
" temperature=0,\n",
|
207 |
+
" max_tokens=None,\n",
|
208 |
+
" timeout=None,\n",
|
209 |
+
"    groq_api_key = \"<GROQ_API_KEY-REDACTED: hardcoded secret removed; load from an environment variable and ROTATE the exposed key — it is already in git history>\"\n",
|
210 |
+
")\n",
|
211 |
+
"\n",
|
212 |
+
"response = llm.invoke(\"Write a cold email to Professor at Computer Science and Engineering Department to request for an internship in Trust Lab in the area of Machine Learning\")\n",
|
213 |
+
"\n",
|
214 |
+
"print(response.content)"
|
215 |
+
],
|
216 |
+
"metadata": {
|
217 |
+
"colab": {
|
218 |
+
"base_uri": "https://localhost:8080/"
|
219 |
+
},
|
220 |
+
"id": "Abvttqe4QUvG",
|
221 |
+
"outputId": "84940c8f-70bd-4c88-bdf2-55e48aaac42d"
|
222 |
+
},
|
223 |
+
"execution_count": 10,
|
224 |
+
"outputs": [
|
225 |
+
{
|
226 |
+
"output_type": "stream",
|
227 |
+
"name": "stdout",
|
228 |
+
"text": [
|
229 |
+
"Here's a sample cold email:\n",
|
230 |
+
"\n",
|
231 |
+
"Subject: Application for Internship in Trust Lab - Machine Learning\n",
|
232 |
+
"\n",
|
233 |
+
"Dear Professor [Professor's Name],\n",
|
234 |
+
"\n",
|
235 |
+
"I hope this email finds you well. My name is [Your Name], and I am a [Your Current Degree Level, e.g., undergraduate, graduate] student in Computer Science and Engineering at [University Name]. I am writing to express my interest in an internship opportunity at the Trust Lab, a leading research group in Machine Learning, within the Computer Science and Engineering Department.\n",
|
236 |
+
"\n",
|
237 |
+
"As a student of Computer Science and Engineering, I have been fascinated by the intersection of Machine Learning and Trust, and I am eager to contribute to the research and development of innovative solutions in this area. The Trust Lab's work on trust-aware machine learning algorithms and their applications in various domains aligns with my academic interests and career goals.\n",
|
238 |
+
"\n",
|
239 |
+
"I am particularly drawn to the Trust Lab's research on [specific area of research, e.g., trust-aware neural networks, trust-based decision-making systems]. I believe that my academic background, research experience, and passion for Machine Learning make me an ideal candidate for an internship at the Trust Lab.\n",
|
240 |
+
"\n",
|
241 |
+
"I would be thrilled to discuss my application and how I can contribute to the Trust Lab's research efforts. I have attached my resume and a brief research proposal outlining my interests and goals. I would appreciate the opportunity to schedule a meeting or call to discuss my application further.\n",
|
242 |
+
"\n",
|
243 |
+
"Thank you for considering my application. I look forward to the opportunity to discuss this further.\n",
|
244 |
+
"\n",
|
245 |
+
"Best regards,\n",
|
246 |
+
"\n",
|
247 |
+
"[Your Name]\n",
|
248 |
+
"\n",
|
249 |
+
"Attachments:\n",
|
250 |
+
"\n",
|
251 |
+
"* Resume\n",
|
252 |
+
"* Research proposal (optional)\n",
|
253 |
+
"\n",
|
254 |
+
"Note: Make sure to customize the email by replacing [Professor's Name], [University Name], and [Your Name] with the actual information. Also, proofread the email for grammar and spelling errors before sending it.\n"
|
255 |
+
]
|
256 |
+
}
|
257 |
+
]
|
258 |
+
},
|
259 |
+
{
|
260 |
+
"cell_type": "code",
|
261 |
+
"source": [
|
262 |
+
"from langchain_community.document_loaders import WebBaseLoader\n",
|
263 |
+
"\n",
|
264 |
+
"loader = WebBaseLoader(\"https://job-boards.greenhouse.io/notion/jobs/5595762003\")\n",
|
265 |
+
"\n",
|
266 |
+
"career_page_data = loader.load().pop().page_content\n",
|
267 |
+
"print(career_page_data)\n"
|
268 |
+
],
|
269 |
+
"metadata": {
|
270 |
+
"colab": {
|
271 |
+
"base_uri": "https://localhost:8080/"
|
272 |
+
},
|
273 |
+
"collapsed": true,
|
274 |
+
"id": "TgiNfagVf-pG",
|
275 |
+
"outputId": "dea4db80-8aac-472f-e636-cff9ce8c9dcf"
|
276 |
+
},
|
277 |
+
"execution_count": 39,
|
278 |
+
"outputs": [
|
279 |
+
{
|
280 |
+
"output_type": "stream",
|
281 |
+
"name": "stdout",
|
282 |
+
"text": [
|
283 |
+
"Job Application for AI Product Engineer at NotionBack to jobsAI Product EngineerSan Francisco, California; New York, New York;ApplyAbout Us:\n",
|
284 |
+
"We're on a mission to make it possible for every person, team, and company to be able to tailor their software to solve any problem and take on any challenge. Computers may be our most powerful tools, but most of us can't build or modify the software we use on them every day. At Notion, we want to change this with focus, design, and craft.\n",
|
285 |
+
"We've been working on this together since 2016, and have customers like Pixar, Mitsubishi, Figma, Plaid, Match Group, and thousands more on this journey with us. Today, we're growing fast and excited for new teammates to join us who are the best at what they do. We're passionate about building a company as diverse and creative as the millions of people Notion reaches worldwide.\n",
|
286 |
+
"Notion is an in person company, and currently requires its employees to come to the office for two Anchor Days (Mondays & Thursdays) and requests that employees spend the majority of their week in the office (including a third day).\n",
|
287 |
+
"About The Role:\n",
|
288 |
+
"We are looking for an AI Product Engineer to join our small but nimble AI team whose mission is to make Notion an AI-powered product. As an AI Product Engineer, you will work on developing and implementing AI-powered products, incorporating large language models (LLMs), embeddings, and other AI technologies into Notion’s product.\n",
|
289 |
+
"What you’ll achieve:\n",
|
290 |
+
"\n",
|
291 |
+
"Work with the team to prototype and experiment with new AI features\n",
|
292 |
+
"Productionize and launch new AI technology integrations into Notion’s core product\n",
|
293 |
+
"Collaborate with cross-functional teams to deliver product features on time\n",
|
294 |
+
"Stay up-to-date with the latest AI technologies and trends\n",
|
295 |
+
"\n",
|
296 |
+
"Skills You'll Need to Bring:\n",
|
297 |
+
"\n",
|
298 |
+
"Expertise building and prototyping: You understand how parts of a system fit together, from the user interface to the data model. You are familiar with relational database systems like Postgres or MySQL, and have experience building products from ground up.\n",
|
299 |
+
"Shipping quality user interfaces: You partner closely with product and design to craft beautiful user experiences for large audiences, leveraging web technologies like HTML, CSS, JavaScript, and a modern UI framework like React.\n",
|
300 |
+
"Problem-solving: You approach problems holistically, starting with a clear and accurate understanding of the context. You think critically about the implications of what you're building and how it will impact real people's lives. You can navigate ambiguity flawlessly, decompose complex problems into clean solutions, while also balancing the business impact of what you’re building.\n",
|
301 |
+
"Empathetic communication and collaboration: You communicate nuanced ideas clearly, whether you're explaining technical decisions in writing or brainstorming in real time. In disagreements, you engage thoughtfully with other perspectives and compromise when needed. You enjoy collaborating with both fellow engineers and cross-functional partners. You are a lifelong learner and invest in both your own growth and the growth, learning, and development of your teammates.\n",
|
302 |
+
"Impact-orientation and user focus: You care about business impact and prioritize projects accordingly. You understand the balance between craft, speed, and the bottom line. You think critically about the implications of what you're building, and how it shapes real people's lives. You understand that reach comes with responsibility for our impact—good and bad. Work isn't a solo endeavor for you, and you enjoy collaborating cross-functionally to accomplish shared goals.\n",
|
303 |
+
"\n",
|
304 |
+
"Nice to Haves:\n",
|
305 |
+
"\n",
|
306 |
+
"You have extensive experience building AI products using LLMs, embeddings and other ML technologies.\n",
|
307 |
+
"You're proficient with any part of our technology stack: React, TypeScript, Node.js, and Postgres.\n",
|
308 |
+
"You have experience driving teams toward shared goals and can balance business priorities with individuals’ strengths, areas of interest, and career development goals.\n",
|
309 |
+
"You've heard of computing pioneers like Ada Lovelace, Douglas Engelbart, Alan Kay, and others—and understand why we're big fans of their work.\n",
|
310 |
+
"You have interests outside of technology, such as in art, history, or social sciences.\n",
|
311 |
+
"\n",
|
312 |
+
"\n",
|
313 |
+
"We hire talented and passionate people from a variety of backgrounds because we want our global employee base to represent the wide diversity of our customers. If you’re excited about a role but your past experience doesn’t align perfectly with every bullet point listed in the job description, we still encourage you to apply. If you’re a builder at heart, share our company values, and enthusiastic about making software toolmaking ubiquitous, we want to hear from you.\n",
|
314 |
+
"Notion is proud to be an equal opportunity employer. We do not discriminate in hiring or any employment decision based on race, color, religion, national origin, age, sex (including pregnancy, childbirth, or related medical conditions), marital status, ancestry, physical or mental disability, genetic information, veteran status, gender identity or expression, sexual orientation, or other applicable legally protected characteristic. Notion considers qualified applicants with criminal histories, consistent with applicable federal, state and local law. Notion is also committed to providing reasonable accommodations for qualified individuals with disabilities and disabled veterans in our job application procedures. If you need assistance or an accommodation due to a disability, please let your recruiter know.\n",
|
315 |
+
"Notion is committed to providing highly competitive cash compensation, equity, and benefits. The compensation offered for this role will be based on multiple factors such as location, the role’s scope and complexity, and the candidate’s experience and expertise, and may vary from the range provided below. For roles based in San Francisco, the estimated base salary range for this role is $150,000 - $280,000 per year. \n",
|
316 |
+
"#LI-OnsiteApply for this job*indicates a required fieldFirst Name*Last Name*Email*Phone*Resume/CV*AttachAttachDropboxGoogle DriveEnter manuallyEnter manuallyAccepted file types: pdf, doc, docx, txt, rtfCover LetterAttachAttachDropboxGoogle DriveEnter manuallyEnter manuallyAccepted file types: pdf, doc, docx, txt, rtfStarting August 1st 2022, employees across the globe will work and collaborate together in our offices for two Anchor Days each week. The other three days are meant to be flexible; the offices will be open and everyone’s encouraged to come in, but you can also work from home as well.\n",
|
317 |
+
"Notion will consider requests for accommodations to this policy. When requested, Notion will provide a reasonable accommodation for physical or mental disabilities, or other reasons recognized by applicable law.\n",
|
318 |
+
"Please confirm you have read and understand our return to office policy.*Select...Will you now or in the future require Notion to sponsor an immigration case in order to employ you? \n",
|
319 |
+
"\n",
|
320 |
+
"*Select...If you’ll require Notion to commence, i.e., “sponsor,” an immigration or work permit case in order to employ you, either now or at some point in the near future, then you should answer yes. An example of an immigration or work permit case that may require sponsorship now or in the future would be an H-1B or other employment-based work permit sponsorship.What are the pronouns that you would like our team to use when addressing you? Select...Why do you want to work at Notion?*LinkedIn ProfileWebsite or PortfolioHow did you hear about this opportunity? (select all that apply) Select...Voluntary Self-Identification\n",
|
321 |
+
"For government reporting purposes, we ask candidates to respond to the below self-identification survey.\n",
|
322 |
+
"Completion of the form is entirely voluntary. Whatever your decision, it will not be considered in the hiring\n",
|
323 |
+
"process or thereafter. Any information that you do provide will be recorded and maintained in a\n",
|
324 |
+
"confidential file.\n",
|
325 |
+
"As set forth in Notion’s Equal Employment Opportunity policy,\n",
|
326 |
+
"we do not discriminate on the basis of any protected group status under any applicable law.\n",
|
327 |
+
"GenderSelect...Are you Hispanic/Latino?Select...Race & Ethnicity Definitions\n",
|
328 |
+
" If you believe you belong to any of the categories of protected veterans listed below, please indicate by making the appropriate selection.\n",
|
329 |
+
" As a government contractor subject to the Vietnam Era Veterans Readjustment Assistance Act (VEVRAA), we request this information in order to measure\n",
|
330 |
+
" the effectiveness of the outreach and positive recruitment efforts we undertake pursuant to VEVRAA. Classification of protected categories\n",
|
331 |
+
" is as follows:\n",
|
332 |
+
"\n",
|
333 |
+
"A \"disabled veteran\" is one of the following: a veteran of the U.S. military, ground, naval or air service who is entitled to compensation (or who but for the receipt of military retired pay would be entitled to compensation) under laws administered by the Secretary of Veterans Affairs; or a person who was discharged or released from active duty because of a service-connected disability.\n",
|
334 |
+
"A \"recently separated veteran\" means any veteran during the three-year period beginning on the date of such veteran's discharge or release from active duty in the U.S. military, ground, naval, or air service.\n",
|
335 |
+
"An \"active duty wartime or campaign badge veteran\" means a veteran who served on active duty in the U.S. military, ground, naval or air service during a war, or in a campaign or expedition for which a campaign badge has been authorized under the laws administered by the Department of Defense.\n",
|
336 |
+
"An \"Armed forces service medal veteran\" means a veteran who, while serving on active duty in the U.S. military, ground, naval or air service, participated in a United States military operation for which an Armed Forces service medal was awarded pursuant to Executive Order 12985.\n",
|
337 |
+
"Veteran StatusSelect...Voluntary Self-Identification of Disability\n",
|
338 |
+
"\n",
|
339 |
+
"\n",
|
340 |
+
"Form CC-305\n",
|
341 |
+
"Page 1 of 1\n",
|
342 |
+
"\n",
|
343 |
+
"\n",
|
344 |
+
"OMB Control Number 1250-0005\n",
|
345 |
+
"Expires 04/30/2026\n",
|
346 |
+
"\n",
|
347 |
+
"\n",
|
348 |
+
"\n",
|
349 |
+
"Why are you being asked to complete this form?\n",
|
350 |
+
"We are a federal contractor or subcontractor. The law requires us to provide equal employment opportunity to qualified people with disabilities. We have a goal of having at least 7% of our workers as people with disabilities. The law says we must measure our progress towards this goal. To do this, we must ask applicants and employees if they have a disability or have ever had one. People can become disabled, so we need to ask this question at least every five years.\n",
|
351 |
+
"Completing this form is voluntary, and we hope that you will choose to do so. Your answer is confidential. No one who makes hiring decisions will see it. Your decision to complete the form and your answer will not harm you in any way. If you want to learn more about the law or this form, visit the U.S. Department of Labor’s Office of Federal Contract Compliance Programs (OFCCP) website at www.dol.gov/ofccp.\n",
|
352 |
+
"How do you know if you have a disability?\n",
|
353 |
+
"A disability is a condition that substantially limits one or more of your “major life activities.” If you have or have ever had such a condition, you are a person with a disability. Disabilities include, but are not limited to:\n",
|
354 |
+
"\n",
|
355 |
+
"Alcohol or other substance use disorder (not currently using drugs illegally)\n",
|
356 |
+
"Autoimmune disorder, for example, lupus, fibromyalgia, rheumatoid arthritis, HIV/AIDS\n",
|
357 |
+
"Blind or low vision\n",
|
358 |
+
"Cancer (past or present)\n",
|
359 |
+
"Cardiovascular or heart disease\n",
|
360 |
+
"Celiac disease\n",
|
361 |
+
"Cerebral palsy\n",
|
362 |
+
"Deaf or serious difficulty hearing\n",
|
363 |
+
"Diabetes\n",
|
364 |
+
"Disfigurement, for example, disfigurement caused by burns, wounds, accidents, or congenital disorders\n",
|
365 |
+
"Epilepsy or other seizure disorder\n",
|
366 |
+
"Gastrointestinal disorders, for example, Crohn's Disease, irritable bowel syndrome\n",
|
367 |
+
"Intellectual or developmental disability\n",
|
368 |
+
"Mental health conditions, for example, depression, bipolar disorder, anxiety disorder, schizophrenia, PTSD\n",
|
369 |
+
"Missing limbs or partially missing limbs\n",
|
370 |
+
"Mobility impairment, benefiting from the use of a wheelchair, scooter, walker, leg brace(s) and/or other supports\n",
|
371 |
+
"Nervous system condition, for example, migraine headaches, Parkinson’s disease, multiple sclerosis (MS)\n",
|
372 |
+
"Neurodivergence, for example, attention-deficit/hyperactivity disorder (ADHD), autism spectrum disorder, dyslexia, dyspraxia, other learning disabilities\n",
|
373 |
+
"Partial or complete paralysis (any cause)\n",
|
374 |
+
"Pulmonary or respiratory conditions, for example, tuberculosis, asthma, emphysema\n",
|
375 |
+
"Short stature (dwarfism)\n",
|
376 |
+
"Traumatic brain injury\n",
|
377 |
+
"\n",
|
378 |
+
"Disability StatusSelect...PUBLIC BURDEN STATEMENT: According to the Paperwork Reduction Act of 1995 no persons are required to respond to a collection of information unless such collection displays a valid OMB control number. This survey should take about 5 minutes to complete.\n",
|
379 |
+
"Submit applicationPowered byGreenhouseRead our Privacy Policy\n"
|
380 |
+
]
|
381 |
+
}
|
382 |
+
]
|
383 |
+
},
|
384 |
+
{
|
385 |
+
"cell_type": "code",
|
386 |
+
"source": [
|
387 |
+
"from langchain_core.prompts import PromptTemplate\n",
|
388 |
+
"import json\n",
|
389 |
+
"\n",
|
390 |
+
"prompt_template = PromptTemplate.from_template(\n",
|
391 |
+
" \"\"\"\n",
|
392 |
+
" ### SCRAPED TEXT FROM WEBSITE:\n",
|
393 |
+
" {page_data}\n",
|
394 |
+
" ### INSTRUCTION:\n",
|
395 |
+
" The scraped text is from the career's page of a website.\n",
|
396 |
+
" Your job is to extract the job postings and return them in JSON format containing the following keys: `role`, `experience`, `skills` and `description`. The skills sections shouldn't be in dictionary format but in list.\n",
|
397 |
+
" Only return the valid JSON.\n",
|
398 |
+
" ### VALID JSON (NO PREAMBLE):\n",
|
399 |
+
" \"\"\"\n",
|
400 |
+
")\n",
|
401 |
+
"\n",
|
402 |
+
"chain_template = prompt_template | llm\n",
|
403 |
+
"response = chain_template.invoke({\"page_data\": career_page_data})\n",
|
404 |
+
"type(response.content)\n",
|
405 |
+
"# Assuming response.content is already a dictionary/list (deserialized JSON)\n",
|
406 |
+
"#type(print(response.content.strip(\"```json\").strip(\"```\")))\n",
|
407 |
+
"\n"
|
408 |
+
],
|
409 |
+
"metadata": {
|
410 |
+
"colab": {
|
411 |
+
"base_uri": "https://localhost:8080/"
|
412 |
+
},
|
413 |
+
"id": "wrqaEn0Uoa8Z",
|
414 |
+
"outputId": "050524bc-8b07-44ab-dd0c-a31ba40808b4"
|
415 |
+
},
|
416 |
+
"execution_count": 59,
|
417 |
+
"outputs": [
|
418 |
+
{
|
419 |
+
"output_type": "execute_result",
|
420 |
+
"data": {
|
421 |
+
"text/plain": [
|
422 |
+
"str"
|
423 |
+
]
|
424 |
+
},
|
425 |
+
"metadata": {},
|
426 |
+
"execution_count": 59
|
427 |
+
}
|
428 |
+
]
|
429 |
+
},
|
430 |
+
{
|
431 |
+
"cell_type": "code",
|
432 |
+
"source": [
|
433 |
+
"print(response.content)"
|
434 |
+
],
|
435 |
+
"metadata": {
|
436 |
+
"colab": {
|
437 |
+
"base_uri": "https://localhost:8080/"
|
438 |
+
},
|
439 |
+
"id": "fWOOba6hz6ST",
|
440 |
+
"outputId": "ed5979a1-c40e-4e7d-b49d-b0b189427771"
|
441 |
+
},
|
442 |
+
"execution_count": 60,
|
443 |
+
"outputs": [
|
444 |
+
{
|
445 |
+
"output_type": "stream",
|
446 |
+
"name": "stdout",
|
447 |
+
"text": [
|
448 |
+
"```json\n",
|
449 |
+
"{\n",
|
450 |
+
" \"role\": \"AI Product Engineer\",\n",
|
451 |
+
" \"experience\": {\n",
|
452 |
+
" \"years\": 0,\n",
|
453 |
+
" \"education\": {\n",
|
454 |
+
" \"degrees\": []\n",
|
455 |
+
" },\n",
|
456 |
+
" \"training\": []\n",
|
457 |
+
" },\n",
|
458 |
+
" \"skills\": [\n",
|
459 |
+
" \"Expertise building and prototyping\",\n",
|
460 |
+
" \"Shipping quality user interfaces\",\n",
|
461 |
+
" \"Problem-solving\",\n",
|
462 |
+
" \"Empathetic communication and collaboration\",\n",
|
463 |
+
" \"Impact-orientation and user focus\",\n",
|
464 |
+
" \"Nice to Haves: Extensive experience building AI products using LLMs, embeddings and other ML technologies, Proficient with React, TypeScript, Node.js, and Postgres, Experience driving teams toward shared goals and balancing business priorities with individuals’ strengths, areas of interest, and career development goals\"\n",
|
465 |
+
" ],\n",
|
466 |
+
" \"description\": \"We're on a mission to make it possible for every person, team, and company to be able to tailor their software to solve any problem and take on any challenge. Computers may be our most powerful tools, but most of us can't build or modify the software we use on them every day. At Notion, we want to change this with focus, design, and craft. We've been working on this together since 2016, and have customers like Pixar, Mitsubishi, Figma, Plaid, Match Group, and thousands more on this journey with us. Today, we're growing fast and excited for new teammates to join us who are the best at what they do. We're passionate about building a company as diverse and creative as the millions of people Notion reaches worldwide. Notion is an in-person company, and currently requires its employees to come to the office for two Anchor Days (Mondays & Thursdays) and requests that employees spend the majority of their week in the office (including a third day). About The Role: We are looking for an AI Product Engineer to join our small but nimble AI team whose mission is to make Notion an AI-powered product. As an AI Product Engineer, you will work on developing and implementing AI-powered products, incorporating large language models (LLMs), embeddings, and other AI technologies into Notion’s product. What you’ll achieve: Work with the team to prototype and experiment with new AI features Productionize and launch new AI technology integrations into Notion’s core product Collaborate with cross-functional teams to deliver product features on time Stay up-to-date with the latest AI technologies and trends. Skills You'll Need to Bring: Expertise building and prototyping: You understand how parts of a system fit together, from the user interface to the data model. You are familiar with relational database systems like Postgres or MySQL, and have experience building products from ground up. 
Shipping quality user interfaces: You partner closely with product and design to craft beautiful user experiences for large audiences, leveraging web technologies like HTML, CSS, JavaScript, and a modern UI framework like React. Problem-solving: You approach problems holistically, starting with a clear and accurate understanding of the context. You think critically about the implications of what you're building and how it will impact real people's lives. You can navigate ambiguity flawlessly, decompose complex problems into clean solutions, while also balancing the business impact of what you’re building. Empathetic communication and collaboration: You communicate nuanced ideas clearly, whether you're explaining technical decisions in writing or brainstorming in real time. In disagreements, you engage thoughtfully with other perspectives and compromise when needed. You enjoy collaborating with both fellow engineers and cross-functional partners. You are a lifelong learner and invest in both your own growth and the growth, learning, and development of your teammates. Impact-orientation and user focus: You care about business impact and prioritize projects accordingly. You understand the balance between craft, speed, and the bottom line. You think critically about the implications of what you're building, and how it shapes real people's lives. You understand that reach comes with responsibility for our impact—good and bad. Work isn't a solo endeavor for you, and you enjoy collaborating cross-functionally to accomplish shared goals. Nice to Haves: You have extensive experience building AI products using LLMs, embeddings and other ML technologies. You're proficient with any part of our technology stack: React, TypeScript, Node.js, and Postgres. You have experience driving teams toward shared goals and can balance business priorities with individuals’ strengths, areas of interest, and career development goals. 
You've heard of computing pioneers like Ada Lovelace, Douglas Engelbart, Alan Kay, and others—and understand why we're big fans of their work. You have interests outside of technology, such as in art, history, or social sciences. We hire talented and passionate people from a variety of backgrounds because we want our global employee base to represent the wide diversity of our customers. If you’re excited about a role but your past experience doesn’t align perfectly with every bullet point listed in the job description, we still encourage you to apply. If you’re a builder at heart, share our company values, and enthusiastic about making software toolmaking ubiquitous, we want to hear from you. Notion is proud to be an equal opportunity employer. We do not discriminate in hiring or any employment decision based on race, color, religion, national origin, age, sex (including pregnancy, childbirth, or related medical conditions), marital status, ancestry, physical or mental disability, genetic information, veteran status, gender identity or expression, sexual orientation, or other applicable legally protected characteristic. Notion considers qualified applicants with criminal histories, consistent with applicable federal, state and local law. Notion is also committed to providing reasonable accommodations for qualified individuals with disabilities and disabled veterans in our job application procedures. If you need assistance or an accommodation due to a disability, please let your recruiter know. Notion is committed to providing highly competitive cash compensation, equity, and benefits. The compensation offered for this role will be based on multiple factors such as location, the role’s scope and complexity, and the candidate’s experience and expertise, and may vary from the range provided below. For roles based in San Francisco, the estimated base salary range for this role is $150,000 - $280,000 per year.\n",
|
467 |
+
"#LI-Onsite\n",
|
468 |
+
"Apply for this job*indicates a required field\n",
|
469 |
+
"First Name*Last Name*Email*Phone*Resume/CV*Attach\n",
|
470 |
+
"AttachDropboxGoogle DriveEnter manuallyEnter manuallyAccepted file types: pdf, doc, docx, txt, rtf\n",
|
471 |
+
"Starting August 1st 2022, employees across the globe will work and collaborate together in our offices for two Anchor Days each week. The other three days are meant to be flexible; the offices will be open and everyone’s encouraged to come in, but you can also work from home as well.\n",
|
472 |
+
"Notion will consider requests for accommodations to this policy. When requested, Notion will provide a reasonable accommodation for physical or mental disabilities, or other reasons recognized by applicable law.\n",
|
473 |
+
"Please confirm you have read and understand our return to office policy.*Select...\n",
|
474 |
+
"Will you now or in the future require Notion to sponsor an immigration case in order to employ you? \n",
|
475 |
+
"Select...\n",
|
476 |
+
"Why do you want to work at Notion?*LinkedIn ProfileWebsite or Portfolio\n",
|
477 |
+
"How did you hear about this opportunity? (select all that apply) Select...\n",
|
478 |
+
"Voluntary Self-Identification\n",
|
479 |
+
"For government reporting purposes, we ask candidates to respond to the below self-identification survey.\n",
|
480 |
+
"Completion of the form is entirely voluntary, and we hope that you will choose to do so. Your answer is confidential. No one who makes hiring decisions will see it. Your decision to complete the form and your answer will not harm you in any way. If you want to learn more about the law or this form, visit the U.S. Department of Labor’s Office of Federal Contract Compliance Programs (OFCCP) website at www.dol.gov/ofccp.\n",
|
481 |
+
"How do you know if you have a disability?\n",
|
482 |
+
"A disability is a condition that substantially limits one or more of your “major life activities.” If you have or have ever had such a condition, you are a person with a disability. Disabilities include, but are not limited to:\n",
|
483 |
+
"Alcohol or other substance use disorder (not currently using drugs illegally)\n",
|
484 |
+
"Autoimmune disorder, for example, lupus, fibromyalgia, rheumatoid arthritis, HIV/AIDS\n",
|
485 |
+
"Blind or low vision\n",
|
486 |
+
"Cancer (past or present)\n",
|
487 |
+
"Cardiovascular or heart disease\n",
|
488 |
+
"Celiac disease\n",
|
489 |
+
"Cerebral palsy\n",
|
490 |
+
"Deaf or serious difficulty hearing\n",
|
491 |
+
"Diabetes\n",
|
492 |
+
"Disfigurement, for example, disfigurement caused by burns, wounds, accidents, or congenital disorders\n",
|
493 |
+
"Epilepsy or other seizure disorder\n",
|
494 |
+
"Gastrointestinal disorders, for example, Crohn's Disease, irritable bowel syndrome\n",
|
495 |
+
"Intellectual or developmental disability\n",
|
496 |
+
"Mental health conditions, for example, depression, bipolar disorder, anxiety disorder, schizophrenia, PTSD\n",
|
497 |
+
"Missing limbs or partially missing limbs\n",
|
498 |
+
"Mobility impairment, benefiting from the use of a wheelchair, scooter, walker, leg brace(s) and/or other supports\n",
|
499 |
+
"Nervous system condition, for example, migraine headaches, Parkinson’s disease, multiple sclerosis (MS)\n",
|
500 |
+
"Neurodivergence, for example, attention-deficit/hyperactivity disorder (ADHD), autism spectrum disorder, dyslexia, dyspraxia, other learning disabilities\n",
|
501 |
+
"Partial or complete paralysis (any cause)\n",
|
502 |
+
"Pulmonary or respiratory conditions, for example, tuberculosis, asthma, emphysema\n",
|
503 |
+
"Short stature (dwarfism)\n",
|
504 |
+
"Traumatic brain injury\n",
|
505 |
+
"Disability StatusSelect...\n",
|
506 |
+
"PUBLIC BURDEN STATEMENT: According to the Paperwork Reduction Act of 1995 no persons are required to respond to a collection of information unless such collection displays a valid OMB control number. This survey should take about 5 minutes to complete.\n",
|
507 |
+
"Submit applicationPowered byGreenhouseRead our Privacy Policy\n",
|
508 |
+
"```\n"
|
509 |
+
]
|
510 |
+
}
|
511 |
+
]
|
512 |
+
},
|
513 |
+
{
|
514 |
+
"cell_type": "code",
|
515 |
+
"source": [
|
516 |
+
"from langchain_core.output_parsers import JsonOutputParser\n",
|
517 |
+
"\n",
|
518 |
+
"output_parser = JsonOutputParser()\n",
|
519 |
+
"json_response = output_parser.parse(response.content)\n",
|
520 |
+
"print(json_response)\n",
|
521 |
+
"type(json_response)"
|
522 |
+
],
|
523 |
+
"metadata": {
|
524 |
+
"colab": {
|
525 |
+
"base_uri": "https://localhost:8080/"
|
526 |
+
},
|
527 |
+
"id": "5XDOZXtYtpjk",
|
528 |
+
"outputId": "da0c8c7a-0f5d-4219-8913-1c7462b43fb1"
|
529 |
+
},
|
530 |
+
"execution_count": 61,
|
531 |
+
"outputs": [
|
532 |
+
{
|
533 |
+
"output_type": "stream",
|
534 |
+
"name": "stdout",
|
535 |
+
"text": [
|
536 |
+
"{'role': 'AI Product Engineer', 'experience': {'years': 0, 'education': {'degrees': []}, 'training': []}, 'skills': ['Expertise building and prototyping', 'Shipping quality user interfaces', 'Problem-solving', 'Empathetic communication and collaboration', 'Impact-orientation and user focus', 'Nice to Haves: Extensive experience building AI products using LLMs, embeddings and other ML technologies, Proficient with React, TypeScript, Node.js, and Postgres, Experience driving teams toward shared goals and balancing business priorities with individuals’ strengths, areas of interest, and career development goals'], 'description': \"We're on a mission to make it possible for every person, team, and company to be able to tailor their software to solve any problem and take on any challenge. Computers may be our most powerful tools, but most of us can't build or modify the software we use on them every day. At Notion, we want to change this with focus, design, and craft. We've been working on this together since 2016, and have customers like Pixar, Mitsubishi, Figma, Plaid, Match Group, and thousands more on this journey with us. Today, we're growing fast and excited for new teammates to join us who are the best at what they do. We're passionate about building a company as diverse and creative as the millions of people Notion reaches worldwide. Notion is an in-person company, and currently requires its employees to come to the office for two Anchor Days (Mondays & Thursdays) and requests that employees spend the majority of their week in the office (including a third day). About The Role: We are looking for an AI Product Engineer to join our small but nimble AI team whose mission is to make Notion an AI-powered product. As an AI Product Engineer, you will work on developing and implementing AI-powered products, incorporating large language models (LLMs), embeddings, and other AI technologies into Notion’s product. 
What you’ll achieve: Work with the team to prototype and experiment with new AI features Productionize and launch new AI technology integrations into Notion’s core product Collaborate with cross-functional teams to deliver product features on time Stay up-to-date with the latest AI technologies and trends. Skills You'll Need to Bring: Expertise building and prototyping: You understand how parts of a system fit together, from the user interface to the data model. You are familiar with relational database systems like Postgres or MySQL, and have experience building products from ground up. Shipping quality user interfaces: You partner closely with product and design to craft beautiful user experiences for large audiences, leveraging web technologies like HTML, CSS, JavaScript, and a modern UI framework like React. Problem-solving: You approach problems holistically, starting with a clear and accurate understanding of the context. You think critically about the implications of what you're building and how it will impact real people's lives. You can navigate ambiguity flawlessly, decompose complex problems into clean solutions, while also balancing the business impact of what you’re building. Empathetic communication and collaboration: You communicate nuanced ideas clearly, whether you're explaining technical decisions in writing or brainstorming in real time. In disagreements, you engage thoughtfully with other perspectives and compromise when needed. You enjoy collaborating with both fellow engineers and cross-functional partners. You are a lifelong learner and invest in both your own growth and the growth, learning, and development of your teammates. Impact-orientation and user focus: You care about business impact and prioritize projects accordingly. You understand the balance between craft, speed, and the bottom line. You think critically about the implications of what you're building, and how it shapes real people's lives. 
You understand that reach comes with responsibility for our impact—good and bad. Work isn't a solo endeavor for you, and you enjoy collaborating cross-functionally to accomplish shared goals. Nice to Haves: You have extensive experience building AI products using LLMs, embeddings and other ML technologies. You're proficient with any part of our technology stack: React, TypeScript, Node.js, and Postgres. You have experience driving teams toward shared goals and can balance business priorities with individuals’ strengths, areas of interest, and career development goals. You've heard of computing pioneers like Ada Lovelace, Douglas Engelbart, Alan Kay, and others—and understand why we're big fans of their work. You have interests outside of technology, such as in art, history, or social sciences. We hire talented and passionate people from a variety of backgrounds because we want our global employee base to represent the wide diversity of our customers. If you’re excited about a role but your past experience doesn’t align perfectly with every bullet point listed in the job description, we still encourage you to apply. If you’re a builder at heart, share our company values, and enthusiastic about making software toolmaking ubiquitous, we want to hear from you. Notion is proud to be an equal opportunity employer. We do not discriminate in hiring or any employment decision based on race, color, religion, national origin, age, sex (including pregnancy, childbirth, or related medical conditions), marital status, ancestry, physical or mental disability, genetic information, veteran status, gender identity or expression, sexual orientation, or other applicable legally protected characteristic. Notion considers qualified applicants with criminal histories, consistent with applicable federal, state and local law. Notion is also committed to providing reasonable accommodations for qualified individuals with disabilities and disabled veterans in our job application procedures. 
If you need assistance or an accommodation due to a disability, please let your recruiter know. Notion is committed to providing highly competitive cash compensation, equity, and benefits. The compensation offered for this role will be based on multiple factors such as location, the role’s scope and complexity, and the candidate’s experience and expertise, and may vary from the range provided below. For roles based in San Francisco, the estimated base salary range for this role is $150,000 - $280,000 per year.\\n#LI-Onsite\\nApply for this job*indicates a required field\\nFirst Name*Last Name*Email*Phone*Resume/CV*Attach\\nAttachDropboxGoogle DriveEnter manuallyEnter manuallyAccepted file types: pdf, doc, docx, txt, rtf\\nStarting August 1st 2022, employees across the globe will work and collaborate together in our offices for two Anchor Days each week. The other three days are meant to be flexible; the offices will be open and everyone’s encouraged to come in, but you can also work from home as well.\\nNotion will consider requests for accommodations to this policy. When requested, Notion will provide a reasonable accommodation for physical or mental disabilities, or other reasons recognized by applicable law.\\nPlease confirm you have read and understand our return to office policy.*Select...\\nWill you now or in the future require Notion to sponsor an immigration case in order to employ you? \\nSelect...\\nWhy do you want to work at Notion?*LinkedIn ProfileWebsite or Portfolio\\nHow did you hear about this opportunity? (select all that apply) Select...\\nVoluntary Self-Identification\\nFor government reporting purposes, we ask candidates to respond to the below self-identification survey.\\nCompletion of the form is entirely voluntary, and we hope that you will choose to do so. Your answer is confidential. No one who makes hiring decisions will see it. Your decision to complete the form and your answer will not harm you in any way. 
If you want to learn more about the law or this form, visit the U.S. Department of Labor’s Office of Federal Contract Compliance Programs (OFCCP) website at www.dol.gov/ofccp.\\nHow do you know if you have a disability?\\nA disability is a condition that substantially limits one or more of your “major life activities.” If you have or have ever had such a condition, you are a person with a disability. Disabilities include, but are not limited to:\\nAlcohol or other substance use disorder (not currently using drugs illegally)\\nAutoimmune disorder, for example, lupus, fibromyalgia, rheumatoid arthritis, HIV/AIDS\\nBlind or low vision\\nCancer (past or present)\\nCardiovascular or heart disease\\nCeliac disease\\nCerebral palsy\\nDeaf or serious difficulty hearing\\nDiabetes\\nDisfigurement, for example, disfigurement caused by burns, wounds, accidents, or congenital disorders\\nEpilepsy or other seizure disorder\\nGastrointestinal disorders, for example, Crohn's Disease, irritable bowel syndrome\\nIntellectual or developmental disability\\nMental health conditions, for example, depression, bipolar disorder, anxiety disorder, schizophrenia, PTSD\\nMissing limbs or partially missing limbs\\nMobility impairment, benefiting from the use of a wheelchair, scooter, walker, leg brace(s) and/or other supports\\nNervous system condition, for example, migraine headaches, Parkinson’s disease, multiple sclerosis (MS)\\nNeurodivergence, for example, attention-deficit/hyperactivity disorder (ADHD), autism spectrum disorder, dyslexia, dyspraxia, other learning disabilities\\nPartial or complete paralysis (any cause)\\nPulmonary or respiratory conditions, for example, tuberculosis, asthma, emphysema\\nShort stature (dwarfism)\\nTraumatic brain injury\\nDisability StatusSelect...\\nPUBLIC BURDEN STATEMENT: According to the Paperwork Reduction Act of 1995 no persons are required to respond to a collection of information unless such collection displays a valid OMB control number. 
This survey should take about 5 minutes to complete.\\nSubmit applicationPowered byGreenhouseRead our Privacy Policy\"}\n"
|
537 |
+
]
|
538 |
+
},
|
539 |
+
{
|
540 |
+
"output_type": "execute_result",
|
541 |
+
"data": {
|
542 |
+
"text/plain": [
|
543 |
+
"dict"
|
544 |
+
]
|
545 |
+
},
|
546 |
+
"metadata": {},
|
547 |
+
"execution_count": 61
|
548 |
+
}
|
549 |
+
]
|
550 |
+
},
|
551 |
+
{
|
552 |
+
"cell_type": "code",
|
553 |
+
"source": [
|
554 |
+
"import pandas as pd\n",
|
555 |
+
"\n",
|
556 |
+
"df = pd.read_csv(\"/content/techstack-portfolio.csv\")\n",
|
557 |
+
"df.head()\n",
|
558 |
+
"df.info()"
|
559 |
+
],
|
560 |
+
"metadata": {
|
561 |
+
"colab": {
|
562 |
+
"base_uri": "https://localhost:8080/"
|
563 |
+
},
|
564 |
+
"id": "5m9Dk-mquwbc",
|
565 |
+
"outputId": "45f5d683-7b00-462b-aff8-cb1585513f79"
|
566 |
+
},
|
567 |
+
"execution_count": 48,
|
568 |
+
"outputs": [
|
569 |
+
{
|
570 |
+
"output_type": "stream",
|
571 |
+
"name": "stdout",
|
572 |
+
"text": [
|
573 |
+
"<class 'pandas.core.frame.DataFrame'>\n",
|
574 |
+
"RangeIndex: 20 entries, 0 to 19\n",
|
575 |
+
"Data columns (total 2 columns):\n",
|
576 |
+
" # Column Non-Null Count Dtype \n",
|
577 |
+
"--- ------ -------------- ----- \n",
|
578 |
+
" 0 Techstack 20 non-null object\n",
|
579 |
+
" 1 Links 20 non-null object\n",
|
580 |
+
"dtypes: object(2)\n",
|
581 |
+
"memory usage: 448.0+ bytes\n"
|
582 |
+
]
|
583 |
+
}
|
584 |
+
]
|
585 |
+
},
|
586 |
+
{
|
587 |
+
"cell_type": "code",
|
588 |
+
"source": [
|
589 |
+
"import chromadb\n",
|
590 |
+
"import uuid\n",
|
591 |
+
"\n",
|
592 |
+
"client = chromadb.PersistentClient(\"my-vectorstore\") # persistentclient -- create a folder and store records in it\n",
|
593 |
+
"collection = client.get_or_create_collection(\"my_collection\")\n",
|
594 |
+
"\n",
|
595 |
+
"if not collection.count():\n",
|
596 |
+
" for _, row in df.iterrows():\n",
|
597 |
+
" collection.add(documents = row[\"Techstack\"], metadatas = {\"links\": row[\"Links\"]}, ids = [str(uuid.uuid4())])\n"
|
598 |
+
],
|
599 |
+
"metadata": {
|
600 |
+
"id": "9CdGU_fAROj8"
|
601 |
+
},
|
602 |
+
"execution_count": 62,
|
603 |
+
"outputs": []
|
604 |
+
},
|
605 |
+
{
|
606 |
+
"cell_type": "code",
|
607 |
+
"source": [
|
608 |
+
"job = json_response\n",
|
609 |
+
"job['skills']"
|
610 |
+
],
|
611 |
+
"metadata": {
|
612 |
+
"colab": {
|
613 |
+
"base_uri": "https://localhost:8080/"
|
614 |
+
},
|
615 |
+
"id": "w545dkcXxkz1",
|
616 |
+
"outputId": "e27f3b8d-85d7-4534-e7f9-6a7377fb8dcf"
|
617 |
+
},
|
618 |
+
"execution_count": 64,
|
619 |
+
"outputs": [
|
620 |
+
{
|
621 |
+
"output_type": "execute_result",
|
622 |
+
"data": {
|
623 |
+
"text/plain": [
|
624 |
+
"['Expertise building and prototyping',\n",
|
625 |
+
" 'Shipping quality user interfaces',\n",
|
626 |
+
" 'Problem-solving',\n",
|
627 |
+
" 'Empathetic communication and collaboration',\n",
|
628 |
+
" 'Impact-orientation and user focus',\n",
|
629 |
+
" 'Nice to Haves: Extensive experience building AI products using LLMs, embeddings and other ML technologies, Proficient with React, TypeScript, Node.js, and Postgres, Experience driving teams toward shared goals and balancing business priorities with individuals’ strengths, areas of interest, and career development goals']"
|
630 |
+
]
|
631 |
+
},
|
632 |
+
"metadata": {},
|
633 |
+
"execution_count": 64
|
634 |
+
}
|
635 |
+
]
|
636 |
+
},
|
637 |
+
{
|
638 |
+
"cell_type": "code",
|
639 |
+
"source": [
|
640 |
+
"links = collection.query(query_texts = job['skills'], n_results = 2).get(\"metadatas\", [])\n",
|
641 |
+
"links"
|
642 |
+
],
|
643 |
+
"metadata": {
|
644 |
+
"colab": {
|
645 |
+
"base_uri": "https://localhost:8080/"
|
646 |
+
},
|
647 |
+
"id": "0YegZ8bu06Fw",
|
648 |
+
"outputId": "6b968e3e-e65a-43af-a7fd-69d343c37d7b"
|
649 |
+
},
|
650 |
+
"execution_count": 75,
|
651 |
+
"outputs": [
|
652 |
+
{
|
653 |
+
"output_type": "execute_result",
|
654 |
+
"data": {
|
655 |
+
"text/plain": [
|
656 |
+
"[[{'links': 'https://example.com/ml-python-portfolio'},\n",
|
657 |
+
" {'links': 'https://example.com/devops-portfolio'}],\n",
|
658 |
+
" [{'links': 'https://example.com/full-stack-js-portfolio'},\n",
|
659 |
+
" {'links': 'https://example.com/kotlin-backend-portfolio'}],\n",
|
660 |
+
" [{'links': 'https://example.com/ml-python-portfolio'},\n",
|
661 |
+
" {'links': 'https://example.com/magento-portfolio'}],\n",
|
662 |
+
" [{'links': 'https://example.com/flutter-portfolio'},\n",
|
663 |
+
" {'links': 'https://example.com/android-portfolio'}],\n",
|
664 |
+
" [{'links': 'https://example.com/android-portfolio'},\n",
|
665 |
+
" {'links': 'https://example.com/ios-ar-portfolio'}],\n",
|
666 |
+
" [{'links': 'https://example.com/ml-python-portfolio'},\n",
|
667 |
+
" {'links': 'https://example.com/vue-portfolio'}]]"
|
668 |
+
]
|
669 |
+
},
|
670 |
+
"metadata": {},
|
671 |
+
"execution_count": 75
|
672 |
+
}
|
673 |
+
]
|
674 |
+
},
|
675 |
+
{
|
676 |
+
"cell_type": "code",
|
677 |
+
"source": [
|
678 |
+
"prompt_email = PromptTemplate.from_template(\n",
|
679 |
+
" \"\"\"\n",
|
680 |
+
" ### JOB DESCRIPTION:\n",
|
681 |
+
" {job_description}\n",
|
682 |
+
"\n",
|
683 |
+
" ### INSTRUCTION:\n",
|
684 |
+
" You are [Candidate Name], a skilled professional seeking opportunities in [Industry/Field]. Your expertise includes [List of Relevant Skills/Technologies].\n",
|
685 |
+
" You have a strong passion for [relevant interests related to the job or industry], and you are eager to contribute to innovative projects.\n",
|
686 |
+
"\n",
|
687 |
+
" Your job is to write a cold email to the hiring manager or recruiter at the company regarding the job mentioned above.\n",
|
688 |
+
" Highlight your qualifications and explain how your skills align with the company's needs.\n",
|
689 |
+
" Also add the most relevant ones from the following links to showcase your relevant projects. There should be project link with description for atmost 4 skills required by company: {link_list}\n",
|
690 |
+
" Make sure to convey your enthusiasm for the opportunity and express your interest in discussing how you can add value to their team.\n",
|
691 |
+
"\n",
|
692 |
+
" ### EMAIL (NO PREAMBLE):\n",
|
693 |
+
" \"\"\"\n",
|
694 |
+
")\n"
|
695 |
+
],
|
696 |
+
"metadata": {
|
697 |
+
"id": "iJWJJUol2D3U"
|
698 |
+
},
|
699 |
+
"execution_count": 86,
|
700 |
+
"outputs": []
|
701 |
+
},
|
702 |
+
{
|
703 |
+
"cell_type": "code",
|
704 |
+
"source": [
|
705 |
+
"chain_email = prompt_email | llm\n",
|
706 |
+
"res = chain_email.invoke({\"job_description\": str(job), \"link_list\": links})\n",
|
707 |
+
"print(res.content)"
|
708 |
+
],
|
709 |
+
"metadata": {
|
710 |
+
"colab": {
|
711 |
+
"base_uri": "https://localhost:8080/"
|
712 |
+
},
|
713 |
+
"id": "7F01pFoA2eB3",
|
714 |
+
"outputId": "431b424e-423b-4972-faf7-49c3e0c10027"
|
715 |
+
},
|
716 |
+
"execution_count": 87,
|
717 |
+
"outputs": [
|
718 |
+
{
|
719 |
+
"output_type": "stream",
|
720 |
+
"name": "stdout",
|
721 |
+
"text": [
|
722 |
+
"Subject: Application for AI Product Engineer Role at Notion\n",
|
723 |
+
"\n",
|
724 |
+
"Dear [Hiring Manager's Name],\n",
|
725 |
+
"\n",
|
726 |
+
"I am excited to apply for the AI Product Engineer position at Notion, a company that shares my passion for innovation and making software toolmaking ubiquitous. With my expertise in building and prototyping, shipping quality user interfaces, and problem-solving, I am confident that I can contribute to the development of cutting-edge AI-powered products.\n",
|
727 |
+
"\n",
|
728 |
+
"As a seasoned professional with extensive experience in building AI products using large language models (LLMs), embeddings, and other ML technologies, I am well-equipped to tackle the challenges of this role. My proficiency in React, TypeScript, Node.js, and Postgres will enable me to efficiently prototype and experiment with new AI features, while my experience driving teams toward shared goals and balancing business priorities with individuals' strengths, areas of interest, and career development goals will ensure seamless collaboration with cross-functional teams.\n",
|
729 |
+
"\n",
|
730 |
+
"I am particularly drawn to Notion's mission to make AI-powered products accessible to everyone, and I am impressed by the company's commitment to diversity and creativity. My background in art, history, and social sciences has also instilled in me a passion for understanding the impact of technology on society, and I believe that my unique perspective will bring a valuable asset to the team.\n",
|
731 |
+
"\n",
|
732 |
+
"I have attached my resume and a link to my portfolio, which showcases my experience in building AI products, shipping quality user interfaces, and problem-solving. I would be thrilled to discuss my qualifications further and explain in greater detail why I am the ideal candidate for this role.\n",
|
733 |
+
"\n",
|
734 |
+
"Thank you for considering my application. I look forward to the opportunity to contribute to Notion's mission and discuss how I can add value to the team.\n",
|
735 |
+
"\n",
|
736 |
+
"Best regards,\n",
|
737 |
+
"\n",
|
738 |
+
"[Candidate Name]\n",
|
739 |
+
"\n",
|
740 |
+
"Attachments:\n",
|
741 |
+
"\n",
|
742 |
+
"* Resume\n",
|
743 |
+
"* Portfolio (with links to relevant projects)\n"
|
744 |
+
]
|
745 |
+
}
|
746 |
+
]
|
747 |
+
},
|
748 |
+
{
|
749 |
+
"cell_type": "code",
|
750 |
+
"source": [],
|
751 |
+
"metadata": {
|
752 |
+
"id": "qS50C1jp4Li0"
|
753 |
+
},
|
754 |
+
"execution_count": null,
|
755 |
+
"outputs": []
|
756 |
+
}
|
757 |
+
]
|
758 |
+
}
|
env/.gitignore
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
# created by virtualenv automatically
|
2 |
+
*
|
env/Lib/site-packages/.DS_Store
ADDED
Binary file (6.15 kB). View file
|
|
env/Lib/site-packages/Deprecated-1.2.14.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
pip
|
env/Lib/site-packages/Deprecated-1.2.14.dist-info/LICENSE.rst
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
The MIT License (MIT)
|
2 |
+
|
3 |
+
Copyright (c) 2017 Laurent LAPORTE
|
4 |
+
|
5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6 |
+
of this software and associated documentation files (the "Software"), to deal
|
7 |
+
in the Software without restriction, including without limitation the rights
|
8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9 |
+
copies of the Software, and to permit persons to whom the Software is
|
10 |
+
furnished to do so, subject to the following conditions:
|
11 |
+
|
12 |
+
The above copyright notice and this permission notice shall be included in all
|
13 |
+
copies or substantial portions of the Software.
|
14 |
+
|
15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
21 |
+
SOFTWARE.
|
env/Lib/site-packages/Deprecated-1.2.14.dist-info/METADATA
ADDED
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Metadata-Version: 2.1
|
2 |
+
Name: Deprecated
|
3 |
+
Version: 1.2.14
|
4 |
+
Summary: Python @deprecated decorator to deprecate old python classes, functions or methods.
|
5 |
+
Home-page: https://github.com/tantale/deprecated
|
6 |
+
Author: Laurent LAPORTE
|
7 |
+
Author-email: [email protected]
|
8 |
+
License: MIT
|
9 |
+
Project-URL: Documentation, https://deprecated.readthedocs.io/en/latest/
|
10 |
+
Project-URL: Source, https://github.com/tantale/deprecated
|
11 |
+
Project-URL: Bug Tracker, https://github.com/tantale/deprecated/issues
|
12 |
+
Keywords: deprecate,deprecated,deprecation,warning,warn,decorator
|
13 |
+
Platform: any
|
14 |
+
Classifier: Development Status :: 5 - Production/Stable
|
15 |
+
Classifier: Environment :: Web Environment
|
16 |
+
Classifier: Intended Audience :: Developers
|
17 |
+
Classifier: License :: OSI Approved :: MIT License
|
18 |
+
Classifier: Operating System :: OS Independent
|
19 |
+
Classifier: Programming Language :: Python
|
20 |
+
Classifier: Programming Language :: Python :: 2
|
21 |
+
Classifier: Programming Language :: Python :: 2.7
|
22 |
+
Classifier: Programming Language :: Python :: 3
|
23 |
+
Classifier: Programming Language :: Python :: 3.4
|
24 |
+
Classifier: Programming Language :: Python :: 3.5
|
25 |
+
Classifier: Programming Language :: Python :: 3.6
|
26 |
+
Classifier: Programming Language :: Python :: 3.7
|
27 |
+
Classifier: Programming Language :: Python :: 3.8
|
28 |
+
Classifier: Programming Language :: Python :: 3.9
|
29 |
+
Classifier: Programming Language :: Python :: 3.10
|
30 |
+
Classifier: Programming Language :: Python :: 3.11
|
31 |
+
Classifier: Programming Language :: Python :: 3.12
|
32 |
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
33 |
+
Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
|
34 |
+
Description-Content-Type: text/x-rst
|
35 |
+
License-File: LICENSE.rst
|
36 |
+
Requires-Dist: wrapt (<2,>=1.10)
|
37 |
+
Provides-Extra: dev
|
38 |
+
Requires-Dist: tox ; extra == 'dev'
|
39 |
+
Requires-Dist: PyTest ; extra == 'dev'
|
40 |
+
Requires-Dist: PyTest-Cov ; extra == 'dev'
|
41 |
+
Requires-Dist: bump2version (<1) ; extra == 'dev'
|
42 |
+
Requires-Dist: sphinx (<2) ; extra == 'dev'
|
43 |
+
|
44 |
+
|
45 |
+
Deprecated Library
|
46 |
+
------------------
|
47 |
+
|
48 |
+
Deprecated is Easy to Use
|
49 |
+
`````````````````````````
|
50 |
+
|
51 |
+
If you need to mark a function or a method as deprecated,
|
52 |
+
you can use the ``@deprecated`` decorator:
|
53 |
+
|
54 |
+
Save in a hello.py:
|
55 |
+
|
56 |
+
.. code:: python
|
57 |
+
|
58 |
+
from deprecated import deprecated
|
59 |
+
|
60 |
+
|
61 |
+
@deprecated(version='1.2.1', reason="You should use another function")
|
62 |
+
def some_old_function(x, y):
|
63 |
+
return x + y
|
64 |
+
|
65 |
+
|
66 |
+
class SomeClass(object):
|
67 |
+
@deprecated(version='1.3.0', reason="This method is deprecated")
|
68 |
+
def some_old_method(self, x, y):
|
69 |
+
return x + y
|
70 |
+
|
71 |
+
|
72 |
+
some_old_function(12, 34)
|
73 |
+
obj = SomeClass()
|
74 |
+
obj.some_old_method(5, 8)
|
75 |
+
|
76 |
+
|
77 |
+
And Easy to Setup
|
78 |
+
`````````````````
|
79 |
+
|
80 |
+
And run it:
|
81 |
+
|
82 |
+
.. code:: bash
|
83 |
+
|
84 |
+
$ pip install Deprecated
|
85 |
+
$ python hello.py
|
86 |
+
hello.py:15: DeprecationWarning: Call to deprecated function (or staticmethod) some_old_function.
|
87 |
+
(You should use another function) -- Deprecated since version 1.2.0.
|
88 |
+
some_old_function(12, 34)
|
89 |
+
hello.py:17: DeprecationWarning: Call to deprecated method some_old_method.
|
90 |
+
(This method is deprecated) -- Deprecated since version 1.3.0.
|
91 |
+
obj.some_old_method(5, 8)
|
92 |
+
|
93 |
+
|
94 |
+
You can document your code
|
95 |
+
``````````````````````````
|
96 |
+
|
97 |
+
Have you ever wonder how to document that some functions, classes, methods, etc. are deprecated?
|
98 |
+
This is now possible with the integrated Sphinx directives:
|
99 |
+
|
100 |
+
For instance, in hello_sphinx.py:
|
101 |
+
|
102 |
+
.. code:: python
|
103 |
+
|
104 |
+
from deprecated.sphinx import deprecated
|
105 |
+
from deprecated.sphinx import versionadded
|
106 |
+
from deprecated.sphinx import versionchanged
|
107 |
+
|
108 |
+
|
109 |
+
@versionadded(version='1.0', reason="This function is new")
|
110 |
+
def function_one():
|
111 |
+
'''This is the function one'''
|
112 |
+
|
113 |
+
|
114 |
+
@versionchanged(version='1.0', reason="This function is modified")
|
115 |
+
def function_two():
|
116 |
+
'''This is the function two'''
|
117 |
+
|
118 |
+
|
119 |
+
@deprecated(version='1.0', reason="This function will be removed soon")
|
120 |
+
def function_three():
|
121 |
+
'''This is the function three'''
|
122 |
+
|
123 |
+
|
124 |
+
function_one()
|
125 |
+
function_two()
|
126 |
+
function_three() # warns
|
127 |
+
|
128 |
+
help(function_one)
|
129 |
+
help(function_two)
|
130 |
+
help(function_three)
|
131 |
+
|
132 |
+
|
133 |
+
The result it immediate
|
134 |
+
```````````````````````
|
135 |
+
|
136 |
+
Run it:
|
137 |
+
|
138 |
+
.. code:: bash
|
139 |
+
|
140 |
+
$ python hello_sphinx.py
|
141 |
+
|
142 |
+
hello_sphinx.py:23: DeprecationWarning: Call to deprecated function (or staticmethod) function_three.
|
143 |
+
(This function will be removed soon) -- Deprecated since version 1.0.
|
144 |
+
function_three() # warns
|
145 |
+
|
146 |
+
Help on function function_one in module __main__:
|
147 |
+
|
148 |
+
function_one()
|
149 |
+
This is the function one
|
150 |
+
|
151 |
+
.. versionadded:: 1.0
|
152 |
+
This function is new
|
153 |
+
|
154 |
+
Help on function function_two in module __main__:
|
155 |
+
|
156 |
+
function_two()
|
157 |
+
This is the function two
|
158 |
+
|
159 |
+
.. versionchanged:: 1.0
|
160 |
+
This function is modified
|
161 |
+
|
162 |
+
Help on function function_three in module __main__:
|
163 |
+
|
164 |
+
function_three()
|
165 |
+
This is the function three
|
166 |
+
|
167 |
+
.. deprecated:: 1.0
|
168 |
+
This function will be removed soon
|
169 |
+
|
170 |
+
|
171 |
+
Links
|
172 |
+
`````
|
173 |
+
|
174 |
+
* `Python package index (PyPi) <https://pypi.python.org/pypi/deprecated>`_
|
175 |
+
* `GitHub website <https://github.com/tantale/deprecated>`_
|
176 |
+
* `Read The Docs <https://readthedocs.org/projects/deprecated>`_
|
177 |
+
* `EBook on Lulu.com <http://www.lulu.com/commerce/index.php?fBuyContent=21305117>`_
|
178 |
+
* `StackOverFlow Q&A <https://stackoverflow.com/a/40301488/1513933>`_
|
179 |
+
* `Development version
|
180 |
+
<https://github.com/tantale/deprecated/zipball/master#egg=Deprecated-dev>`_
|
181 |
+
|
env/Lib/site-packages/Deprecated-1.2.14.dist-info/RECORD
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Deprecated-1.2.14.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
2 |
+
Deprecated-1.2.14.dist-info/LICENSE.rst,sha256=HoPt0VvkGbXVveNy4yXlJ_9PmRX1SOfHUxS0H2aZ6Dw,1081
|
3 |
+
Deprecated-1.2.14.dist-info/METADATA,sha256=xQYvk5nwOfnkxxRD-VHkpE-sMu0IBHRZ8ayspypfkTs,5354
|
4 |
+
Deprecated-1.2.14.dist-info/RECORD,,
|
5 |
+
Deprecated-1.2.14.dist-info/WHEEL,sha256=a-zpFRIJzOq5QfuhBzbhiA1eHTzNCJn8OdRvhdNX0Rk,110
|
6 |
+
Deprecated-1.2.14.dist-info/top_level.txt,sha256=nHbOYawKPQQE5lQl-toUB1JBRJjUyn_m_Mb8RVJ0RjA,11
|
7 |
+
deprecated/__init__.py,sha256=ZphiULqDVrESSB0mLV2WA88JyhQxZSK44zuDGbV5k-g,349
|
8 |
+
deprecated/__pycache__/__init__.cpython-311.pyc,,
|
9 |
+
deprecated/__pycache__/classic.cpython-311.pyc,,
|
10 |
+
deprecated/__pycache__/sphinx.cpython-311.pyc,,
|
11 |
+
deprecated/classic.py,sha256=QugmUi7IhBvp2nDvMtyWqFDPRR43-9nfSZG1ZJSDpFM,9880
|
12 |
+
deprecated/sphinx.py,sha256=NqQ0oKGcVn6yUe23iGbCieCgvWbEDQSPt9QelbXJnDU,10258
|
env/Lib/site-packages/Deprecated-1.2.14.dist-info/WHEEL
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Wheel-Version: 1.0
|
2 |
+
Generator: bdist_wheel (0.40.0)
|
3 |
+
Root-Is-Purelib: true
|
4 |
+
Tag: py2-none-any
|
5 |
+
Tag: py3-none-any
|
6 |
+
|
env/Lib/site-packages/Deprecated-1.2.14.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
deprecated
|
env/Lib/site-packages/GitPython-3.1.43.dist-info/AUTHORS
ADDED
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
GitPython was originally written by Michael Trier.
|
2 |
+
GitPython 0.2 was partially (re)written by Sebastian Thiel, based on 0.1.6 and git-dulwich.
|
3 |
+
|
4 |
+
Contributors are:
|
5 |
+
|
6 |
+
-Michael Trier <mtrier _at_ gmail.com>
|
7 |
+
-Alan Briolat
|
8 |
+
-Florian Apolloner <florian _at_ apolloner.eu>
|
9 |
+
-David Aguilar <davvid _at_ gmail.com>
|
10 |
+
-Jelmer Vernooij <jelmer _at_ samba.org>
|
11 |
+
-Steve Frécinaux <code _at_ istique.net>
|
12 |
+
-Kai Lautaportti <kai _at_ lautaportti.fi>
|
13 |
+
-Paul Sowden <paul _at_ idontsmoke.co.uk>
|
14 |
+
-Sebastian Thiel <byronimo _at_ gmail.com>
|
15 |
+
-Jonathan Chu <jonathan.chu _at_ me.com>
|
16 |
+
-Vincent Driessen <me _at_ nvie.com>
|
17 |
+
-Phil Elson <pelson _dot_ pub _at_ gmail.com>
|
18 |
+
-Bernard `Guyzmo` Pratz <[email protected]>
|
19 |
+
-Timothy B. Hartman <tbhartman _at_ gmail.com>
|
20 |
+
-Konstantin Popov <konstantin.popov.89 _at_ yandex.ru>
|
21 |
+
-Peter Jones <pjones _at_ redhat.com>
|
22 |
+
-Anson Mansfield <anson.mansfield _at_ gmail.com>
|
23 |
+
-Ken Odegard <ken.odegard _at_ gmail.com>
|
24 |
+
-Alexis Horgix Chotard
|
25 |
+
-Piotr Babij <piotr.babij _at_ gmail.com>
|
26 |
+
-Mikuláš Poul <mikulaspoul _at_ gmail.com>
|
27 |
+
-Charles Bouchard-Légaré <cblegare.atl _at_ ntis.ca>
|
28 |
+
-Yaroslav Halchenko <debian _at_ onerussian.com>
|
29 |
+
-Tim Swast <swast _at_ google.com>
|
30 |
+
-William Luc Ritchie
|
31 |
+
-David Host <hostdm _at_ outlook.com>
|
32 |
+
-A. Jesse Jiryu Davis <jesse _at_ emptysquare.net>
|
33 |
+
-Steven Whitman <ninloot _at_ gmail.com>
|
34 |
+
-Stefan Stancu <stefan.stancu _at_ gmail.com>
|
35 |
+
-César Izurieta <cesar _at_ caih.org>
|
36 |
+
-Arthur Milchior <arthur _at_ milchior.fr>
|
37 |
+
-Anil Khatri <anil.soccer.khatri _at_ gmail.com>
|
38 |
+
-JJ Graham <thetwoj _at_ gmail.com>
|
39 |
+
-Ben Thayer <ben _at_ benthayer.com>
|
40 |
+
-Dries Kennes <admin _at_ dries007.net>
|
41 |
+
-Pratik Anurag <panurag247365 _at_ gmail.com>
|
42 |
+
-Harmon <harmon.public _at_ gmail.com>
|
43 |
+
-Liam Beguin <liambeguin _at_ gmail.com>
|
44 |
+
-Ram Rachum <ram _at_ rachum.com>
|
45 |
+
-Alba Mendez <me _at_ alba.sh>
|
46 |
+
-Robert Westman <robert _at_ byteflux.io>
|
47 |
+
-Hugo van Kemenade
|
48 |
+
-Hiroki Tokunaga <tokusan441 _at_ gmail.com>
|
49 |
+
-Julien Mauroy <pro.julien.mauroy _at_ gmail.com>
|
50 |
+
-Patrick Gerard
|
51 |
+
-Luke Twist <[email protected]>
|
52 |
+
-Joseph Hale <me _at_ jhale.dev>
|
53 |
+
-Santos Gallegos <stsewd _at_ proton.me>
|
54 |
+
-Wenhan Zhu <wzhu.cosmos _at_ gmail.com>
|
55 |
+
-Eliah Kagan <eliah.kagan _at_ gmail.com>
|
56 |
+
-Ethan Lin <et.repositories _at_ gmail.com>
|
57 |
+
|
58 |
+
Portions derived from other open source works and are clearly marked.
|
env/Lib/site-packages/GitPython-3.1.43.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
pip
|
env/Lib/site-packages/GitPython-3.1.43.dist-info/LICENSE
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Copyright (C) 2008, 2009 Michael Trier and contributors
|
2 |
+
All rights reserved.
|
3 |
+
|
4 |
+
Redistribution and use in source and binary forms, with or without
|
5 |
+
modification, are permitted provided that the following conditions
|
6 |
+
are met:
|
7 |
+
|
8 |
+
* Redistributions of source code must retain the above copyright
|
9 |
+
notice, this list of conditions and the following disclaimer.
|
10 |
+
|
11 |
+
* Redistributions in binary form must reproduce the above copyright
|
12 |
+
notice, this list of conditions and the following disclaimer in the
|
13 |
+
documentation and/or other materials provided with the distribution.
|
14 |
+
|
15 |
+
* Neither the name of the GitPython project nor the names of
|
16 |
+
its contributors may be used to endorse or promote products derived
|
17 |
+
from this software without specific prior written permission.
|
18 |
+
|
19 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
25 |
+
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
26 |
+
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
27 |
+
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
28 |
+
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
29 |
+
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
env/Lib/site-packages/GitPython-3.1.43.dist-info/METADATA
ADDED
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Metadata-Version: 2.1
|
2 |
+
Name: GitPython
|
3 |
+
Version: 3.1.43
|
4 |
+
Summary: GitPython is a Python library used to interact with Git repositories
|
5 |
+
Home-page: https://github.com/gitpython-developers/GitPython
|
6 |
+
Author: Sebastian Thiel, Michael Trier
|
7 |
+
Author-email: [email protected], [email protected]
|
8 |
+
License: BSD-3-Clause
|
9 |
+
Classifier: Development Status :: 5 - Production/Stable
|
10 |
+
Classifier: Environment :: Console
|
11 |
+
Classifier: Intended Audience :: Developers
|
12 |
+
Classifier: License :: OSI Approved :: BSD License
|
13 |
+
Classifier: Operating System :: OS Independent
|
14 |
+
Classifier: Operating System :: POSIX
|
15 |
+
Classifier: Operating System :: Microsoft :: Windows
|
16 |
+
Classifier: Operating System :: MacOS :: MacOS X
|
17 |
+
Classifier: Typing :: Typed
|
18 |
+
Classifier: Programming Language :: Python
|
19 |
+
Classifier: Programming Language :: Python :: 3
|
20 |
+
Classifier: Programming Language :: Python :: 3.7
|
21 |
+
Classifier: Programming Language :: Python :: 3.8
|
22 |
+
Classifier: Programming Language :: Python :: 3.9
|
23 |
+
Classifier: Programming Language :: Python :: 3.10
|
24 |
+
Classifier: Programming Language :: Python :: 3.11
|
25 |
+
Classifier: Programming Language :: Python :: 3.12
|
26 |
+
Requires-Python: >=3.7
|
27 |
+
Description-Content-Type: text/markdown
|
28 |
+
License-File: LICENSE
|
29 |
+
License-File: AUTHORS
|
30 |
+
Requires-Dist: gitdb <5,>=4.0.1
|
31 |
+
Requires-Dist: typing-extensions >=3.7.4.3 ; python_version < "3.8"
|
32 |
+
Provides-Extra: doc
|
33 |
+
Requires-Dist: sphinx ==4.3.2 ; extra == 'doc'
|
34 |
+
Requires-Dist: sphinx-rtd-theme ; extra == 'doc'
|
35 |
+
Requires-Dist: sphinxcontrib-applehelp <=1.0.4,>=1.0.2 ; extra == 'doc'
|
36 |
+
Requires-Dist: sphinxcontrib-devhelp ==1.0.2 ; extra == 'doc'
|
37 |
+
Requires-Dist: sphinxcontrib-htmlhelp <=2.0.1,>=2.0.0 ; extra == 'doc'
|
38 |
+
Requires-Dist: sphinxcontrib-qthelp ==1.0.3 ; extra == 'doc'
|
39 |
+
Requires-Dist: sphinxcontrib-serializinghtml ==1.1.5 ; extra == 'doc'
|
40 |
+
Requires-Dist: sphinx-autodoc-typehints ; extra == 'doc'
|
41 |
+
Provides-Extra: test
|
42 |
+
Requires-Dist: coverage[toml] ; extra == 'test'
|
43 |
+
Requires-Dist: ddt !=1.4.3,>=1.1.1 ; extra == 'test'
|
44 |
+
Requires-Dist: mypy ; extra == 'test'
|
45 |
+
Requires-Dist: pre-commit ; extra == 'test'
|
46 |
+
Requires-Dist: pytest >=7.3.1 ; extra == 'test'
|
47 |
+
Requires-Dist: pytest-cov ; extra == 'test'
|
48 |
+
Requires-Dist: pytest-instafail ; extra == 'test'
|
49 |
+
Requires-Dist: pytest-mock ; extra == 'test'
|
50 |
+
Requires-Dist: pytest-sugar ; extra == 'test'
|
51 |
+
Requires-Dist: typing-extensions ; (python_version < "3.11") and extra == 'test'
|
52 |
+
Requires-Dist: mock ; (python_version < "3.8") and extra == 'test'
|
53 |
+
|
54 |
+
![Python package](https://github.com/gitpython-developers/GitPython/workflows/Python%20package/badge.svg)
|
55 |
+
[![Documentation Status](https://readthedocs.org/projects/gitpython/badge/?version=stable)](https://readthedocs.org/projects/gitpython/?badge=stable)
|
56 |
+
[![Packaging status](https://repology.org/badge/tiny-repos/python:gitpython.svg)](https://repology.org/metapackage/python:gitpython/versions)
|
57 |
+
|
58 |
+
## [Gitoxide](https://github.com/Byron/gitoxide): A peek into the future…
|
59 |
+
|
60 |
+
I started working on GitPython in 2009, back in the days when Python was 'my thing' and I had great plans with it.
|
61 |
+
Of course, back in the days, I didn't really know what I was doing and this shows in many places. Somewhat similar to
|
62 |
+
Python this happens to be 'good enough', but at the same time is deeply flawed and broken beyond repair.
|
63 |
+
|
64 |
+
By now, GitPython is widely used and I am sure there is a good reason for that, it's something to be proud of and happy about.
|
65 |
+
The community is maintaining the software and is keeping it relevant for which I am absolutely grateful. For the time to come I am happy to continue maintaining GitPython, remaining hopeful that one day it won't be needed anymore.
|
66 |
+
|
67 |
+
More than 15 years after my first meeting with 'git' I am still in excited about it, and am happy to finally have the tools and
|
68 |
+
probably the skills to scratch that itch of mine: implement `git` in a way that makes tool creation a piece of cake for most.
|
69 |
+
|
70 |
+
If you like the idea and want to learn more, please head over to [gitoxide](https://github.com/Byron/gitoxide), an
|
71 |
+
implementation of 'git' in [Rust](https://www.rust-lang.org).
|
72 |
+
|
73 |
+
*(Please note that `gitoxide` is not currently available for use in Python, and that Rust is required.)*
|
74 |
+
|
75 |
+
## GitPython
|
76 |
+
|
77 |
+
GitPython is a python library used to interact with git repositories, high-level like git-porcelain,
|
78 |
+
or low-level like git-plumbing.
|
79 |
+
|
80 |
+
It provides abstractions of git objects for easy access of repository data often backed by calling the `git`
|
81 |
+
command-line program.
|
82 |
+
|
83 |
+
### DEVELOPMENT STATUS
|
84 |
+
|
85 |
+
This project is in **maintenance mode**, which means that
|
86 |
+
|
87 |
+
- …there will be no feature development, unless these are contributed
|
88 |
+
- …there will be no bug fixes, unless they are relevant to the safety of users, or contributed
|
89 |
+
- …issues will be responded to with waiting times of up to a month
|
90 |
+
|
91 |
+
The project is open to contributions of all kinds, as well as new maintainers.
|
92 |
+
|
93 |
+
### REQUIREMENTS
|
94 |
+
|
95 |
+
GitPython needs the `git` executable to be installed on the system and available in your
|
96 |
+
`PATH` for most operations. If it is not in your `PATH`, you can help GitPython find it
|
97 |
+
by setting the `GIT_PYTHON_GIT_EXECUTABLE=<path/to/git>` environment variable.
|
98 |
+
|
99 |
+
- Git (1.7.x or newer)
|
100 |
+
- Python >= 3.7
|
101 |
+
|
102 |
+
The list of dependencies are listed in `./requirements.txt` and `./test-requirements.txt`.
|
103 |
+
The installer takes care of installing them for you.
|
104 |
+
|
105 |
+
### INSTALL
|
106 |
+
|
107 |
+
GitPython and its required package dependencies can be installed in any of the following ways, all of which should typically be done in a [virtual environment](https://docs.python.org/3/tutorial/venv.html).
|
108 |
+
|
109 |
+
#### From PyPI
|
110 |
+
|
111 |
+
To obtain and install a copy [from PyPI](https://pypi.org/project/GitPython/), run:
|
112 |
+
|
113 |
+
```sh
|
114 |
+
pip install GitPython
|
115 |
+
```
|
116 |
+
|
117 |
+
(A distribution package can also be downloaded for manual installation at [the PyPI page](https://pypi.org/project/GitPython/).)
|
118 |
+
|
119 |
+
#### From downloaded source code
|
120 |
+
|
121 |
+
If you have downloaded the source code, run this from inside the unpacked `GitPython` directory:
|
122 |
+
|
123 |
+
```sh
|
124 |
+
pip install .
|
125 |
+
```
|
126 |
+
|
127 |
+
#### By cloning the source code repository
|
128 |
+
|
129 |
+
To clone the [the GitHub repository](https://github.com/gitpython-developers/GitPython) from source to work on the code, you can do it like so:
|
130 |
+
|
131 |
+
```sh
|
132 |
+
git clone https://github.com/gitpython-developers/GitPython
|
133 |
+
cd GitPython
|
134 |
+
./init-tests-after-clone.sh
|
135 |
+
```
|
136 |
+
|
137 |
+
On Windows, `./init-tests-after-clone.sh` can be run in a Git Bash shell.
|
138 |
+
|
139 |
+
If you are cloning [your own fork](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/about-forks), then replace the above `git clone` command with one that gives the URL of your fork. Or use this [`gh`](https://cli.github.com/) command (assuming you have `gh` and your fork is called `GitPython`):
|
140 |
+
|
141 |
+
```sh
|
142 |
+
gh repo clone GitPython
|
143 |
+
```
|
144 |
+
|
145 |
+
Having cloned the repo, create and activate your [virtual environment](https://docs.python.org/3/tutorial/venv.html).
|
146 |
+
|
147 |
+
Then make an [editable install](https://pip.pypa.io/en/stable/topics/local-project-installs/#editable-installs):
|
148 |
+
|
149 |
+
```sh
|
150 |
+
pip install -e ".[test]"
|
151 |
+
```
|
152 |
+
|
153 |
+
In the less common case that you do not want to install test dependencies, `pip install -e .` can be used instead.
|
154 |
+
|
155 |
+
#### With editable *dependencies* (not preferred, and rarely needed)
|
156 |
+
|
157 |
+
In rare cases, you may want to work on GitPython and one or both of its [gitdb](https://github.com/gitpython-developers/gitdb) and [smmap](https://github.com/gitpython-developers/smmap) dependencies at the same time, with changes in your local working copy of gitdb or smmap immediatley reflected in the behavior of your local working copy of GitPython. This can be done by making editable installations of those dependencies in the same virtual environment where you install GitPython.
|
158 |
+
|
159 |
+
If you want to do that *and* you want the versions in GitPython's git submodules to be used, then pass `-e git/ext/gitdb` and/or `-e git/ext/gitdb/gitdb/ext/smmap` to `pip install`. This can be done in any order, and in separate `pip install` commands or the same one, so long as `-e` appears before *each* path. For example, you can install GitPython, gitdb, and smmap editably in the currently active virtual environment this way:
|
160 |
+
|
161 |
+
```sh
|
162 |
+
pip install -e ".[test]" -e git/ext/gitdb -e git/ext/gitdb/gitdb/ext/smmap
|
163 |
+
```
|
164 |
+
|
165 |
+
The submodules must have been cloned for that to work, but that will already be the case if you have run `./init-tests-after-clone.sh`. You can use `pip list` to check which packages are installed editably and which are installed normally.
|
166 |
+
|
167 |
+
To reiterate, this approach should only rarely be used. For most development it is preferable to allow the gitdb and smmap dependencices to be retrieved automatically from PyPI in their latest stable packaged versions.
|
168 |
+
|
169 |
+
### Limitations
|
170 |
+
|
171 |
+
#### Leakage of System Resources
|
172 |
+
|
173 |
+
GitPython is not suited for long-running processes (like daemons) as it tends to
|
174 |
+
leak system resources. It was written in a time where destructors (as implemented
|
175 |
+
in the `__del__` method) still ran deterministically.
|
176 |
+
|
177 |
+
In case you still want to use it in such a context, you will want to search the
|
178 |
+
codebase for `__del__` implementations and call these yourself when you see fit.
|
179 |
+
|
180 |
+
Another way assure proper cleanup of resources is to factor out GitPython into a
|
181 |
+
separate process which can be dropped periodically.
|
182 |
+
|
183 |
+
#### Windows support
|
184 |
+
|
185 |
+
See [Issue #525](https://github.com/gitpython-developers/GitPython/issues/525).
|
186 |
+
|
187 |
+
### RUNNING TESTS
|
188 |
+
|
189 |
+
_Important_: Right after cloning this repository, please be sure to have executed
|
190 |
+
the `./init-tests-after-clone.sh` script in the repository root. Otherwise
|
191 |
+
you will encounter test failures.
|
192 |
+
|
193 |
+
#### Install test dependencies
|
194 |
+
|
195 |
+
Ensure testing libraries are installed. This is taken care of already if you installed with:
|
196 |
+
|
197 |
+
```sh
|
198 |
+
pip install -e ".[test]"
|
199 |
+
```
|
200 |
+
|
201 |
+
If you had installed with a command like `pip install -e .` instead, you can still run
|
202 |
+
the above command to add the testing dependencies.
|
203 |
+
|
204 |
+
#### Test commands
|
205 |
+
|
206 |
+
To test, run:
|
207 |
+
|
208 |
+
```sh
|
209 |
+
pytest
|
210 |
+
```
|
211 |
+
|
212 |
+
To lint, and apply some linting fixes as well as automatic code formatting, run:
|
213 |
+
|
214 |
+
```sh
|
215 |
+
pre-commit run --all-files
|
216 |
+
```
|
217 |
+
|
218 |
+
This includes the linting and autoformatting done by Ruff, as well as some other checks.
|
219 |
+
|
220 |
+
To typecheck, run:
|
221 |
+
|
222 |
+
```sh
|
223 |
+
mypy
|
224 |
+
```
|
225 |
+
|
226 |
+
#### CI (and tox)
|
227 |
+
|
228 |
+
Style and formatting checks, and running tests on all the different supported Python versions, will be performed:
|
229 |
+
|
230 |
+
- Upon submitting a pull request.
|
231 |
+
- On each push, *if* you have a fork with GitHub Actions enabled.
|
232 |
+
- Locally, if you run [`tox`](https://tox.wiki/) (this skips any Python versions you don't have installed).
|
233 |
+
|
234 |
+
#### Configuration files
|
235 |
+
|
236 |
+
Specific tools are all configured in the `./pyproject.toml` file:
|
237 |
+
|
238 |
+
- `pytest` (test runner)
|
239 |
+
- `coverage.py` (code coverage)
|
240 |
+
- `ruff` (linter and formatter)
|
241 |
+
- `mypy` (type checker)
|
242 |
+
|
243 |
+
Orchestration tools:
|
244 |
+
|
245 |
+
- Configuration for `pre-commit` is in the `./.pre-commit-config.yaml` file.
|
246 |
+
- Configuration for `tox` is in `./tox.ini`.
|
247 |
+
- Configuration for GitHub Actions (CI) is in files inside `./.github/workflows/`.
|
248 |
+
|
249 |
+
### Contributions
|
250 |
+
|
251 |
+
Please have a look at the [contributions file][contributing].
|
252 |
+
|
253 |
+
### INFRASTRUCTURE
|
254 |
+
|
255 |
+
- [User Documentation](http://gitpython.readthedocs.org)
|
256 |
+
- [Questions and Answers](http://stackexchange.com/filters/167317/gitpython)
|
257 |
+
- Please post on Stack Overflow and use the `gitpython` tag
|
258 |
+
- [Issue Tracker](https://github.com/gitpython-developers/GitPython/issues)
|
259 |
+
- Post reproducible bugs and feature requests as a new issue.
|
260 |
+
Please be sure to provide the following information if posting bugs:
|
261 |
+
- GitPython version (e.g. `import git; git.__version__`)
|
262 |
+
- Python version (e.g. `python --version`)
|
263 |
+
- The encountered stack-trace, if applicable
|
264 |
+
- Enough information to allow reproducing the issue
|
265 |
+
|
266 |
+
### How to make a new release
|
267 |
+
|
268 |
+
1. Update/verify the **version** in the `VERSION` file.
|
269 |
+
2. Update/verify that the `doc/source/changes.rst` changelog file was updated. It should include a link to the forthcoming release page: `https://github.com/gitpython-developers/GitPython/releases/tag/<version>`
|
270 |
+
3. Commit everything.
|
271 |
+
4. Run `git tag -s <version>` to tag the version in Git.
|
272 |
+
5. _Optionally_ create and activate a [virtual environment](https://packaging.python.org/en/latest/guides/installing-using-pip-and-virtual-environments/#creating-a-virtual-environment). (Then the next step can install `build` and `twine`.)
|
273 |
+
6. Run `make release`.
|
274 |
+
7. Go to [GitHub Releases](https://github.com/gitpython-developers/GitPython/releases) and publish a new one with the recently pushed tag. Generate the changelog.
|
275 |
+
|
276 |
+
### Projects using GitPython
|
277 |
+
|
278 |
+
- [PyDriller](https://github.com/ishepard/pydriller)
|
279 |
+
- [Kivy Designer](https://github.com/kivy/kivy-designer)
|
280 |
+
- [Prowl](https://github.com/nettitude/Prowl)
|
281 |
+
- [Python Taint](https://github.com/python-security/pyt)
|
282 |
+
- [Buster](https://github.com/axitkhurana/buster)
|
283 |
+
- [git-ftp](https://github.com/ezyang/git-ftp)
|
284 |
+
- [Git-Pandas](https://github.com/wdm0006/git-pandas)
|
285 |
+
- [PyGitUp](https://github.com/msiemens/PyGitUp)
|
286 |
+
- [PyJFuzz](https://github.com/mseclab/PyJFuzz)
|
287 |
+
- [Loki](https://github.com/Neo23x0/Loki)
|
288 |
+
- [Omniwallet](https://github.com/OmniLayer/omniwallet)
|
289 |
+
- [GitViper](https://github.com/BeayemX/GitViper)
|
290 |
+
- [Git Gud](https://github.com/bthayer2365/git-gud)
|
291 |
+
|
292 |
+
### LICENSE
|
293 |
+
|
294 |
+
[3-Clause BSD License](https://opensource.org/license/bsd-3-clause/), also known as the New BSD License. See the [LICENSE file][license].
|
295 |
+
|
296 |
+
[contributing]: https://github.com/gitpython-developers/GitPython/blob/main/CONTRIBUTING.md
|
297 |
+
[license]: https://github.com/gitpython-developers/GitPython/blob/main/LICENSE
|
env/Lib/site-packages/GitPython-3.1.43.dist-info/RECORD
ADDED
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
GitPython-3.1.43.dist-info/AUTHORS,sha256=h1TlPKfp05GA1eKQ15Yl4biR0C0FgivuGSeRA6Q1dz0,2286
|
2 |
+
GitPython-3.1.43.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
3 |
+
GitPython-3.1.43.dist-info/LICENSE,sha256=hvyUwyGpr7wRUUcTURuv3tIl8lEA3MD3NQ6CvCMbi-s,1503
|
4 |
+
GitPython-3.1.43.dist-info/METADATA,sha256=sAh3r1BMVw5_olGgDmpMS69zBpVr7UEOeRivNHKznfU,13376
|
5 |
+
GitPython-3.1.43.dist-info/RECORD,,
|
6 |
+
GitPython-3.1.43.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
|
7 |
+
GitPython-3.1.43.dist-info/top_level.txt,sha256=0hzDuIp8obv624V3GmbqsagBWkk8ohtGU-Bc1PmTT0o,4
|
8 |
+
git/__init__.py,sha256=w6fnS0QmwTfEFUSL6rfnpP0lUId2goSguZFOvVX3N3U,8899
|
9 |
+
git/__pycache__/__init__.cpython-311.pyc,,
|
10 |
+
git/__pycache__/cmd.cpython-311.pyc,,
|
11 |
+
git/__pycache__/compat.cpython-311.pyc,,
|
12 |
+
git/__pycache__/config.cpython-311.pyc,,
|
13 |
+
git/__pycache__/db.cpython-311.pyc,,
|
14 |
+
git/__pycache__/diff.cpython-311.pyc,,
|
15 |
+
git/__pycache__/exc.cpython-311.pyc,,
|
16 |
+
git/__pycache__/remote.cpython-311.pyc,,
|
17 |
+
git/__pycache__/types.cpython-311.pyc,,
|
18 |
+
git/__pycache__/util.cpython-311.pyc,,
|
19 |
+
git/cmd.py,sha256=qd-gIHSk4mfsYjd9YA08cPyO8TMxaibTXAbFnHK71uc,67659
|
20 |
+
git/compat.py,sha256=y1E6y6O2q5r8clSlr8ZNmuIWG9nmHuehQEsVsmBffs8,4526
|
21 |
+
git/config.py,sha256=Ald8Xc-G9Shcgx3QCISyXTkL4a6nbc3qll-xUw4YdyY,34924
|
22 |
+
git/db.py,sha256=vIW9uWSbqu99zbuU2ZDmOhVOv1UPTmxrnqiCtRHCfjE,2368
|
23 |
+
git/diff.py,sha256=IE5aeHL7aP9yxBluYj06IX8nZjoJ_TOM3gG31-Evf_8,27058
|
24 |
+
git/exc.py,sha256=Gc7g1pHpn8OmTse30NHmJVsBJ2CYH8LxaR8y8UA3lIM,7119
|
25 |
+
git/index/__init__.py,sha256=i-Nqb8Lufp9aFbmxpQBORmmQnjEVVM1Pn58fsQkyGgQ,406
|
26 |
+
git/index/__pycache__/__init__.cpython-311.pyc,,
|
27 |
+
git/index/__pycache__/base.cpython-311.pyc,,
|
28 |
+
git/index/__pycache__/fun.cpython-311.pyc,,
|
29 |
+
git/index/__pycache__/typ.cpython-311.pyc,,
|
30 |
+
git/index/__pycache__/util.cpython-311.pyc,,
|
31 |
+
git/index/base.py,sha256=A4q4cN_Ifxi8CsAR-7h4KsQ2d3JazBNFZ1ltbAKttgs,60734
|
32 |
+
git/index/fun.py,sha256=37cA3DBC9vpAnSVu5TGA072SnoF5XZOkOukExwlejHs,16736
|
33 |
+
git/index/typ.py,sha256=uuKNwitUw83FhVaLSwo4pY7PHDQudtZTLJrLGym4jcI,6570
|
34 |
+
git/index/util.py,sha256=fULi7GPG-MvprKrRCD5c15GNdzku_1E38We0d97WB3A,3659
|
35 |
+
git/objects/__init__.py,sha256=O6ZL_olX7e5-8iIbKviRPkVSJxN37WA-EC0q9d48U5Y,637
|
36 |
+
git/objects/__pycache__/__init__.cpython-311.pyc,,
|
37 |
+
git/objects/__pycache__/base.cpython-311.pyc,,
|
38 |
+
git/objects/__pycache__/blob.cpython-311.pyc,,
|
39 |
+
git/objects/__pycache__/commit.cpython-311.pyc,,
|
40 |
+
git/objects/__pycache__/fun.cpython-311.pyc,,
|
41 |
+
git/objects/__pycache__/tag.cpython-311.pyc,,
|
42 |
+
git/objects/__pycache__/tree.cpython-311.pyc,,
|
43 |
+
git/objects/__pycache__/util.cpython-311.pyc,,
|
44 |
+
git/objects/base.py,sha256=0dqNkSRVH0mk0-7ZKIkGBK7iNYrzLTVxwQFUd6CagsE,10277
|
45 |
+
git/objects/blob.py,sha256=zwwq0KfOMYeP5J2tW5CQatoLyeqFRlfkxP1Vwx1h07s,1215
|
46 |
+
git/objects/commit.py,sha256=vLZNl1I9zp17Rpge7J66CvsryirEs90jyPTQzoP0JJs,30208
|
47 |
+
git/objects/fun.py,sha256=B4jCqhAjm6Hl79GK58FPzW1H9K6Wc7Tx0rssyWmAcEE,8935
|
48 |
+
git/objects/submodule/__init__.py,sha256=6xySp767LVz3UylWgUalntS_nGXRuVzXxDuFAv_Wc2c,303
|
49 |
+
git/objects/submodule/__pycache__/__init__.cpython-311.pyc,,
|
50 |
+
git/objects/submodule/__pycache__/base.cpython-311.pyc,,
|
51 |
+
git/objects/submodule/__pycache__/root.cpython-311.pyc,,
|
52 |
+
git/objects/submodule/__pycache__/util.cpython-311.pyc,,
|
53 |
+
git/objects/submodule/base.py,sha256=MQ-2xV8JznGwy2hLQv1aeQNgAkhBhgc5tdtClFL3DmE,63901
|
54 |
+
git/objects/submodule/root.py,sha256=5eTtYNHasqdPq6q0oDCPr7IaO6uAHL3b4DxMoiO2LhE,20246
|
55 |
+
git/objects/submodule/util.py,sha256=sQqAYaiSJdFkZa9NlAuK_wTsMNiS-kkQnQjvIoJtc_o,3509
|
56 |
+
git/objects/tag.py,sha256=gAx8i-DEwy_Z3R2zLkvetYRV8A56BCcTr3iLuTUTfEM,4467
|
57 |
+
git/objects/tree.py,sha256=jJH888SHiP4dGzE-ra1yenQOyya_0C_MkHr06c1gHpM,13849
|
58 |
+
git/objects/util.py,sha256=Ml2eqZPKO4y9Hc2vWbXJgpsK3nkN3KGMzbn8AlzLyYQ,23834
|
59 |
+
git/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
60 |
+
git/refs/__init__.py,sha256=DWlJNnsx-4jM_E-VycbP-FZUdn6iWhjnH_uZ_pZXBro,509
|
61 |
+
git/refs/__pycache__/__init__.cpython-311.pyc,,
|
62 |
+
git/refs/__pycache__/head.cpython-311.pyc,,
|
63 |
+
git/refs/__pycache__/log.cpython-311.pyc,,
|
64 |
+
git/refs/__pycache__/reference.cpython-311.pyc,,
|
65 |
+
git/refs/__pycache__/remote.cpython-311.pyc,,
|
66 |
+
git/refs/__pycache__/symbolic.cpython-311.pyc,,
|
67 |
+
git/refs/__pycache__/tag.cpython-311.pyc,,
|
68 |
+
git/refs/head.py,sha256=GAZpD5EfqSciDXPtgjHY8ZbBixKExJRhojUB-HrrJPg,10491
|
69 |
+
git/refs/log.py,sha256=kXiuAgTo1DIuM_BfbDUk9gQ0YO-mutIMVdHv1_ES90o,12493
|
70 |
+
git/refs/reference.py,sha256=l6mhF4YLSEwtjz6b9PpOQH-fkng7EYWMaJhkjn-2jXA,5630
|
71 |
+
git/refs/remote.py,sha256=WwqV9T7BbYf3F_WZNUQivu9xktIIKGklCjDpwQrhD-A,2806
|
72 |
+
git/refs/symbolic.py,sha256=c8zOwaqzcg-J-rGrpuWdvh8zwMvSUqAHghd4vJoYG_s,34552
|
73 |
+
git/refs/tag.py,sha256=kgzV2vhpL4FD2TqHb0BJuMRAHgAvJF-TcoyWlaB-djQ,5010
|
74 |
+
git/remote.py,sha256=IHQ3BvXgoIN1EvHlyH3vrSaQoDkLOE6nooSC0w183sU,46561
|
75 |
+
git/repo/__init__.py,sha256=CILSVH36fX_WxVFSjD9o1WF5LgsNedPiJvSngKZqfVU,210
|
76 |
+
git/repo/__pycache__/__init__.cpython-311.pyc,,
|
77 |
+
git/repo/__pycache__/base.cpython-311.pyc,,
|
78 |
+
git/repo/__pycache__/fun.cpython-311.pyc,,
|
79 |
+
git/repo/base.py,sha256=mitfJ8u99CsMpDd7_VRyx-SF8omu2tpf3lqzSaQkKoQ,59353
|
80 |
+
git/repo/fun.py,sha256=tEsClpmbOrKMSNIdncOB_6JdikrL1-AfkOFd7xMpD8k,13582
|
81 |
+
git/types.py,sha256=xCwpp2Y01lhS0MapHhj04m0P_x34kwSD1Gsou_ZPWj8,10251
|
82 |
+
git/util.py,sha256=1E883mnPAFLyFk7ivwnEremsp-uJOTc3ks_QypyLung,43651
|
env/Lib/site-packages/GitPython-3.1.43.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Wheel-Version: 1.0
|
2 |
+
Generator: bdist_wheel (0.43.0)
|
3 |
+
Root-Is-Purelib: true
|
4 |
+
Tag: py3-none-any
|
5 |
+
|
env/Lib/site-packages/GitPython-3.1.43.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
git
|
env/Lib/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
pip
|
env/Lib/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Copyright 2010 Pallets
|
2 |
+
|
3 |
+
Redistribution and use in source and binary forms, with or without
|
4 |
+
modification, are permitted provided that the following conditions are
|
5 |
+
met:
|
6 |
+
|
7 |
+
1. Redistributions of source code must retain the above copyright
|
8 |
+
notice, this list of conditions and the following disclaimer.
|
9 |
+
|
10 |
+
2. Redistributions in binary form must reproduce the above copyright
|
11 |
+
notice, this list of conditions and the following disclaimer in the
|
12 |
+
documentation and/or other materials provided with the distribution.
|
13 |
+
|
14 |
+
3. Neither the name of the copyright holder nor the names of its
|
15 |
+
contributors may be used to endorse or promote products derived from
|
16 |
+
this software without specific prior written permission.
|
17 |
+
|
18 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
19 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
20 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
21 |
+
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
22 |
+
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
23 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
24 |
+
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
25 |
+
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
26 |
+
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
27 |
+
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
28 |
+
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
env/Lib/site-packages/MarkupSafe-2.1.5.dist-info/METADATA
ADDED
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Metadata-Version: 2.1
|
2 |
+
Name: MarkupSafe
|
3 |
+
Version: 2.1.5
|
4 |
+
Summary: Safely add untrusted strings to HTML/XML markup.
|
5 |
+
Home-page: https://palletsprojects.com/p/markupsafe/
|
6 |
+
Maintainer: Pallets
|
7 |
+
Maintainer-email: [email protected]
|
8 |
+
License: BSD-3-Clause
|
9 |
+
Project-URL: Donate, https://palletsprojects.com/donate
|
10 |
+
Project-URL: Documentation, https://markupsafe.palletsprojects.com/
|
11 |
+
Project-URL: Changes, https://markupsafe.palletsprojects.com/changes/
|
12 |
+
Project-URL: Source Code, https://github.com/pallets/markupsafe/
|
13 |
+
Project-URL: Issue Tracker, https://github.com/pallets/markupsafe/issues/
|
14 |
+
Project-URL: Chat, https://discord.gg/pallets
|
15 |
+
Classifier: Development Status :: 5 - Production/Stable
|
16 |
+
Classifier: Environment :: Web Environment
|
17 |
+
Classifier: Intended Audience :: Developers
|
18 |
+
Classifier: License :: OSI Approved :: BSD License
|
19 |
+
Classifier: Operating System :: OS Independent
|
20 |
+
Classifier: Programming Language :: Python
|
21 |
+
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
|
22 |
+
Classifier: Topic :: Text Processing :: Markup :: HTML
|
23 |
+
Requires-Python: >=3.7
|
24 |
+
Description-Content-Type: text/x-rst
|
25 |
+
License-File: LICENSE.rst
|
26 |
+
|
27 |
+
MarkupSafe
|
28 |
+
==========
|
29 |
+
|
30 |
+
MarkupSafe implements a text object that escapes characters so it is
|
31 |
+
safe to use in HTML and XML. Characters that have special meanings are
|
32 |
+
replaced so that they display as the actual characters. This mitigates
|
33 |
+
injection attacks, meaning untrusted user input can safely be displayed
|
34 |
+
on a page.
|
35 |
+
|
36 |
+
|
37 |
+
Installing
|
38 |
+
----------
|
39 |
+
|
40 |
+
Install and update using `pip`_:
|
41 |
+
|
42 |
+
.. code-block:: text
|
43 |
+
|
44 |
+
pip install -U MarkupSafe
|
45 |
+
|
46 |
+
.. _pip: https://pip.pypa.io/en/stable/getting-started/
|
47 |
+
|
48 |
+
|
49 |
+
Examples
|
50 |
+
--------
|
51 |
+
|
52 |
+
.. code-block:: pycon
|
53 |
+
|
54 |
+
>>> from markupsafe import Markup, escape
|
55 |
+
|
56 |
+
>>> # escape replaces special characters and wraps in Markup
|
57 |
+
>>> escape("<script>alert(document.cookie);</script>")
|
58 |
+
Markup('<script>alert(document.cookie);</script>')
|
59 |
+
|
60 |
+
>>> # wrap in Markup to mark text "safe" and prevent escaping
|
61 |
+
>>> Markup("<strong>Hello</strong>")
|
62 |
+
Markup('<strong>hello</strong>')
|
63 |
+
|
64 |
+
>>> escape(Markup("<strong>Hello</strong>"))
|
65 |
+
Markup('<strong>hello</strong>')
|
66 |
+
|
67 |
+
>>> # Markup is a str subclass
|
68 |
+
>>> # methods and operators escape their arguments
|
69 |
+
>>> template = Markup("Hello <em>{name}</em>")
|
70 |
+
>>> template.format(name='"World"')
|
71 |
+
Markup('Hello <em>"World"</em>')
|
72 |
+
|
73 |
+
|
74 |
+
Donate
|
75 |
+
------
|
76 |
+
|
77 |
+
The Pallets organization develops and supports MarkupSafe and other
|
78 |
+
popular packages. In order to grow the community of contributors and
|
79 |
+
users, and allow the maintainers to devote more time to the projects,
|
80 |
+
`please donate today`_.
|
81 |
+
|
82 |
+
.. _please donate today: https://palletsprojects.com/donate
|
83 |
+
|
84 |
+
|
85 |
+
Links
|
86 |
+
-----
|
87 |
+
|
88 |
+
- Documentation: https://markupsafe.palletsprojects.com/
|
89 |
+
- Changes: https://markupsafe.palletsprojects.com/changes/
|
90 |
+
- PyPI Releases: https://pypi.org/project/MarkupSafe/
|
91 |
+
- Source Code: https://github.com/pallets/markupsafe/
|
92 |
+
- Issue Tracker: https://github.com/pallets/markupsafe/issues/
|
93 |
+
- Chat: https://discord.gg/pallets
|
env/Lib/site-packages/MarkupSafe-2.1.5.dist-info/RECORD
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
MarkupSafe-2.1.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
2 |
+
MarkupSafe-2.1.5.dist-info/LICENSE.rst,sha256=RjHsDbX9kKVH4zaBcmTGeYIUM4FG-KyUtKV_lu6MnsQ,1503
|
3 |
+
MarkupSafe-2.1.5.dist-info/METADATA,sha256=icNlaniV7YIQZ1BScCVqNaRtm7MAgfw8d3OBmoSVyAY,3096
|
4 |
+
MarkupSafe-2.1.5.dist-info/RECORD,,
|
5 |
+
MarkupSafe-2.1.5.dist-info/WHEEL,sha256=ircjsfhzblqgSzO8ow7-0pXK-RVqDqNRGQ8F650AUNM,102
|
6 |
+
MarkupSafe-2.1.5.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
|
7 |
+
markupsafe/__init__.py,sha256=m1ysNeqf55zbEoJtaovca40ivrkEFolPlw5bGoC5Gi4,11290
|
8 |
+
markupsafe/__pycache__/__init__.cpython-311.pyc,,
|
9 |
+
markupsafe/__pycache__/_native.cpython-311.pyc,,
|
10 |
+
markupsafe/_native.py,sha256=_Q7UsXCOvgdonCgqG3l5asANI6eo50EKnDM-mlwEC5M,1776
|
11 |
+
markupsafe/_speedups.c,sha256=n3jzzaJwXcoN8nTFyA53f3vSqsWK2vujI-v6QYifjhQ,7403
|
12 |
+
markupsafe/_speedups.cp311-win_amd64.pyd,sha256=MEqnkyBOHmstwQr50hKitovHjrHhMJ0gYmya4Fu1DK0,15872
|
13 |
+
markupsafe/_speedups.pyi,sha256=f5QtwIOP0eLrxh2v5p6SmaYmlcHIGIfmz0DovaqL0OU,238
|
14 |
+
markupsafe/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
env/Lib/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Wheel-Version: 1.0
|
2 |
+
Generator: bdist_wheel (0.42.0)
|
3 |
+
Root-Is-Purelib: false
|
4 |
+
Tag: cp311-cp311-win_amd64
|
5 |
+
|
env/Lib/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
markupsafe
|
env/Lib/site-packages/PIL/BdfFontFile.py
ADDED
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# The Python Imaging Library
|
3 |
+
# $Id$
|
4 |
+
#
|
5 |
+
# bitmap distribution font (bdf) file parser
|
6 |
+
#
|
7 |
+
# history:
|
8 |
+
# 1996-05-16 fl created (as bdf2pil)
|
9 |
+
# 1997-08-25 fl converted to FontFile driver
|
10 |
+
# 2001-05-25 fl removed bogus __init__ call
|
11 |
+
# 2002-11-20 fl robustification (from Kevin Cazabon, Dmitry Vasiliev)
|
12 |
+
# 2003-04-22 fl more robustification (from Graham Dumpleton)
|
13 |
+
#
|
14 |
+
# Copyright (c) 1997-2003 by Secret Labs AB.
|
15 |
+
# Copyright (c) 1997-2003 by Fredrik Lundh.
|
16 |
+
#
|
17 |
+
# See the README file for information on usage and redistribution.
|
18 |
+
#
|
19 |
+
|
20 |
+
"""
|
21 |
+
Parse X Bitmap Distribution Format (BDF)
|
22 |
+
"""
|
23 |
+
from __future__ import annotations
|
24 |
+
|
25 |
+
from typing import BinaryIO
|
26 |
+
|
27 |
+
from . import FontFile, Image
|
28 |
+
|
29 |
+
bdf_slant = {
|
30 |
+
"R": "Roman",
|
31 |
+
"I": "Italic",
|
32 |
+
"O": "Oblique",
|
33 |
+
"RI": "Reverse Italic",
|
34 |
+
"RO": "Reverse Oblique",
|
35 |
+
"OT": "Other",
|
36 |
+
}
|
37 |
+
|
38 |
+
bdf_spacing = {"P": "Proportional", "M": "Monospaced", "C": "Cell"}
|
39 |
+
|
40 |
+
|
41 |
+
def bdf_char(
|
42 |
+
f: BinaryIO,
|
43 |
+
) -> (
|
44 |
+
tuple[
|
45 |
+
str,
|
46 |
+
int,
|
47 |
+
tuple[tuple[int, int], tuple[int, int, int, int], tuple[int, int, int, int]],
|
48 |
+
Image.Image,
|
49 |
+
]
|
50 |
+
| None
|
51 |
+
):
|
52 |
+
# skip to STARTCHAR
|
53 |
+
while True:
|
54 |
+
s = f.readline()
|
55 |
+
if not s:
|
56 |
+
return None
|
57 |
+
if s[:9] == b"STARTCHAR":
|
58 |
+
break
|
59 |
+
id = s[9:].strip().decode("ascii")
|
60 |
+
|
61 |
+
# load symbol properties
|
62 |
+
props = {}
|
63 |
+
while True:
|
64 |
+
s = f.readline()
|
65 |
+
if not s or s[:6] == b"BITMAP":
|
66 |
+
break
|
67 |
+
i = s.find(b" ")
|
68 |
+
props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii")
|
69 |
+
|
70 |
+
# load bitmap
|
71 |
+
bitmap = bytearray()
|
72 |
+
while True:
|
73 |
+
s = f.readline()
|
74 |
+
if not s or s[:7] == b"ENDCHAR":
|
75 |
+
break
|
76 |
+
bitmap += s[:-1]
|
77 |
+
|
78 |
+
# The word BBX
|
79 |
+
# followed by the width in x (BBw), height in y (BBh),
|
80 |
+
# and x and y displacement (BBxoff0, BByoff0)
|
81 |
+
# of the lower left corner from the origin of the character.
|
82 |
+
width, height, x_disp, y_disp = (int(p) for p in props["BBX"].split())
|
83 |
+
|
84 |
+
# The word DWIDTH
|
85 |
+
# followed by the width in x and y of the character in device pixels.
|
86 |
+
dwx, dwy = (int(p) for p in props["DWIDTH"].split())
|
87 |
+
|
88 |
+
bbox = (
|
89 |
+
(dwx, dwy),
|
90 |
+
(x_disp, -y_disp - height, width + x_disp, -y_disp),
|
91 |
+
(0, 0, width, height),
|
92 |
+
)
|
93 |
+
|
94 |
+
try:
|
95 |
+
im = Image.frombytes("1", (width, height), bitmap, "hex", "1")
|
96 |
+
except ValueError:
|
97 |
+
# deal with zero-width characters
|
98 |
+
im = Image.new("1", (width, height))
|
99 |
+
|
100 |
+
return id, int(props["ENCODING"]), bbox, im
|
101 |
+
|
102 |
+
|
103 |
+
class BdfFontFile(FontFile.FontFile):
|
104 |
+
"""Font file plugin for the X11 BDF format."""
|
105 |
+
|
106 |
+
def __init__(self, fp: BinaryIO) -> None:
|
107 |
+
super().__init__()
|
108 |
+
|
109 |
+
s = fp.readline()
|
110 |
+
if s[:13] != b"STARTFONT 2.1":
|
111 |
+
msg = "not a valid BDF file"
|
112 |
+
raise SyntaxError(msg)
|
113 |
+
|
114 |
+
props = {}
|
115 |
+
comments = []
|
116 |
+
|
117 |
+
while True:
|
118 |
+
s = fp.readline()
|
119 |
+
if not s or s[:13] == b"ENDPROPERTIES":
|
120 |
+
break
|
121 |
+
i = s.find(b" ")
|
122 |
+
props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii")
|
123 |
+
if s[:i] in [b"COMMENT", b"COPYRIGHT"]:
|
124 |
+
if s.find(b"LogicalFontDescription") < 0:
|
125 |
+
comments.append(s[i + 1 : -1].decode("ascii"))
|
126 |
+
|
127 |
+
while True:
|
128 |
+
c = bdf_char(fp)
|
129 |
+
if not c:
|
130 |
+
break
|
131 |
+
id, ch, (xy, dst, src), im = c
|
132 |
+
if 0 <= ch < len(self.glyph):
|
133 |
+
self.glyph[ch] = xy, dst, src, im
|
env/Lib/site-packages/PIL/BlpImagePlugin.py
ADDED
@@ -0,0 +1,488 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Blizzard Mipmap Format (.blp)
|
3 |
+
Jerome Leclanche <[email protected]>
|
4 |
+
|
5 |
+
The contents of this file are hereby released in the public domain (CC0)
|
6 |
+
Full text of the CC0 license:
|
7 |
+
https://creativecommons.org/publicdomain/zero/1.0/
|
8 |
+
|
9 |
+
BLP1 files, used mostly in Warcraft III, are not fully supported.
|
10 |
+
All types of BLP2 files used in World of Warcraft are supported.
|
11 |
+
|
12 |
+
The BLP file structure consists of a header, up to 16 mipmaps of the
|
13 |
+
texture
|
14 |
+
|
15 |
+
Texture sizes must be powers of two, though the two dimensions do
|
16 |
+
not have to be equal; 512x256 is valid, but 512x200 is not.
|
17 |
+
The first mipmap (mipmap #0) is the full size image; each subsequent
|
18 |
+
mipmap halves both dimensions. The final mipmap should be 1x1.
|
19 |
+
|
20 |
+
BLP files come in many different flavours:
|
21 |
+
* JPEG-compressed (type == 0) - only supported for BLP1.
|
22 |
+
* RAW images (type == 1, encoding == 1). Each mipmap is stored as an
|
23 |
+
array of 8-bit values, one per pixel, left to right, top to bottom.
|
24 |
+
Each value is an index to the palette.
|
25 |
+
* DXT-compressed (type == 1, encoding == 2):
|
26 |
+
- DXT1 compression is used if alpha_encoding == 0.
|
27 |
+
- An additional alpha bit is used if alpha_depth == 1.
|
28 |
+
- DXT3 compression is used if alpha_encoding == 1.
|
29 |
+
- DXT5 compression is used if alpha_encoding == 7.
|
30 |
+
"""
|
31 |
+
|
32 |
+
from __future__ import annotations
|
33 |
+
|
34 |
+
import abc
|
35 |
+
import os
|
36 |
+
import struct
|
37 |
+
from enum import IntEnum
|
38 |
+
from io import BytesIO
|
39 |
+
from typing import IO
|
40 |
+
|
41 |
+
from . import Image, ImageFile
|
42 |
+
|
43 |
+
|
44 |
+
class Format(IntEnum):
|
45 |
+
JPEG = 0
|
46 |
+
|
47 |
+
|
48 |
+
class Encoding(IntEnum):
|
49 |
+
UNCOMPRESSED = 1
|
50 |
+
DXT = 2
|
51 |
+
UNCOMPRESSED_RAW_BGRA = 3
|
52 |
+
|
53 |
+
|
54 |
+
class AlphaEncoding(IntEnum):
|
55 |
+
DXT1 = 0
|
56 |
+
DXT3 = 1
|
57 |
+
DXT5 = 7
|
58 |
+
|
59 |
+
|
60 |
+
def unpack_565(i: int) -> tuple[int, int, int]:
|
61 |
+
return ((i >> 11) & 0x1F) << 3, ((i >> 5) & 0x3F) << 2, (i & 0x1F) << 3
|
62 |
+
|
63 |
+
|
64 |
+
def decode_dxt1(
|
65 |
+
data: bytes, alpha: bool = False
|
66 |
+
) -> tuple[bytearray, bytearray, bytearray, bytearray]:
|
67 |
+
"""
|
68 |
+
input: one "row" of data (i.e. will produce 4*width pixels)
|
69 |
+
"""
|
70 |
+
|
71 |
+
blocks = len(data) // 8 # number of blocks in row
|
72 |
+
ret = (bytearray(), bytearray(), bytearray(), bytearray())
|
73 |
+
|
74 |
+
for block_index in range(blocks):
|
75 |
+
# Decode next 8-byte block.
|
76 |
+
idx = block_index * 8
|
77 |
+
color0, color1, bits = struct.unpack_from("<HHI", data, idx)
|
78 |
+
|
79 |
+
r0, g0, b0 = unpack_565(color0)
|
80 |
+
r1, g1, b1 = unpack_565(color1)
|
81 |
+
|
82 |
+
# Decode this block into 4x4 pixels
|
83 |
+
# Accumulate the results onto our 4 row accumulators
|
84 |
+
for j in range(4):
|
85 |
+
for i in range(4):
|
86 |
+
# get next control op and generate a pixel
|
87 |
+
|
88 |
+
control = bits & 3
|
89 |
+
bits = bits >> 2
|
90 |
+
|
91 |
+
a = 0xFF
|
92 |
+
if control == 0:
|
93 |
+
r, g, b = r0, g0, b0
|
94 |
+
elif control == 1:
|
95 |
+
r, g, b = r1, g1, b1
|
96 |
+
elif control == 2:
|
97 |
+
if color0 > color1:
|
98 |
+
r = (2 * r0 + r1) // 3
|
99 |
+
g = (2 * g0 + g1) // 3
|
100 |
+
b = (2 * b0 + b1) // 3
|
101 |
+
else:
|
102 |
+
r = (r0 + r1) // 2
|
103 |
+
g = (g0 + g1) // 2
|
104 |
+
b = (b0 + b1) // 2
|
105 |
+
elif control == 3:
|
106 |
+
if color0 > color1:
|
107 |
+
r = (2 * r1 + r0) // 3
|
108 |
+
g = (2 * g1 + g0) // 3
|
109 |
+
b = (2 * b1 + b0) // 3
|
110 |
+
else:
|
111 |
+
r, g, b, a = 0, 0, 0, 0
|
112 |
+
|
113 |
+
if alpha:
|
114 |
+
ret[j].extend([r, g, b, a])
|
115 |
+
else:
|
116 |
+
ret[j].extend([r, g, b])
|
117 |
+
|
118 |
+
return ret
|
119 |
+
|
120 |
+
|
121 |
+
def decode_dxt3(data: bytes) -> tuple[bytearray, bytearray, bytearray, bytearray]:
|
122 |
+
"""
|
123 |
+
input: one "row" of data (i.e. will produce 4*width pixels)
|
124 |
+
"""
|
125 |
+
|
126 |
+
blocks = len(data) // 16 # number of blocks in row
|
127 |
+
ret = (bytearray(), bytearray(), bytearray(), bytearray())
|
128 |
+
|
129 |
+
for block_index in range(blocks):
|
130 |
+
idx = block_index * 16
|
131 |
+
block = data[idx : idx + 16]
|
132 |
+
# Decode next 16-byte block.
|
133 |
+
bits = struct.unpack_from("<8B", block)
|
134 |
+
color0, color1 = struct.unpack_from("<HH", block, 8)
|
135 |
+
|
136 |
+
(code,) = struct.unpack_from("<I", block, 12)
|
137 |
+
|
138 |
+
r0, g0, b0 = unpack_565(color0)
|
139 |
+
r1, g1, b1 = unpack_565(color1)
|
140 |
+
|
141 |
+
for j in range(4):
|
142 |
+
high = False # Do we want the higher bits?
|
143 |
+
for i in range(4):
|
144 |
+
alphacode_index = (4 * j + i) // 2
|
145 |
+
a = bits[alphacode_index]
|
146 |
+
if high:
|
147 |
+
high = False
|
148 |
+
a >>= 4
|
149 |
+
else:
|
150 |
+
high = True
|
151 |
+
a &= 0xF
|
152 |
+
a *= 17 # We get a value between 0 and 15
|
153 |
+
|
154 |
+
color_code = (code >> 2 * (4 * j + i)) & 0x03
|
155 |
+
|
156 |
+
if color_code == 0:
|
157 |
+
r, g, b = r0, g0, b0
|
158 |
+
elif color_code == 1:
|
159 |
+
r, g, b = r1, g1, b1
|
160 |
+
elif color_code == 2:
|
161 |
+
r = (2 * r0 + r1) // 3
|
162 |
+
g = (2 * g0 + g1) // 3
|
163 |
+
b = (2 * b0 + b1) // 3
|
164 |
+
elif color_code == 3:
|
165 |
+
r = (2 * r1 + r0) // 3
|
166 |
+
g = (2 * g1 + g0) // 3
|
167 |
+
b = (2 * b1 + b0) // 3
|
168 |
+
|
169 |
+
ret[j].extend([r, g, b, a])
|
170 |
+
|
171 |
+
return ret
|
172 |
+
|
173 |
+
|
174 |
+
def decode_dxt5(data: bytes) -> tuple[bytearray, bytearray, bytearray, bytearray]:
|
175 |
+
"""
|
176 |
+
input: one "row" of data (i.e. will produce 4 * width pixels)
|
177 |
+
"""
|
178 |
+
|
179 |
+
blocks = len(data) // 16 # number of blocks in row
|
180 |
+
ret = (bytearray(), bytearray(), bytearray(), bytearray())
|
181 |
+
|
182 |
+
for block_index in range(blocks):
|
183 |
+
idx = block_index * 16
|
184 |
+
block = data[idx : idx + 16]
|
185 |
+
# Decode next 16-byte block.
|
186 |
+
a0, a1 = struct.unpack_from("<BB", block)
|
187 |
+
|
188 |
+
bits = struct.unpack_from("<6B", block, 2)
|
189 |
+
alphacode1 = bits[2] | (bits[3] << 8) | (bits[4] << 16) | (bits[5] << 24)
|
190 |
+
alphacode2 = bits[0] | (bits[1] << 8)
|
191 |
+
|
192 |
+
color0, color1 = struct.unpack_from("<HH", block, 8)
|
193 |
+
|
194 |
+
(code,) = struct.unpack_from("<I", block, 12)
|
195 |
+
|
196 |
+
r0, g0, b0 = unpack_565(color0)
|
197 |
+
r1, g1, b1 = unpack_565(color1)
|
198 |
+
|
199 |
+
for j in range(4):
|
200 |
+
for i in range(4):
|
201 |
+
# get next control op and generate a pixel
|
202 |
+
alphacode_index = 3 * (4 * j + i)
|
203 |
+
|
204 |
+
if alphacode_index <= 12:
|
205 |
+
alphacode = (alphacode2 >> alphacode_index) & 0x07
|
206 |
+
elif alphacode_index == 15:
|
207 |
+
alphacode = (alphacode2 >> 15) | ((alphacode1 << 1) & 0x06)
|
208 |
+
else: # alphacode_index >= 18 and alphacode_index <= 45
|
209 |
+
alphacode = (alphacode1 >> (alphacode_index - 16)) & 0x07
|
210 |
+
|
211 |
+
if alphacode == 0:
|
212 |
+
a = a0
|
213 |
+
elif alphacode == 1:
|
214 |
+
a = a1
|
215 |
+
elif a0 > a1:
|
216 |
+
a = ((8 - alphacode) * a0 + (alphacode - 1) * a1) // 7
|
217 |
+
elif alphacode == 6:
|
218 |
+
a = 0
|
219 |
+
elif alphacode == 7:
|
220 |
+
a = 255
|
221 |
+
else:
|
222 |
+
a = ((6 - alphacode) * a0 + (alphacode - 1) * a1) // 5
|
223 |
+
|
224 |
+
color_code = (code >> 2 * (4 * j + i)) & 0x03
|
225 |
+
|
226 |
+
if color_code == 0:
|
227 |
+
r, g, b = r0, g0, b0
|
228 |
+
elif color_code == 1:
|
229 |
+
r, g, b = r1, g1, b1
|
230 |
+
elif color_code == 2:
|
231 |
+
r = (2 * r0 + r1) // 3
|
232 |
+
g = (2 * g0 + g1) // 3
|
233 |
+
b = (2 * b0 + b1) // 3
|
234 |
+
elif color_code == 3:
|
235 |
+
r = (2 * r1 + r0) // 3
|
236 |
+
g = (2 * g1 + g0) // 3
|
237 |
+
b = (2 * b1 + b0) // 3
|
238 |
+
|
239 |
+
ret[j].extend([r, g, b, a])
|
240 |
+
|
241 |
+
return ret
|
242 |
+
|
243 |
+
|
244 |
+
class BLPFormatError(NotImplementedError):
|
245 |
+
pass
|
246 |
+
|
247 |
+
|
248 |
+
def _accept(prefix: bytes) -> bool:
|
249 |
+
return prefix[:4] in (b"BLP1", b"BLP2")
|
250 |
+
|
251 |
+
|
252 |
+
class BlpImageFile(ImageFile.ImageFile):
|
253 |
+
"""
|
254 |
+
Blizzard Mipmap Format
|
255 |
+
"""
|
256 |
+
|
257 |
+
format = "BLP"
|
258 |
+
format_description = "Blizzard Mipmap Format"
|
259 |
+
|
260 |
+
def _open(self) -> None:
|
261 |
+
self.magic = self.fp.read(4)
|
262 |
+
|
263 |
+
self.fp.seek(5, os.SEEK_CUR)
|
264 |
+
(self._blp_alpha_depth,) = struct.unpack("<b", self.fp.read(1))
|
265 |
+
|
266 |
+
self.fp.seek(2, os.SEEK_CUR)
|
267 |
+
self._size = struct.unpack("<II", self.fp.read(8))
|
268 |
+
|
269 |
+
if self.magic in (b"BLP1", b"BLP2"):
|
270 |
+
decoder = self.magic.decode()
|
271 |
+
else:
|
272 |
+
msg = f"Bad BLP magic {repr(self.magic)}"
|
273 |
+
raise BLPFormatError(msg)
|
274 |
+
|
275 |
+
self._mode = "RGBA" if self._blp_alpha_depth else "RGB"
|
276 |
+
self.tile = [(decoder, (0, 0) + self.size, 0, (self.mode, 0, 1))]
|
277 |
+
|
278 |
+
|
279 |
+
class _BLPBaseDecoder(ImageFile.PyDecoder):
|
280 |
+
_pulls_fd = True
|
281 |
+
|
282 |
+
def decode(self, buffer: bytes) -> tuple[int, int]:
|
283 |
+
try:
|
284 |
+
self._read_blp_header()
|
285 |
+
self._load()
|
286 |
+
except struct.error as e:
|
287 |
+
msg = "Truncated BLP file"
|
288 |
+
raise OSError(msg) from e
|
289 |
+
return -1, 0
|
290 |
+
|
291 |
+
@abc.abstractmethod
|
292 |
+
def _load(self) -> None:
|
293 |
+
pass
|
294 |
+
|
295 |
+
def _read_blp_header(self) -> None:
|
296 |
+
assert self.fd is not None
|
297 |
+
self.fd.seek(4)
|
298 |
+
(self._blp_compression,) = struct.unpack("<i", self._safe_read(4))
|
299 |
+
|
300 |
+
(self._blp_encoding,) = struct.unpack("<b", self._safe_read(1))
|
301 |
+
(self._blp_alpha_depth,) = struct.unpack("<b", self._safe_read(1))
|
302 |
+
(self._blp_alpha_encoding,) = struct.unpack("<b", self._safe_read(1))
|
303 |
+
self.fd.seek(1, os.SEEK_CUR) # mips
|
304 |
+
|
305 |
+
self.size = struct.unpack("<II", self._safe_read(8))
|
306 |
+
|
307 |
+
if isinstance(self, BLP1Decoder):
|
308 |
+
# Only present for BLP1
|
309 |
+
(self._blp_encoding,) = struct.unpack("<i", self._safe_read(4))
|
310 |
+
self.fd.seek(4, os.SEEK_CUR) # subtype
|
311 |
+
|
312 |
+
self._blp_offsets = struct.unpack("<16I", self._safe_read(16 * 4))
|
313 |
+
self._blp_lengths = struct.unpack("<16I", self._safe_read(16 * 4))
|
314 |
+
|
315 |
+
def _safe_read(self, length: int) -> bytes:
|
316 |
+
return ImageFile._safe_read(self.fd, length)
|
317 |
+
|
318 |
+
def _read_palette(self) -> list[tuple[int, int, int, int]]:
|
319 |
+
ret = []
|
320 |
+
for i in range(256):
|
321 |
+
try:
|
322 |
+
b, g, r, a = struct.unpack("<4B", self._safe_read(4))
|
323 |
+
except struct.error:
|
324 |
+
break
|
325 |
+
ret.append((b, g, r, a))
|
326 |
+
return ret
|
327 |
+
|
328 |
+
def _read_bgra(self, palette: list[tuple[int, int, int, int]]) -> bytearray:
|
329 |
+
data = bytearray()
|
330 |
+
_data = BytesIO(self._safe_read(self._blp_lengths[0]))
|
331 |
+
while True:
|
332 |
+
try:
|
333 |
+
(offset,) = struct.unpack("<B", _data.read(1))
|
334 |
+
except struct.error:
|
335 |
+
break
|
336 |
+
b, g, r, a = palette[offset]
|
337 |
+
d: tuple[int, ...] = (r, g, b)
|
338 |
+
if self._blp_alpha_depth:
|
339 |
+
d += (a,)
|
340 |
+
data.extend(d)
|
341 |
+
return data
|
342 |
+
|
343 |
+
|
344 |
+
class BLP1Decoder(_BLPBaseDecoder):
|
345 |
+
def _load(self) -> None:
|
346 |
+
if self._blp_compression == Format.JPEG:
|
347 |
+
self._decode_jpeg_stream()
|
348 |
+
|
349 |
+
elif self._blp_compression == 1:
|
350 |
+
if self._blp_encoding in (4, 5):
|
351 |
+
palette = self._read_palette()
|
352 |
+
data = self._read_bgra(palette)
|
353 |
+
self.set_as_raw(data)
|
354 |
+
else:
|
355 |
+
msg = f"Unsupported BLP encoding {repr(self._blp_encoding)}"
|
356 |
+
raise BLPFormatError(msg)
|
357 |
+
else:
|
358 |
+
msg = f"Unsupported BLP compression {repr(self._blp_encoding)}"
|
359 |
+
raise BLPFormatError(msg)
|
360 |
+
|
361 |
+
def _decode_jpeg_stream(self) -> None:
|
362 |
+
from .JpegImagePlugin import JpegImageFile
|
363 |
+
|
364 |
+
(jpeg_header_size,) = struct.unpack("<I", self._safe_read(4))
|
365 |
+
jpeg_header = self._safe_read(jpeg_header_size)
|
366 |
+
assert self.fd is not None
|
367 |
+
self._safe_read(self._blp_offsets[0] - self.fd.tell()) # What IS this?
|
368 |
+
data = self._safe_read(self._blp_lengths[0])
|
369 |
+
data = jpeg_header + data
|
370 |
+
image = JpegImageFile(BytesIO(data))
|
371 |
+
Image._decompression_bomb_check(image.size)
|
372 |
+
if image.mode == "CMYK":
|
373 |
+
decoder_name, extents, offset, args = image.tile[0]
|
374 |
+
image.tile = [(decoder_name, extents, offset, (args[0], "CMYK"))]
|
375 |
+
r, g, b = image.convert("RGB").split()
|
376 |
+
reversed_image = Image.merge("RGB", (b, g, r))
|
377 |
+
self.set_as_raw(reversed_image.tobytes())
|
378 |
+
|
379 |
+
|
380 |
+
class BLP2Decoder(_BLPBaseDecoder):
|
381 |
+
def _load(self) -> None:
|
382 |
+
palette = self._read_palette()
|
383 |
+
|
384 |
+
assert self.fd is not None
|
385 |
+
self.fd.seek(self._blp_offsets[0])
|
386 |
+
|
387 |
+
if self._blp_compression == 1:
|
388 |
+
# Uncompressed or DirectX compression
|
389 |
+
|
390 |
+
if self._blp_encoding == Encoding.UNCOMPRESSED:
|
391 |
+
data = self._read_bgra(palette)
|
392 |
+
|
393 |
+
elif self._blp_encoding == Encoding.DXT:
|
394 |
+
data = bytearray()
|
395 |
+
if self._blp_alpha_encoding == AlphaEncoding.DXT1:
|
396 |
+
linesize = (self.size[0] + 3) // 4 * 8
|
397 |
+
for yb in range((self.size[1] + 3) // 4):
|
398 |
+
for d in decode_dxt1(
|
399 |
+
self._safe_read(linesize), alpha=bool(self._blp_alpha_depth)
|
400 |
+
):
|
401 |
+
data += d
|
402 |
+
|
403 |
+
elif self._blp_alpha_encoding == AlphaEncoding.DXT3:
|
404 |
+
linesize = (self.size[0] + 3) // 4 * 16
|
405 |
+
for yb in range((self.size[1] + 3) // 4):
|
406 |
+
for d in decode_dxt3(self._safe_read(linesize)):
|
407 |
+
data += d
|
408 |
+
|
409 |
+
elif self._blp_alpha_encoding == AlphaEncoding.DXT5:
|
410 |
+
linesize = (self.size[0] + 3) // 4 * 16
|
411 |
+
for yb in range((self.size[1] + 3) // 4):
|
412 |
+
for d in decode_dxt5(self._safe_read(linesize)):
|
413 |
+
data += d
|
414 |
+
else:
|
415 |
+
msg = f"Unsupported alpha encoding {repr(self._blp_alpha_encoding)}"
|
416 |
+
raise BLPFormatError(msg)
|
417 |
+
else:
|
418 |
+
msg = f"Unknown BLP encoding {repr(self._blp_encoding)}"
|
419 |
+
raise BLPFormatError(msg)
|
420 |
+
|
421 |
+
else:
|
422 |
+
msg = f"Unknown BLP compression {repr(self._blp_compression)}"
|
423 |
+
raise BLPFormatError(msg)
|
424 |
+
|
425 |
+
self.set_as_raw(data)
|
426 |
+
|
427 |
+
|
428 |
+
class BLPEncoder(ImageFile.PyEncoder):
|
429 |
+
_pushes_fd = True
|
430 |
+
|
431 |
+
def _write_palette(self) -> bytes:
|
432 |
+
data = b""
|
433 |
+
assert self.im is not None
|
434 |
+
palette = self.im.getpalette("RGBA", "RGBA")
|
435 |
+
for i in range(len(palette) // 4):
|
436 |
+
r, g, b, a = palette[i * 4 : (i + 1) * 4]
|
437 |
+
data += struct.pack("<4B", b, g, r, a)
|
438 |
+
while len(data) < 256 * 4:
|
439 |
+
data += b"\x00" * 4
|
440 |
+
return data
|
441 |
+
|
442 |
+
def encode(self, bufsize: int) -> tuple[int, int, bytes]:
|
443 |
+
palette_data = self._write_palette()
|
444 |
+
|
445 |
+
offset = 20 + 16 * 4 * 2 + len(palette_data)
|
446 |
+
data = struct.pack("<16I", offset, *((0,) * 15))
|
447 |
+
|
448 |
+
assert self.im is not None
|
449 |
+
w, h = self.im.size
|
450 |
+
data += struct.pack("<16I", w * h, *((0,) * 15))
|
451 |
+
|
452 |
+
data += palette_data
|
453 |
+
|
454 |
+
for y in range(h):
|
455 |
+
for x in range(w):
|
456 |
+
data += struct.pack("<B", self.im.getpixel((x, y)))
|
457 |
+
|
458 |
+
return len(data), 0, data
|
459 |
+
|
460 |
+
|
461 |
+
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
|
462 |
+
if im.mode != "P":
|
463 |
+
msg = "Unsupported BLP image mode"
|
464 |
+
raise ValueError(msg)
|
465 |
+
|
466 |
+
magic = b"BLP1" if im.encoderinfo.get("blp_version") == "BLP1" else b"BLP2"
|
467 |
+
fp.write(magic)
|
468 |
+
|
469 |
+
fp.write(struct.pack("<i", 1)) # Uncompressed or DirectX compression
|
470 |
+
fp.write(struct.pack("<b", Encoding.UNCOMPRESSED))
|
471 |
+
fp.write(struct.pack("<b", 1 if im.palette.mode == "RGBA" else 0))
|
472 |
+
fp.write(struct.pack("<b", 0)) # alpha encoding
|
473 |
+
fp.write(struct.pack("<b", 0)) # mips
|
474 |
+
fp.write(struct.pack("<II", *im.size))
|
475 |
+
if magic == b"BLP1":
|
476 |
+
fp.write(struct.pack("<i", 5))
|
477 |
+
fp.write(struct.pack("<i", 0))
|
478 |
+
|
479 |
+
ImageFile._save(im, fp, [("BLP", (0, 0) + im.size, 0, im.mode)])
|
480 |
+
|
481 |
+
|
482 |
+
Image.register_open(BlpImageFile.format, BlpImageFile, _accept)
|
483 |
+
Image.register_extension(BlpImageFile.format, ".blp")
|
484 |
+
Image.register_decoder("BLP1", BLP1Decoder)
|
485 |
+
Image.register_decoder("BLP2", BLP2Decoder)
|
486 |
+
|
487 |
+
Image.register_save(BlpImageFile.format, _save)
|
488 |
+
Image.register_encoder("BLP", BLPEncoder)
|
env/Lib/site-packages/PIL/BmpImagePlugin.py
ADDED
@@ -0,0 +1,489 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# The Python Imaging Library.
|
3 |
+
# $Id$
|
4 |
+
#
|
5 |
+
# BMP file handler
|
6 |
+
#
|
7 |
+
# Windows (and OS/2) native bitmap storage format.
|
8 |
+
#
|
9 |
+
# history:
|
10 |
+
# 1995-09-01 fl Created
|
11 |
+
# 1996-04-30 fl Added save
|
12 |
+
# 1997-08-27 fl Fixed save of 1-bit images
|
13 |
+
# 1998-03-06 fl Load P images as L where possible
|
14 |
+
# 1998-07-03 fl Load P images as 1 where possible
|
15 |
+
# 1998-12-29 fl Handle small palettes
|
16 |
+
# 2002-12-30 fl Fixed load of 1-bit palette images
|
17 |
+
# 2003-04-21 fl Fixed load of 1-bit monochrome images
|
18 |
+
# 2003-04-23 fl Added limited support for BI_BITFIELDS compression
|
19 |
+
#
|
20 |
+
# Copyright (c) 1997-2003 by Secret Labs AB
|
21 |
+
# Copyright (c) 1995-2003 by Fredrik Lundh
|
22 |
+
#
|
23 |
+
# See the README file for information on usage and redistribution.
|
24 |
+
#
|
25 |
+
from __future__ import annotations
|
26 |
+
|
27 |
+
import os
|
28 |
+
from typing import IO
|
29 |
+
|
30 |
+
from . import Image, ImageFile, ImagePalette
|
31 |
+
from ._binary import i16le as i16
|
32 |
+
from ._binary import i32le as i32
|
33 |
+
from ._binary import o8
|
34 |
+
from ._binary import o16le as o16
|
35 |
+
from ._binary import o32le as o32
|
36 |
+
|
37 |
+
#
|
38 |
+
# --------------------------------------------------------------------
|
39 |
+
# Read BMP file
|
40 |
+
|
41 |
+
# Map BMP bit depth to the (Pillow mode, raw decoder mode) used to load it.
BIT2MODE = {
    # bits => mode, rawmode
    1: ("P", "P;1"),
    4: ("P", "P;4"),
    8: ("P", "P"),
    16: ("RGB", "BGR;15"),
    24: ("RGB", "BGR"),
    32: ("RGB", "BGRX"),
}
|
50 |
+
|
51 |
+
|
52 |
+
def _accept(prefix: bytes) -> bool:
|
53 |
+
return prefix[:2] == b"BM"
|
54 |
+
|
55 |
+
|
56 |
+
def _dib_accept(prefix: bytes) -> bool:
    """Return True if *prefix* starts with a known DIB info-header size."""
    # A raw DIB stream begins directly with the info-header size field.
    header_size = i32(prefix)
    return header_size in (12, 40, 52, 56, 64, 108, 124)
|
58 |
+
|
59 |
+
|
60 |
+
# =============================================================================
|
61 |
+
# Image plugin for the Windows BMP format.
|
62 |
+
# =============================================================================
|
63 |
+
class BmpImageFile(ImageFile.ImageFile):
    """Image plugin for the Windows Bitmap format (BMP)"""

    # ------------------------------------------------------------- Description
    format_description = "Windows Bitmap"
    format = "BMP"

    # -------------------------------------------------- BMP Compression values
    COMPRESSIONS = {"RAW": 0, "RLE8": 1, "RLE4": 2, "BITFIELDS": 3, "JPEG": 4, "PNG": 5}
    # Expose each compression constant as a class attribute (self.RAW, ...).
    for k, v in COMPRESSIONS.items():
        vars()[k] = v

    def _bitmap(self, header: int = 0, offset: int = 0) -> None:
        """Read relevant info about the BMP"""
        read, seek = self.fp.read, self.fp.seek
        if header:
            seek(header)
        # read bmp header size @offset 14 (this is part of the header size)
        # "direction" -1 means bottom-up rows (the BMP default).
        file_info = {"header_size": i32(read(4)), "direction": -1}

        # -------------------- If requested, read header at a specific position
        # read the rest of the bmp header, without its size
        header_data = ImageFile._safe_read(self.fp, file_info["header_size"] - 4)

        # ------------------------------- Windows Bitmap v2, IBM OS/2 Bitmap v1
        # ----- This format has different offsets because of width/height types
        # 12: BITMAPCOREHEADER/OS21XBITMAPHEADER
        if file_info["header_size"] == 12:
            file_info["width"] = i16(header_data, 0)
            file_info["height"] = i16(header_data, 2)
            file_info["planes"] = i16(header_data, 4)
            file_info["bits"] = i16(header_data, 6)
            file_info["compression"] = self.RAW
            file_info["palette_padding"] = 3

        # --------------------------------------------- Windows Bitmap v3 to v5
        # 40: BITMAPINFOHEADER
        # 52: BITMAPV2HEADER
        # 56: BITMAPV3HEADER
        # 64: BITMAPCOREHEADER2/OS22XBITMAPHEADER
        # 108: BITMAPV4HEADER
        # 124: BITMAPV5HEADER
        elif file_info["header_size"] in (40, 52, 56, 64, 108, 124):
            # Negative (two's-complement) height means top-down rows.
            file_info["y_flip"] = header_data[7] == 0xFF
            file_info["direction"] = 1 if file_info["y_flip"] else -1
            file_info["width"] = i32(header_data, 0)
            file_info["height"] = (
                i32(header_data, 4)
                if not file_info["y_flip"]
                else 2**32 - i32(header_data, 4)
            )
            file_info["planes"] = i16(header_data, 8)
            file_info["bits"] = i16(header_data, 10)
            file_info["compression"] = i32(header_data, 12)
            # byte size of pixel data
            file_info["data_size"] = i32(header_data, 16)
            file_info["pixels_per_meter"] = (
                i32(header_data, 20),
                i32(header_data, 24),
            )
            file_info["colors"] = i32(header_data, 28)
            file_info["palette_padding"] = 4
            # 1 meter == 39.3701 inches
            self.info["dpi"] = tuple(x / 39.3701 for x in file_info["pixels_per_meter"])
            if file_info["compression"] == self.BITFIELDS:
                masks = ["r_mask", "g_mask", "b_mask"]
                if len(header_data) >= 48:
                    if len(header_data) >= 52:
                        masks.append("a_mask")
                    else:
                        file_info["a_mask"] = 0x0
                    for idx, mask in enumerate(masks):
                        file_info[mask] = i32(header_data, 36 + idx * 4)
                else:
                    # 40 byte headers only have the three components in the
                    # bitfields masks, ref:
                    # https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx
                    # See also
                    # https://github.com/python-pillow/Pillow/issues/1293
                    # There is a 4th component in the RGBQuad, in the alpha
                    # location, but it is listed as a reserved component,
                    # and it is not generally an alpha channel
                    file_info["a_mask"] = 0x0
                    for mask in masks:
                        file_info[mask] = i32(read(4))
                file_info["rgb_mask"] = (
                    file_info["r_mask"],
                    file_info["g_mask"],
                    file_info["b_mask"],
                )
                file_info["rgba_mask"] = (
                    file_info["r_mask"],
                    file_info["g_mask"],
                    file_info["b_mask"],
                    file_info["a_mask"],
                )
        else:
            msg = f"Unsupported BMP header type ({file_info['header_size']})"
            raise OSError(msg)

        # ------------------ Special case : header is reported 40, which
        # ---------------------- is shorter than real size for bpp >= 16
        self._size = file_info["width"], file_info["height"]

        # ------- If color count was not found in the header, compute from bits
        file_info["colors"] = (
            file_info["colors"]
            if file_info.get("colors", 0)
            else (1 << file_info["bits"])
        )
        # Pixel data immediately follows the palette for <=8 bpp images.
        if offset == 14 + file_info["header_size"] and file_info["bits"] <= 8:
            offset += 4 * file_info["colors"]

        # ---------------------- Check bit depth for unusual unsupported values
        self._mode, raw_mode = BIT2MODE.get(file_info["bits"], (None, None))
        if self.mode is None:
            msg = f"Unsupported BMP pixel depth ({file_info['bits']})"
            raise OSError(msg)

        # ---------------- Process BMP with Bitfields compression (not palette)
        decoder_name = "raw"
        if file_info["compression"] == self.BITFIELDS:
            # Only a fixed set of channel-mask layouts maps to raw modes.
            SUPPORTED = {
                32: [
                    (0xFF0000, 0xFF00, 0xFF, 0x0),
                    (0xFF000000, 0xFF0000, 0xFF00, 0x0),
                    (0xFF000000, 0xFF00, 0xFF, 0x0),
                    (0xFF000000, 0xFF0000, 0xFF00, 0xFF),
                    (0xFF, 0xFF00, 0xFF0000, 0xFF000000),
                    (0xFF0000, 0xFF00, 0xFF, 0xFF000000),
                    (0xFF000000, 0xFF00, 0xFF, 0xFF0000),
                    (0x0, 0x0, 0x0, 0x0),
                ],
                24: [(0xFF0000, 0xFF00, 0xFF)],
                16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)],
            }
            MASK_MODES = {
                (32, (0xFF0000, 0xFF00, 0xFF, 0x0)): "BGRX",
                (32, (0xFF000000, 0xFF0000, 0xFF00, 0x0)): "XBGR",
                (32, (0xFF000000, 0xFF00, 0xFF, 0x0)): "BGXR",
                (32, (0xFF000000, 0xFF0000, 0xFF00, 0xFF)): "ABGR",
                (32, (0xFF, 0xFF00, 0xFF0000, 0xFF000000)): "RGBA",
                (32, (0xFF0000, 0xFF00, 0xFF, 0xFF000000)): "BGRA",
                (32, (0xFF000000, 0xFF00, 0xFF, 0xFF0000)): "BGAR",
                (32, (0x0, 0x0, 0x0, 0x0)): "BGRA",
                (24, (0xFF0000, 0xFF00, 0xFF)): "BGR",
                (16, (0xF800, 0x7E0, 0x1F)): "BGR;16",
                (16, (0x7C00, 0x3E0, 0x1F)): "BGR;15",
            }
            if file_info["bits"] in SUPPORTED:
                if (
                    file_info["bits"] == 32
                    and file_info["rgba_mask"] in SUPPORTED[file_info["bits"]]
                ):
                    raw_mode = MASK_MODES[(file_info["bits"], file_info["rgba_mask"])]
                    self._mode = "RGBA" if "A" in raw_mode else self.mode
                elif (
                    file_info["bits"] in (24, 16)
                    and file_info["rgb_mask"] in SUPPORTED[file_info["bits"]]
                ):
                    raw_mode = MASK_MODES[(file_info["bits"], file_info["rgb_mask"])]
                else:
                    msg = "Unsupported BMP bitfields layout"
                    raise OSError(msg)
            else:
                msg = "Unsupported BMP bitfields layout"
                raise OSError(msg)
        elif file_info["compression"] == self.RAW:
            if file_info["bits"] == 32 and header == 22:  # 32-bit .cur offset
                raw_mode, self._mode = "BGRA", "RGBA"
        elif file_info["compression"] in (self.RLE8, self.RLE4):
            decoder_name = "bmp_rle"
        else:
            msg = f"Unsupported BMP compression ({file_info['compression']})"
            raise OSError(msg)

        # --------------- Once the header is processed, process the palette/LUT
        if self.mode == "P":  # Paletted for 1, 4 and 8 bit images
            # ---------------------------------------------------- 1-bit images
            if not (0 < file_info["colors"] <= 65536):
                msg = f"Unsupported BMP Palette size ({file_info['colors']})"
                raise OSError(msg)
            else:
                padding = file_info["palette_padding"]
                palette = read(padding * file_info["colors"])
                grayscale = True
                indices = (
                    (0, 255)
                    if file_info["colors"] == 2
                    else list(range(file_info["colors"]))
                )

                # ----------------- Check if grayscale and ignore palette if so
                for ind, val in enumerate(indices):
                    rgb = palette[ind * padding : ind * padding + 3]
                    if rgb != o8(val) * 3:
                        grayscale = False

                # ------- If all colors are gray, white or black, ditch palette
                if grayscale:
                    self._mode = "1" if file_info["colors"] == 2 else "L"
                    raw_mode = self.mode
                else:
                    self._mode = "P"
                    self.palette = ImagePalette.raw(
                        "BGRX" if padding == 4 else "BGR", palette
                    )

        # ---------------------------- Finally set the tile data for the plugin
        self.info["compression"] = file_info["compression"]
        args = [raw_mode]
        if decoder_name == "bmp_rle":
            args.append(file_info["compression"] == self.RLE4)
        else:
            # Row stride in bytes, rounded up to a 4-byte boundary.
            args.append(((file_info["width"] * file_info["bits"] + 31) >> 3) & (~3))
        args.append(file_info["direction"])
        self.tile = [
            (
                decoder_name,
                (0, 0, file_info["width"], file_info["height"]),
                offset or self.fp.tell(),
                tuple(args),
            )
        ]

    def _open(self) -> None:
        """Open file, check magic number and read header"""
        # read 14 bytes: magic number, filesize, reserved, header final offset
        head_data = self.fp.read(14)
        # choke if the file does not have the required magic bytes
        if not _accept(head_data):
            msg = "Not a BMP file"
            raise SyntaxError(msg)
        # read the start position of the BMP image data (u32)
        offset = i32(head_data, 10)
        # load bitmap information (offset=raster info)
        self._bitmap(offset=offset)
|
299 |
+
|
300 |
+
|
301 |
+
class BmpRleDecoder(ImageFile.PyDecoder):
    """Pure-Python decoder for RLE8/RLE4-compressed BMP pixel data.

    ``args[1]`` selects RLE4 (two 4-bit pixels per byte) when true;
    ``args[-1]`` is the row direction passed through to set_as_raw.
    """

    # Pull data directly from the file object instead of buffered chunks.
    _pulls_fd = True

    def decode(self, buffer: bytes) -> tuple[int, int]:
        assert self.fd is not None
        rle4 = self.args[1]
        data = bytearray()
        x = 0  # current column within the row being filled
        dest_length = self.state.xsize * self.state.ysize
        while len(data) < dest_length:
            # Each RLE record starts with a (count, value) byte pair.
            pixels = self.fd.read(1)
            byte = self.fd.read(1)
            if not pixels or not byte:
                break
            num_pixels = pixels[0]
            if num_pixels:
                # encoded mode
                if x + num_pixels > self.state.xsize:
                    # Too much data for row
                    num_pixels = max(0, self.state.xsize - x)
                if rle4:
                    # The value byte holds two alternating palette indices.
                    first_pixel = o8(byte[0] >> 4)
                    second_pixel = o8(byte[0] & 0x0F)
                    for index in range(num_pixels):
                        if index % 2 == 0:
                            data += first_pixel
                        else:
                            data += second_pixel
                else:
                    data += byte * num_pixels
                x += num_pixels
            else:
                # count == 0 introduces an escape code in the value byte.
                if byte[0] == 0:
                    # end of line: zero-fill the remainder of the row
                    while len(data) % self.state.xsize != 0:
                        data += b"\x00"
                    x = 0
                elif byte[0] == 1:
                    # end of bitmap
                    break
                elif byte[0] == 2:
                    # delta
                    bytes_read = self.fd.read(2)
                    if len(bytes_read) < 2:
                        break
                    # NOTE(review): this reads two MORE bytes after the
                    # truncation probe above, so a delta record consumes four
                    # bytes of input; the BMP spec defines the delta operands
                    # as exactly two bytes — presumably `right, up =
                    # bytes_read` was intended.  Verify against a reference
                    # decoder before changing.
                    right, up = self.fd.read(2)
                    data += b"\x00" * (right + up * self.state.xsize)
                    x = len(data) % self.state.xsize
                else:
                    # absolute mode: byte[0] literal pixels follow
                    if rle4:
                        # 2 pixels per byte
                        byte_count = byte[0] // 2
                        bytes_read = self.fd.read(byte_count)
                        for byte_read in bytes_read:
                            data += o8(byte_read >> 4)
                            data += o8(byte_read & 0x0F)
                    else:
                        byte_count = byte[0]
                        bytes_read = self.fd.read(byte_count)
                        data += bytes_read
                        if len(bytes_read) < byte_count:
                            break
                    x += byte[0]

                    # align to 16-bit word boundary
                    if self.fd.tell() % 2 != 0:
                        self.fd.seek(1, os.SEEK_CUR)
        rawmode = "L" if self.mode == "L" else "P"
        self.set_as_raw(bytes(data), (rawmode, 0, self.args[-1]))
        # -1 tells the caller the decoder consumed everything it needs.
        return -1, 0
|
372 |
+
|
373 |
+
|
374 |
+
# =============================================================================
|
375 |
+
# Image plugin for the DIB format (BMP alias)
|
376 |
+
# =============================================================================
|
377 |
+
class DibImageFile(BmpImageFile):
    """Image plugin for headerless DIB streams (BMP without the
    14-byte BITMAPFILEHEADER)."""

    format = "DIB"
    format_description = "Windows Bitmap"

    def _open(self) -> None:
        # No file header to validate: parse the info header directly
        # from the current stream position.
        self._bitmap()
|
383 |
+
|
384 |
+
|
385 |
+
#
|
386 |
+
# --------------------------------------------------------------------
|
387 |
+
# Write BMP file
|
388 |
+
|
389 |
+
|
390 |
+
# Mode -> (raw encoder mode, bits per pixel, palette entries; 0 = no palette)
SAVE = {
    "1": ("1", 1, 2),
    "L": ("L", 8, 256),
    "P": ("P", 8, 256),
    "RGB": ("BGR", 24, 0),
    "RGBA": ("BGRA", 32, 0),
}
|
397 |
+
|
398 |
+
|
399 |
+
def _dib_save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    # DIB is a headerless BMP: reuse the BMP writer but skip the
    # 14-byte BITMAPFILEHEADER.
    _save(im, fp, filename, False)
|
401 |
+
|
402 |
+
|
403 |
+
def _save(
    im: Image.Image, fp: IO[bytes], filename: str | bytes, bitmap_header: bool = True
) -> None:
    """Write *im* to *fp* as an uncompressed BMP.

    :param bitmap_header: when false, omit the 14-byte BITMAPFILEHEADER
        (used by the DIB writer).
    :raises OSError: if the image mode cannot be expressed as BMP.
    :raises ValueError: if the file would exceed the 32-bit size field.
    """
    try:
        rawmode, bits, colors = SAVE[im.mode]
    except KeyError as e:
        msg = f"cannot write mode {im.mode} as BMP"
        raise OSError(msg) from e

    info = im.encoderinfo

    dpi = info.get("dpi", (96, 96))

    # 1 meter == 39.3701 inches
    ppm = tuple(int(x * 39.3701 + 0.5) for x in dpi)

    # Row stride in bytes, rounded up to a 4-byte boundary.
    stride = ((im.size[0] * bits + 7) // 8 + 3) & (~3)
    header = 40  # or 64 for OS/2 version 2
    image = stride * im.size[1]

    # Build the palette (BGRX quads) for paletted / grayscale modes.
    if im.mode == "1":
        palette = b"".join(o8(i) * 4 for i in (0, 255))
    elif im.mode == "L":
        palette = b"".join(o8(i) * 4 for i in range(256))
    elif im.mode == "P":
        palette = im.im.getpalette("RGB", "BGRX")
        colors = len(palette) // 4
    else:
        palette = None

    # bitmap header
    if bitmap_header:
        offset = 14 + header + colors * 4
        file_size = offset + image
        if file_size > 2**32 - 1:
            msg = "File size is too large for the BMP format"
            raise ValueError(msg)
        fp.write(
            b"BM"  # file type (magic)
            + o32(file_size)  # file size
            + o32(0)  # reserved
            + o32(offset)  # image data offset
        )

    # bitmap info header
    fp.write(
        o32(header)  # info header size
        + o32(im.size[0])  # width
        + o32(im.size[1])  # height
        + o16(1)  # planes
        + o16(bits)  # depth
        + o32(0)  # compression (0=uncompressed)
        + o32(image)  # size of bitmap
        + o32(ppm[0])  # resolution
        + o32(ppm[1])  # resolution
        + o32(colors)  # colors used
        + o32(colors)  # colors important
    )

    fp.write(b"\0" * (header - 40))  # padding (for OS/2 format)

    if palette:
        fp.write(palette)

    # Write rows bottom-up (direction -1), as BMP expects.
    ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, stride, -1))])
|
468 |
+
|
469 |
+
|
470 |
+
#
|
471 |
+
# --------------------------------------------------------------------
|
472 |
+
# Registry
|
473 |
+
|
474 |
+
|
475 |
+
# Register the BMP and DIB readers/writers, the RLE decoder, and the
# associated extensions/MIME types with the Image registry.
Image.register_open(BmpImageFile.format, BmpImageFile, _accept)
Image.register_save(BmpImageFile.format, _save)

Image.register_extension(BmpImageFile.format, ".bmp")

Image.register_mime(BmpImageFile.format, "image/bmp")

Image.register_decoder("bmp_rle", BmpRleDecoder)

Image.register_open(DibImageFile.format, DibImageFile, _dib_accept)
Image.register_save(DibImageFile.format, _dib_save)

Image.register_extension(DibImageFile.format, ".dib")

Image.register_mime(DibImageFile.format, "image/bmp")
|
env/Lib/site-packages/PIL/BufrStubImagePlugin.py
ADDED
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# The Python Imaging Library
|
3 |
+
# $Id$
|
4 |
+
#
|
5 |
+
# BUFR stub adapter
|
6 |
+
#
|
7 |
+
# Copyright (c) 1996-2003 by Fredrik Lundh
|
8 |
+
#
|
9 |
+
# See the README file for information on usage and redistribution.
|
10 |
+
#
|
11 |
+
from __future__ import annotations
|
12 |
+
|
13 |
+
from typing import IO
|
14 |
+
|
15 |
+
from . import Image, ImageFile
|
16 |
+
|
17 |
+
# Module-level hook holding the application-installed BUFR handler (or None).
_handler = None


def register_handler(handler: ImageFile.StubHandler | None) -> None:
    """
    Install application-specific BUFR image handler.

    :param handler: Handler object.
    """
    global _handler
    _handler = handler
|
28 |
+
|
29 |
+
|
30 |
+
# --------------------------------------------------------------------
|
31 |
+
# Image adapter
|
32 |
+
|
33 |
+
|
34 |
+
def _accept(prefix: bytes) -> bool:
|
35 |
+
return prefix[:4] == b"BUFR" or prefix[:4] == b"ZCZC"
|
36 |
+
|
37 |
+
|
38 |
+
class BufrStubImageFile(ImageFile.StubImageFile):
    """Stub image file for BUFR data; actual decoding is delegated to an
    application-installed handler (see :func:`register_handler`)."""

    format = "BUFR"
    format_description = "BUFR"

    def _open(self) -> None:
        offset = self.fp.tell()

        if not _accept(self.fp.read(4)):
            msg = "Not a BUFR file"
            raise SyntaxError(msg)

        # Rewind so the handler sees the stream from the beginning.
        self.fp.seek(offset)

        # make something up
        self._mode = "F"
        self._size = 1, 1

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self) -> ImageFile.StubHandler | None:
        # Return the handler installed via register_handler(), if any.
        return _handler
|
61 |
+
|
62 |
+
|
63 |
+
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Delegate saving to the installed BUFR handler.

    :raises OSError: if no handler with a ``save`` method is installed.
    """
    handler = _handler
    if handler is None or not hasattr(handler, "save"):
        msg = "BUFR save handler not installed"
        raise OSError(msg)
    handler.save(im, fp, filename)
|
68 |
+
|
69 |
+
|
70 |
+
# --------------------------------------------------------------------
|
71 |
+
# Registry
|
72 |
+
|
73 |
+
# Register the BUFR stub reader/writer and its file extension.
Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept)
Image.register_save(BufrStubImageFile.format, _save)

Image.register_extension(BufrStubImageFile.format, ".bufr")
|
env/Lib/site-packages/PIL/ContainerIO.py
ADDED
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# The Python Imaging Library.
|
3 |
+
# $Id$
|
4 |
+
#
|
5 |
+
# a class to read from a container file
|
6 |
+
#
|
7 |
+
# History:
|
8 |
+
# 1995-06-18 fl Created
|
9 |
+
# 1995-09-07 fl Added readline(), readlines()
|
10 |
+
#
|
11 |
+
# Copyright (c) 1997-2001 by Secret Labs AB
|
12 |
+
# Copyright (c) 1995 by Fredrik Lundh
|
13 |
+
#
|
14 |
+
# See the README file for information on usage and redistribution.
|
15 |
+
#
|
16 |
+
from __future__ import annotations
|
17 |
+
|
18 |
+
import io
|
19 |
+
from typing import IO, AnyStr, Generic, Literal
|
20 |
+
|
21 |
+
|
22 |
+
class ContainerIO(Generic[AnyStr]):
    """
    A file-like object giving read access to one region of an existing
    file (for example a single member of a TAR archive).
    """

    def __init__(self, file: IO[AnyStr], offset: int, length: int) -> None:
        """
        Create file object.

        :param file: Existing file.
        :param offset: Start of region, in bytes.
        :param length: Size of region, in bytes.
        """
        self.fh: IO[AnyStr] = file
        self.pos = 0  # current position, relative to the region start
        self.offset = offset
        self.length = length
        self.fh.seek(offset)

    ##
    # Always false.

    def isatty(self) -> bool:
        # A slice of a file is never an interactive terminal.
        return False

    def seek(self, offset: int, mode: Literal[0, 1, 2] = io.SEEK_SET) -> None:
        """
        Move file pointer.

        :param offset: Offset in bytes.
        :param mode: Starting position. Use 0 for beginning of region, 1
           for current offset, and 2 for end of region. You cannot move
           the pointer outside the defined region.
        """
        if mode == 1:
            target = self.pos + offset
        elif mode == 2:
            target = self.length + offset
        else:
            target = offset
        # Clamp so the pointer always stays inside [0, length].
        self.pos = min(max(target, 0), self.length)
        self.fh.seek(self.offset + self.pos)

    def tell(self) -> int:
        """
        Get current file pointer.

        :returns: Offset from start of region, in bytes.
        """
        return self.pos

    def read(self, n: int = 0) -> AnyStr:
        """
        Read data.

        :param n: Number of bytes to read. If omitted or zero,
            read until end of region.
        :returns: An 8-bit string.
        """
        remaining = self.length - self.pos
        count = min(n, remaining) if n else remaining
        if not count:  # EOF
            return b"" if "b" in self.fh.mode else ""  # type: ignore[return-value]
        self.pos += count
        return self.fh.read(count)

    def readline(self) -> AnyStr:
        """
        Read a line of text.

        :returns: An 8-bit string.
        """
        is_bytes = "b" in self.fh.mode
        line: AnyStr = b"" if is_bytes else ""  # type: ignore[assignment]
        eol = b"\n" if is_bytes else "\n"
        while ch := self.read(1):
            line = line + ch
            if ch == eol:
                break
        return line

    def readlines(self) -> list[AnyStr]:
        """
        Read multiple lines of text.

        :returns: A list of 8-bit strings.
        """
        collected = []
        while line := self.readline():
            collected.append(line)
        return collected
|
env/Lib/site-packages/PIL/CurImagePlugin.py
ADDED
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# The Python Imaging Library.
|
3 |
+
# $Id$
|
4 |
+
#
|
5 |
+
# Windows Cursor support for PIL
|
6 |
+
#
|
7 |
+
# notes:
|
8 |
+
# uses BmpImagePlugin.py to read the bitmap data.
|
9 |
+
#
|
10 |
+
# history:
|
11 |
+
# 96-05-27 fl Created
|
12 |
+
#
|
13 |
+
# Copyright (c) Secret Labs AB 1997.
|
14 |
+
# Copyright (c) Fredrik Lundh 1996.
|
15 |
+
#
|
16 |
+
# See the README file for information on usage and redistribution.
|
17 |
+
#
|
18 |
+
from __future__ import annotations
|
19 |
+
|
20 |
+
from . import BmpImagePlugin, Image
|
21 |
+
from ._binary import i16le as i16
|
22 |
+
from ._binary import i32le as i32
|
23 |
+
|
24 |
+
#
|
25 |
+
# --------------------------------------------------------------------
|
26 |
+
|
27 |
+
|
28 |
+
def _accept(prefix: bytes) -> bool:
|
29 |
+
return prefix[:4] == b"\0\0\2\0"
|
30 |
+
|
31 |
+
|
32 |
+
##
|
33 |
+
# Image plugin for Windows Cursor files.
|
34 |
+
|
35 |
+
|
36 |
+
class CurImageFile(BmpImagePlugin.BmpImageFile):
    """Image plugin for Windows Cursor (.cur) files.

    A CUR file is a directory of embedded DIB images; the largest entry
    is selected and decoded through the BMP machinery.
    """

    format = "CUR"
    format_description = "Windows Cursor"

    def _open(self) -> None:
        offset = self.fp.tell()

        # check magic
        s = self.fp.read(6)
        if not _accept(s):
            msg = "not a CUR file"
            raise SyntaxError(msg)

        # pick the largest cursor in the file
        # (each 16-byte directory entry starts with width, height bytes)
        m = b""
        for i in range(i16(s, 4)):
            s = self.fp.read(16)
            if not m:
                m = s
            elif s[0] > m[0] and s[1] > m[1]:
                m = s
        if not m:
            msg = "No cursors were found"
            raise TypeError(msg)

        # load as bitmap
        self._bitmap(i32(m, 12) + offset)

        # patch up the bitmap height
        # (the stored height counts both the XOR image and the AND mask)
        self._size = self.size[0], self.size[1] // 2
        d, e, o, a = self.tile[0]
        self.tile[0] = d, (0, 0) + self.size, o, a
|
68 |
+
|
69 |
+
|
70 |
+
#
|
71 |
+
# --------------------------------------------------------------------
|
72 |
+
|
73 |
+
# Register the CUR reader and its file extension (read-only; no save).
Image.register_open(CurImageFile.format, CurImageFile, _accept)

Image.register_extension(CurImageFile.format, ".cur")
|
env/Lib/site-packages/PIL/DcxImagePlugin.py
ADDED
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# The Python Imaging Library.
|
3 |
+
# $Id$
|
4 |
+
#
|
5 |
+
# DCX file handling
|
6 |
+
#
|
7 |
+
# DCX is a container file format defined by Intel, commonly used
|
8 |
+
# for fax applications. Each DCX file consists of a directory
|
9 |
+
# (a list of file offsets) followed by a set of (usually 1-bit)
|
10 |
+
# PCX files.
|
11 |
+
#
|
12 |
+
# History:
|
13 |
+
# 1995-09-09 fl Created
|
14 |
+
# 1996-03-20 fl Properly derived from PcxImageFile.
|
15 |
+
# 1998-07-15 fl Renamed offset attribute to avoid name clash
|
16 |
+
# 2002-07-30 fl Fixed file handling
|
17 |
+
#
|
18 |
+
# Copyright (c) 1997-98 by Secret Labs AB.
|
19 |
+
# Copyright (c) 1995-96 by Fredrik Lundh.
|
20 |
+
#
|
21 |
+
# See the README file for information on usage and redistribution.
|
22 |
+
#
|
23 |
+
from __future__ import annotations
|
24 |
+
|
25 |
+
from . import Image
|
26 |
+
from ._binary import i32le as i32
|
27 |
+
from .PcxImagePlugin import PcxImageFile
|
28 |
+
|
29 |
+
MAGIC = 0x3ADE68B1  # QUIZ: what's this value, then?


def _accept(prefix: bytes) -> bool:
    # A DCX file opens with the little-endian magic number above.
    return len(prefix) >= 4 and i32(prefix) == MAGIC
|
34 |
+
|
35 |
+
|
36 |
+
##
|
37 |
+
# Image plugin for the Intel DCX format.
|
38 |
+
|
39 |
+
|
40 |
+
class DcxImageFile(PcxImageFile):
    """Image plugin for the Intel DCX format: a directory of file
    offsets followed by a sequence of PCX frames."""

    format = "DCX"
    format_description = "Intel DCX"
    # Keep the file open: frames are re-read on seek().
    _close_exclusive_fp_after_loading = False

    def _open(self) -> None:
        # Header
        s = self.fp.read(4)
        if not _accept(s):
            msg = "not a DCX file"
            raise SyntaxError(msg)

        # Component directory: up to 1024 offsets, zero-terminated.
        self._offset = []
        for i in range(1024):
            offset = i32(self.fp.read(4))
            if not offset:
                break
            self._offset.append(offset)

        self._fp = self.fp
        self.frame = -1
        self.n_frames = len(self._offset)
        self.is_animated = self.n_frames > 1
        self.seek(0)

    def seek(self, frame: int) -> None:
        """Position on *frame* and re-open it as a PCX image."""
        if not self._seek_check(frame):
            return
        self.frame = frame
        self.fp = self._fp
        self.fp.seek(self._offset[frame])
        PcxImageFile._open(self)

    def tell(self) -> int:
        """Return the current frame number."""
        return self.frame
|
76 |
+
|
77 |
+
|
78 |
+
# Register the DCX reader so Image.open() can dispatch to it by magic/extension.
Image.register_open(DcxImageFile.format, DcxImageFile, _accept)

Image.register_extension(DcxImageFile.format, ".dcx")
|
env/Lib/site-packages/PIL/DdsImagePlugin.py
ADDED
@@ -0,0 +1,575 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
A Pillow loader for .dds files (S3TC-compressed aka DXTC)
|
3 |
+
Jerome Leclanche <[email protected]>
|
4 |
+
|
5 |
+
Documentation:
|
6 |
+
https://web.archive.org/web/20170802060935/http://oss.sgi.com/projects/ogl-sample/registry/EXT/texture_compression_s3tc.txt
|
7 |
+
|
8 |
+
The contents of this file are hereby released in the public domain (CC0)
|
9 |
+
Full text of the CC0 license:
|
10 |
+
https://creativecommons.org/publicdomain/zero/1.0/
|
11 |
+
"""
|
12 |
+
|
13 |
+
from __future__ import annotations
|
14 |
+
|
15 |
+
import io
|
16 |
+
import struct
|
17 |
+
import sys
|
18 |
+
from enum import IntEnum, IntFlag
|
19 |
+
from typing import IO
|
20 |
+
|
21 |
+
from . import Image, ImageFile, ImagePalette
|
22 |
+
from ._binary import i32le as i32
|
23 |
+
from ._binary import o8
|
24 |
+
from ._binary import o32le as o32
|
25 |
+
|
26 |
+
# Magic ("DDS ")
# Little-endian interpretation of the 4 ASCII bytes b"DDS ".
DDS_MAGIC = 0x20534444
|
28 |
+
|
29 |
+
|
30 |
+
# DDS flags
|
31 |
+
class DDSD(IntFlag):
    """dwFlags bits of the DDS_HEADER — which header fields contain valid data."""

    CAPS = 0x1
    HEIGHT = 0x2
    WIDTH = 0x4
    PITCH = 0x8
    PIXELFORMAT = 0x1000
    MIPMAPCOUNT = 0x20000
    LINEARSIZE = 0x80000
    DEPTH = 0x800000
|
40 |
+
|
41 |
+
|
42 |
+
# DDS caps
|
43 |
+
class DDSCAPS(IntFlag):
    """dwCaps bits of the DDS_HEADER capability field."""

    COMPLEX = 0x8
    TEXTURE = 0x1000
    MIPMAP = 0x400000
|
47 |
+
|
48 |
+
|
49 |
+
class DDSCAPS2(IntFlag):
    """dwCaps2 bits of the DDS_HEADER — cubemap faces and volume textures."""

    CUBEMAP = 0x200
    CUBEMAP_POSITIVEX = 0x400
    CUBEMAP_NEGATIVEX = 0x800
    CUBEMAP_POSITIVEY = 0x1000
    CUBEMAP_NEGATIVEY = 0x2000
    CUBEMAP_POSITIVEZ = 0x4000
    CUBEMAP_NEGATIVEZ = 0x8000
    VOLUME = 0x200000
|
58 |
+
|
59 |
+
|
60 |
+
# Pixel Format
|
61 |
+
class DDPF(IntFlag):
    """dwFlags bits of the DDS_PIXELFORMAT structure."""

    ALPHAPIXELS = 0x1
    ALPHA = 0x2
    FOURCC = 0x4
    PALETTEINDEXED8 = 0x20
    RGB = 0x40
    LUMINANCE = 0x20000
|
68 |
+
|
69 |
+
|
70 |
+
# dxgiformat.h
|
71 |
+
class DXGI_FORMAT(IntEnum):
    """DXGI_FORMAT values from dxgiformat.h, used by the DX10 extension header."""

    UNKNOWN = 0
    R32G32B32A32_TYPELESS = 1
    R32G32B32A32_FLOAT = 2
    R32G32B32A32_UINT = 3
    R32G32B32A32_SINT = 4
    R32G32B32_TYPELESS = 5
    R32G32B32_FLOAT = 6
    R32G32B32_UINT = 7
    R32G32B32_SINT = 8
    R16G16B16A16_TYPELESS = 9
    R16G16B16A16_FLOAT = 10
    R16G16B16A16_UNORM = 11
    R16G16B16A16_UINT = 12
    R16G16B16A16_SNORM = 13
    R16G16B16A16_SINT = 14
    R32G32_TYPELESS = 15
    R32G32_FLOAT = 16
    R32G32_UINT = 17
    R32G32_SINT = 18
    R32G8X24_TYPELESS = 19
    D32_FLOAT_S8X24_UINT = 20
    R32_FLOAT_X8X24_TYPELESS = 21
    X32_TYPELESS_G8X24_UINT = 22
    R10G10B10A2_TYPELESS = 23
    R10G10B10A2_UNORM = 24
    R10G10B10A2_UINT = 25
    R11G11B10_FLOAT = 26
    R8G8B8A8_TYPELESS = 27
    R8G8B8A8_UNORM = 28
    R8G8B8A8_UNORM_SRGB = 29
    R8G8B8A8_UINT = 30
    R8G8B8A8_SNORM = 31
    R8G8B8A8_SINT = 32
    R16G16_TYPELESS = 33
    R16G16_FLOAT = 34
    R16G16_UNORM = 35
    R16G16_UINT = 36
    R16G16_SNORM = 37
    R16G16_SINT = 38
    R32_TYPELESS = 39
    D32_FLOAT = 40
    R32_FLOAT = 41
    R32_UINT = 42
    R32_SINT = 43
    R24G8_TYPELESS = 44
    D24_UNORM_S8_UINT = 45
    R24_UNORM_X8_TYPELESS = 46
    X24_TYPELESS_G8_UINT = 47
    R8G8_TYPELESS = 48
    R8G8_UNORM = 49
    R8G8_UINT = 50
    R8G8_SNORM = 51
    R8G8_SINT = 52
    R16_TYPELESS = 53
    R16_FLOAT = 54
    D16_UNORM = 55
    R16_UNORM = 56
    R16_UINT = 57
    R16_SNORM = 58
    R16_SINT = 59
    R8_TYPELESS = 60
    R8_UNORM = 61
    R8_UINT = 62
    R8_SNORM = 63
    R8_SINT = 64
    A8_UNORM = 65
    R1_UNORM = 66
    R9G9B9E5_SHAREDEXP = 67
    R8G8_B8G8_UNORM = 68
    G8R8_G8B8_UNORM = 69
    BC1_TYPELESS = 70
    BC1_UNORM = 71
    BC1_UNORM_SRGB = 72
    BC2_TYPELESS = 73
    BC2_UNORM = 74
    BC2_UNORM_SRGB = 75
    BC3_TYPELESS = 76
    BC3_UNORM = 77
    BC3_UNORM_SRGB = 78
    BC4_TYPELESS = 79
    BC4_UNORM = 80
    BC4_SNORM = 81
    BC5_TYPELESS = 82
    BC5_UNORM = 83
    BC5_SNORM = 84
    B5G6R5_UNORM = 85
    B5G5R5A1_UNORM = 86
    B8G8R8A8_UNORM = 87
    B8G8R8X8_UNORM = 88
    R10G10B10_XR_BIAS_A2_UNORM = 89
    B8G8R8A8_TYPELESS = 90
    B8G8R8A8_UNORM_SRGB = 91
    B8G8R8X8_TYPELESS = 92
    B8G8R8X8_UNORM_SRGB = 93
    BC6H_TYPELESS = 94
    BC6H_UF16 = 95
    BC6H_SF16 = 96
    BC7_TYPELESS = 97
    BC7_UNORM = 98
    BC7_UNORM_SRGB = 99
    AYUV = 100
    Y410 = 101
    Y416 = 102
    NV12 = 103
    P010 = 104
    P016 = 105
    OPAQUE_420 = 106
    YUY2 = 107
    Y210 = 108
    Y216 = 109
    NV11 = 110
    AI44 = 111
    IA44 = 112
    P8 = 113
    A8P8 = 114
    B4G4R4A4_UNORM = 115
    P208 = 130
    V208 = 131
    V408 = 132
    SAMPLER_FEEDBACK_MIN_MIP_OPAQUE = 189
    SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE = 190
|
193 |
+
|
194 |
+
|
195 |
+
class D3DFMT(IntEnum):
    """Direct3D 9 D3DFMT surface format codes.

    Members past BINARYBUFFER are FourCC codes, stored as the little-endian
    integer value of their four ASCII bytes (matching how the DDS header
    stores the dwFourCC field).
    """

    UNKNOWN = 0
    R8G8B8 = 20
    A8R8G8B8 = 21
    X8R8G8B8 = 22
    R5G6B5 = 23
    X1R5G5B5 = 24
    A1R5G5B5 = 25
    A4R4G4B4 = 26
    R3G3B2 = 27
    A8 = 28
    A8R3G3B2 = 29
    X4R4G4B4 = 30
    A2B10G10R10 = 31
    A8B8G8R8 = 32
    X8B8G8R8 = 33
    G16R16 = 34
    A2R10G10B10 = 35
    A16B16G16R16 = 36
    A8P8 = 40
    P8 = 41
    L8 = 50
    A8L8 = 51
    A4L4 = 52
    V8U8 = 60
    L6V5U5 = 61
    X8L8V8U8 = 62
    Q8W8V8U8 = 63
    V16U16 = 64
    A2W10V10U10 = 67
    D16_LOCKABLE = 70
    D32 = 71
    D15S1 = 73
    D24S8 = 75
    D24X8 = 77
    D24X4S4 = 79
    D16 = 80
    D32F_LOCKABLE = 82
    D24FS8 = 83
    D32_LOCKABLE = 84
    S8_LOCKABLE = 85
    L16 = 81
    VERTEXDATA = 100
    INDEX16 = 101
    INDEX32 = 102
    Q16W16V16U16 = 110
    R16F = 111
    G16R16F = 112
    A16B16G16R16F = 113
    R32F = 114
    G32R32F = 115
    A32B32G32R32F = 116
    CxV8U8 = 117
    A1 = 118
    A2B10G10R10_XR_BIAS = 119
    BINARYBUFFER = 199

    # FourCC-coded formats
    UYVY = i32(b"UYVY")
    R8G8_B8G8 = i32(b"RGBG")
    YUY2 = i32(b"YUY2")
    G8R8_G8B8 = i32(b"GRGB")
    DXT1 = i32(b"DXT1")
    DXT2 = i32(b"DXT2")
    DXT3 = i32(b"DXT3")
    DXT4 = i32(b"DXT4")
    DXT5 = i32(b"DXT5")
    DX10 = i32(b"DX10")
    BC4S = i32(b"BC4S")
    BC4U = i32(b"BC4U")
    BC5S = i32(b"BC5S")
    BC5U = i32(b"BC5U")
    ATI1 = i32(b"ATI1")
    ATI2 = i32(b"ATI2")
    MULTI2_ARGB8 = i32(b"MET1")
|
269 |
+
|
270 |
+
|
271 |
+
# Backward compatibility layer
# Re-export every enum member as a module-level constant under its historic
# flat name (e.g. DDSD_CAPS), so code written against older Pillow keeps working.
module = sys.modules[__name__]
for item in DDSD:
    assert item.name is not None
    setattr(module, f"DDSD_{item.name}", item.value)
for item1 in DDSCAPS:
    assert item1.name is not None
    setattr(module, f"DDSCAPS_{item1.name}", item1.value)
for item2 in DDSCAPS2:
    assert item2.name is not None
    setattr(module, f"DDSCAPS2_{item2.name}", item2.value)
for item3 in DDPF:
    assert item3.name is not None
    setattr(module, f"DDPF_{item3.name}", item3.value)

# Historic aliases and composite flag values, kept for backward compatibility.
DDS_FOURCC = DDPF.FOURCC
DDS_RGB = DDPF.RGB
DDS_RGBA = DDPF.RGB | DDPF.ALPHAPIXELS
DDS_LUMINANCE = DDPF.LUMINANCE
DDS_LUMINANCEA = DDPF.LUMINANCE | DDPF.ALPHAPIXELS
DDS_ALPHA = DDPF.ALPHA
DDS_PAL8 = DDPF.PALETTEINDEXED8

DDS_HEADER_FLAGS_TEXTURE = DDSD.CAPS | DDSD.HEIGHT | DDSD.WIDTH | DDSD.PIXELFORMAT
DDS_HEADER_FLAGS_MIPMAP = DDSD.MIPMAPCOUNT
DDS_HEADER_FLAGS_VOLUME = DDSD.DEPTH
DDS_HEADER_FLAGS_PITCH = DDSD.PITCH
DDS_HEADER_FLAGS_LINEARSIZE = DDSD.LINEARSIZE

DDS_HEIGHT = DDSD.HEIGHT
DDS_WIDTH = DDSD.WIDTH

DDS_SURFACE_FLAGS_TEXTURE = DDSCAPS.TEXTURE
DDS_SURFACE_FLAGS_MIPMAP = DDSCAPS.COMPLEX | DDSCAPS.MIPMAP
DDS_SURFACE_FLAGS_CUBEMAP = DDSCAPS.COMPLEX

DDS_CUBEMAP_POSITIVEX = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEX
DDS_CUBEMAP_NEGATIVEX = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEX
DDS_CUBEMAP_POSITIVEY = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEY
DDS_CUBEMAP_NEGATIVEY = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEY
DDS_CUBEMAP_POSITIVEZ = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEZ
DDS_CUBEMAP_NEGATIVEZ = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEZ

DXT1_FOURCC = D3DFMT.DXT1
DXT3_FOURCC = D3DFMT.DXT3
DXT5_FOURCC = D3DFMT.DXT5

DXGI_FORMAT_R8G8B8A8_TYPELESS = DXGI_FORMAT.R8G8B8A8_TYPELESS
DXGI_FORMAT_R8G8B8A8_UNORM = DXGI_FORMAT.R8G8B8A8_UNORM
DXGI_FORMAT_R8G8B8A8_UNORM_SRGB = DXGI_FORMAT.R8G8B8A8_UNORM_SRGB
DXGI_FORMAT_BC5_TYPELESS = DXGI_FORMAT.BC5_TYPELESS
DXGI_FORMAT_BC5_UNORM = DXGI_FORMAT.BC5_UNORM
DXGI_FORMAT_BC5_SNORM = DXGI_FORMAT.BC5_SNORM
DXGI_FORMAT_BC6H_UF16 = DXGI_FORMAT.BC6H_UF16
DXGI_FORMAT_BC6H_SF16 = DXGI_FORMAT.BC6H_SF16
DXGI_FORMAT_BC7_TYPELESS = DXGI_FORMAT.BC7_TYPELESS
DXGI_FORMAT_BC7_UNORM = DXGI_FORMAT.BC7_UNORM
DXGI_FORMAT_BC7_UNORM_SRGB = DXGI_FORMAT.BC7_UNORM_SRGB
|
329 |
+
|
330 |
+
|
331 |
+
class DdsImageFile(ImageFile.ImageFile):
    """Image plugin for DirectDraw Surface (.dds) textures.

    Parses the fixed 124-byte DDS_HEADER (plus the DX10 extension header
    when the FourCC is "DX10") and sets up either a "dds_rgb"/"raw" tile
    for uncompressed data or a "bcn" tile for block-compressed (BC1-BC7)
    data.
    """

    format = "DDS"
    format_description = "DirectDraw Surface"

    def _open(self) -> None:
        # 4-byte magic ("DDS ") followed by dwSize, which must be 124.
        if not _accept(self.fp.read(4)):
            msg = "not a DDS file"
            raise SyntaxError(msg)
        (header_size,) = struct.unpack("<I", self.fp.read(4))
        if header_size != 124:
            msg = f"Unsupported header size {repr(header_size)}"
            raise OSError(msg)
        header_bytes = self.fp.read(header_size - 4)
        if len(header_bytes) != 120:
            msg = f"Incomplete header: {len(header_bytes)} bytes"
            raise OSError(msg)
        header = io.BytesIO(header_bytes)

        flags, height, width = struct.unpack("<3I", header.read(12))
        self._size = (width, height)
        extents = (0, 0) + self.size

        pitch, depth, mipmaps = struct.unpack("<3I", header.read(12))
        struct.unpack("<11I", header.read(44))  # reserved

        # pixel format (DDS_PIXELFORMAT structure)
        pfsize, pfflags, fourcc, bitcount = struct.unpack("<4I", header.read(16))
        n = 0  # BCn compression level; 0 means uncompressed
        rawmode = None
        if pfflags & DDPF.RGB:
            # Texture contains uncompressed RGB data
            if pfflags & DDPF.ALPHAPIXELS:
                self._mode = "RGBA"
                mask_count = 4
            else:
                self._mode = "RGB"
                mask_count = 3

            # Per-channel bit masks, consumed by DdsRgbDecoder.
            masks = struct.unpack(f"<{mask_count}I", header.read(mask_count * 4))
            self.tile = [("dds_rgb", extents, 0, (bitcount, masks))]
            return
        elif pfflags & DDPF.LUMINANCE:
            if bitcount == 8:
                self._mode = "L"
            elif bitcount == 16 and pfflags & DDPF.ALPHAPIXELS:
                self._mode = "LA"
            else:
                msg = f"Unsupported bitcount {bitcount} for {pfflags}"
                raise OSError(msg)
        elif pfflags & DDPF.PALETTEINDEXED8:
            # 256-entry RGBA palette follows the header.
            self._mode = "P"
            self.palette = ImagePalette.raw("RGBA", self.fp.read(1024))
            self.palette.mode = "RGBA"
        elif pfflags & DDPF.FOURCC:
            # Compressed or extended formats identified by a FourCC code.
            # Pixel data starts right after magic + header.
            offset = header_size + 4
            if fourcc == D3DFMT.DXT1:
                self._mode = "RGBA"
                self.pixel_format = "DXT1"
                n = 1
            elif fourcc == D3DFMT.DXT3:
                self._mode = "RGBA"
                self.pixel_format = "DXT3"
                n = 2
            elif fourcc == D3DFMT.DXT5:
                self._mode = "RGBA"
                self.pixel_format = "DXT5"
                n = 3
            elif fourcc in (D3DFMT.BC4U, D3DFMT.ATI1):
                self._mode = "L"
                self.pixel_format = "BC4"
                n = 4
            elif fourcc == D3DFMT.BC5S:
                self._mode = "RGB"
                self.pixel_format = "BC5S"
                n = 5
            elif fourcc in (D3DFMT.BC5U, D3DFMT.ATI2):
                self._mode = "RGB"
                self.pixel_format = "BC5"
                n = 5
            elif fourcc == D3DFMT.DX10:
                # DX10 extension header: 20 extra bytes before the pixel data.
                offset += 20
                # ignoring flags which pertain to volume textures and cubemaps
                (dxgi_format,) = struct.unpack("<I", self.fp.read(4))
                self.fp.read(16)
                if dxgi_format in (
                    DXGI_FORMAT.BC1_UNORM,
                    DXGI_FORMAT.BC1_TYPELESS,
                ):
                    self._mode = "RGBA"
                    self.pixel_format = "BC1"
                    n = 1
                elif dxgi_format in (DXGI_FORMAT.BC4_TYPELESS, DXGI_FORMAT.BC4_UNORM):
                    self._mode = "L"
                    self.pixel_format = "BC4"
                    n = 4
                elif dxgi_format in (DXGI_FORMAT.BC5_TYPELESS, DXGI_FORMAT.BC5_UNORM):
                    self._mode = "RGB"
                    self.pixel_format = "BC5"
                    n = 5
                elif dxgi_format == DXGI_FORMAT.BC5_SNORM:
                    self._mode = "RGB"
                    self.pixel_format = "BC5S"
                    n = 5
                elif dxgi_format == DXGI_FORMAT.BC6H_UF16:
                    self._mode = "RGB"
                    self.pixel_format = "BC6H"
                    n = 6
                elif dxgi_format == DXGI_FORMAT.BC6H_SF16:
                    self._mode = "RGB"
                    self.pixel_format = "BC6HS"
                    n = 6
                elif dxgi_format in (
                    DXGI_FORMAT.BC7_TYPELESS,
                    DXGI_FORMAT.BC7_UNORM,
                    DXGI_FORMAT.BC7_UNORM_SRGB,
                ):
                    self._mode = "RGBA"
                    self.pixel_format = "BC7"
                    n = 7
                    if dxgi_format == DXGI_FORMAT.BC7_UNORM_SRGB:
                        self.info["gamma"] = 1 / 2.2
                elif dxgi_format in (
                    DXGI_FORMAT.R8G8B8A8_TYPELESS,
                    DXGI_FORMAT.R8G8B8A8_UNORM,
                    DXGI_FORMAT.R8G8B8A8_UNORM_SRGB,
                ):
                    self._mode = "RGBA"
                    if dxgi_format == DXGI_FORMAT.R8G8B8A8_UNORM_SRGB:
                        self.info["gamma"] = 1 / 2.2
                else:
                    msg = f"Unimplemented DXGI format {dxgi_format}"
                    raise NotImplementedError(msg)
            else:
                msg = f"Unimplemented pixel format {repr(fourcc)}"
                raise NotImplementedError(msg)
        else:
            msg = f"Unknown pixel format flags {pfflags}"
            raise NotImplementedError(msg)

        # n > 0 selects the BCn block decoder; otherwise decode raw bytes.
        if n:
            self.tile = [
                ImageFile._Tile("bcn", extents, offset, (n, self.pixel_format))
            ]
        else:
            self.tile = [ImageFile._Tile("raw", extents, 0, rawmode or self.mode)]

    def load_seek(self, pos: int) -> None:
        # Data is consumed sequentially while decoding the tile; no seek needed.
        pass
|
479 |
+
|
480 |
+
|
481 |
+
class DdsRgbDecoder(ImageFile.PyDecoder):
    """Decoder for uncompressed, bit-masked RGB(A) DDS data ("dds_rgb" tiles)."""

    _pulls_fd = True

    def decode(self, buffer: bytes) -> tuple[int, int]:
        """Read bitcount-wide pixels from the file and expand each masked
        channel to 8 bits, in mask order. Returns (-1, 0): all data consumed.
        """
        assert self.fd is not None
        bitcount, masks = self.args

        # Some masks will be padded with zeros, e.g. R 0b11 G 0b1100
        # Calculate how many zeros each mask is padded with
        mask_offsets = []
        # And the maximum value of each channel without the padding
        mask_totals = []
        for mask in masks:
            offset = 0
            if mask != 0:
                while mask >> (offset + 1) << (offset + 1) == mask:
                    offset += 1
            mask_offsets.append(offset)
            # NOTE(review): a zero mask leaves mask_totals[i] == 0, which would
            # divide by zero below — presumably valid headers never declare an
            # all-zero channel mask; confirm against the format spec.
            mask_totals.append(mask >> offset)

        data = bytearray()
        bytecount = bitcount // 8
        dest_length = self.state.xsize * self.state.ysize * len(masks)
        while len(data) < dest_length:
            # One pixel = bytecount little-endian bytes.
            value = int.from_bytes(self.fd.read(bytecount), "little")
            for i, mask in enumerate(masks):
                masked_value = value & mask
                # Remove the zero padding, and scale it to 8 bits
                data += o8(
                    int(((masked_value >> mask_offsets[i]) / mask_totals[i]) * 255)
                )
        self.set_as_raw(data)
        return -1, 0
|
514 |
+
|
515 |
+
|
516 |
+
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Write *im* to *fp* as an uncompressed DDS file.

    Supported modes: RGB, RGBA, L, LA. Raises OSError for anything else.
    RGB(A) data is stored in the DirectDraw BGR(A) byte order via a reversed
    rawmode; the alpha channel, when present, is rotated to the front so the
    written masks line up.
    """
    if im.mode not in ("RGB", "RGBA", "L", "LA"):
        msg = f"cannot write mode {im.mode} as DDS"
        raise OSError(msg)

    alpha = im.mode[-1] == "A"
    if im.mode[0] == "L":
        pixel_flags = DDPF.LUMINANCE
        rawmode = im.mode
        if alpha:
            rgba_mask = [0x000000FF, 0x000000FF, 0x000000FF]
        else:
            rgba_mask = [0xFF000000, 0xFF000000, 0xFF000000]
    else:
        pixel_flags = DDPF.RGB
        # Reversed rawmode writes BGR(A) byte order, matching the masks below.
        rawmode = im.mode[::-1]
        rgba_mask = [0x00FF0000, 0x0000FF00, 0x000000FF]

        if alpha:
            # Rotate alpha to the front so the reversed rawmode emits ABGR.
            r, g, b, a = im.split()
            im = Image.merge("RGBA", (a, r, g, b))
    if alpha:
        pixel_flags |= DDPF.ALPHAPIXELS
    rgba_mask.append(0xFF000000 if alpha else 0)

    flags = DDSD.CAPS | DDSD.HEIGHT | DDSD.WIDTH | DDSD.PITCH | DDSD.PIXELFORMAT
    bitcount = len(im.getbands()) * 8
    # Bytes per scanline, rounded up to a whole byte.
    pitch = (im.width * bitcount + 7) // 8

    fp.write(
        o32(DDS_MAGIC)
        + struct.pack(
            "<7I",
            124,  # header size
            flags,  # flags
            im.height,
            im.width,
            pitch,
            0,  # depth
            0,  # mipmaps
        )
        # NOTE(review): no "<" prefix here (native byte order/alignment); the
        # payload is all zeros so the output is identical either way — confirm.
        + struct.pack("11I", *((0,) * 11))  # reserved
        # pfsize, pfflags, fourcc, bitcount
        + struct.pack("<4I", 32, pixel_flags, 0, bitcount)
        + struct.pack("<4I", *rgba_mask)  # dwRGBABitMask
        + struct.pack("<5I", DDSCAPS.TEXTURE, 0, 0, 0, 0)
    )
    ImageFile._save(
        im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))]
    )
|
566 |
+
|
567 |
+
|
568 |
+
def _accept(prefix: bytes) -> bool:
|
569 |
+
return prefix[:4] == b"DDS "
|
570 |
+
|
571 |
+
|
572 |
+
# Hook the DDS plugin into Image: open dispatch, the custom uncompressed-RGB
# decoder, the saver, and the file extension.
Image.register_open(DdsImageFile.format, DdsImageFile, _accept)
Image.register_decoder("dds_rgb", DdsRgbDecoder)
Image.register_save(DdsImageFile.format, _save)
Image.register_extension(DdsImageFile.format, ".dds")
|
env/Lib/site-packages/PIL/EpsImagePlugin.py
ADDED
@@ -0,0 +1,478 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# The Python Imaging Library.
|
3 |
+
# $Id$
|
4 |
+
#
|
5 |
+
# EPS file handling
|
6 |
+
#
|
7 |
+
# History:
|
8 |
+
# 1995-09-01 fl Created (0.1)
|
9 |
+
# 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2)
|
10 |
+
# 1996-08-22 fl Don't choke on floating point BoundingBox values
|
11 |
+
# 1996-08-23 fl Handle files from Macintosh (0.3)
|
12 |
+
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
|
13 |
+
# 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5)
|
14 |
+
# 2014-05-07 e Handling of EPS with binary preview and fixed resolution
|
15 |
+
# resizing
|
16 |
+
#
|
17 |
+
# Copyright (c) 1997-2003 by Secret Labs AB.
|
18 |
+
# Copyright (c) 1995-2003 by Fredrik Lundh
|
19 |
+
#
|
20 |
+
# See the README file for information on usage and redistribution.
|
21 |
+
#
|
22 |
+
from __future__ import annotations
|
23 |
+
|
24 |
+
import io
|
25 |
+
import os
|
26 |
+
import re
|
27 |
+
import subprocess
|
28 |
+
import sys
|
29 |
+
import tempfile
|
30 |
+
from typing import IO
|
31 |
+
|
32 |
+
from . import Image, ImageFile
|
33 |
+
from ._binary import i32le as i32
|
34 |
+
from ._deprecate import deprecate
|
35 |
+
|
36 |
+
# --------------------------------------------------------------------


# DSC comment carrying a value, e.g. "%%BoundingBox: 0 0 10 10"
# -> groups ("BoundingBox", "0 0 10 10").
split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
# DSC comment without a value, e.g. "%%EndComments".
field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")

# Resolved Ghostscript executable: None = not probed yet, False = not found,
# str = binary name to invoke.
gs_binary: str | bool | None = None
# Windows-specific cache consulted by has_ghostscript(); same tri-state idea.
gs_windows_binary = None
|
44 |
+
|
45 |
+
|
46 |
+
def has_ghostscript() -> bool:
    """Return True if a Ghostscript executable is available.

    The probe result is cached in the module-level ``gs_binary`` (and, on
    Windows, ``gs_windows_binary``), so the filesystem/subprocess check runs
    at most once per process.
    """
    global gs_binary, gs_windows_binary
    if gs_binary is None:
        if sys.platform.startswith("win"):
            if gs_windows_binary is None:
                import shutil

                # Try the common Windows console binary names in order.
                for binary in ("gswin32c", "gswin64c", "gs"):
                    if shutil.which(binary) is not None:
                        gs_windows_binary = binary
                        break
                else:
                    gs_windows_binary = False
            gs_binary = gs_windows_binary
        else:
            try:
                # A successful "gs --version" proves Ghostscript is runnable.
                subprocess.check_call(["gs", "--version"], stdout=subprocess.DEVNULL)
                gs_binary = "gs"
            except OSError:
                gs_binary = False
    return gs_binary is not False
|
67 |
+
|
68 |
+
|
69 |
+
def Ghostscript(tile, size, fp, scale=1, transparency=False):
    """Render an image using Ghostscript.

    :param tile: single-element tile list; its data tuple holds (length, bbox).
    :param size: target (width, height) in pixels.
    :param fp: file object holding the PostScript source.
    :param scale: integer supersampling factor (hi-res rendering hack).
    :param transparency: render via "pngalpha" (RGBA) instead of "ppmraw".
    :returns: the rendered core image (``out_im.im`` copy).
    """
    global gs_binary
    if not has_ghostscript():
        msg = "Unable to locate Ghostscript on paths"
        raise OSError(msg)

    # Unpack decoder tile
    decoder, tile, offset, data = tile[0]
    length, bbox = data

    # Hack to support hi-res rendering
    scale = int(scale) or 1
    width = size[0] * scale
    height = size[1] * scale
    # resolution is dependent on bbox and size
    res_x = 72.0 * width / (bbox[2] - bbox[0])
    res_y = 72.0 * height / (bbox[3] - bbox[1])

    # Temp file that Ghostscript will write its raster output into.
    out_fd, outfile = tempfile.mkstemp()
    os.close(out_fd)

    infile_temp = None
    if hasattr(fp, "name") and os.path.exists(fp.name):
        # Input already lives on disk; hand its path to Ghostscript directly.
        infile = fp.name
    else:
        in_fd, infile_temp = tempfile.mkstemp()
        os.close(in_fd)
        infile = infile_temp

        # Ignore length and offset!
        # Ghostscript can read it
        # Copy whole file to read in Ghostscript
        with open(infile_temp, "wb") as f:
            # fetch length of fp
            fp.seek(0, io.SEEK_END)
            fsize = fp.tell()
            # ensure start position
            # go back
            fp.seek(0)
            lengthfile = fsize
            while lengthfile > 0:
                s = fp.read(min(lengthfile, 100 * 1024))
                if not s:
                    break
                lengthfile -= len(s)
                f.write(s)

    device = "pngalpha" if transparency else "ppmraw"

    # Build Ghostscript command
    command = [
        gs_binary,
        "-q",  # quiet mode
        f"-g{width:d}x{height:d}",  # set output geometry (pixels)
        f"-r{res_x:f}x{res_y:f}",  # set input DPI (dots per inch)
        "-dBATCH",  # exit after processing
        "-dNOPAUSE",  # don't pause between pages
        "-dSAFER",  # safe mode
        f"-sDEVICE={device}",
        f"-sOutputFile={outfile}",  # output file
        # adjust for image origin
        "-c",
        f"{-bbox[0]} {-bbox[1]} translate",
        "-f",
        infile,  # input file
        # showpage (see https://bugs.ghostscript.com/show_bug.cgi?id=698272)
        "-c",
        "showpage",
    ]

    # push data through Ghostscript
    try:
        startupinfo = None
        if sys.platform.startswith("win"):
            # Suppress the console window that would otherwise flash up.
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        subprocess.check_call(command, startupinfo=startupinfo)
        out_im = Image.open(outfile)
        out_im.load()
    finally:
        # Best-effort cleanup of both temp files.
        try:
            os.unlink(outfile)
            if infile_temp:
                os.unlink(infile_temp)
        except OSError:
            pass

    im = out_im.im.copy()
    out_im.close()
    return im
|
160 |
+
|
161 |
+
|
162 |
+
class PSFile:
    """
    Wrapper for bytesio object that treats either CR or LF as end of line.
    This class is no longer used internally, but kept for backwards compatibility.
    """

    def __init__(self, fp):
        # Emits a DeprecationWarning; scheduled for removal in Pillow 11.
        deprecate(
            "PSFile",
            11,
            action="If you need the functionality of this class "
            "you will need to implement it yourself.",
        )
        self.fp = fp
        # One-byte pushback buffer carried between readline() calls.
        self.char = None

    def seek(self, offset, whence=io.SEEK_SET):
        # Discard any pushed-back byte; it belongs to the old position.
        self.char = None
        self.fp.seek(offset, whence)

    def readline(self) -> str:
        # Start with the byte left over from the previous call, if any.
        s = [self.char or b""]
        self.char = None

        # Accumulate bytes until a CR, LF, or EOF.
        c = self.fp.read(1)
        while (c not in b"\r\n") and len(c):
            s.append(c)
            c = self.fp.read(1)

        # Peek one byte ahead to swallow a two-character line ending.
        self.char = self.fp.read(1)
        # line endings can be 1 or 2 of \r \n, in either order
        if self.char in b"\r\n":
            self.char = None

        return b"".join(s).decode("latin-1")
|
197 |
+
|
198 |
+
|
199 |
+
def _accept(prefix: bytes) -> bool:
|
200 |
+
return prefix[:4] == b"%!PS" or (len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5)
|
201 |
+
|
202 |
+
|
203 |
+
##
|
204 |
+
# Image plugin for Encapsulated PostScript. This plugin supports only
|
205 |
+
# a few variants of this format.
|
206 |
+
|
207 |
+
|
208 |
+
class EpsImageFile(ImageFile.ImageFile):
|
209 |
+
"""EPS File Parser for the Python Imaging Library"""
|
210 |
+
|
211 |
+
format = "EPS"
|
212 |
+
format_description = "Encapsulated Postscript"
|
213 |
+
|
214 |
+
mode_map = {1: "L", 2: "LAB", 3: "RGB", 4: "CMYK"}
|
215 |
+
|
216 |
+
def _open(self) -> None:
|
217 |
+
(length, offset) = self._find_offset(self.fp)
|
218 |
+
|
219 |
+
# go to offset - start of "%!PS"
|
220 |
+
self.fp.seek(offset)
|
221 |
+
|
222 |
+
self._mode = "RGB"
|
223 |
+
self._size = None
|
224 |
+
|
225 |
+
byte_arr = bytearray(255)
|
226 |
+
bytes_mv = memoryview(byte_arr)
|
227 |
+
bytes_read = 0
|
228 |
+
reading_header_comments = True
|
229 |
+
reading_trailer_comments = False
|
230 |
+
trailer_reached = False
|
231 |
+
|
232 |
+
def check_required_header_comments() -> None:
|
233 |
+
"""
|
234 |
+
The EPS specification requires that some headers exist.
|
235 |
+
This should be checked when the header comments formally end,
|
236 |
+
when image data starts, or when the file ends, whichever comes first.
|
237 |
+
"""
|
238 |
+
if "PS-Adobe" not in self.info:
|
239 |
+
msg = 'EPS header missing "%!PS-Adobe" comment'
|
240 |
+
raise SyntaxError(msg)
|
241 |
+
if "BoundingBox" not in self.info:
|
242 |
+
msg = 'EPS header missing "%%BoundingBox" comment'
|
243 |
+
raise SyntaxError(msg)
|
244 |
+
|
245 |
+
def _read_comment(s: str) -> bool:
|
246 |
+
nonlocal reading_trailer_comments
|
247 |
+
try:
|
248 |
+
m = split.match(s)
|
249 |
+
except re.error as e:
|
250 |
+
msg = "not an EPS file"
|
251 |
+
raise SyntaxError(msg) from e
|
252 |
+
|
253 |
+
if not m:
|
254 |
+
return False
|
255 |
+
|
256 |
+
k, v = m.group(1, 2)
|
257 |
+
self.info[k] = v
|
258 |
+
if k == "BoundingBox":
|
259 |
+
if v == "(atend)":
|
260 |
+
reading_trailer_comments = True
|
261 |
+
elif not self._size or (trailer_reached and reading_trailer_comments):
|
262 |
+
try:
|
263 |
+
# Note: The DSC spec says that BoundingBox
|
264 |
+
# fields should be integers, but some drivers
|
265 |
+
# put floating point values there anyway.
|
266 |
+
box = [int(float(i)) for i in v.split()]
|
267 |
+
self._size = box[2] - box[0], box[3] - box[1]
|
268 |
+
self.tile = [("eps", (0, 0) + self.size, offset, (length, box))]
|
269 |
+
except Exception:
|
270 |
+
pass
|
271 |
+
return True
|
272 |
+
|
273 |
+
while True:
|
274 |
+
byte = self.fp.read(1)
|
275 |
+
if byte == b"":
|
276 |
+
# if we didn't read a byte we must be at the end of the file
|
277 |
+
if bytes_read == 0:
|
278 |
+
if reading_header_comments:
|
279 |
+
check_required_header_comments()
|
280 |
+
break
|
281 |
+
elif byte in b"\r\n":
|
282 |
+
# if we read a line ending character, ignore it and parse what
|
283 |
+
# we have already read. if we haven't read any other characters,
|
284 |
+
# continue reading
|
285 |
+
if bytes_read == 0:
|
286 |
+
continue
|
287 |
+
else:
|
288 |
+
# ASCII/hexadecimal lines in an EPS file must not exceed
|
289 |
+
# 255 characters, not including line ending characters
|
290 |
+
if bytes_read >= 255:
|
291 |
+
# only enforce this for lines starting with a "%",
|
292 |
+
# otherwise assume it's binary data
|
293 |
+
if byte_arr[0] == ord("%"):
|
294 |
+
msg = "not an EPS file"
|
295 |
+
raise SyntaxError(msg)
|
296 |
+
else:
|
297 |
+
if reading_header_comments:
|
298 |
+
check_required_header_comments()
|
299 |
+
reading_header_comments = False
|
300 |
+
# reset bytes_read so we can keep reading
|
301 |
+
# data until the end of the line
|
302 |
+
bytes_read = 0
|
303 |
+
byte_arr[bytes_read] = byte[0]
|
304 |
+
bytes_read += 1
|
305 |
+
continue
|
306 |
+
|
307 |
+
if reading_header_comments:
|
308 |
+
# Load EPS header
|
309 |
+
|
310 |
+
# if this line doesn't start with a "%",
|
311 |
+
# or does start with "%%EndComments",
|
312 |
+
# then we've reached the end of the header/comments
|
313 |
+
if byte_arr[0] != ord("%") or bytes_mv[:13] == b"%%EndComments":
|
314 |
+
check_required_header_comments()
|
315 |
+
reading_header_comments = False
|
316 |
+
continue
|
317 |
+
|
318 |
+
s = str(bytes_mv[:bytes_read], "latin-1")
|
319 |
+
if not _read_comment(s):
|
320 |
+
m = field.match(s)
|
321 |
+
if m:
|
322 |
+
k = m.group(1)
|
323 |
+
if k[:8] == "PS-Adobe":
|
324 |
+
self.info["PS-Adobe"] = k[9:]
|
325 |
+
else:
|
326 |
+
self.info[k] = ""
|
327 |
+
elif s[0] == "%":
|
328 |
+
# handle non-DSC PostScript comments that some
|
329 |
+
# tools mistakenly put in the Comments section
|
330 |
+
pass
|
331 |
+
else:
|
332 |
+
msg = "bad EPS header"
|
333 |
+
raise OSError(msg)
|
334 |
+
elif bytes_mv[:11] == b"%ImageData:":
|
335 |
+
# Check for an "ImageData" descriptor
|
336 |
+
# https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#50577413_pgfId-1035096
|
337 |
+
|
338 |
+
# Values:
|
339 |
+
# columns
|
340 |
+
# rows
|
341 |
+
# bit depth (1 or 8)
|
342 |
+
# mode (1: L, 2: LAB, 3: RGB, 4: CMYK)
|
343 |
+
# number of padding channels
|
344 |
+
# block size (number of bytes per row per channel)
|
345 |
+
# binary/ascii (1: binary, 2: ascii)
|
346 |
+
# data start identifier (the image data follows after a single line
|
347 |
+
# consisting only of this quoted value)
|
348 |
+
image_data_values = byte_arr[11:bytes_read].split(None, 7)
|
349 |
+
columns, rows, bit_depth, mode_id = (
|
350 |
+
int(value) for value in image_data_values[:4]
|
351 |
+
)
|
352 |
+
|
353 |
+
if bit_depth == 1:
|
354 |
+
self._mode = "1"
|
355 |
+
elif bit_depth == 8:
|
356 |
+
try:
|
357 |
+
self._mode = self.mode_map[mode_id]
|
358 |
+
except ValueError:
|
359 |
+
break
|
360 |
+
else:
|
361 |
+
break
|
362 |
+
|
363 |
+
self._size = columns, rows
|
364 |
+
return
|
365 |
+
elif bytes_mv[:5] == b"%%EOF":
|
366 |
+
break
|
367 |
+
elif trailer_reached and reading_trailer_comments:
|
368 |
+
# Load EPS trailer
|
369 |
+
s = str(bytes_mv[:bytes_read], "latin-1")
|
370 |
+
_read_comment(s)
|
371 |
+
elif bytes_mv[:9] == b"%%Trailer":
|
372 |
+
trailer_reached = True
|
373 |
+
bytes_read = 0
|
374 |
+
|
375 |
+
if not self._size:
|
376 |
+
msg = "cannot determine EPS bounding box"
|
377 |
+
raise OSError(msg)
|
378 |
+
|
379 |
+
def _find_offset(self, fp):
|
380 |
+
s = fp.read(4)
|
381 |
+
|
382 |
+
if s == b"%!PS":
|
383 |
+
# for HEAD without binary preview
|
384 |
+
fp.seek(0, io.SEEK_END)
|
385 |
+
length = fp.tell()
|
386 |
+
offset = 0
|
387 |
+
elif i32(s) == 0xC6D3D0C5:
|
388 |
+
# FIX for: Some EPS file not handled correctly / issue #302
|
389 |
+
# EPS can contain binary data
|
390 |
+
# or start directly with latin coding
|
391 |
+
# more info see:
|
392 |
+
# https://web.archive.org/web/20160528181353/http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf
|
393 |
+
s = fp.read(8)
|
394 |
+
offset = i32(s)
|
395 |
+
length = i32(s, 4)
|
396 |
+
else:
|
397 |
+
msg = "not an EPS file"
|
398 |
+
raise SyntaxError(msg)
|
399 |
+
|
400 |
+
return length, offset
|
401 |
+
|
402 |
+
def load(self, scale=1, transparency=False):
|
403 |
+
# Load EPS via Ghostscript
|
404 |
+
if self.tile:
|
405 |
+
self.im = Ghostscript(self.tile, self.size, self.fp, scale, transparency)
|
406 |
+
self._mode = self.im.mode
|
407 |
+
self._size = self.im.size
|
408 |
+
self.tile = []
|
409 |
+
return Image.Image.load(self)
|
410 |
+
|
411 |
+
def load_seek(self, pos: int) -> None:
|
412 |
+
# we can't incrementally load, so force ImageFile.parser to
|
413 |
+
# use our custom load method by defining this method.
|
414 |
+
pass
|
415 |
+
|
416 |
+
|
417 |
+
# --------------------------------------------------------------------
|
418 |
+
|
419 |
+
|
420 |
+
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes, eps: int = 1) -> None:
|
421 |
+
"""EPS Writer for the Python Imaging Library."""
|
422 |
+
|
423 |
+
# make sure image data is available
|
424 |
+
im.load()
|
425 |
+
|
426 |
+
# determine PostScript image mode
|
427 |
+
if im.mode == "L":
|
428 |
+
operator = (8, 1, b"image")
|
429 |
+
elif im.mode == "RGB":
|
430 |
+
operator = (8, 3, b"false 3 colorimage")
|
431 |
+
elif im.mode == "CMYK":
|
432 |
+
operator = (8, 4, b"false 4 colorimage")
|
433 |
+
else:
|
434 |
+
msg = "image mode is not supported"
|
435 |
+
raise ValueError(msg)
|
436 |
+
|
437 |
+
if eps:
|
438 |
+
# write EPS header
|
439 |
+
fp.write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
|
440 |
+
fp.write(b"%%Creator: PIL 0.1 EpsEncode\n")
|
441 |
+
# fp.write("%%CreationDate: %s"...)
|
442 |
+
fp.write(b"%%%%BoundingBox: 0 0 %d %d\n" % im.size)
|
443 |
+
fp.write(b"%%Pages: 1\n")
|
444 |
+
fp.write(b"%%EndComments\n")
|
445 |
+
fp.write(b"%%Page: 1 1\n")
|
446 |
+
fp.write(b"%%ImageData: %d %d " % im.size)
|
447 |
+
fp.write(b'%d %d 0 1 1 "%s"\n' % operator)
|
448 |
+
|
449 |
+
# image header
|
450 |
+
fp.write(b"gsave\n")
|
451 |
+
fp.write(b"10 dict begin\n")
|
452 |
+
fp.write(b"/buf %d string def\n" % (im.size[0] * operator[1]))
|
453 |
+
fp.write(b"%d %d scale\n" % im.size)
|
454 |
+
fp.write(b"%d %d 8\n" % im.size) # <= bits
|
455 |
+
fp.write(b"[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
|
456 |
+
fp.write(b"{ currentfile buf readhexstring pop } bind\n")
|
457 |
+
fp.write(operator[2] + b"\n")
|
458 |
+
if hasattr(fp, "flush"):
|
459 |
+
fp.flush()
|
460 |
+
|
461 |
+
ImageFile._save(im, fp, [("eps", (0, 0) + im.size, 0, None)])
|
462 |
+
|
463 |
+
fp.write(b"\n%%%%EndBinary\n")
|
464 |
+
fp.write(b"grestore end\n")
|
465 |
+
if hasattr(fp, "flush"):
|
466 |
+
fp.flush()
|
467 |
+
|
468 |
+
|
469 |
+
# --------------------------------------------------------------------
|
470 |
+
|
471 |
+
|
472 |
+
Image.register_open(EpsImageFile.format, EpsImageFile, _accept)
|
473 |
+
|
474 |
+
Image.register_save(EpsImageFile.format, _save)
|
475 |
+
|
476 |
+
Image.register_extensions(EpsImageFile.format, [".ps", ".eps"])
|
477 |
+
|
478 |
+
Image.register_mime(EpsImageFile.format, "application/postscript")
|
env/Lib/site-packages/PIL/ExifTags.py
ADDED
@@ -0,0 +1,381 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# The Python Imaging Library.
|
3 |
+
# $Id$
|
4 |
+
#
|
5 |
+
# EXIF tags
|
6 |
+
#
|
7 |
+
# Copyright (c) 2003 by Secret Labs AB
|
8 |
+
#
|
9 |
+
# See the README file for information on usage and redistribution.
|
10 |
+
#
|
11 |
+
|
12 |
+
"""
|
13 |
+
This module provides constants and clear-text names for various
|
14 |
+
well-known EXIF tags.
|
15 |
+
"""
|
16 |
+
from __future__ import annotations
|
17 |
+
|
18 |
+
from enum import IntEnum
|
19 |
+
|
20 |
+
|
21 |
+
class Base(IntEnum):
|
22 |
+
# possibly incomplete
|
23 |
+
InteropIndex = 0x0001
|
24 |
+
ProcessingSoftware = 0x000B
|
25 |
+
NewSubfileType = 0x00FE
|
26 |
+
SubfileType = 0x00FF
|
27 |
+
ImageWidth = 0x0100
|
28 |
+
ImageLength = 0x0101
|
29 |
+
BitsPerSample = 0x0102
|
30 |
+
Compression = 0x0103
|
31 |
+
PhotometricInterpretation = 0x0106
|
32 |
+
Thresholding = 0x0107
|
33 |
+
CellWidth = 0x0108
|
34 |
+
CellLength = 0x0109
|
35 |
+
FillOrder = 0x010A
|
36 |
+
DocumentName = 0x010D
|
37 |
+
ImageDescription = 0x010E
|
38 |
+
Make = 0x010F
|
39 |
+
Model = 0x0110
|
40 |
+
StripOffsets = 0x0111
|
41 |
+
Orientation = 0x0112
|
42 |
+
SamplesPerPixel = 0x0115
|
43 |
+
RowsPerStrip = 0x0116
|
44 |
+
StripByteCounts = 0x0117
|
45 |
+
MinSampleValue = 0x0118
|
46 |
+
MaxSampleValue = 0x0119
|
47 |
+
XResolution = 0x011A
|
48 |
+
YResolution = 0x011B
|
49 |
+
PlanarConfiguration = 0x011C
|
50 |
+
PageName = 0x011D
|
51 |
+
FreeOffsets = 0x0120
|
52 |
+
FreeByteCounts = 0x0121
|
53 |
+
GrayResponseUnit = 0x0122
|
54 |
+
GrayResponseCurve = 0x0123
|
55 |
+
T4Options = 0x0124
|
56 |
+
T6Options = 0x0125
|
57 |
+
ResolutionUnit = 0x0128
|
58 |
+
PageNumber = 0x0129
|
59 |
+
TransferFunction = 0x012D
|
60 |
+
Software = 0x0131
|
61 |
+
DateTime = 0x0132
|
62 |
+
Artist = 0x013B
|
63 |
+
HostComputer = 0x013C
|
64 |
+
Predictor = 0x013D
|
65 |
+
WhitePoint = 0x013E
|
66 |
+
PrimaryChromaticities = 0x013F
|
67 |
+
ColorMap = 0x0140
|
68 |
+
HalftoneHints = 0x0141
|
69 |
+
TileWidth = 0x0142
|
70 |
+
TileLength = 0x0143
|
71 |
+
TileOffsets = 0x0144
|
72 |
+
TileByteCounts = 0x0145
|
73 |
+
SubIFDs = 0x014A
|
74 |
+
InkSet = 0x014C
|
75 |
+
InkNames = 0x014D
|
76 |
+
NumberOfInks = 0x014E
|
77 |
+
DotRange = 0x0150
|
78 |
+
TargetPrinter = 0x0151
|
79 |
+
ExtraSamples = 0x0152
|
80 |
+
SampleFormat = 0x0153
|
81 |
+
SMinSampleValue = 0x0154
|
82 |
+
SMaxSampleValue = 0x0155
|
83 |
+
TransferRange = 0x0156
|
84 |
+
ClipPath = 0x0157
|
85 |
+
XClipPathUnits = 0x0158
|
86 |
+
YClipPathUnits = 0x0159
|
87 |
+
Indexed = 0x015A
|
88 |
+
JPEGTables = 0x015B
|
89 |
+
OPIProxy = 0x015F
|
90 |
+
JPEGProc = 0x0200
|
91 |
+
JpegIFOffset = 0x0201
|
92 |
+
JpegIFByteCount = 0x0202
|
93 |
+
JpegRestartInterval = 0x0203
|
94 |
+
JpegLosslessPredictors = 0x0205
|
95 |
+
JpegPointTransforms = 0x0206
|
96 |
+
JpegQTables = 0x0207
|
97 |
+
JpegDCTables = 0x0208
|
98 |
+
JpegACTables = 0x0209
|
99 |
+
YCbCrCoefficients = 0x0211
|
100 |
+
YCbCrSubSampling = 0x0212
|
101 |
+
YCbCrPositioning = 0x0213
|
102 |
+
ReferenceBlackWhite = 0x0214
|
103 |
+
XMLPacket = 0x02BC
|
104 |
+
RelatedImageFileFormat = 0x1000
|
105 |
+
RelatedImageWidth = 0x1001
|
106 |
+
RelatedImageLength = 0x1002
|
107 |
+
Rating = 0x4746
|
108 |
+
RatingPercent = 0x4749
|
109 |
+
ImageID = 0x800D
|
110 |
+
CFARepeatPatternDim = 0x828D
|
111 |
+
BatteryLevel = 0x828F
|
112 |
+
Copyright = 0x8298
|
113 |
+
ExposureTime = 0x829A
|
114 |
+
FNumber = 0x829D
|
115 |
+
IPTCNAA = 0x83BB
|
116 |
+
ImageResources = 0x8649
|
117 |
+
ExifOffset = 0x8769
|
118 |
+
InterColorProfile = 0x8773
|
119 |
+
ExposureProgram = 0x8822
|
120 |
+
SpectralSensitivity = 0x8824
|
121 |
+
GPSInfo = 0x8825
|
122 |
+
ISOSpeedRatings = 0x8827
|
123 |
+
OECF = 0x8828
|
124 |
+
Interlace = 0x8829
|
125 |
+
TimeZoneOffset = 0x882A
|
126 |
+
SelfTimerMode = 0x882B
|
127 |
+
SensitivityType = 0x8830
|
128 |
+
StandardOutputSensitivity = 0x8831
|
129 |
+
RecommendedExposureIndex = 0x8832
|
130 |
+
ISOSpeed = 0x8833
|
131 |
+
ISOSpeedLatitudeyyy = 0x8834
|
132 |
+
ISOSpeedLatitudezzz = 0x8835
|
133 |
+
ExifVersion = 0x9000
|
134 |
+
DateTimeOriginal = 0x9003
|
135 |
+
DateTimeDigitized = 0x9004
|
136 |
+
OffsetTime = 0x9010
|
137 |
+
OffsetTimeOriginal = 0x9011
|
138 |
+
OffsetTimeDigitized = 0x9012
|
139 |
+
ComponentsConfiguration = 0x9101
|
140 |
+
CompressedBitsPerPixel = 0x9102
|
141 |
+
ShutterSpeedValue = 0x9201
|
142 |
+
ApertureValue = 0x9202
|
143 |
+
BrightnessValue = 0x9203
|
144 |
+
ExposureBiasValue = 0x9204
|
145 |
+
MaxApertureValue = 0x9205
|
146 |
+
SubjectDistance = 0x9206
|
147 |
+
MeteringMode = 0x9207
|
148 |
+
LightSource = 0x9208
|
149 |
+
Flash = 0x9209
|
150 |
+
FocalLength = 0x920A
|
151 |
+
Noise = 0x920D
|
152 |
+
ImageNumber = 0x9211
|
153 |
+
SecurityClassification = 0x9212
|
154 |
+
ImageHistory = 0x9213
|
155 |
+
TIFFEPStandardID = 0x9216
|
156 |
+
MakerNote = 0x927C
|
157 |
+
UserComment = 0x9286
|
158 |
+
SubsecTime = 0x9290
|
159 |
+
SubsecTimeOriginal = 0x9291
|
160 |
+
SubsecTimeDigitized = 0x9292
|
161 |
+
AmbientTemperature = 0x9400
|
162 |
+
Humidity = 0x9401
|
163 |
+
Pressure = 0x9402
|
164 |
+
WaterDepth = 0x9403
|
165 |
+
Acceleration = 0x9404
|
166 |
+
CameraElevationAngle = 0x9405
|
167 |
+
XPTitle = 0x9C9B
|
168 |
+
XPComment = 0x9C9C
|
169 |
+
XPAuthor = 0x9C9D
|
170 |
+
XPKeywords = 0x9C9E
|
171 |
+
XPSubject = 0x9C9F
|
172 |
+
FlashPixVersion = 0xA000
|
173 |
+
ColorSpace = 0xA001
|
174 |
+
ExifImageWidth = 0xA002
|
175 |
+
ExifImageHeight = 0xA003
|
176 |
+
RelatedSoundFile = 0xA004
|
177 |
+
ExifInteroperabilityOffset = 0xA005
|
178 |
+
FlashEnergy = 0xA20B
|
179 |
+
SpatialFrequencyResponse = 0xA20C
|
180 |
+
FocalPlaneXResolution = 0xA20E
|
181 |
+
FocalPlaneYResolution = 0xA20F
|
182 |
+
FocalPlaneResolutionUnit = 0xA210
|
183 |
+
SubjectLocation = 0xA214
|
184 |
+
ExposureIndex = 0xA215
|
185 |
+
SensingMethod = 0xA217
|
186 |
+
FileSource = 0xA300
|
187 |
+
SceneType = 0xA301
|
188 |
+
CFAPattern = 0xA302
|
189 |
+
CustomRendered = 0xA401
|
190 |
+
ExposureMode = 0xA402
|
191 |
+
WhiteBalance = 0xA403
|
192 |
+
DigitalZoomRatio = 0xA404
|
193 |
+
FocalLengthIn35mmFilm = 0xA405
|
194 |
+
SceneCaptureType = 0xA406
|
195 |
+
GainControl = 0xA407
|
196 |
+
Contrast = 0xA408
|
197 |
+
Saturation = 0xA409
|
198 |
+
Sharpness = 0xA40A
|
199 |
+
DeviceSettingDescription = 0xA40B
|
200 |
+
SubjectDistanceRange = 0xA40C
|
201 |
+
ImageUniqueID = 0xA420
|
202 |
+
CameraOwnerName = 0xA430
|
203 |
+
BodySerialNumber = 0xA431
|
204 |
+
LensSpecification = 0xA432
|
205 |
+
LensMake = 0xA433
|
206 |
+
LensModel = 0xA434
|
207 |
+
LensSerialNumber = 0xA435
|
208 |
+
CompositeImage = 0xA460
|
209 |
+
CompositeImageCount = 0xA461
|
210 |
+
CompositeImageExposureTimes = 0xA462
|
211 |
+
Gamma = 0xA500
|
212 |
+
PrintImageMatching = 0xC4A5
|
213 |
+
DNGVersion = 0xC612
|
214 |
+
DNGBackwardVersion = 0xC613
|
215 |
+
UniqueCameraModel = 0xC614
|
216 |
+
LocalizedCameraModel = 0xC615
|
217 |
+
CFAPlaneColor = 0xC616
|
218 |
+
CFALayout = 0xC617
|
219 |
+
LinearizationTable = 0xC618
|
220 |
+
BlackLevelRepeatDim = 0xC619
|
221 |
+
BlackLevel = 0xC61A
|
222 |
+
BlackLevelDeltaH = 0xC61B
|
223 |
+
BlackLevelDeltaV = 0xC61C
|
224 |
+
WhiteLevel = 0xC61D
|
225 |
+
DefaultScale = 0xC61E
|
226 |
+
DefaultCropOrigin = 0xC61F
|
227 |
+
DefaultCropSize = 0xC620
|
228 |
+
ColorMatrix1 = 0xC621
|
229 |
+
ColorMatrix2 = 0xC622
|
230 |
+
CameraCalibration1 = 0xC623
|
231 |
+
CameraCalibration2 = 0xC624
|
232 |
+
ReductionMatrix1 = 0xC625
|
233 |
+
ReductionMatrix2 = 0xC626
|
234 |
+
AnalogBalance = 0xC627
|
235 |
+
AsShotNeutral = 0xC628
|
236 |
+
AsShotWhiteXY = 0xC629
|
237 |
+
BaselineExposure = 0xC62A
|
238 |
+
BaselineNoise = 0xC62B
|
239 |
+
BaselineSharpness = 0xC62C
|
240 |
+
BayerGreenSplit = 0xC62D
|
241 |
+
LinearResponseLimit = 0xC62E
|
242 |
+
CameraSerialNumber = 0xC62F
|
243 |
+
LensInfo = 0xC630
|
244 |
+
ChromaBlurRadius = 0xC631
|
245 |
+
AntiAliasStrength = 0xC632
|
246 |
+
ShadowScale = 0xC633
|
247 |
+
DNGPrivateData = 0xC634
|
248 |
+
MakerNoteSafety = 0xC635
|
249 |
+
CalibrationIlluminant1 = 0xC65A
|
250 |
+
CalibrationIlluminant2 = 0xC65B
|
251 |
+
BestQualityScale = 0xC65C
|
252 |
+
RawDataUniqueID = 0xC65D
|
253 |
+
OriginalRawFileName = 0xC68B
|
254 |
+
OriginalRawFileData = 0xC68C
|
255 |
+
ActiveArea = 0xC68D
|
256 |
+
MaskedAreas = 0xC68E
|
257 |
+
AsShotICCProfile = 0xC68F
|
258 |
+
AsShotPreProfileMatrix = 0xC690
|
259 |
+
CurrentICCProfile = 0xC691
|
260 |
+
CurrentPreProfileMatrix = 0xC692
|
261 |
+
ColorimetricReference = 0xC6BF
|
262 |
+
CameraCalibrationSignature = 0xC6F3
|
263 |
+
ProfileCalibrationSignature = 0xC6F4
|
264 |
+
AsShotProfileName = 0xC6F6
|
265 |
+
NoiseReductionApplied = 0xC6F7
|
266 |
+
ProfileName = 0xC6F8
|
267 |
+
ProfileHueSatMapDims = 0xC6F9
|
268 |
+
ProfileHueSatMapData1 = 0xC6FA
|
269 |
+
ProfileHueSatMapData2 = 0xC6FB
|
270 |
+
ProfileToneCurve = 0xC6FC
|
271 |
+
ProfileEmbedPolicy = 0xC6FD
|
272 |
+
ProfileCopyright = 0xC6FE
|
273 |
+
ForwardMatrix1 = 0xC714
|
274 |
+
ForwardMatrix2 = 0xC715
|
275 |
+
PreviewApplicationName = 0xC716
|
276 |
+
PreviewApplicationVersion = 0xC717
|
277 |
+
PreviewSettingsName = 0xC718
|
278 |
+
PreviewSettingsDigest = 0xC719
|
279 |
+
PreviewColorSpace = 0xC71A
|
280 |
+
PreviewDateTime = 0xC71B
|
281 |
+
RawImageDigest = 0xC71C
|
282 |
+
OriginalRawFileDigest = 0xC71D
|
283 |
+
SubTileBlockSize = 0xC71E
|
284 |
+
RowInterleaveFactor = 0xC71F
|
285 |
+
ProfileLookTableDims = 0xC725
|
286 |
+
ProfileLookTableData = 0xC726
|
287 |
+
OpcodeList1 = 0xC740
|
288 |
+
OpcodeList2 = 0xC741
|
289 |
+
OpcodeList3 = 0xC74E
|
290 |
+
NoiseProfile = 0xC761
|
291 |
+
|
292 |
+
|
293 |
+
"""Maps EXIF tags to tag names."""
|
294 |
+
TAGS = {
|
295 |
+
**{i.value: i.name for i in Base},
|
296 |
+
0x920C: "SpatialFrequencyResponse",
|
297 |
+
0x9214: "SubjectLocation",
|
298 |
+
0x9215: "ExposureIndex",
|
299 |
+
0x828E: "CFAPattern",
|
300 |
+
0x920B: "FlashEnergy",
|
301 |
+
0x9216: "TIFF/EPStandardID",
|
302 |
+
}
|
303 |
+
|
304 |
+
|
305 |
+
class GPS(IntEnum):
|
306 |
+
GPSVersionID = 0
|
307 |
+
GPSLatitudeRef = 1
|
308 |
+
GPSLatitude = 2
|
309 |
+
GPSLongitudeRef = 3
|
310 |
+
GPSLongitude = 4
|
311 |
+
GPSAltitudeRef = 5
|
312 |
+
GPSAltitude = 6
|
313 |
+
GPSTimeStamp = 7
|
314 |
+
GPSSatellites = 8
|
315 |
+
GPSStatus = 9
|
316 |
+
GPSMeasureMode = 10
|
317 |
+
GPSDOP = 11
|
318 |
+
GPSSpeedRef = 12
|
319 |
+
GPSSpeed = 13
|
320 |
+
GPSTrackRef = 14
|
321 |
+
GPSTrack = 15
|
322 |
+
GPSImgDirectionRef = 16
|
323 |
+
GPSImgDirection = 17
|
324 |
+
GPSMapDatum = 18
|
325 |
+
GPSDestLatitudeRef = 19
|
326 |
+
GPSDestLatitude = 20
|
327 |
+
GPSDestLongitudeRef = 21
|
328 |
+
GPSDestLongitude = 22
|
329 |
+
GPSDestBearingRef = 23
|
330 |
+
GPSDestBearing = 24
|
331 |
+
GPSDestDistanceRef = 25
|
332 |
+
GPSDestDistance = 26
|
333 |
+
GPSProcessingMethod = 27
|
334 |
+
GPSAreaInformation = 28
|
335 |
+
GPSDateStamp = 29
|
336 |
+
GPSDifferential = 30
|
337 |
+
GPSHPositioningError = 31
|
338 |
+
|
339 |
+
|
340 |
+
"""Maps EXIF GPS tags to tag names."""
|
341 |
+
GPSTAGS = {i.value: i.name for i in GPS}
|
342 |
+
|
343 |
+
|
344 |
+
class Interop(IntEnum):
|
345 |
+
InteropIndex = 1
|
346 |
+
InteropVersion = 2
|
347 |
+
RelatedImageFileFormat = 4096
|
348 |
+
RelatedImageWidth = 4097
|
349 |
+
RelatedImageHeight = 4098
|
350 |
+
|
351 |
+
|
352 |
+
class IFD(IntEnum):
|
353 |
+
Exif = 34665
|
354 |
+
GPSInfo = 34853
|
355 |
+
Makernote = 37500
|
356 |
+
Interop = 40965
|
357 |
+
IFD1 = -1
|
358 |
+
|
359 |
+
|
360 |
+
class LightSource(IntEnum):
|
361 |
+
Unknown = 0
|
362 |
+
Daylight = 1
|
363 |
+
Fluorescent = 2
|
364 |
+
Tungsten = 3
|
365 |
+
Flash = 4
|
366 |
+
Fine = 9
|
367 |
+
Cloudy = 10
|
368 |
+
Shade = 11
|
369 |
+
DaylightFluorescent = 12
|
370 |
+
DayWhiteFluorescent = 13
|
371 |
+
CoolWhiteFluorescent = 14
|
372 |
+
WhiteFluorescent = 15
|
373 |
+
StandardLightA = 17
|
374 |
+
StandardLightB = 18
|
375 |
+
StandardLightC = 19
|
376 |
+
D55 = 20
|
377 |
+
D65 = 21
|
378 |
+
D75 = 22
|
379 |
+
D50 = 23
|
380 |
+
ISO = 24
|
381 |
+
Other = 255
|
env/Lib/site-packages/PIL/FitsImagePlugin.py
ADDED
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# The Python Imaging Library
|
3 |
+
# $Id$
|
4 |
+
#
|
5 |
+
# FITS file handling
|
6 |
+
#
|
7 |
+
# Copyright (c) 1998-2003 by Fredrik Lundh
|
8 |
+
#
|
9 |
+
# See the README file for information on usage and redistribution.
|
10 |
+
#
|
11 |
+
from __future__ import annotations
|
12 |
+
|
13 |
+
import gzip
|
14 |
+
import math
|
15 |
+
|
16 |
+
from . import Image, ImageFile
|
17 |
+
|
18 |
+
|
19 |
+
def _accept(prefix: bytes) -> bool:
|
20 |
+
return prefix[:6] == b"SIMPLE"
|
21 |
+
|
22 |
+
|
23 |
+
class FitsImageFile(ImageFile.ImageFile):
|
24 |
+
format = "FITS"
|
25 |
+
format_description = "FITS"
|
26 |
+
|
27 |
+
def _open(self) -> None:
|
28 |
+
assert self.fp is not None
|
29 |
+
|
30 |
+
headers: dict[bytes, bytes] = {}
|
31 |
+
header_in_progress = False
|
32 |
+
decoder_name = ""
|
33 |
+
while True:
|
34 |
+
header = self.fp.read(80)
|
35 |
+
if not header:
|
36 |
+
msg = "Truncated FITS file"
|
37 |
+
raise OSError(msg)
|
38 |
+
keyword = header[:8].strip()
|
39 |
+
if keyword in (b"SIMPLE", b"XTENSION"):
|
40 |
+
header_in_progress = True
|
41 |
+
elif headers and not header_in_progress:
|
42 |
+
# This is now a data unit
|
43 |
+
break
|
44 |
+
elif keyword == b"END":
|
45 |
+
# Seek to the end of the header unit
|
46 |
+
self.fp.seek(math.ceil(self.fp.tell() / 2880) * 2880)
|
47 |
+
if not decoder_name:
|
48 |
+
decoder_name, offset, args = self._parse_headers(headers)
|
49 |
+
|
50 |
+
header_in_progress = False
|
51 |
+
continue
|
52 |
+
|
53 |
+
if decoder_name:
|
54 |
+
# Keep going to read past the headers
|
55 |
+
continue
|
56 |
+
|
57 |
+
value = header[8:].split(b"/")[0].strip()
|
58 |
+
if value.startswith(b"="):
|
59 |
+
value = value[1:].strip()
|
60 |
+
if not headers and (not _accept(keyword) or value != b"T"):
|
61 |
+
msg = "Not a FITS file"
|
62 |
+
raise SyntaxError(msg)
|
63 |
+
headers[keyword] = value
|
64 |
+
|
65 |
+
if not decoder_name:
|
66 |
+
msg = "No image data"
|
67 |
+
raise ValueError(msg)
|
68 |
+
|
69 |
+
offset += self.fp.tell() - 80
|
70 |
+
self.tile = [(decoder_name, (0, 0) + self.size, offset, args)]
|
71 |
+
|
72 |
+
def _get_size(
|
73 |
+
self, headers: dict[bytes, bytes], prefix: bytes
|
74 |
+
) -> tuple[int, int] | None:
|
75 |
+
naxis = int(headers[prefix + b"NAXIS"])
|
76 |
+
if naxis == 0:
|
77 |
+
return None
|
78 |
+
|
79 |
+
if naxis == 1:
|
80 |
+
return 1, int(headers[prefix + b"NAXIS1"])
|
81 |
+
else:
|
82 |
+
return int(headers[prefix + b"NAXIS1"]), int(headers[prefix + b"NAXIS2"])
|
83 |
+
|
84 |
+
def _parse_headers(
|
85 |
+
self, headers: dict[bytes, bytes]
|
86 |
+
) -> tuple[str, int, tuple[str | int, ...]]:
|
87 |
+
prefix = b""
|
88 |
+
decoder_name = "raw"
|
89 |
+
offset = 0
|
90 |
+
if (
|
91 |
+
headers.get(b"XTENSION") == b"'BINTABLE'"
|
92 |
+
and headers.get(b"ZIMAGE") == b"T"
|
93 |
+
and headers[b"ZCMPTYPE"] == b"'GZIP_1 '"
|
94 |
+
):
|
95 |
+
no_prefix_size = self._get_size(headers, prefix) or (0, 0)
|
96 |
+
number_of_bits = int(headers[b"BITPIX"])
|
97 |
+
offset = no_prefix_size[0] * no_prefix_size[1] * (number_of_bits // 8)
|
98 |
+
|
99 |
+
prefix = b"Z"
|
100 |
+
decoder_name = "fits_gzip"
|
101 |
+
|
102 |
+
size = self._get_size(headers, prefix)
|
103 |
+
if not size:
|
104 |
+
return "", 0, ()
|
105 |
+
|
106 |
+
self._size = size
|
107 |
+
|
108 |
+
number_of_bits = int(headers[prefix + b"BITPIX"])
|
109 |
+
if number_of_bits == 8:
|
110 |
+
self._mode = "L"
|
111 |
+
elif number_of_bits == 16:
|
112 |
+
self._mode = "I;16"
|
113 |
+
elif number_of_bits == 32:
|
114 |
+
self._mode = "I"
|
115 |
+
elif number_of_bits in (-32, -64):
|
116 |
+
self._mode = "F"
|
117 |
+
|
118 |
+
args: tuple[str | int, ...]
|
119 |
+
if decoder_name == "raw":
|
120 |
+
args = (self.mode, 0, -1)
|
121 |
+
else:
|
122 |
+
args = (number_of_bits,)
|
123 |
+
return decoder_name, offset, args
|
124 |
+
|
125 |
+
|
126 |
+
class FitsGzipDecoder(ImageFile.PyDecoder):
|
127 |
+
_pulls_fd = True
|
128 |
+
|
129 |
+
def decode(self, buffer: bytes) -> tuple[int, int]:
|
130 |
+
assert self.fd is not None
|
131 |
+
value = gzip.decompress(self.fd.read())
|
132 |
+
|
133 |
+
rows = []
|
134 |
+
offset = 0
|
135 |
+
number_of_bits = min(self.args[0] // 8, 4)
|
136 |
+
for y in range(self.state.ysize):
|
137 |
+
row = bytearray()
|
138 |
+
for x in range(self.state.xsize):
|
139 |
+
row += value[offset + (4 - number_of_bits) : offset + 4]
|
140 |
+
offset += 4
|
141 |
+
rows.append(row)
|
142 |
+
self.set_as_raw(bytes([pixel for row in rows[::-1] for pixel in row]))
|
143 |
+
return -1, 0
|
144 |
+
|
145 |
+
|
146 |
+
# --------------------------------------------------------------------
|
147 |
+
# Registry
|
148 |
+
|
149 |
+
Image.register_open(FitsImageFile.format, FitsImageFile, _accept)
|
150 |
+
Image.register_decoder("fits_gzip", FitsGzipDecoder)
|
151 |
+
|
152 |
+
Image.register_extensions(FitsImageFile.format, [".fit", ".fits"])
|
env/Lib/site-packages/PIL/FliImagePlugin.py
ADDED
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# The Python Imaging Library.
|
3 |
+
# $Id$
|
4 |
+
#
|
5 |
+
# FLI/FLC file handling.
|
6 |
+
#
|
7 |
+
# History:
|
8 |
+
# 95-09-01 fl Created
|
9 |
+
# 97-01-03 fl Fixed parser, setup decoder tile
|
10 |
+
# 98-07-15 fl Renamed offset attribute to avoid name clash
|
11 |
+
#
|
12 |
+
# Copyright (c) Secret Labs AB 1997-98.
|
13 |
+
# Copyright (c) Fredrik Lundh 1995-97.
|
14 |
+
#
|
15 |
+
# See the README file for information on usage and redistribution.
|
16 |
+
#
|
17 |
+
from __future__ import annotations
|
18 |
+
|
19 |
+
import os
|
20 |
+
|
21 |
+
from . import Image, ImageFile, ImagePalette
|
22 |
+
from ._binary import i16le as i16
|
23 |
+
from ._binary import i32le as i32
|
24 |
+
from ._binary import o8
|
25 |
+
|
26 |
+
#
|
27 |
+
# decoder
|
28 |
+
|
29 |
+
|
30 |
+
def _accept(prefix: bytes) -> bool:
|
31 |
+
return (
|
32 |
+
len(prefix) >= 6
|
33 |
+
and i16(prefix, 4) in [0xAF11, 0xAF12]
|
34 |
+
and i16(prefix, 14) in [0, 3] # flags
|
35 |
+
)
|
36 |
+
|
37 |
+
|
38 |
+
##
|
39 |
+
# Image plugin for the FLI/FLC animation format. Use the <b>seek</b>
|
40 |
+
# method to load individual frames.
|
41 |
+
|
42 |
+
|
43 |
+
class FliImageFile(ImageFile.ImageFile):
|
44 |
+
format = "FLI"
|
45 |
+
format_description = "Autodesk FLI/FLC Animation"
|
46 |
+
_close_exclusive_fp_after_loading = False
|
47 |
+
|
48 |
+
def _open(self):
|
49 |
+
# HEAD
|
50 |
+
s = self.fp.read(128)
|
51 |
+
if not (_accept(s) and s[20:22] == b"\x00\x00"):
|
52 |
+
msg = "not an FLI/FLC file"
|
53 |
+
raise SyntaxError(msg)
|
54 |
+
|
55 |
+
# frames
|
56 |
+
self.n_frames = i16(s, 6)
|
57 |
+
self.is_animated = self.n_frames > 1
|
58 |
+
|
59 |
+
# image characteristics
|
60 |
+
self._mode = "P"
|
61 |
+
self._size = i16(s, 8), i16(s, 10)
|
62 |
+
|
63 |
+
# animation speed
|
64 |
+
duration = i32(s, 16)
|
65 |
+
magic = i16(s, 4)
|
66 |
+
if magic == 0xAF11:
|
67 |
+
duration = (duration * 1000) // 70
|
68 |
+
self.info["duration"] = duration
|
69 |
+
|
70 |
+
# look for palette
|
71 |
+
palette = [(a, a, a) for a in range(256)]
|
72 |
+
|
73 |
+
s = self.fp.read(16)
|
74 |
+
|
75 |
+
self.__offset = 128
|
76 |
+
|
77 |
+
if i16(s, 4) == 0xF100:
|
78 |
+
# prefix chunk; ignore it
|
79 |
+
self.__offset = self.__offset + i32(s)
|
80 |
+
self.fp.seek(self.__offset)
|
81 |
+
s = self.fp.read(16)
|
82 |
+
|
83 |
+
if i16(s, 4) == 0xF1FA:
|
84 |
+
# look for palette chunk
|
85 |
+
number_of_subchunks = i16(s, 6)
|
86 |
+
chunk_size = None
|
87 |
+
for _ in range(number_of_subchunks):
|
88 |
+
if chunk_size is not None:
|
89 |
+
self.fp.seek(chunk_size - 6, os.SEEK_CUR)
|
90 |
+
s = self.fp.read(6)
|
91 |
+
chunk_type = i16(s, 4)
|
92 |
+
if chunk_type in (4, 11):
|
93 |
+
self._palette(palette, 2 if chunk_type == 11 else 0)
|
94 |
+
break
|
95 |
+
chunk_size = i32(s)
|
96 |
+
if not chunk_size:
|
97 |
+
break
|
98 |
+
|
99 |
+
palette = [o8(r) + o8(g) + o8(b) for (r, g, b) in palette]
|
100 |
+
self.palette = ImagePalette.raw("RGB", b"".join(palette))
|
101 |
+
|
102 |
+
# set things up to decode first frame
|
103 |
+
self.__frame = -1
|
104 |
+
self._fp = self.fp
|
105 |
+
self.__rewind = self.fp.tell()
|
106 |
+
self.seek(0)
|
107 |
+
|
108 |
+
def _palette(self, palette, shift):
|
109 |
+
# load palette
|
110 |
+
|
111 |
+
i = 0
|
112 |
+
for e in range(i16(self.fp.read(2))):
|
113 |
+
s = self.fp.read(2)
|
114 |
+
i = i + s[0]
|
115 |
+
n = s[1]
|
116 |
+
if n == 0:
|
117 |
+
n = 256
|
118 |
+
s = self.fp.read(n * 3)
|
119 |
+
for n in range(0, len(s), 3):
|
120 |
+
r = s[n] << shift
|
121 |
+
g = s[n + 1] << shift
|
122 |
+
b = s[n + 2] << shift
|
123 |
+
palette[i] = (r, g, b)
|
124 |
+
i += 1
|
125 |
+
|
126 |
+
def seek(self, frame: int) -> None:
|
127 |
+
if not self._seek_check(frame):
|
128 |
+
return
|
129 |
+
if frame < self.__frame:
|
130 |
+
self._seek(0)
|
131 |
+
|
132 |
+
for f in range(self.__frame + 1, frame + 1):
|
133 |
+
self._seek(f)
|
134 |
+
|
135 |
+
    def _seek(self, frame: int) -> None:
        """Advance internal state by exactly one frame (or rewind to start).

        Raises ValueError for any non-sequential target and EOFError when
        the file ends before the requested frame.
        """
        if frame == 0:
            # rewind: reset frame counter, file position, and data offset
            self.__frame = -1
            self._fp.seek(self.__rewind)
            self.__offset = 128  # fixed FLI/FLC header size
        else:
            # ensure that the previous frame was loaded
            self.load()

            # only strictly sequential seeks are supported here
            if frame != self.__frame + 1:
                msg = f"cannot seek to frame {frame}"
                raise ValueError(msg)
            self.__frame = frame

            # move to next frame
            self.fp = self._fp
            self.fp.seek(self.__offset)

            s = self.fp.read(4)
            if not s:
                msg = "missing frame size"
                raise EOFError(msg)

            # first dword of a frame chunk is its total size
            framesize = i32(s)

            self.decodermaxblock = framesize
            self.tile = [("fli", (0, 0) + self.size, self.__offset, None)]

            # remember where the next frame begins
            self.__offset += framesize
|
164 |
+
|
165 |
+
def tell(self) -> int:
|
166 |
+
return self.__frame
|
167 |
+
|
168 |
+
|
169 |
+
#
|
170 |
+
# registry
|
171 |
+
|
172 |
+
Image.register_open(FliImageFile.format, FliImageFile, _accept)
|
173 |
+
|
174 |
+
Image.register_extensions(FliImageFile.format, [".fli", ".flc"])
|
env/Lib/site-packages/PIL/FontFile.py
ADDED
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# The Python Imaging Library
|
3 |
+
# $Id$
|
4 |
+
#
|
5 |
+
# base class for raster font file parsers
|
6 |
+
#
|
7 |
+
# history:
|
8 |
+
# 1997-06-05 fl created
|
9 |
+
# 1997-08-19 fl restrict image width
|
10 |
+
#
|
11 |
+
# Copyright (c) 1997-1998 by Secret Labs AB
|
12 |
+
# Copyright (c) 1997-1998 by Fredrik Lundh
|
13 |
+
#
|
14 |
+
# See the README file for information on usage and redistribution.
|
15 |
+
#
|
16 |
+
from __future__ import annotations
|
17 |
+
|
18 |
+
import os
|
19 |
+
from typing import BinaryIO
|
20 |
+
|
21 |
+
from . import Image, _binary
|
22 |
+
|
23 |
+
WIDTH = 800
|
24 |
+
|
25 |
+
|
26 |
+
def puti16(
    fp: BinaryIO, values: tuple[int, int, int, int, int, int, int, int, int, int]
) -> None:
    """Write *values* to *fp* as network order (big-endian) 16-bit integers."""
    for value in values:
        # fold negative values into the unsigned 16-bit range
        fp.write(_binary.o16be(value + 65536 if value < 0 else value))
|
34 |
+
|
35 |
+
|
36 |
+
class FontFile:
    """Base class for raster font file handlers."""

    # Packed bitmap holding every glyph; created lazily by compile().
    bitmap: Image.Image | None = None

    def __init__(self) -> None:
        # Raw key/value properties read from the font file.
        self.info: dict[bytes, bytes | int] = {}
        # One slot per character code 0-255; each entry is
        # (d, dst bbox, src bbox, glyph image) or None for unused codes.
        # NOTE(review): `d` looks like the glyph advance/delta — confirm
        # against the concrete parsers (e.g. BdfFontFile).
        self.glyph: list[
            tuple[
                tuple[int, int],
                tuple[int, int, int, int],
                tuple[int, int, int, int],
                Image.Image,
            ]
            | None
        ] = [None] * 256

    def __getitem__(self, ix: int) -> (
        tuple[
            tuple[int, int],
            tuple[int, int, int, int],
            tuple[int, int, int, int],
            Image.Image,
        ]
        | None
    ):
        # Glyph lookup by character code.
        return self.glyph[ix]

    def compile(self) -> None:
        """Create metrics and bitmap"""

        if self.bitmap:
            # already compiled
            return

        # create bitmap large enough to hold all data
        h = w = maxwidth = 0
        lines = 1
        for glyph in self.glyph:
            if glyph:
                d, dst, src, im = glyph
                h = max(h, src[3] - src[1])
                w = w + (src[2] - src[0])
                if w > WIDTH:
                    # wrap onto a new row once the running width exceeds WIDTH
                    lines += 1
                    w = src[2] - src[0]
                maxwidth = max(maxwidth, w)

        xsize = maxwidth
        ysize = lines * h

        if xsize == 0 and ysize == 0:
            # no glyphs at all; nothing to compile
            return

        self.ysize = h

        # paste glyphs into bitmap
        self.bitmap = Image.new("1", (xsize, ysize))
        # Per-code (d, dst bbox, packed-bitmap src bbox) triples, or None.
        self.metrics: list[
            tuple[tuple[int, int], tuple[int, int, int, int], tuple[int, int, int, int]]
            | None
        ] = [None] * 256
        x = y = 0
        for i in range(256):
            glyph = self[i]
            if glyph:
                d, dst, src, im = glyph
                xx = src[2] - src[0]
                x0, y0 = x, y
                x = x + xx
                if x > WIDTH:
                    # start a new row, mirroring the sizing pass above
                    x, y = 0, y + h
                    x0, y0 = x, y
                    x = xx
                # source bbox translated to its slot in the packed bitmap
                s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0
                self.bitmap.paste(im.crop(src), s)
                self.metrics[i] = d, dst, s

    def save(self, filename: str) -> None:
        """Save font"""

        self.compile()

        # font data
        if not self.bitmap:
            msg = "No bitmap created"
            raise ValueError(msg)
        # NOTE(review): writes PNG data under a .pbm extension — this matches
        # the historical PIL font pipeline; confirm before changing.
        self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG")

        # font metrics
        with open(os.path.splitext(filename)[0] + ".pil", "wb") as fp:
            fp.write(b"PILfont\n")
            fp.write(f";;;;;;{self.ysize};\n".encode("ascii"))  # HACK!!!
            fp.write(b"DATA\n")
            for id in range(256):
                m = self.metrics[id]
                if not m:
                    # unused slot: ten zero metrics
                    puti16(fp, (0,) * 10)
                else:
                    # d + dst bbox + src bbox = 10 shorts per glyph
                    puti16(fp, m[0] + m[1] + m[2])
|
env/Lib/site-packages/PIL/FpxImagePlugin.py
ADDED
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# THIS IS WORK IN PROGRESS
|
3 |
+
#
|
4 |
+
# The Python Imaging Library.
|
5 |
+
# $Id$
|
6 |
+
#
|
7 |
+
# FlashPix support for PIL
|
8 |
+
#
|
9 |
+
# History:
|
10 |
+
# 97-01-25 fl Created (reads uncompressed RGB images only)
|
11 |
+
#
|
12 |
+
# Copyright (c) Secret Labs AB 1997.
|
13 |
+
# Copyright (c) Fredrik Lundh 1997.
|
14 |
+
#
|
15 |
+
# See the README file for information on usage and redistribution.
|
16 |
+
#
|
17 |
+
from __future__ import annotations
|
18 |
+
|
19 |
+
import olefile
|
20 |
+
|
21 |
+
from . import Image, ImageFile
|
22 |
+
from ._binary import i32le as i32
|
23 |
+
|
24 |
+
# we map from colour field tuples to (mode, rawmode) descriptors
|
25 |
+
MODES = {
|
26 |
+
# opacity
|
27 |
+
(0x00007FFE,): ("A", "L"),
|
28 |
+
# monochrome
|
29 |
+
(0x00010000,): ("L", "L"),
|
30 |
+
(0x00018000, 0x00017FFE): ("RGBA", "LA"),
|
31 |
+
# photo YCC
|
32 |
+
(0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"),
|
33 |
+
(0x00028000, 0x00028001, 0x00028002, 0x00027FFE): ("RGBA", "YCCA;P"),
|
34 |
+
# standard RGB (NIFRGB)
|
35 |
+
(0x00030000, 0x00030001, 0x00030002): ("RGB", "RGB"),
|
36 |
+
(0x00038000, 0x00038001, 0x00038002, 0x00037FFE): ("RGBA", "RGBA"),
|
37 |
+
}
|
38 |
+
|
39 |
+
|
40 |
+
#
|
41 |
+
# --------------------------------------------------------------------
|
42 |
+
|
43 |
+
|
44 |
+
def _accept(prefix: bytes) -> bool:
    """Detect the 8-byte OLE compound-document signature that wraps FPX data."""
    return olefile.MAGIC == prefix[:8]
|
46 |
+
|
47 |
+
|
48 |
+
##
|
49 |
+
# Image plugin for the FlashPix images.
|
50 |
+
|
51 |
+
|
52 |
+
class FpxImageFile(ImageFile.ImageFile):
    """Image plugin for FlashPix images stored inside OLE compound files."""

    format = "FPX"
    format_description = "FlashPix"

    def _open(self):
        #
        # read the OLE directory and see if this is a likely
        # to be a FlashPix file

        try:
            self.ole = olefile.OleFileIO(self.fp)
        except OSError as e:
            msg = "not an FPX file; invalid OLE file"
            raise SyntaxError(msg) from e

        # require the FlashPix root-storage CLSID
        if self.ole.root.clsid != "56616700-C154-11CE-8553-00AA00A1F95B":
            msg = "not an FPX file; bad root CLSID"
            raise SyntaxError(msg)

        self._open_index(1)

    def _open_index(self, index: int = 1) -> None:
        #
        # get the Image Contents Property Set

        prop = self.ole.getproperties(
            [f"Data Object Store {index:06d}", "\005Image Contents"]
        )

        # size (highest resolution)

        self._size = prop[0x1000002], prop[0x1000003]

        # count resolution levels: each halves the size until <= 64 pixels
        size = max(self.size)
        i = 1
        while size > 64:
            size = size // 2
            i += 1
        self.maxid = i - 1

        # mode. instead of using a single field for this, flashpix
        # requires you to specify the mode for each channel in each
        # resolution subimage, and leaves it to the decoder to make
        # sure that they all match. for now, we'll cheat and assume
        # that this is always the case.

        id = self.maxid << 16

        s = prop[0x2000002 | id]

        bands = i32(s, 4)
        if bands > 4:
            msg = "Invalid number of bands"
            raise OSError(msg)

        # note: for now, we ignore the "uncalibrated" flag
        colors = tuple(i32(s, 8 + i * 4) & 0x7FFFFFFF for i in range(bands))

        # map the per-channel colour fields to a (mode, rawmode) pair
        self._mode, self.rawmode = MODES[colors]

        # load JPEG tables, if any
        self.jpeg = {}
        for i in range(256):
            id = 0x3000001 | (i << 16)
            if id in prop:
                self.jpeg[i] = prop[id]

        self._open_subimage(1, self.maxid)

    def _open_subimage(self, index: int = 1, subimage: int = 0) -> None:
        #
        # setup tile descriptors for a given subimage

        stream = [
            f"Data Object Store {index:06d}",
            f"Resolution {subimage:04d}",
            "Subimage 0000 Header",
        ]

        fp = self.ole.openstream(stream)

        # skip prefix
        fp.read(28)

        # header stream
        s = fp.read(36)

        size = i32(s, 4), i32(s, 8)
        # tilecount = i32(s, 12)
        tilesize = i32(s, 16), i32(s, 20)
        # channels = i32(s, 24)
        offset = i32(s, 28)
        length = i32(s, 32)

        if size != self.size:
            msg = "subimage mismatch"
            raise OSError(msg)

        # get tile descriptors
        fp.seek(28 + offset)
        s = fp.read(i32(s, 12) * length)

        x = y = 0
        xsize, ysize = size
        xtile, ytile = tilesize
        self.tile = []

        # one descriptor of `length` bytes per tile, row-major order
        for i in range(0, len(s), length):
            x1 = min(xsize, x + xtile)
            y1 = min(ysize, y + ytile)

            compression = i32(s, i + 8)

            if compression == 0:
                # uncompressed raw pixel data
                self.tile.append(
                    (
                        "raw",
                        (x, y, x1, y1),
                        i32(s, i) + 28,
                        (self.rawmode,),
                    )
                )

            elif compression == 1:
                # FIXME: the fill decoder is not implemented
                self.tile.append(
                    (
                        "fill",
                        (x, y, x1, y1),
                        i32(s, i) + 28,
                        (self.rawmode, s[12:16]),
                    )
                )

            elif compression == 2:
                internal_color_conversion = s[14]
                jpeg_tables = s[15]
                rawmode = self.rawmode

                if internal_color_conversion:
                    # The image is stored as usual (usually YCbCr).
                    if rawmode == "RGBA":
                        # For "RGBA", data is stored as YCbCrA based on
                        # negative RGB. The following trick works around
                        # this problem :
                        jpegmode, rawmode = "YCbCrK", "CMYK"
                    else:
                        jpegmode = None  # let the decoder decide

                else:
                    # The image is stored as defined by rawmode
                    jpegmode = rawmode

                self.tile.append(
                    (
                        "jpeg",
                        (x, y, x1, y1),
                        i32(s, i) + 28,
                        (rawmode, jpegmode),
                    )
                )

                # FIXME: jpeg tables are tile dependent; the prefix
                # data must be placed in the tile descriptor itself!

                if jpeg_tables:
                    self.tile_prefix = self.jpeg[jpeg_tables]

            else:
                msg = "unknown/invalid compression"
                raise OSError(msg)

            # advance to the next tile position
            x = x + xtile
            if x >= xsize:
                x, y = 0, y + ytile
                if y >= ysize:
                    break  # isn't really required

        self.stream = stream
        self._fp = self.fp
        self.fp = None

    def load(self):
        # open the pixel-data stream lazily on first load
        if not self.fp:
            self.fp = self.ole.openstream(self.stream[:2] + ["Subimage 0000 Data"])

        return ImageFile.ImageFile.load(self)

    def close(self) -> None:
        self.ole.close()
        super().close()

    def __exit__(self, *args: object) -> None:
        self.ole.close()
        super().__exit__()
|
247 |
+
|
248 |
+
|
249 |
+
#
|
250 |
+
# --------------------------------------------------------------------
|
251 |
+
|
252 |
+
|
253 |
+
Image.register_open(FpxImageFile.format, FpxImageFile, _accept)
|
254 |
+
|
255 |
+
Image.register_extension(FpxImageFile.format, ".fpx")
|
env/Lib/site-packages/PIL/FtexImagePlugin.py
ADDED
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
A Pillow loader for .ftc and .ftu files (FTEX)
|
3 |
+
Jerome Leclanche <[email protected]>
|
4 |
+
|
5 |
+
The contents of this file are hereby released in the public domain (CC0)
|
6 |
+
Full text of the CC0 license:
|
7 |
+
https://creativecommons.org/publicdomain/zero/1.0/
|
8 |
+
|
9 |
+
Independence War 2: Edge Of Chaos - Texture File Format - 16 October 2001
|
10 |
+
|
11 |
+
The textures used for 3D objects in Independence War 2: Edge Of Chaos are in a
|
12 |
+
packed custom format called FTEX. This file format uses file extensions FTC
|
13 |
+
and FTU.
|
14 |
+
* FTC files are compressed textures (using standard texture compression).
|
15 |
+
* FTU files are not compressed.
|
16 |
+
Texture File Format
|
17 |
+
The FTC and FTU texture files both use the same format. This
|
18 |
+
has the following structure:
|
19 |
+
{header}
|
20 |
+
{format_directory}
|
21 |
+
{data}
|
22 |
+
Where:
|
23 |
+
{header} = {
|
24 |
+
u32:magic,
|
25 |
+
u32:version,
|
26 |
+
u32:width,
|
27 |
+
u32:height,
|
28 |
+
u32:mipmap_count,
|
29 |
+
u32:format_count
|
30 |
+
}
|
31 |
+
|
32 |
+
* The "magic" number is "FTEX".
|
33 |
+
* "width" and "height" are the dimensions of the texture.
|
34 |
+
* "mipmap_count" is the number of mipmaps in the texture.
|
35 |
+
* "format_count" is the number of texture formats (different versions of the
|
36 |
+
same texture) in this file.
|
37 |
+
|
38 |
+
{format_directory} = format_count * { u32:format, u32:where }
|
39 |
+
|
40 |
+
The format value is 0 for DXT1 compressed textures and 1 for 24-bit RGB
|
41 |
+
uncompressed textures.
|
42 |
+
The texture data for a format starts at the position "where" in the file.
|
43 |
+
|
44 |
+
Each set of texture data in the file has the following structure:
|
45 |
+
{data} = format_count * { u32:mipmap_size, mipmap_size * { u8 } }
|
46 |
+
* "mipmap_size" is the number of bytes in that mip level. For compressed
|
47 |
+
textures this is the size of the texture data compressed with DXT1. For 24 bit
|
48 |
+
uncompressed textures, this is 3 * width * height. Following this are the image
|
49 |
+
bytes for that mipmap level.
|
50 |
+
|
51 |
+
Note: All data is stored in little-Endian (Intel) byte order.
|
52 |
+
"""
|
53 |
+
|
54 |
+
from __future__ import annotations
|
55 |
+
|
56 |
+
import struct
|
57 |
+
from enum import IntEnum
|
58 |
+
from io import BytesIO
|
59 |
+
|
60 |
+
from . import Image, ImageFile
|
61 |
+
|
62 |
+
MAGIC = b"FTEX"
|
63 |
+
|
64 |
+
|
65 |
+
class Format(IntEnum):
|
66 |
+
DXT1 = 0
|
67 |
+
UNCOMPRESSED = 1
|
68 |
+
|
69 |
+
|
70 |
+
class FtexImageFile(ImageFile.ImageFile):
    """Image plugin for FTEX textures (.ftc compressed / .ftu uncompressed)."""

    format = "FTEX"
    format_description = "Texture File Format (IW2:EOC)"

    def _open(self) -> None:
        # header: magic, version, width/height, mipmap count, format count
        if not _accept(self.fp.read(4)):
            msg = "not an FTEX file"
            raise SyntaxError(msg)
        struct.unpack("<i", self.fp.read(4))  # version
        self._size = struct.unpack("<2i", self.fp.read(8))
        mipmap_count, format_count = struct.unpack("<2i", self.fp.read(8))

        self._mode = "RGB"

        # Only support single-format files.
        # I don't know of any multi-format file.
        # NOTE(review): assert is stripped under python -O; consider raising.
        assert format_count == 1

        format, where = struct.unpack("<2i", self.fp.read(8))
        self.fp.seek(where)
        (mipmap_size,) = struct.unpack("<i", self.fp.read(4))

        # only the first (largest) mipmap level is read
        data = self.fp.read(mipmap_size)

        if format == Format.DXT1:
            # DXT1 decodes to RGBA via the "bcn" decoder
            self._mode = "RGBA"
            self.tile = [("bcn", (0, 0) + self.size, 0, 1)]
        elif format == Format.UNCOMPRESSED:
            self.tile = [("raw", (0, 0) + self.size, 0, ("RGB", 0, 1))]
        else:
            msg = f"Invalid texture compression format: {repr(format)}"
            raise ValueError(msg)

        # decode from the in-memory mipmap data instead of the original file
        self.fp.close()
        self.fp = BytesIO(data)

    def load_seek(self, pos: int) -> None:
        # all pixel data is already buffered in memory; seeking is a no-op
        pass
|
108 |
+
|
109 |
+
|
110 |
+
def _accept(prefix: bytes) -> bool:
    """Return True when *prefix* carries the 4-byte FTEX magic."""
    return prefix.startswith(MAGIC)
|
112 |
+
|
113 |
+
|
114 |
+
Image.register_open(FtexImageFile.format, FtexImageFile, _accept)
|
115 |
+
Image.register_extensions(FtexImageFile.format, [".ftc", ".ftu"])
|
env/Lib/site-packages/PIL/GbrImagePlugin.py
ADDED
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# The Python Imaging Library
|
3 |
+
#
|
4 |
+
# load a GIMP brush file
|
5 |
+
#
|
6 |
+
# History:
|
7 |
+
# 96-03-14 fl Created
|
8 |
+
# 16-01-08 es Version 2
|
9 |
+
#
|
10 |
+
# Copyright (c) Secret Labs AB 1997.
|
11 |
+
# Copyright (c) Fredrik Lundh 1996.
|
12 |
+
# Copyright (c) Eric Soroos 2016.
|
13 |
+
#
|
14 |
+
# See the README file for information on usage and redistribution.
|
15 |
+
#
|
16 |
+
#
|
17 |
+
# See https://github.com/GNOME/gimp/blob/mainline/devel-docs/gbr.txt for
|
18 |
+
# format documentation.
|
19 |
+
#
|
20 |
+
# This code Interprets version 1 and 2 .gbr files.
|
21 |
+
# Version 1 files are obsolete, and should not be used for new
|
22 |
+
# brushes.
|
23 |
+
# Version 2 files are saved by GIMP v2.8 (at least)
|
24 |
+
# Version 3 files have a format specifier of 18 for 16bit floats in
|
25 |
+
# the color depth field. This is currently unsupported by Pillow.
|
26 |
+
from __future__ import annotations
|
27 |
+
|
28 |
+
from . import Image, ImageFile
|
29 |
+
from ._binary import i32be as i32
|
30 |
+
|
31 |
+
|
32 |
+
def _accept(prefix: bytes) -> bool:
    """Plausibility check: header size of at least 20 and version 1 or 2."""
    if len(prefix) < 8:
        return False
    return i32(prefix, 0) >= 20 and i32(prefix, 4) in (1, 2)
|
34 |
+
|
35 |
+
|
36 |
+
##
|
37 |
+
# Image plugin for the GIMP brush format.
|
38 |
+
|
39 |
+
|
40 |
+
class GbrImageFile(ImageFile.ImageFile):
    """Image plugin for GIMP brush (.gbr) files, versions 1 and 2."""

    format = "GBR"
    format_description = "GIMP brush file"

    def _open(self) -> None:
        # total header size, including the fixed fields and the comment
        header_size = i32(self.fp.read(4))
        if header_size < 20:
            msg = "not a GIMP brush"
            raise SyntaxError(msg)
        version = i32(self.fp.read(4))
        if version not in (1, 2):
            msg = f"Unsupported GIMP brush version: {version}"
            raise SyntaxError(msg)

        width = i32(self.fp.read(4))
        height = i32(self.fp.read(4))
        # bytes per pixel: 1 = greyscale, 4 = RGBA
        color_depth = i32(self.fp.read(4))
        if width <= 0 or height <= 0:
            msg = "not a GIMP brush"
            raise SyntaxError(msg)
        if color_depth not in (1, 4):
            msg = f"Unsupported GIMP brush color depth: {color_depth}"
            raise SyntaxError(msg)

        if version == 1:
            # version 1 has a 20-byte fixed header
            comment_length = header_size - 20
        else:
            # version 2 adds an 8-byte magic + spacing block
            comment_length = header_size - 28
            magic_number = self.fp.read(4)
            if magic_number != b"GIMP":
                msg = "not a GIMP brush, bad magic number"
                raise SyntaxError(msg)
            self.info["spacing"] = i32(self.fp.read(4))

        # drop the trailing NUL terminator from the comment
        comment = self.fp.read(comment_length)[:-1]

        if color_depth == 1:
            self._mode = "L"
        else:
            self._mode = "RGBA"

        self._size = width, height

        self.info["comment"] = comment

        # Image might not be small
        Image._decompression_bomb_check(self.size)

        # Data is an uncompressed block of w * h * bytes/pixel
        self._data_size = width * height * color_depth

    def load(self):
        # lazily decode the raw pixel block on first access
        if not self.im:
            self.im = Image.core.new(self.mode, self.size)
            self.frombytes(self.fp.read(self._data_size))
        return Image.Image.load(self)
|
96 |
+
|
97 |
+
|
98 |
+
#
|
99 |
+
# registry
|
100 |
+
|
101 |
+
|
102 |
+
Image.register_open(GbrImageFile.format, GbrImageFile, _accept)
|
103 |
+
Image.register_extension(GbrImageFile.format, ".gbr")
|
env/Lib/site-packages/PIL/GdImageFile.py
ADDED
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# The Python Imaging Library.
|
3 |
+
# $Id$
|
4 |
+
#
|
5 |
+
# GD file handling
|
6 |
+
#
|
7 |
+
# History:
|
8 |
+
# 1996-04-12 fl Created
|
9 |
+
#
|
10 |
+
# Copyright (c) 1997 by Secret Labs AB.
|
11 |
+
# Copyright (c) 1996 by Fredrik Lundh.
|
12 |
+
#
|
13 |
+
# See the README file for information on usage and redistribution.
|
14 |
+
#
|
15 |
+
|
16 |
+
|
17 |
+
"""
|
18 |
+
.. note::
|
19 |
+
This format cannot be automatically recognized, so the
|
20 |
+
class is not registered for use with :py:func:`PIL.Image.open()`. To open a
|
21 |
+
gd file, use the :py:func:`PIL.GdImageFile.open()` function instead.
|
22 |
+
|
23 |
+
.. warning::
|
24 |
+
THE GD FORMAT IS NOT DESIGNED FOR DATA INTERCHANGE. This
|
25 |
+
implementation is provided for convenience and demonstrational
|
26 |
+
purposes only.
|
27 |
+
"""
|
28 |
+
from __future__ import annotations
|
29 |
+
|
30 |
+
from typing import IO
|
31 |
+
|
32 |
+
from . import ImageFile, ImagePalette, UnidentifiedImageError
|
33 |
+
from ._binary import i16be as i16
|
34 |
+
from ._binary import i32be as i32
|
35 |
+
from ._typing import StrOrBytesPath
|
36 |
+
|
37 |
+
|
38 |
+
class GdImageFile(ImageFile.ImageFile):
    """
    Image plugin for the GD uncompressed format. Note that this format
    is not supported by the standard :py:func:`PIL.Image.open()` function. To use
    this plugin, you have to import the :py:mod:`PIL.GdImageFile` module and
    use the :py:func:`PIL.GdImageFile.open()` function.
    """

    format = "GD"
    format_description = "GD uncompressed images"

    def _open(self) -> None:
        # Header
        assert self.fp is not None

        # fixed-size prefix covering magic, size, flags, and the palette
        s = self.fp.read(1037)

        # GD 2.x magic: 0xFFFE (truecolor) or 0xFFFF (palette)
        if i16(s) not in [65534, 65535]:
            msg = "Not a valid GD 2.x .gd file"
            raise SyntaxError(msg)

        self._mode = "L"  # FIXME: "P"
        self._size = i16(s, 2), i16(s, 4)

        true_color = s[6]
        # truecolor files carry two extra header bytes before the fields below
        true_color_offset = 2 if true_color else 0

        # transparency index
        tindex = i32(s, 7 + true_color_offset)
        if tindex < 256:
            self.info["transparency"] = tindex

        # 256 four-byte palette entries follow the transparency field
        self.palette = ImagePalette.raw(
            "XBGR", s[7 + true_color_offset + 4 : 7 + true_color_offset + 4 + 256 * 4]
        )

        self.tile = [
            (
                "raw",
                (0, 0) + self.size,
                # pixel data begins immediately after the palette
                7 + true_color_offset + 4 + 256 * 4,
                ("L", 0, 1),
            )
        ]
|
82 |
+
|
83 |
+
|
84 |
+
def open(fp: StrOrBytesPath | IO[bytes], mode: str = "r") -> GdImageFile:
    """
    Load texture from a GD image file.

    :param fp: GD file name, or an opened file handle.
    :param mode: Optional mode. In this version, if the mode argument
        is given, it must be "r".
    :returns: An image instance.
    :raises OSError: If the image could not be read.
    """
    if mode != "r":
        msg = "bad mode"
        raise ValueError(msg)

    try:
        return GdImageFile(fp)
    except SyntaxError as e:
        # re-raise as the error type callers of Image.open() expect
        msg = "cannot identify this image file"
        raise UnidentifiedImageError(msg) from e
|
env/Lib/site-packages/PIL/GifImagePlugin.py
ADDED
@@ -0,0 +1,1159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# The Python Imaging Library.
|
3 |
+
# $Id$
|
4 |
+
#
|
5 |
+
# GIF file handling
|
6 |
+
#
|
7 |
+
# History:
|
8 |
+
# 1995-09-01 fl Created
|
9 |
+
# 1996-12-14 fl Added interlace support
|
10 |
+
# 1996-12-30 fl Added animation support
|
11 |
+
# 1997-01-05 fl Added write support, fixed local colour map bug
|
12 |
+
# 1997-02-23 fl Make sure to load raster data in getdata()
|
13 |
+
# 1997-07-05 fl Support external decoder (0.4)
|
14 |
+
# 1998-07-09 fl Handle all modes when saving (0.5)
|
15 |
+
# 1998-07-15 fl Renamed offset attribute to avoid name clash
|
16 |
+
# 2001-04-16 fl Added rewind support (seek to frame 0) (0.6)
|
17 |
+
# 2001-04-17 fl Added palette optimization (0.7)
|
18 |
+
# 2002-06-06 fl Added transparency support for save (0.8)
|
19 |
+
# 2004-02-24 fl Disable interlacing for small images
|
20 |
+
#
|
21 |
+
# Copyright (c) 1997-2004 by Secret Labs AB
|
22 |
+
# Copyright (c) 1995-2004 by Fredrik Lundh
|
23 |
+
#
|
24 |
+
# See the README file for information on usage and redistribution.
|
25 |
+
#
|
26 |
+
from __future__ import annotations
|
27 |
+
|
28 |
+
import itertools
|
29 |
+
import math
|
30 |
+
import os
|
31 |
+
import subprocess
|
32 |
+
import sys
|
33 |
+
from enum import IntEnum
|
34 |
+
from functools import cached_property
|
35 |
+
from typing import IO, TYPE_CHECKING, Any, List, Literal, NamedTuple, Union
|
36 |
+
|
37 |
+
from . import (
|
38 |
+
Image,
|
39 |
+
ImageChops,
|
40 |
+
ImageFile,
|
41 |
+
ImageMath,
|
42 |
+
ImageOps,
|
43 |
+
ImagePalette,
|
44 |
+
ImageSequence,
|
45 |
+
)
|
46 |
+
from ._binary import i16le as i16
|
47 |
+
from ._binary import o8
|
48 |
+
from ._binary import o16le as o16
|
49 |
+
|
50 |
+
if TYPE_CHECKING:
|
51 |
+
from . import _imaging
|
52 |
+
|
53 |
+
|
54 |
+
class LoadingStrategy(IntEnum):
|
55 |
+
""".. versionadded:: 9.1.0"""
|
56 |
+
|
57 |
+
RGB_AFTER_FIRST = 0
|
58 |
+
RGB_AFTER_DIFFERENT_PALETTE_ONLY = 1
|
59 |
+
RGB_ALWAYS = 2
|
60 |
+
|
61 |
+
|
62 |
+
#: .. versionadded:: 9.1.0
|
63 |
+
LOADING_STRATEGY = LoadingStrategy.RGB_AFTER_FIRST
|
64 |
+
|
65 |
+
# --------------------------------------------------------------------
|
66 |
+
# Identify/read GIF files
|
67 |
+
|
68 |
+
|
69 |
+
def _accept(prefix: bytes) -> bool:
|
70 |
+
return prefix[:6] in [b"GIF87a", b"GIF89a"]
|
71 |
+
|
72 |
+
|
73 |
+
##
|
74 |
+
# Image plugin for GIF images. This plugin supports both GIF87 and
|
75 |
+
# GIF89 images.
|
76 |
+
|
77 |
+
|
78 |
+
class GifImageFile(ImageFile.ImageFile):
|
79 |
+
format = "GIF"
|
80 |
+
format_description = "Compuserve GIF"
|
81 |
+
_close_exclusive_fp_after_loading = False
|
82 |
+
|
83 |
+
global_palette = None
|
84 |
+
|
85 |
+
def data(self) -> bytes | None:
|
86 |
+
s = self.fp.read(1)
|
87 |
+
if s and s[0]:
|
88 |
+
return self.fp.read(s[0])
|
89 |
+
return None
|
90 |
+
|
91 |
+
def _is_palette_needed(self, p: bytes) -> bool:
|
92 |
+
for i in range(0, len(p), 3):
|
93 |
+
if not (i // 3 == p[i] == p[i + 1] == p[i + 2]):
|
94 |
+
return True
|
95 |
+
return False
|
96 |
+
|
97 |
+
def _open(self) -> None:
|
98 |
+
# Screen
|
99 |
+
s = self.fp.read(13)
|
100 |
+
if not _accept(s):
|
101 |
+
msg = "not a GIF file"
|
102 |
+
raise SyntaxError(msg)
|
103 |
+
|
104 |
+
self.info["version"] = s[:6]
|
105 |
+
self._size = i16(s, 6), i16(s, 8)
|
106 |
+
self.tile = []
|
107 |
+
flags = s[10]
|
108 |
+
bits = (flags & 7) + 1
|
109 |
+
|
110 |
+
if flags & 128:
|
111 |
+
# get global palette
|
112 |
+
self.info["background"] = s[11]
|
113 |
+
# check if palette contains colour indices
|
114 |
+
p = self.fp.read(3 << bits)
|
115 |
+
if self._is_palette_needed(p):
|
116 |
+
p = ImagePalette.raw("RGB", p)
|
117 |
+
self.global_palette = self.palette = p
|
118 |
+
|
119 |
+
self._fp = self.fp # FIXME: hack
|
120 |
+
self.__rewind = self.fp.tell()
|
121 |
+
self._n_frames: int | None = None
|
122 |
+
self._seek(0) # get ready to read first frame
|
123 |
+
|
124 |
+
@property
|
125 |
+
def n_frames(self) -> int:
|
126 |
+
if self._n_frames is None:
|
127 |
+
current = self.tell()
|
128 |
+
try:
|
129 |
+
while True:
|
130 |
+
self._seek(self.tell() + 1, False)
|
131 |
+
except EOFError:
|
132 |
+
self._n_frames = self.tell() + 1
|
133 |
+
self.seek(current)
|
134 |
+
return self._n_frames
|
135 |
+
|
136 |
+
@cached_property
|
137 |
+
def is_animated(self) -> bool:
|
138 |
+
if self._n_frames is not None:
|
139 |
+
return self._n_frames != 1
|
140 |
+
|
141 |
+
current = self.tell()
|
142 |
+
if current:
|
143 |
+
return True
|
144 |
+
|
145 |
+
try:
|
146 |
+
self._seek(1, False)
|
147 |
+
is_animated = True
|
148 |
+
except EOFError:
|
149 |
+
is_animated = False
|
150 |
+
|
151 |
+
self.seek(current)
|
152 |
+
return is_animated
|
153 |
+
|
154 |
+
def seek(self, frame: int) -> None:
|
155 |
+
if not self._seek_check(frame):
|
156 |
+
return
|
157 |
+
if frame < self.__frame:
|
158 |
+
self.im = None
|
159 |
+
self._seek(0)
|
160 |
+
|
161 |
+
last_frame = self.__frame
|
162 |
+
for f in range(self.__frame + 1, frame + 1):
|
163 |
+
try:
|
164 |
+
self._seek(f)
|
165 |
+
except EOFError as e:
|
166 |
+
self.seek(last_frame)
|
167 |
+
msg = "no more images in GIF file"
|
168 |
+
raise EOFError(msg) from e
|
169 |
+
|
170 |
+
def _seek(self, frame: int, update_image: bool = True) -> None:
|
171 |
+
if frame == 0:
|
172 |
+
# rewind
|
173 |
+
self.__offset = 0
|
174 |
+
self.dispose: _imaging.ImagingCore | None = None
|
175 |
+
self.__frame = -1
|
176 |
+
self._fp.seek(self.__rewind)
|
177 |
+
self.disposal_method = 0
|
178 |
+
if "comment" in self.info:
|
179 |
+
del self.info["comment"]
|
180 |
+
else:
|
181 |
+
# ensure that the previous frame was loaded
|
182 |
+
if self.tile and update_image:
|
183 |
+
self.load()
|
184 |
+
|
185 |
+
if frame != self.__frame + 1:
|
186 |
+
msg = f"cannot seek to frame {frame}"
|
187 |
+
raise ValueError(msg)
|
188 |
+
|
189 |
+
self.fp = self._fp
|
190 |
+
if self.__offset:
|
191 |
+
# backup to last frame
|
192 |
+
self.fp.seek(self.__offset)
|
193 |
+
while self.data():
|
194 |
+
pass
|
195 |
+
self.__offset = 0
|
196 |
+
|
197 |
+
s = self.fp.read(1)
|
198 |
+
if not s or s == b";":
|
199 |
+
msg = "no more images in GIF file"
|
200 |
+
raise EOFError(msg)
|
201 |
+
|
202 |
+
palette: ImagePalette.ImagePalette | Literal[False] | None = None
|
203 |
+
|
204 |
+
info: dict[str, Any] = {}
|
205 |
+
frame_transparency = None
|
206 |
+
interlace = None
|
207 |
+
frame_dispose_extent = None
|
208 |
+
while True:
|
209 |
+
if not s:
|
210 |
+
s = self.fp.read(1)
|
211 |
+
if not s or s == b";":
|
212 |
+
break
|
213 |
+
|
214 |
+
elif s == b"!":
|
215 |
+
#
|
216 |
+
# extensions
|
217 |
+
#
|
218 |
+
s = self.fp.read(1)
|
219 |
+
block = self.data()
|
220 |
+
if s[0] == 249 and block is not None:
|
221 |
+
#
|
222 |
+
# graphic control extension
|
223 |
+
#
|
224 |
+
flags = block[0]
|
225 |
+
if flags & 1:
|
226 |
+
frame_transparency = block[3]
|
227 |
+
info["duration"] = i16(block, 1) * 10
|
228 |
+
|
229 |
+
# disposal method - find the value of bits 4 - 6
|
230 |
+
dispose_bits = 0b00011100 & flags
|
231 |
+
dispose_bits = dispose_bits >> 2
|
232 |
+
if dispose_bits:
|
233 |
+
# only set the dispose if it is not
|
234 |
+
# unspecified. I'm not sure if this is
|
235 |
+
# correct, but it seems to prevent the last
|
236 |
+
# frame from looking odd for some animations
|
237 |
+
self.disposal_method = dispose_bits
|
238 |
+
elif s[0] == 254:
|
239 |
+
#
|
240 |
+
# comment extension
|
241 |
+
#
|
242 |
+
comment = b""
|
243 |
+
|
244 |
+
# Read this comment block
|
245 |
+
while block:
|
246 |
+
comment += block
|
247 |
+
block = self.data()
|
248 |
+
|
249 |
+
if "comment" in info:
|
250 |
+
# If multiple comment blocks in frame, separate with \n
|
251 |
+
info["comment"] += b"\n" + comment
|
252 |
+
else:
|
253 |
+
info["comment"] = comment
|
254 |
+
s = None
|
255 |
+
continue
|
256 |
+
elif s[0] == 255 and frame == 0 and block is not None:
|
257 |
+
#
|
258 |
+
# application extension
|
259 |
+
#
|
260 |
+
info["extension"] = block, self.fp.tell()
|
261 |
+
if block[:11] == b"NETSCAPE2.0":
|
262 |
+
block = self.data()
|
263 |
+
if block and len(block) >= 3 and block[0] == 1:
|
264 |
+
self.info["loop"] = i16(block, 1)
|
265 |
+
while self.data():
|
266 |
+
pass
|
267 |
+
|
268 |
+
elif s == b",":
|
269 |
+
#
|
270 |
+
# local image
|
271 |
+
#
|
272 |
+
s = self.fp.read(9)
|
273 |
+
|
274 |
+
# extent
|
275 |
+
x0, y0 = i16(s, 0), i16(s, 2)
|
276 |
+
x1, y1 = x0 + i16(s, 4), y0 + i16(s, 6)
|
277 |
+
if (x1 > self.size[0] or y1 > self.size[1]) and update_image:
|
278 |
+
self._size = max(x1, self.size[0]), max(y1, self.size[1])
|
279 |
+
Image._decompression_bomb_check(self._size)
|
280 |
+
frame_dispose_extent = x0, y0, x1, y1
|
281 |
+
flags = s[8]
|
282 |
+
|
283 |
+
interlace = (flags & 64) != 0
|
284 |
+
|
285 |
+
if flags & 128:
|
286 |
+
bits = (flags & 7) + 1
|
287 |
+
p = self.fp.read(3 << bits)
|
288 |
+
if self._is_palette_needed(p):
|
289 |
+
palette = ImagePalette.raw("RGB", p)
|
290 |
+
else:
|
291 |
+
palette = False
|
292 |
+
|
293 |
+
# image data
|
294 |
+
bits = self.fp.read(1)[0]
|
295 |
+
self.__offset = self.fp.tell()
|
296 |
+
break
|
297 |
+
s = None
|
298 |
+
|
299 |
+
if interlace is None:
|
300 |
+
msg = "image not found in GIF frame"
|
301 |
+
raise EOFError(msg)
|
302 |
+
|
303 |
+
self.__frame = frame
|
304 |
+
if not update_image:
|
305 |
+
return
|
306 |
+
|
307 |
+
self.tile = []
|
308 |
+
|
309 |
+
if self.dispose:
|
310 |
+
self.im.paste(self.dispose, self.dispose_extent)
|
311 |
+
|
312 |
+
self._frame_palette = palette if palette is not None else self.global_palette
|
313 |
+
self._frame_transparency = frame_transparency
|
314 |
+
if frame == 0:
|
315 |
+
if self._frame_palette:
|
316 |
+
if LOADING_STRATEGY == LoadingStrategy.RGB_ALWAYS:
|
317 |
+
self._mode = "RGBA" if frame_transparency is not None else "RGB"
|
318 |
+
else:
|
319 |
+
self._mode = "P"
|
320 |
+
else:
|
321 |
+
self._mode = "L"
|
322 |
+
|
323 |
+
if not palette and self.global_palette:
|
324 |
+
from copy import copy
|
325 |
+
|
326 |
+
palette = copy(self.global_palette)
|
327 |
+
self.palette = palette
|
328 |
+
else:
|
329 |
+
if self.mode == "P":
|
330 |
+
if (
|
331 |
+
LOADING_STRATEGY != LoadingStrategy.RGB_AFTER_DIFFERENT_PALETTE_ONLY
|
332 |
+
or palette
|
333 |
+
):
|
334 |
+
self.pyaccess = None
|
335 |
+
if "transparency" in self.info:
|
336 |
+
self.im.putpalettealpha(self.info["transparency"], 0)
|
337 |
+
self.im = self.im.convert("RGBA", Image.Dither.FLOYDSTEINBERG)
|
338 |
+
self._mode = "RGBA"
|
339 |
+
del self.info["transparency"]
|
340 |
+
else:
|
341 |
+
self._mode = "RGB"
|
342 |
+
self.im = self.im.convert("RGB", Image.Dither.FLOYDSTEINBERG)
|
343 |
+
|
344 |
+
def _rgb(color: int) -> tuple[int, int, int]:
|
345 |
+
if self._frame_palette:
|
346 |
+
if color * 3 + 3 > len(self._frame_palette.palette):
|
347 |
+
color = 0
|
348 |
+
return tuple(self._frame_palette.palette[color * 3 : color * 3 + 3])
|
349 |
+
else:
|
350 |
+
return (color, color, color)
|
351 |
+
|
352 |
+
self.dispose = None
|
353 |
+
self.dispose_extent = frame_dispose_extent
|
354 |
+
if self.dispose_extent and self.disposal_method >= 2:
|
355 |
+
try:
|
356 |
+
if self.disposal_method == 2:
|
357 |
+
# replace with background colour
|
358 |
+
|
359 |
+
# only dispose the extent in this frame
|
360 |
+
x0, y0, x1, y1 = self.dispose_extent
|
361 |
+
dispose_size = (x1 - x0, y1 - y0)
|
362 |
+
|
363 |
+
Image._decompression_bomb_check(dispose_size)
|
364 |
+
|
365 |
+
# by convention, attempt to use transparency first
|
366 |
+
dispose_mode = "P"
|
367 |
+
color = self.info.get("transparency", frame_transparency)
|
368 |
+
if color is not None:
|
369 |
+
if self.mode in ("RGB", "RGBA"):
|
370 |
+
dispose_mode = "RGBA"
|
371 |
+
color = _rgb(color) + (0,)
|
372 |
+
else:
|
373 |
+
color = self.info.get("background", 0)
|
374 |
+
if self.mode in ("RGB", "RGBA"):
|
375 |
+
dispose_mode = "RGB"
|
376 |
+
color = _rgb(color)
|
377 |
+
self.dispose = Image.core.fill(dispose_mode, dispose_size, color)
|
378 |
+
else:
|
379 |
+
# replace with previous contents
|
380 |
+
if self.im is not None:
|
381 |
+
# only dispose the extent in this frame
|
382 |
+
self.dispose = self._crop(self.im, self.dispose_extent)
|
383 |
+
elif frame_transparency is not None:
|
384 |
+
x0, y0, x1, y1 = self.dispose_extent
|
385 |
+
dispose_size = (x1 - x0, y1 - y0)
|
386 |
+
|
387 |
+
Image._decompression_bomb_check(dispose_size)
|
388 |
+
dispose_mode = "P"
|
389 |
+
color = frame_transparency
|
390 |
+
if self.mode in ("RGB", "RGBA"):
|
391 |
+
dispose_mode = "RGBA"
|
392 |
+
color = _rgb(frame_transparency) + (0,)
|
393 |
+
self.dispose = Image.core.fill(
|
394 |
+
dispose_mode, dispose_size, color
|
395 |
+
)
|
396 |
+
except AttributeError:
|
397 |
+
pass
|
398 |
+
|
399 |
+
if interlace is not None:
|
400 |
+
transparency = -1
|
401 |
+
if frame_transparency is not None:
|
402 |
+
if frame == 0:
|
403 |
+
if LOADING_STRATEGY != LoadingStrategy.RGB_ALWAYS:
|
404 |
+
self.info["transparency"] = frame_transparency
|
405 |
+
elif self.mode not in ("RGB", "RGBA"):
|
406 |
+
transparency = frame_transparency
|
407 |
+
self.tile = [
|
408 |
+
(
|
409 |
+
"gif",
|
410 |
+
(x0, y0, x1, y1),
|
411 |
+
self.__offset,
|
412 |
+
(bits, interlace, transparency),
|
413 |
+
)
|
414 |
+
]
|
415 |
+
|
416 |
+
if info.get("comment"):
|
417 |
+
self.info["comment"] = info["comment"]
|
418 |
+
for k in ["duration", "extension"]:
|
419 |
+
if k in info:
|
420 |
+
self.info[k] = info[k]
|
421 |
+
elif k in self.info:
|
422 |
+
del self.info[k]
|
423 |
+
|
424 |
+
def load_prepare(self) -> None:
|
425 |
+
temp_mode = "P" if self._frame_palette else "L"
|
426 |
+
self._prev_im = None
|
427 |
+
if self.__frame == 0:
|
428 |
+
if self._frame_transparency is not None:
|
429 |
+
self.im = Image.core.fill(
|
430 |
+
temp_mode, self.size, self._frame_transparency
|
431 |
+
)
|
432 |
+
elif self.mode in ("RGB", "RGBA"):
|
433 |
+
self._prev_im = self.im
|
434 |
+
if self._frame_palette:
|
435 |
+
self.im = Image.core.fill("P", self.size, self._frame_transparency or 0)
|
436 |
+
self.im.putpalette("RGB", *self._frame_palette.getdata())
|
437 |
+
else:
|
438 |
+
self.im = None
|
439 |
+
self._mode = temp_mode
|
440 |
+
self._frame_palette = None
|
441 |
+
|
442 |
+
super().load_prepare()
|
443 |
+
|
444 |
+
def load_end(self) -> None:
|
445 |
+
if self.__frame == 0:
|
446 |
+
if self.mode == "P" and LOADING_STRATEGY == LoadingStrategy.RGB_ALWAYS:
|
447 |
+
if self._frame_transparency is not None:
|
448 |
+
self.im.putpalettealpha(self._frame_transparency, 0)
|
449 |
+
self._mode = "RGBA"
|
450 |
+
else:
|
451 |
+
self._mode = "RGB"
|
452 |
+
self.im = self.im.convert(self.mode, Image.Dither.FLOYDSTEINBERG)
|
453 |
+
return
|
454 |
+
if not self._prev_im:
|
455 |
+
return
|
456 |
+
if self._frame_transparency is not None:
|
457 |
+
self.im.putpalettealpha(self._frame_transparency, 0)
|
458 |
+
frame_im = self.im.convert("RGBA")
|
459 |
+
else:
|
460 |
+
frame_im = self.im.convert("RGB")
|
461 |
+
|
462 |
+
assert self.dispose_extent is not None
|
463 |
+
frame_im = self._crop(frame_im, self.dispose_extent)
|
464 |
+
|
465 |
+
self.im = self._prev_im
|
466 |
+
self._mode = self.im.mode
|
467 |
+
if frame_im.mode == "RGBA":
|
468 |
+
self.im.paste(frame_im, self.dispose_extent, frame_im)
|
469 |
+
else:
|
470 |
+
self.im.paste(frame_im, self.dispose_extent)
|
471 |
+
|
472 |
+
def tell(self) -> int:
|
473 |
+
return self.__frame
|
474 |
+
|
475 |
+
|
476 |
+
# --------------------------------------------------------------------
|
477 |
+
# Write GIF files
|
478 |
+
|
479 |
+
|
480 |
+
RAWMODE = {"1": "L", "L": "L", "P": "P"}
|
481 |
+
|
482 |
+
|
483 |
+
def _normalize_mode(im: Image.Image) -> Image.Image:
|
484 |
+
"""
|
485 |
+
Takes an image (or frame), returns an image in a mode that is appropriate
|
486 |
+
for saving in a Gif.
|
487 |
+
|
488 |
+
It may return the original image, or it may return an image converted to
|
489 |
+
palette or 'L' mode.
|
490 |
+
|
491 |
+
:param im: Image object
|
492 |
+
:returns: Image object
|
493 |
+
"""
|
494 |
+
if im.mode in RAWMODE:
|
495 |
+
im.load()
|
496 |
+
return im
|
497 |
+
if Image.getmodebase(im.mode) == "RGB":
|
498 |
+
im = im.convert("P", palette=Image.Palette.ADAPTIVE)
|
499 |
+
if im.palette.mode == "RGBA":
|
500 |
+
for rgba in im.palette.colors:
|
501 |
+
if rgba[3] == 0:
|
502 |
+
im.info["transparency"] = im.palette.colors[rgba]
|
503 |
+
break
|
504 |
+
return im
|
505 |
+
return im.convert("L")
|
506 |
+
|
507 |
+
|
508 |
+
_Palette = Union[bytes, bytearray, List[int], ImagePalette.ImagePalette]
|
509 |
+
|
510 |
+
|
511 |
+
def _normalize_palette(
|
512 |
+
im: Image.Image, palette: _Palette | None, info: dict[str, Any]
|
513 |
+
) -> Image.Image:
|
514 |
+
"""
|
515 |
+
Normalizes the palette for image.
|
516 |
+
- Sets the palette to the incoming palette, if provided.
|
517 |
+
- Ensures that there's a palette for L mode images
|
518 |
+
- Optimizes the palette if necessary/desired.
|
519 |
+
|
520 |
+
:param im: Image object
|
521 |
+
:param palette: bytes object containing the source palette, or ....
|
522 |
+
:param info: encoderinfo
|
523 |
+
:returns: Image object
|
524 |
+
"""
|
525 |
+
source_palette = None
|
526 |
+
if palette:
|
527 |
+
# a bytes palette
|
528 |
+
if isinstance(palette, (bytes, bytearray, list)):
|
529 |
+
source_palette = bytearray(palette[:768])
|
530 |
+
if isinstance(palette, ImagePalette.ImagePalette):
|
531 |
+
source_palette = bytearray(palette.palette)
|
532 |
+
|
533 |
+
if im.mode == "P":
|
534 |
+
if not source_palette:
|
535 |
+
source_palette = im.im.getpalette("RGB")[:768]
|
536 |
+
else: # L-mode
|
537 |
+
if not source_palette:
|
538 |
+
source_palette = bytearray(i // 3 for i in range(768))
|
539 |
+
im.palette = ImagePalette.ImagePalette("RGB", palette=source_palette)
|
540 |
+
|
541 |
+
used_palette_colors: list[int] | None
|
542 |
+
if palette:
|
543 |
+
used_palette_colors = []
|
544 |
+
assert source_palette is not None
|
545 |
+
for i in range(0, len(source_palette), 3):
|
546 |
+
source_color = tuple(source_palette[i : i + 3])
|
547 |
+
index = im.palette.colors.get(source_color)
|
548 |
+
if index in used_palette_colors:
|
549 |
+
index = None
|
550 |
+
used_palette_colors.append(index)
|
551 |
+
for i, index in enumerate(used_palette_colors):
|
552 |
+
if index is None:
|
553 |
+
for j in range(len(used_palette_colors)):
|
554 |
+
if j not in used_palette_colors:
|
555 |
+
used_palette_colors[i] = j
|
556 |
+
break
|
557 |
+
im = im.remap_palette(used_palette_colors)
|
558 |
+
else:
|
559 |
+
used_palette_colors = _get_optimize(im, info)
|
560 |
+
if used_palette_colors is not None:
|
561 |
+
im = im.remap_palette(used_palette_colors, source_palette)
|
562 |
+
if "transparency" in info:
|
563 |
+
try:
|
564 |
+
info["transparency"] = used_palette_colors.index(
|
565 |
+
info["transparency"]
|
566 |
+
)
|
567 |
+
except ValueError:
|
568 |
+
del info["transparency"]
|
569 |
+
return im
|
570 |
+
|
571 |
+
im.palette.palette = source_palette
|
572 |
+
return im
|
573 |
+
|
574 |
+
|
575 |
+
def _write_single_frame(
|
576 |
+
im: Image.Image,
|
577 |
+
fp: IO[bytes],
|
578 |
+
palette: _Palette | None,
|
579 |
+
) -> None:
|
580 |
+
im_out = _normalize_mode(im)
|
581 |
+
for k, v in im_out.info.items():
|
582 |
+
im.encoderinfo.setdefault(k, v)
|
583 |
+
im_out = _normalize_palette(im_out, palette, im.encoderinfo)
|
584 |
+
|
585 |
+
for s in _get_global_header(im_out, im.encoderinfo):
|
586 |
+
fp.write(s)
|
587 |
+
|
588 |
+
# local image header
|
589 |
+
flags = 0
|
590 |
+
if get_interlace(im):
|
591 |
+
flags = flags | 64
|
592 |
+
_write_local_header(fp, im, (0, 0), flags)
|
593 |
+
|
594 |
+
im_out.encoderconfig = (8, get_interlace(im))
|
595 |
+
ImageFile._save(im_out, fp, [("gif", (0, 0) + im.size, 0, RAWMODE[im_out.mode])])
|
596 |
+
|
597 |
+
fp.write(b"\0") # end of image data
|
598 |
+
|
599 |
+
|
600 |
+
def _getbbox(
|
601 |
+
base_im: Image.Image, im_frame: Image.Image
|
602 |
+
) -> tuple[Image.Image, tuple[int, int, int, int] | None]:
|
603 |
+
if _get_palette_bytes(im_frame) != _get_palette_bytes(base_im):
|
604 |
+
im_frame = im_frame.convert("RGBA")
|
605 |
+
base_im = base_im.convert("RGBA")
|
606 |
+
delta = ImageChops.subtract_modulo(im_frame, base_im)
|
607 |
+
return delta, delta.getbbox(alpha_only=False)
|
608 |
+
|
609 |
+
|
610 |
+
class _Frame(NamedTuple):
|
611 |
+
im: Image.Image
|
612 |
+
bbox: tuple[int, int, int, int] | None
|
613 |
+
encoderinfo: dict[str, Any]
|
614 |
+
|
615 |
+
|
616 |
+
def _write_multiple_frames(
|
617 |
+
im: Image.Image, fp: IO[bytes], palette: _Palette | None
|
618 |
+
) -> bool:
|
619 |
+
duration = im.encoderinfo.get("duration")
|
620 |
+
disposal = im.encoderinfo.get("disposal", im.info.get("disposal"))
|
621 |
+
|
622 |
+
im_frames: list[_Frame] = []
|
623 |
+
previous_im: Image.Image | None = None
|
624 |
+
frame_count = 0
|
625 |
+
background_im = None
|
626 |
+
for imSequence in itertools.chain([im], im.encoderinfo.get("append_images", [])):
|
627 |
+
for im_frame in ImageSequence.Iterator(imSequence):
|
628 |
+
# a copy is required here since seek can still mutate the image
|
629 |
+
im_frame = _normalize_mode(im_frame.copy())
|
630 |
+
if frame_count == 0:
|
631 |
+
for k, v in im_frame.info.items():
|
632 |
+
if k == "transparency":
|
633 |
+
continue
|
634 |
+
im.encoderinfo.setdefault(k, v)
|
635 |
+
|
636 |
+
encoderinfo = im.encoderinfo.copy()
|
637 |
+
if "transparency" in im_frame.info:
|
638 |
+
encoderinfo.setdefault("transparency", im_frame.info["transparency"])
|
639 |
+
im_frame = _normalize_palette(im_frame, palette, encoderinfo)
|
640 |
+
if isinstance(duration, (list, tuple)):
|
641 |
+
encoderinfo["duration"] = duration[frame_count]
|
642 |
+
elif duration is None and "duration" in im_frame.info:
|
643 |
+
encoderinfo["duration"] = im_frame.info["duration"]
|
644 |
+
if isinstance(disposal, (list, tuple)):
|
645 |
+
encoderinfo["disposal"] = disposal[frame_count]
|
646 |
+
frame_count += 1
|
647 |
+
|
648 |
+
diff_frame = None
|
649 |
+
if im_frames and previous_im:
|
650 |
+
# delta frame
|
651 |
+
delta, bbox = _getbbox(previous_im, im_frame)
|
652 |
+
if not bbox:
|
653 |
+
# This frame is identical to the previous frame
|
654 |
+
if encoderinfo.get("duration"):
|
655 |
+
im_frames[-1].encoderinfo["duration"] += encoderinfo["duration"]
|
656 |
+
continue
|
657 |
+
if im_frames[-1].encoderinfo.get("disposal") == 2:
|
658 |
+
if background_im is None:
|
659 |
+
color = im.encoderinfo.get(
|
660 |
+
"transparency", im.info.get("transparency", (0, 0, 0))
|
661 |
+
)
|
662 |
+
background = _get_background(im_frame, color)
|
663 |
+
background_im = Image.new("P", im_frame.size, background)
|
664 |
+
background_im.putpalette(im_frames[0].im.palette)
|
665 |
+
bbox = _getbbox(background_im, im_frame)[1]
|
666 |
+
elif encoderinfo.get("optimize") and im_frame.mode != "1":
|
667 |
+
if "transparency" not in encoderinfo:
|
668 |
+
try:
|
669 |
+
encoderinfo["transparency"] = (
|
670 |
+
im_frame.palette._new_color_index(im_frame)
|
671 |
+
)
|
672 |
+
except ValueError:
|
673 |
+
pass
|
674 |
+
if "transparency" in encoderinfo:
|
675 |
+
# When the delta is zero, fill the image with transparency
|
676 |
+
diff_frame = im_frame.copy()
|
677 |
+
fill = Image.new("P", delta.size, encoderinfo["transparency"])
|
678 |
+
if delta.mode == "RGBA":
|
679 |
+
r, g, b, a = delta.split()
|
680 |
+
mask = ImageMath.lambda_eval(
|
681 |
+
lambda args: args["convert"](
|
682 |
+
args["max"](
|
683 |
+
args["max"](
|
684 |
+
args["max"](args["r"], args["g"]), args["b"]
|
685 |
+
),
|
686 |
+
args["a"],
|
687 |
+
)
|
688 |
+
* 255,
|
689 |
+
"1",
|
690 |
+
),
|
691 |
+
r=r,
|
692 |
+
g=g,
|
693 |
+
b=b,
|
694 |
+
a=a,
|
695 |
+
)
|
696 |
+
else:
|
697 |
+
if delta.mode == "P":
|
698 |
+
# Convert to L without considering palette
|
699 |
+
delta_l = Image.new("L", delta.size)
|
700 |
+
delta_l.putdata(delta.getdata())
|
701 |
+
delta = delta_l
|
702 |
+
mask = ImageMath.lambda_eval(
|
703 |
+
lambda args: args["convert"](args["im"] * 255, "1"),
|
704 |
+
im=delta,
|
705 |
+
)
|
706 |
+
diff_frame.paste(fill, mask=ImageOps.invert(mask))
|
707 |
+
else:
|
708 |
+
bbox = None
|
709 |
+
previous_im = im_frame
|
710 |
+
im_frames.append(_Frame(diff_frame or im_frame, bbox, encoderinfo))
|
711 |
+
|
712 |
+
if len(im_frames) == 1:
|
713 |
+
if "duration" in im.encoderinfo:
|
714 |
+
# Since multiple frames will not be written, use the combined duration
|
715 |
+
im.encoderinfo["duration"] = im_frames[0].encoderinfo["duration"]
|
716 |
+
return False
|
717 |
+
|
718 |
+
for frame_data in im_frames:
|
719 |
+
im_frame = frame_data.im
|
720 |
+
if not frame_data.bbox:
|
721 |
+
# global header
|
722 |
+
for s in _get_global_header(im_frame, frame_data.encoderinfo):
|
723 |
+
fp.write(s)
|
724 |
+
offset = (0, 0)
|
725 |
+
else:
|
726 |
+
# compress difference
|
727 |
+
if not palette:
|
728 |
+
frame_data.encoderinfo["include_color_table"] = True
|
729 |
+
|
730 |
+
im_frame = im_frame.crop(frame_data.bbox)
|
731 |
+
offset = frame_data.bbox[:2]
|
732 |
+
_write_frame_data(fp, im_frame, offset, frame_data.encoderinfo)
|
733 |
+
return True
|
734 |
+
|
735 |
+
|
736 |
+
def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
|
737 |
+
_save(im, fp, filename, save_all=True)
|
738 |
+
|
739 |
+
|
740 |
+
def _save(
|
741 |
+
im: Image.Image, fp: IO[bytes], filename: str | bytes, save_all: bool = False
|
742 |
+
) -> None:
|
743 |
+
# header
|
744 |
+
if "palette" in im.encoderinfo or "palette" in im.info:
|
745 |
+
palette = im.encoderinfo.get("palette", im.info.get("palette"))
|
746 |
+
else:
|
747 |
+
palette = None
|
748 |
+
im.encoderinfo.setdefault("optimize", True)
|
749 |
+
|
750 |
+
if not save_all or not _write_multiple_frames(im, fp, palette):
|
751 |
+
_write_single_frame(im, fp, palette)
|
752 |
+
|
753 |
+
fp.write(b";") # end of file
|
754 |
+
|
755 |
+
if hasattr(fp, "flush"):
|
756 |
+
fp.flush()
|
757 |
+
|
758 |
+
|
759 |
+
def get_interlace(im: Image.Image) -> int:
|
760 |
+
interlace = im.encoderinfo.get("interlace", 1)
|
761 |
+
|
762 |
+
# workaround for @PIL153
|
763 |
+
if min(im.size) < 16:
|
764 |
+
interlace = 0
|
765 |
+
|
766 |
+
return interlace
|
767 |
+
|
768 |
+
|
769 |
+
def _write_local_header(
|
770 |
+
fp: IO[bytes], im: Image.Image, offset: tuple[int, int], flags: int
|
771 |
+
) -> None:
|
772 |
+
try:
|
773 |
+
transparency = im.encoderinfo["transparency"]
|
774 |
+
except KeyError:
|
775 |
+
transparency = None
|
776 |
+
|
777 |
+
if "duration" in im.encoderinfo:
|
778 |
+
duration = int(im.encoderinfo["duration"] / 10)
|
779 |
+
else:
|
780 |
+
duration = 0
|
781 |
+
|
782 |
+
disposal = int(im.encoderinfo.get("disposal", 0))
|
783 |
+
|
784 |
+
if transparency is not None or duration != 0 or disposal:
|
785 |
+
packed_flag = 1 if transparency is not None else 0
|
786 |
+
packed_flag |= disposal << 2
|
787 |
+
|
788 |
+
fp.write(
|
789 |
+
b"!"
|
790 |
+
+ o8(249) # extension intro
|
791 |
+
+ o8(4) # length
|
792 |
+
+ o8(packed_flag) # packed fields
|
793 |
+
+ o16(duration) # duration
|
794 |
+
+ o8(transparency or 0) # transparency index
|
795 |
+
+ o8(0)
|
796 |
+
)
|
797 |
+
|
798 |
+
include_color_table = im.encoderinfo.get("include_color_table")
|
799 |
+
if include_color_table:
|
800 |
+
palette_bytes = _get_palette_bytes(im)
|
801 |
+
color_table_size = _get_color_table_size(palette_bytes)
|
802 |
+
if color_table_size:
|
803 |
+
flags = flags | 128 # local color table flag
|
804 |
+
flags = flags | color_table_size
|
805 |
+
|
806 |
+
fp.write(
|
807 |
+
b","
|
808 |
+
+ o16(offset[0]) # offset
|
809 |
+
+ o16(offset[1])
|
810 |
+
+ o16(im.size[0]) # size
|
811 |
+
+ o16(im.size[1])
|
812 |
+
+ o8(flags) # flags
|
813 |
+
)
|
814 |
+
if include_color_table and color_table_size:
|
815 |
+
fp.write(_get_header_palette(palette_bytes))
|
816 |
+
fp.write(o8(8)) # bits
|
817 |
+
|
818 |
+
|
819 |
+
def _save_netpbm(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
|
820 |
+
# Unused by default.
|
821 |
+
# To use, uncomment the register_save call at the end of the file.
|
822 |
+
#
|
823 |
+
# If you need real GIF compression and/or RGB quantization, you
|
824 |
+
# can use the external NETPBM/PBMPLUS utilities. See comments
|
825 |
+
# below for information on how to enable this.
|
826 |
+
tempfile = im._dump()
|
827 |
+
|
828 |
+
try:
|
829 |
+
with open(filename, "wb") as f:
|
830 |
+
if im.mode != "RGB":
|
831 |
+
subprocess.check_call(
|
832 |
+
["ppmtogif", tempfile], stdout=f, stderr=subprocess.DEVNULL
|
833 |
+
)
|
834 |
+
else:
|
835 |
+
# Pipe ppmquant output into ppmtogif
|
836 |
+
# "ppmquant 256 %s | ppmtogif > %s" % (tempfile, filename)
|
837 |
+
quant_cmd = ["ppmquant", "256", tempfile]
|
838 |
+
togif_cmd = ["ppmtogif"]
|
839 |
+
quant_proc = subprocess.Popen(
|
840 |
+
quant_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL
|
841 |
+
)
|
842 |
+
togif_proc = subprocess.Popen(
|
843 |
+
togif_cmd,
|
844 |
+
stdin=quant_proc.stdout,
|
845 |
+
stdout=f,
|
846 |
+
stderr=subprocess.DEVNULL,
|
847 |
+
)
|
848 |
+
|
849 |
+
# Allow ppmquant to receive SIGPIPE if ppmtogif exits
|
850 |
+
assert quant_proc.stdout is not None
|
851 |
+
quant_proc.stdout.close()
|
852 |
+
|
853 |
+
retcode = quant_proc.wait()
|
854 |
+
if retcode:
|
855 |
+
raise subprocess.CalledProcessError(retcode, quant_cmd)
|
856 |
+
|
857 |
+
retcode = togif_proc.wait()
|
858 |
+
if retcode:
|
859 |
+
raise subprocess.CalledProcessError(retcode, togif_cmd)
|
860 |
+
finally:
|
861 |
+
try:
|
862 |
+
os.unlink(tempfile)
|
863 |
+
except OSError:
|
864 |
+
pass
|
865 |
+
|
866 |
+
|
867 |
+
# Force optimization so that we can test performance against
|
868 |
+
# cases where it took lots of memory and time previously.
|
869 |
+
_FORCE_OPTIMIZE = False
|
870 |
+
|
871 |
+
|
872 |
+
def _get_optimize(im: Image.Image, info: dict[str, Any]) -> list[int] | None:
|
873 |
+
"""
|
874 |
+
Palette optimization is a potentially expensive operation.
|
875 |
+
|
876 |
+
This function determines if the palette should be optimized using
|
877 |
+
some heuristics, then returns the list of palette entries in use.
|
878 |
+
|
879 |
+
:param im: Image object
|
880 |
+
:param info: encoderinfo
|
881 |
+
:returns: list of indexes of palette entries in use, or None
|
882 |
+
"""
|
883 |
+
if im.mode in ("P", "L") and info and info.get("optimize"):
|
884 |
+
# Potentially expensive operation.
|
885 |
+
|
886 |
+
# The palette saves 3 bytes per color not used, but palette
|
887 |
+
# lengths are restricted to 3*(2**N) bytes. Max saving would
|
888 |
+
# be 768 -> 6 bytes if we went all the way down to 2 colors.
|
889 |
+
# * If we're over 128 colors, we can't save any space.
|
890 |
+
# * If there aren't any holes, it's not worth collapsing.
|
891 |
+
# * If we have a 'large' image, the palette is in the noise.
|
892 |
+
|
893 |
+
# create the new palette if not every color is used
|
894 |
+
optimise = _FORCE_OPTIMIZE or im.mode == "L"
|
895 |
+
if optimise or im.width * im.height < 512 * 512:
|
896 |
+
# check which colors are used
|
897 |
+
used_palette_colors = []
|
898 |
+
for i, count in enumerate(im.histogram()):
|
899 |
+
if count:
|
900 |
+
used_palette_colors.append(i)
|
901 |
+
|
902 |
+
if optimise or max(used_palette_colors) >= len(used_palette_colors):
|
903 |
+
return used_palette_colors
|
904 |
+
|
905 |
+
num_palette_colors = len(im.palette.palette) // Image.getmodebands(
|
906 |
+
im.palette.mode
|
907 |
+
)
|
908 |
+
current_palette_size = 1 << (num_palette_colors - 1).bit_length()
|
909 |
+
if (
|
910 |
+
# check that the palette would become smaller when saved
|
911 |
+
len(used_palette_colors) <= current_palette_size // 2
|
912 |
+
# check that the palette is not already the smallest possible size
|
913 |
+
and current_palette_size > 2
|
914 |
+
):
|
915 |
+
return used_palette_colors
|
916 |
+
return None
|
917 |
+
|
918 |
+
|
919 |
+
def _get_color_table_size(palette_bytes: bytes) -> int:
|
920 |
+
# calculate the palette size for the header
|
921 |
+
if not palette_bytes:
|
922 |
+
return 0
|
923 |
+
elif len(palette_bytes) < 9:
|
924 |
+
return 1
|
925 |
+
else:
|
926 |
+
return math.ceil(math.log(len(palette_bytes) // 3, 2)) - 1
|
927 |
+
|
928 |
+
|
929 |
+
def _get_header_palette(palette_bytes: bytes) -> bytes:
|
930 |
+
"""
|
931 |
+
Returns the palette, null padded to the next power of 2 (*3) bytes
|
932 |
+
suitable for direct inclusion in the GIF header
|
933 |
+
|
934 |
+
:param palette_bytes: Unpadded palette bytes, in RGBRGB form
|
935 |
+
:returns: Null padded palette
|
936 |
+
"""
|
937 |
+
color_table_size = _get_color_table_size(palette_bytes)
|
938 |
+
|
939 |
+
# add the missing amount of bytes
|
940 |
+
# the palette has to be 2<<n in size
|
941 |
+
actual_target_size_diff = (2 << color_table_size) - len(palette_bytes) // 3
|
942 |
+
if actual_target_size_diff > 0:
|
943 |
+
palette_bytes += o8(0) * 3 * actual_target_size_diff
|
944 |
+
return palette_bytes
|
945 |
+
|
946 |
+
|
947 |
+
def _get_palette_bytes(im: Image.Image) -> bytes:
|
948 |
+
"""
|
949 |
+
Gets the palette for inclusion in the gif header
|
950 |
+
|
951 |
+
:param im: Image object
|
952 |
+
:returns: Bytes, len<=768 suitable for inclusion in gif header
|
953 |
+
"""
|
954 |
+
return im.palette.palette if im.palette else b""
|
955 |
+
|
956 |
+
|
957 |
+
def _get_background(
|
958 |
+
im: Image.Image,
|
959 |
+
info_background: int | tuple[int, int, int] | tuple[int, int, int, int] | None,
|
960 |
+
) -> int:
|
961 |
+
background = 0
|
962 |
+
if info_background:
|
963 |
+
if isinstance(info_background, tuple):
|
964 |
+
# WebPImagePlugin stores an RGBA value in info["background"]
|
965 |
+
# So it must be converted to the same format as GifImagePlugin's
|
966 |
+
# info["background"] - a global color table index
|
967 |
+
try:
|
968 |
+
background = im.palette.getcolor(info_background, im)
|
969 |
+
except ValueError as e:
|
970 |
+
if str(e) not in (
|
971 |
+
# If all 256 colors are in use,
|
972 |
+
# then there is no need for the background color
|
973 |
+
"cannot allocate more than 256 colors",
|
974 |
+
# Ignore non-opaque WebP background
|
975 |
+
"cannot add non-opaque RGBA color to RGB palette",
|
976 |
+
):
|
977 |
+
raise
|
978 |
+
else:
|
979 |
+
background = info_background
|
980 |
+
return background
|
981 |
+
|
982 |
+
|
983 |
+
def _get_global_header(im: Image.Image, info: dict[str, Any]) -> list[bytes]:
    """Return a list of strings representing a GIF header"""

    # Header Block
    # https://www.matthewflickinger.com/lab/whatsinagif/bits_and_bytes.asp

    # GIF89a is only required when 89a-only features are actually in use;
    # otherwise the more widely supported 87a signature is emitted.
    uses_89a_features = bool(info) and (
        "transparency" in info
        or info.get("loop") is not None
        or bool(info.get("duration"))
        or bool(info.get("comment"))
    )
    if im.info.get("version") == b"89a" or uses_89a_features:
        version = b"89a"
    else:
        version = b"87a"

    background = _get_background(im, info.get("background"))

    palette_bytes = _get_palette_bytes(im)
    color_table_size = _get_color_table_size(palette_bytes)

    header = [
        # signature + version + canvas width/height
        b"GIF" + version + o16(im.size[0]) + o16(im.size[1]),
        # Logical Screen Descriptor
        # size of global color table + global color table flag
        o8(color_table_size + 128),  # packed fields
        # background + reserved/aspect
        o8(background) + o8(0),
        # Global Color Table
        _get_header_palette(palette_bytes),
    ]

    if info.get("loop") is not None:
        # NETSCAPE2.0 application extension block carrying the loop count
        loop_block = b"!" + o8(255) + o8(11) + b"NETSCAPE2.0"
        loop_block += o8(3) + o8(1) + o16(info["loop"]) + o8(0)
        header.append(loop_block)

    if info.get("comment"):
        comment = info["comment"]
        if isinstance(comment, str):
            comment = comment.encode()
        # Comment extension: data split into 255-byte length-prefixed
        # sub-blocks, terminated by a zero-length block.
        comment_block = b"!" + o8(254)  # extension intro
        for start in range(0, len(comment), 255):
            chunk = comment[start : start + 255]
            comment_block += o8(len(chunk)) + chunk
        comment_block += o8(0)
        header.append(comment_block)

    return header
1044 |
+
|
1045 |
+
def _write_frame_data(
    fp: IO[bytes],
    im_frame: Image.Image,
    offset: tuple[int, int],
    params: dict[str, Any],
) -> None:
    # Write one GIF frame (local image header + encoded image data) to fp.
    # `params` is installed as the frame's encoderinfo for the duration of
    # the save so the encoder can read its settings from the image object.
    try:
        im_frame.encoderinfo = params

        # local image header
        _write_local_header(fp, im_frame, offset, 0)

        ImageFile._save(
            im_frame, fp, [("gif", (0, 0) + im_frame.size, 0, RAWMODE[im_frame.mode])]
        )

        fp.write(b"\0")  # end of image data
    finally:
        # always remove the temporary attribute, even if encoding raised
        del im_frame.encoderinfo
|
1065 |
+
|
1066 |
+
# --------------------------------------------------------------------
|
1067 |
+
# Legacy GIF utilities
|
1068 |
+
|
1069 |
+
|
1070 |
+
def getheader(
    im: Image.Image, palette: _Palette | None = None, info: dict[str, Any] | None = None
) -> tuple[list[bytes], list[int] | None]:
    """
    Legacy Method to get Gif data from image.

    Warning:: May modify image data.

    :param im: Image object
    :param palette: bytes object containing the source palette, or ....
    :param info: encoderinfo
    :returns: tuple of(list of header items, optimized palette)

    """
    info = {} if info is None else info

    used_palette_colors = _get_optimize(im, info)

    # fall back to the image's own background when the caller gave none
    if "background" not in info and "background" in im.info:
        info["background"] = im.info["background"]

    # normalize the palette and adopt the result in place on `im`
    # (this is why the method may modify image data)
    im_mod = _normalize_palette(im, palette, info)
    im.palette = im_mod.palette
    im.im = im_mod.im

    return _get_global_header(im, info), used_palette_colors
|
1099 |
+
|
1100 |
+
def getdata(
    im: Image.Image, offset: tuple[int, int] = (0, 0), **params: Any
) -> list[bytes]:
    """
    Legacy Method

    Return a list of strings representing this image.
    The first string is a local image header, the rest contains
    encoded image data.

    To specify duration, add the time in milliseconds,
    e.g. ``getdata(im_frame, duration=1000)``

    :param im: Image object
    :param offset: Tuple of (x, y) pixels. Defaults to (0, 0)
    :param \\**params: e.g. duration or other encoder info parameters
    :returns: List of bytes containing GIF encoded frame data

    """
    from io import BytesIO

    # Collect each write() payload as a separate list element instead of
    # appending to the underlying buffer, so the header and data chunks
    # can be returned individually.  The class-level `data` list is fresh
    # for every call because Collector is redefined inside this function.
    class Collector(BytesIO):
        data = []

        if sys.version_info >= (3, 12):
            from collections.abc import Buffer

            def write(self, data: Buffer) -> int:
                self.data.append(data)
                return len(data)

        else:

            def write(self, data: Any) -> int:
                self.data.append(data)
                return len(data)

    im.load()  # make sure raster data is available

    fp = Collector()

    _write_frame_data(fp, im, offset, params)

    return fp.data
|
1145 |
+
|
1146 |
+
# --------------------------------------------------------------------
# Registry
# Hook this plugin into Image's open/save dispatch for the GIF format.

Image.register_open(GifImageFile.format, GifImageFile, _accept)
Image.register_save(GifImageFile.format, _save)
Image.register_save_all(GifImageFile.format, _save_all)
Image.register_extension(GifImageFile.format, ".gif")
Image.register_mime(GifImageFile.format, "image/gif")

#
# Uncomment the following line if you wish to use NETPBM/PBMPLUS
# instead of the built-in "uncompressed" GIF encoder

# Image.register_save(GifImageFile.format, _save_netpbm)