Upload Joycaption_Alpha_One.ipynb
Joycaption_Alpha_One.ipynb
CHANGED
@@ -1 +1 @@
{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739735627072}],"gpuType":"T4"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU","widgets":{"application/vnd.jupyter.widget-state+json":{"b89006df8aee42ecaf385b89e5df4acf":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_58970cc506754327b9c1c2586b5519f5","IPY_MODEL_648b1c4e67084eecbc207c65bccaff9c","IPY_MODEL_2a2bb4f2290d4df1963c54f9150c7870"],"layout":"IPY_MODEL_481634b2c06f4d6f9645e86f2cf0f3c3"}},"58970cc506754327b9c1c2586b5519f5":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_616bab3bd7f145d79412a8dc2cd97af7","placeholder":"","style":"IPY_MODEL_c8e0e598b7724db0893f96298b51b652","value":"config.json: 100%"}},"648b1c4e67084eecbc207c65bccaff9c":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_055de6f70357469d85e4c8695705ed7f","max":1523,"min":0,"orientation":"horizontal","style":"IPY_MODEL_5fc17e37d17049a5bbae17151a6ac0ab","value":1523}},"2a2bb4f2290d4df1963c54f9150c7870":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_a742183fb23241db8c717bffc88ad11c","placeholder":"","style":"IPY_MODEL_6d6a8e3473d74352b77056f07c812c61","value":" 1.52k/1.52k [00:00<00:00, 
92.3kB/s]"}},"481634b2c06f4d6f9645e86f2cf0f3c3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"616bab3bd7f145d79412a8dc2cd97af7":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c8e0e598b7724db0893f96298b51b652":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"055de6f70357469d85e4c8695705ed7f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5fc17e37d17049a5bb
ae17151a6ac0ab":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"a742183fb23241db8c717bffc88ad11c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6d6a8e3473d74352b77056f07c812c61":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"2153314e67304a77b518a65ca9c34a4d":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_37ec7b26f5f54b65962b896d156e27e1","IPY_MODEL_03eba632f74744aaadc30435a4766d9e","IPY_MODEL_4f64e9ed0ac2497fa7dfa5ff73a1d2bc"],"layout":"IPY_MODEL_3d989f76f97142e99152327b3a1a91a4"}},"37ec7b26f5f54b65962b896d156e27e1":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_082fbc7e17134b7a8ebfb1a0f04e1933","placeholder":"","style":"IPY_MODEL_fe328b7afea347aebc8beef40122fd3a","value":"model.safetensors: 
100%"}},"03eba632f74744aaadc30435a4766d9e":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_6a6b435daed54dafad714bd4c5577f1a","max":5702746390,"min":0,"orientation":"horizontal","style":"IPY_MODEL_e2b39602d3ea47fa9b1b38470386bbba","value":5702746390}},"4f64e9ed0ac2497fa7dfa5ff73a1d2bc":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_a43970bd01864771abbea368623242b9","placeholder":"","style":"IPY_MODEL_5637508040014a1cab3274eebe522670","value":" 5.70G/5.70G [02:15<00:00, 42.7MB/s]"}},"3d989f76f97142e99152327b3a1a91a4":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"082fbc7e17134b7a8ebfb1a0f04e1933":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"fe328b7afea347aebc8beef40122fd3a":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":
"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6a6b435daed54dafad714bd4c5577f1a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e2b39602d3ea47fa9b1b38470386bbba":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"a43970bd01864771abbea368623242b9":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5637508040014a1cab3274eebe522670":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"bc73094764b24d669a84aea3305dc01b":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_0a4d96f78b1c4d87b7bd17a15f5b4725","IPY_MODEL_8
2239a97c3db42518a02e8780b898e00","IPY_MODEL_882dbf9d286f4e268b8e8708611b7f97"],"layout":"IPY_MODEL_33dd123118e4456fbe120aaec9d289e2"}},"0a4d96f78b1c4d87b7bd17a15f5b4725":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_6658b7dd85024c4abe959f80621a9af2","placeholder":"","style":"IPY_MODEL_4e227b5514374035a692e8c6ee354b34","value":"generation_config.json: 100%"}},"82239a97c3db42518a02e8780b898e00":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_993b8c406d5343f99749d14d017adff2","max":235,"min":0,"orientation":"horizontal","style":"IPY_MODEL_9c13629a76a74782801ecee67709d8d5","value":235}},"882dbf9d286f4e268b8e8708611b7f97":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_9a5bebb819c04e3487951234198eca4a","placeholder":"","style":"IPY_MODEL_57e1f17948d64761b43c9762b82398af","value":" 235/235 [00:00<00:00, 
17.4kB/s]"}},"33dd123118e4456fbe120aaec9d289e2":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6658b7dd85024c4abe959f80621a9af2":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4e227b5514374035a692e8c6ee354b34":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"993b8c406d5343f99749d14d017adff2":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"9c13629a76a7478280
1ecee67709d8d5":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"9a5bebb819c04e3487951234198eca4a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"57e1f17948d64761b43c9762b82398af":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"cells":[{"cell_type":"code","execution_count":1,"metadata":{"id":"Dwr7gk5OwuGC","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1739734102932,"user_tz":-60,"elapsed":18853,"user":{"displayName":"","userId":""}},"outputId":"4178d27e-4276-4d40-98d8-b0d1428ddef7"},"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","source":["!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","!pip install peft bitsandbytes\n","from huggingface_hub import InferenceClient\n","from torch import nn\n","from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, 
PreTrainedTokenizerFast, AutoModelForCausalLM\n","import torch\n","import torch.amp.autocast_mode\n","from PIL import Image\n","import os\n","import torchvision.transforms.functional as TVF\n","\n","CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n","MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B-bnb-4bit\"\n","CAPTION_TYPE_MAP = {\n"," (\"descriptive\", \"formal\", False, False): [\"Describe the image in 400 words\"],\n"," (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n"," (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n"," (\"descriptive\", \"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n"," (\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n"," (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n"," (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n"," (\"training_prompt\", \"formal\", False, True): [\"Write a stable diffusion prompt for this image within {word_count} words.\"],\n"," (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n"," (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n"," (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n"," (\"rng-tags\", \"formal\", True, False): [\"Write a {length} list of Booru tags for this image.\"],\n","}\n","\n","class ImageAdapter(nn.Module):\n","\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n","\t\tsuper().__init__()\n","\t\tself.deep_extract = deep_extract\n","\t\tif self.deep_extract:\n","\t\t\tinput_features = input_features * 5\n","\t\tself.linear1 = nn.Linear(input_features, output_features)\n","\t\tself.activation = nn.GELU()\n","\t\tself.linear2 = nn.Linear(output_features, output_features)\n","\t\tself.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)\n","\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n","\t\tself.other_tokens = nn.Embedding(3, output_features)\n","\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3\n","\tdef forward(self, vision_outputs: torch.Tensor):\n","\t\tif self.deep_extract:\n","\t\t\tx = torch.concat((\n","\t\t\t\tvision_outputs[-2],\n","\t\t\t\tvision_outputs[3],\n","\t\t\t\tvision_outputs[7],\n","\t\t\t\tvision_outputs[13],\n","\t\t\t\tvision_outputs[20],\n","\t\t\t), dim=-1)\n","\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\" # batch, tokens, features\n","\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n","\t\telse:\n","\t\t\tx = vision_outputs[-2]\n","\t\tx = self.ln1(x)\n","\t\tif self.pos_emb is not None:\n","\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n","\t\t\tx = x + self.pos_emb\n","\t\tx = self.linear1(x)\n","\t\tx = self.activation(x)\n","\t\tx = self.linear2(x)\n","\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], 
device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n","\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n","\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n","\t\treturn x\n","\tdef get_eot_embedding(self):\n","\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n","\n","clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n","clip_model = AutoModel.from_pretrained(CLIP_PATH)\n","clip_model = clip_model.vision_model\n","checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n","checkpoint = {k.replace(\"_orig_mod.module.\", \"\"): v for k, v in checkpoint.items()}\n","clip_model.load_state_dict(checkpoint)\n","# del checkpoint\n","clip_model.eval()\n","clip_model.requires_grad_(False)\n","clip_model.to(\"cuda\")\n","#tokenizer=''\n","#tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, use_fast=False)\n","tokenizer = AutoTokenizer.from_pretrained(\"unsloth/Meta-Llama-3.1-8B-bnb-4bit\")\n","assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n","text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, load_in_8bit=True, device_map=\"auto\", torch_dtype=torch.bfloat16)\n","text_model.load_adapter(\"/content/joy/text_model\")\n","text_model.eval()\n","image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n","image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n","image_adapter.eval()\n","image_adapter.to(\"cuda\")\n","\n","@torch.no_grad()\n","def stream_chat(input_image: Image.Image, caption_type: str, caption_tone: str, caption_length: str | int) -> str:\n"," torch.cuda.empty_cache()\n"," length = None if caption_length == \"any\" else caption_length\n"," if isinstance(length, str):\n"," try:\n"," length = int(length)\n"," except ValueError:\n"," pass\n"," if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n"," caption_tone = \"formal\"\n"," prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n"," if prompt_key not in CAPTION_TYPE_MAP:\n"," raise ValueError(f\"Invalid caption type: {prompt_key}\")\n"," prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n"," print(f\"Prompt: {prompt_str}\")\n"," image = input_image.resize((384, 384), Image.LANCZOS)\n"," pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n"," pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n"," pixel_values = pixel_values.to('cuda')\n"," prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n"," with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n"," vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n"," image_features = vision_outputs.hidden_states\n"," embedded_images = image_adapter(image_features)\n"," embedded_images = embedded_images.to('cuda')\n"," prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n"," assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n"," embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], 
device=text_model.device, dtype=torch.int64))\n"," eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n"," inputs_embeds = torch.cat([\n"," embedded_bos.expand(embedded_images.shape[0], -1, -1),\n"," embedded_images.to(dtype=embedded_bos.dtype),\n"," prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n"," eot_embed.expand(embedded_images.shape[0], -1, -1),\n"," ], dim=1)\n"," input_ids = torch.cat([\n"," torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n"," torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n"," prompt,\n"," torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n"," ], dim=1).to('cuda')\n"," attention_mask = torch.ones_like(input_ids)\n"," generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=300, do_sample=True, suppress_tokens=None) # Uses the default which is temp=0.6, top_p=0.9\n"," generate_ids = generate_ids[:, input_ids.shape[1]:]\n"," if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n"," generate_ids = generate_ids[:, :-1]\n"," caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n"," caption = f'{caption.strip()}'.replace('Prompt: Describe the image in 400 words','')\n"," return caption"],"metadata":{"id":"0zaheBIsw_dc","colab":{"base_uri":"https://localhost:8080/","height":943,"referenced_widgets":["b89006df8aee42ecaf385b89e5df4acf","58970cc506754327b9c1c2586b5519f5","648b1c4e67084eecbc207c65bccaff9c","2a2bb4f2290d4df1963c54f9150c7870","481634b2c06f4d6f9645e86f2cf0f3c3","616bab3bd7f145d79412a8dc2cd97af7","c8e0e598b7724db0893f96298b51b652","055de6f70357469d85e4c8695705ed7f","5fc17e37d17049a5bbae17151a6ac0ab","a742183fb23241db8c717bffc88ad11c","6d6a8e3473d74352b77056f07c812c61","2153314e67304a77b518a65ca9c34a4d","37ec7b26f5f54b65962b896d156e27e1","03eba632f74744aaadc30435a4766d9e","4f64e9ed0ac2497fa7dfa5ff73a1d2bc","3d989f76f97142e99152327b3a1a91a4","082fbc7e17134b7a8ebfb1a0f04e1933","fe328b7afea347aebc8beef40122fd3a","6a6b435daed54dafad714bd4c5577f1a","e2b39602d3ea47fa9b1b38470386bbba","a43970bd01864771abbea368623242b9","5637508040014a1cab3274eebe522670","bc73094764b24d669a84aea3305dc01b","0a4d96f78b1c4d87b7bd17a15f5b4725","82239a97c3db42518a02e8780b898e00","882dbf9d286f4e268b8e8708611b7f97","33dd123118e4456fbe120aaec9d289e2","6658b7dd85024c4abe959f80621a9af2","4e227b5514374035a692e8c6ee354b34","993b8c406d5343f99749d14d017adff2","9c13629a76a74782801ecee67709d8d5","9a5bebb819c04e3487951234198eca4a","57e1f17948d64761b43c9762b82398af"]},"executionInfo":{"status":"ok","timestamp":1739735193230,"user_tz":-60,"elapsed":42070,"user":{"displayName":"","userId":""}},"outputId":"792a80a0-d2b8-4ba9-8633-43c796fd15d0"},"execution_count":7,"outputs":[{"metadata":{"tags":null},"name":"stdout","output_type":"stream","text":["aria2 is already the newest version (1.36.0-1).\n","0 upgraded, 0 newly installed, 0 to remove and 20 not upgraded.\n","\n","Download Results:\n","gid |stat|avg speed |path/URI\n","======+====+===========+=======================================================\n","c14bf0|\u001b[1;32mOK\u001b[0m | 0B/s|/content/joy/text_model/adapter_config.json\n","\n","Status Legend:\n","(OK):download completed.\n","\n","Download Results:\n","gid |stat|avg speed 
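A minimal usage sketch of the pipeline above (not part of the uploaded notebook): it assumes Cell 2 has been run so stream_chat and the loaded models are in scope, and the sample image path is hypothetical.

# Usage sketch, not an original cell.
# Assumes Cell 2 has been executed; '/content/example.jpg' is a hypothetical sample image.
from PIL import Image

img = Image.open('/content/example.jpg').convert('RGB')
print(stream_chat(img, "descriptive", "formal", "any"))  # default 400-word description prompt
print(stream_chat(img, "rng-tags", "formal", 20))        # Booru-style tag list within ~20 words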
# Cell 3: Caption every image in /content/ and write numbered image/caption pairs to /content/tmp/
import os
from PIL import Image

home_directory = '/content/'
using_Kaggle = os.environ.get('KAGGLE_URL_BASE', '')
if using_Kaggle:
    home_directory = '/kaggle/working/'
%cd {home_directory}

def my_mkdirs(folder):
    os.makedirs(folder, exist_ok=True)

tgt_folder = '/content/tmp/'
my_mkdirs(tgt_folder)

src_folder = '/content/'
suffixes = ['.png', '.jpeg', '.webp', '.jpg']
num = 1
for filename in os.listdir(src_folder):
    for suffix in suffixes:
        if not filename.endswith(suffix):
            continue  # only caption image files
        print(filename)
        %cd {src_folder}
        input_image = Image.open(filename).convert('RGB')
        caption = stream_chat(input_image, "descriptive", "formal", "any")
        print(f"...\n\n...caption for {filename}\n\n...")
        print(caption)
        # Write the caption and a PNG copy of the image as a numbered pair.
        %cd {tgt_folder}
        with open(f"{num}.txt", "w") as f:
            f.write(caption)
        input_image.save(f'{num}.png', "PNG")
        num = num + 1

# [Cell 3 output omitted: per-image "Prompt: Describe the image in 400 words" lines and the generated
#  captions for IMG_6053.png through IMG_6076.png, written to /content/tmp/.]

# Cell 4: Archive the caption/image pairs to Google Drive as nsfw_captions.zip
import shutil
%cd '/content/'
shutil.make_archive('/content/drive/MyDrive/nsfw_captions', format='zip', root_dir=tgt_folder)

# Cell 5: Optionally release the Colab runtime when finished
from google.colab import runtime
# runtime.unassign()
1
+
{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739796923572},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739735627072}],"gpuType":"T4"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU"},"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"Dwr7gk5OwuGC"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","source":["!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","!pip install peft bitsandbytes\n","from huggingface_hub import InferenceClient\n","from torch import nn\n","from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n","import torch\n","import torch.amp.autocast_mode\n","from PIL import Image\n","import os\n","import torchvision.transforms.functional as TVF\n","\n","CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n","MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B-bnb-4bit\"\n","CAPTION_TYPE_MAP = {\n"," (\"descriptive\", \"formal\", False, False): [\"Describe the image in 400 words\"],\n"," (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n"," (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n"," (\"descriptive\", \"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n"," (\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n"," (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n"," (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n"," (\"training_prompt\", \"formal\", False, True): [\"Write a stable diffusion prompt for this image within {word_count} words.\"],\n"," (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n"," (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n"," (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n"," (\"rng-tags\", \"formal\", True, 
False): [\"Write a {length} list of Booru tags for this image.\"],\n","}\n","\n","class ImageAdapter(nn.Module):\n","\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n","\t\tsuper().__init__()\n","\t\tself.deep_extract = deep_extract\n","\t\tif self.deep_extract:\n","\t\t\tinput_features = input_features * 5\n","\t\tself.linear1 = nn.Linear(input_features, output_features)\n","\t\tself.activation = nn.GELU()\n","\t\tself.linear2 = nn.Linear(output_features, output_features)\n","\t\tself.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)\n","\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n","\t\tself.other_tokens = nn.Embedding(3, output_features)\n","\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3\n","\tdef forward(self, vision_outputs: torch.Tensor):\n","\t\tif self.deep_extract:\n","\t\t\tx = torch.concat((\n","\t\t\t\tvision_outputs[-2],\n","\t\t\t\tvision_outputs[3],\n","\t\t\t\tvision_outputs[7],\n","\t\t\t\tvision_outputs[13],\n","\t\t\t\tvision_outputs[20],\n","\t\t\t), dim=-1)\n","\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\" # batch, tokens, features\n","\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n","\t\telse:\n","\t\t\tx = vision_outputs[-2]\n","\t\tx = self.ln1(x)\n","\t\tif self.pos_emb is not None:\n","\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n","\t\t\tx = x + self.pos_emb\n","\t\tx = self.linear1(x)\n","\t\tx = self.activation(x)\n","\t\tx = self.linear2(x)\n","\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n","\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n","\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n","\t\treturn x\n","\tdef get_eot_embedding(self):\n","\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n","\n","clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n","clip_model = AutoModel.from_pretrained(CLIP_PATH)\n","clip_model = clip_model.vision_model\n","checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n","checkpoint = {k.replace(\"_orig_mod.module.\", \"\"): v for k, v in checkpoint.items()}\n","clip_model.load_state_dict(checkpoint)\n","# del checkpoint\n","clip_model.eval()\n","clip_model.requires_grad_(False)\n","clip_model.to(\"cuda\")\n","tokenizer = AutoTokenizer.from_pretrained(f'{MODEL_PATH}')\n","#tokenizer = AutoTokenizer.from_pretrained(\"unsloth/Meta-Llama-3.1-8B-bnb-4bit\")\n","assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n","text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, load_in_8bit=True, device_map=\"auto\", torch_dtype=torch.bfloat16)\n","text_model.load_adapter(\"/content/joy/text_model\")\n","text_model.eval()\n","image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n","image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", 
map_location=\"cpu\"))\n","image_adapter.eval()\n","image_adapter.to(\"cuda\")\n","\n","@torch.no_grad()\n","def stream_chat(input_image: Image.Image, caption_type: str, caption_tone: str, caption_length: str | int) -> str:\n"," torch.cuda.empty_cache()\n"," length = None if caption_length == \"any\" else caption_length\n"," if isinstance(length, str):\n"," try:\n"," length = int(length)\n"," except ValueError:\n"," pass\n"," if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n"," caption_tone = \"formal\"\n"," prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n"," if prompt_key not in CAPTION_TYPE_MAP:\n"," raise ValueError(f\"Invalid caption type: {prompt_key}\")\n"," prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n"," print(f\"Prompt: {prompt_str}\")\n"," image = input_image.resize((384, 384), Image.LANCZOS)\n"," pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n"," pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n"," pixel_values = pixel_values.to('cuda')\n"," prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n"," with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n"," vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n"," image_features = vision_outputs.hidden_states\n"," embedded_images = image_adapter(image_features)\n"," embedded_images = embedded_images.to('cuda')\n"," prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n"," assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n"," embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n"," eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n"," inputs_embeds = torch.cat([\n"," embedded_bos.expand(embedded_images.shape[0], -1, -1),\n"," embedded_images.to(dtype=embedded_bos.dtype),\n"," prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n"," eot_embed.expand(embedded_images.shape[0], -1, -1),\n"," ], dim=1)\n"," input_ids = torch.cat([\n"," torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n"," torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n"," prompt,\n"," torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n"," ], dim=1).to('cuda')\n"," attention_mask = torch.ones_like(input_ids)\n"," generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=300, do_sample=True, suppress_tokens=None) # Uses the default which is temp=0.6, top_p=0.9\n"," generate_ids = generate_ids[:, input_ids.shape[1]:]\n"," if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n"," generate_ids = generate_ids[:, :-1]\n"," caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n"," caption = f'{caption.strip()}'.replace('Prompt: Describe the image in 400 words','')\n"," return caption"],"metadata":{"id":"0zaheBIsw_dc"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import os\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if 
using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","my_mkdirs(f'{tgt_folder}')\n","\n","\n","src_folder = '/content/'\n","suffixes = ['.png', '.jpeg' , '.webp' , '.jpg']\n","num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," print(filename)\n"," %cd {src_folder}\n"," input_image = Image.open(f\"{filename}\").convert('RGB')\n"," caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n"," print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n"," print(caption)\n"," #---------#\n"," %cd {tgt_folder}\n"," f = open(f\"{num}.txt\", \"w\")\n"," f.write(f'{caption}')\n"," f.close()\n"," input_image.save(f'{num}.png', \"PNG\")\n"," num = num+1"],"metadata":{"id":"J811UZU6xZEo"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import shutil\n","%cd '/content/'\n","shutil.make_archive('/content/drive/MyDrive/my_dataset', format='zip', root_dir=f'{tgt_folder}')\n"],"metadata":{"id":"5EztLCjkPq4U"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["from google.colab import runtime\n","#runtime.unassign() #Disconnect from runtime"],"metadata":{"id":"kM4TpfdB1amt"},"execution_count":null,"outputs":[]}]}
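For reference, a minimal usage sketch of the stream_chat(input_image, caption_type, caption_tone, caption_length) helper defined in the notebook above, assuming its setup cell has already been executed in the same Colab runtime (so stream_chat, the SigLIP vision tower and the LoRA-adapted Llama text model are in scope). The path /content/sample.png is a hypothetical placeholder, not a file shipped with the notebook.

from PIL import Image

# Load one image and convert to RGB, exactly as the batch loop does
img = Image.open('/content/sample.png').convert('RGB')  # hypothetical example path

# Same arguments as the batch cell: descriptive caption, formal tone, no length cap
caption = stream_chat(img, 'descriptive', 'formal', 'any')
print(caption)

# Other CAPTION_TYPE_MAP options are available, e.g. a Booru-style tag list
tags = stream_chat(img, 'rng-tags', 'formal', 'any')
print(tags)

The batch cell wraps the same call in a loop over /content/, writes numbered .png/.txt pairs into /content/tmp/, and the final cell zips that folder to Google Drive.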
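The captioning loop stores each caption as N.txt next to its image N.png in /content/tmp/ before the archive cell zips that folder. Below is a small sketch, not part of the committed notebook, for reading those pairs back, for example to inspect captions before LoRA training; it only assumes the default /content/tmp/ target folder used above.

import os

tmp_folder = '/content/tmp/'  # the tgt_folder used by the captioning loop

pairs = []
for name in sorted(os.listdir(tmp_folder)):
    if not name.endswith('.png'):
        continue
    stem = os.path.splitext(name)[0]
    txt_path = os.path.join(tmp_folder, f'{stem}.txt')
    if not os.path.exists(txt_path):
        continue  # image without a matching caption file
    with open(txt_path, encoding='utf-8') as f:
        caption = f.read().strip()
    pairs.append((os.path.join(tmp_folder, name), caption))

print(f'{len(pairs)} image/caption pairs found')

Trainers that expect one .txt caption per image (kohya-style folder datasets) can typically consume this layout directly.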