{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [], "gpuType": "T4" }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" }, "accelerator": "GPU" }, "cells": [ { "cell_type": "code", "source": [ "standing, girl, angry ,white hair, blue eyes, magic book, ice, frost forest background, magical casting, diaper" ], "metadata": { "id": "PPiFKDDCh5vg" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "deformed hands, watermark, text, deformed fingers, blurred faces, irregular face, irrregular body shape, ugly eyes, deformed face, squint, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, poorly framed, extra limbs, disfigured, deformed, body out of frame, blurry, bad anatomy, blurred, watermark, grainy, signature, cut off, draft, ugly eyes, squint, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, poorly framed, extra limbs, disfigured, deformed, body out of frame, blurry, bad anatomy, blurred, watermark, grainy, signature, cut off, draft, disfigured, kitsch, ugly, oversaturated, grain, low-res, Deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation, mutated, extra limb, ugly, poorly drawn hands, missing limb, blurry, floating limbs, disconnected limbs, malformed hands, blur, out of focus, long neck, long body, ugly, disgusting, poorly drawn, childish, mutilated, mangled, old, surreal, 2 heads, 2 faces" ], "metadata": { "id": "kBpDXuTiiIfI" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "AGHvD51fXhiG", "outputId": "5d8a01a9-56d8-47c5-b563-3a6e9b8a36e9" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[1;32mDone!\n" ] } ], "source": [ "\n", "\n", "from IPython.display import clear_output\n", "\n", "!git clone -q --branch main https://github.com/nekohacker591/genaaaa\n", "%cd /content/genaaaa\n", "%pip install -r requirements.txt --quiet\n", "\n", "\n", "!sed -i \"s@os.path.splitext(checkpoint_file)@os.path.splitext(checkpoint_file); map_location='cuda'@\" /content/lite_colab/modules/sd_models.py\n", "!sed -i 's@ui.create_ui().*@ui.create_ui();shared.demo.queue(concurrency_count=999999,status_update_rate=0.1)@' /content/lite_colab/startfk.py\n", "!sed -i \"s@map_location='cpu'@map_location='cuda'@\" /content/lite_colab/modules/extras.py\n", "!cp /content/lite_colab/assets/blocks.py /usr/local/lib/python3.10/dist-packages/gradio/blocks.py\n", "\n", "\n", "\n", "\n", "\n", "MODEL_LINK = \"https://huggingface.co/nekohacker591/googletest/resolve/main/best%20abdl%20model%20final.safetensors\"\n", "safetensors = True\n", "\n", "if MODEL_LINK != \"\":\n", " pth = '/content/genaaaa/models/Stable-diffusion/'\n", " if not safetensors:\n", " modelname=\"model.ckpt\"\n", " else:\n", " modelname=\"model.safetensors\"\n", " dwnld = pth + modelname\n", " print('\u001b[1;32mDownload model...')\n", " !gdown --fuzzy -O $dwnld \"$MODEL_LINK\"\n", " clear_output()\n", " print('\u001b[1;32mDone!')\n", "else:\n", " print('\u001b[1;31mPaste model link and try again!')" ] }, { "cell_type": "code", "source": [ "\n", "!python launch.py --share --api --disable-safe-unpickle --enable-insecure-extension-access --opt-sdp-attention --disable-console-progressbars --no-download-sd-model" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "L21uOKSraRoc", "outputId": 
"d3ef4344-b402-4f56-824e-5675df6fa4b1" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Python 3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]\n", "Commit hash: 3e310628d4cf2aadaba9d73a0e5f78e0986b08fe\n", "Installing clip\n", "Installing open_clip\n", "Cloning Stable Diffusion into /content/genaaaa/repositories/stable-diffusion-stability-ai...\n", "Cloning Taming Transformers into /content/genaaaa/repositories/taming-transformers...\n", "Cloning K-diffusion into /content/genaaaa/repositories/k-diffusion...\n", "Cloning CodeFormer into /content/genaaaa/repositories/CodeFormer...\n", "Cloning BLIP into /content/genaaaa/repositories/BLIP...\n", "Installing requirements for CodeFormer\n", "Installing requirements\n", "Installing diffusers\n", "\n", "Installing controlnet requirement: mediapipe\n", "Installing controlnet requirement: svglib\n", "Installing controlnet requirement: fvcore\n", "\n", "Launching startfk with arguments: --share --api --disable-safe-unpickle --enable-insecure-extension-access --opt-sdp-attention --disable-console-progressbars --no-download-sd-model\n", "2023-09-30 01:05:33.376887: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", "2023-09-30 01:05:34.364999: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", "No module 'xformers'. Proceeding without it.\n", "ControlNet v1.1.145\n", "ControlNet v1.1.145\n", "Calculating sha256 for /content/genaaaa/models/Stable-diffusion/model.safetensors: 879db523c30d3b9017143d56705015e15a2cb5628762c11d086fed9538abd7fd\n", "Loading weights [879db523c3] from /content/genaaaa/models/Stable-diffusion/model.safetensors\n", "Creating model from config: /content/genaaaa/configs/v1-inference.yaml\n", "LatentDiffusion: Running in eps-prediction mode\n", "DiffusionWrapper has 859.52 M params.\n", "Downloading (…)olve/main/vocab.json: 100% 961k/961k [00:00<00:00, 1.35MB/s]\n", "Downloading (…)olve/main/merges.txt: 100% 525k/525k [00:00<00:00, 2.14MB/s]\n", "Downloading (…)cial_tokens_map.json: 100% 389/389 [00:00<00:00, 1.79MB/s]\n", "Downloading (…)okenizer_config.json: 100% 905/905 [00:00<00:00, 4.57MB/s]\n", "Downloading (…)lve/main/config.json: 100% 4.52k/4.52k [00:00<00:00, 15.1MB/s]\n", "Applying scaled dot product cross attention optimization.\n", "Textual inversion embeddings loaded(0): \n", "Model loaded in 18.2s (calculate hash: 6.0s, load weights from disk: 0.3s, create model: 5.1s, apply weights to model: 4.6s, apply half(): 1.4s, move model to device: 0.7s).\n", "Running on local URL: http://127.0.0.1:7860\n", "Running on public URL: https://0e5d93dd12cd257e53.gradio.live\n", "\n", "This share link expires in 72 hours. 
For free permanent hosting and GPU upgrades (NEW!), check out Spaces: https://huggingface.co/spaces\n", "Startup time: 43.1s (import torch: 5.8s, import gradio: 1.5s, import ldm: 0.7s, other imports: 2.3s, setup codeformer: 0.2s, load scripts: 3.0s, load SD checkpoint: 18.3s, create ui: 0.4s, gradio launch: 10.7s, scripts app_started_callback: 0.1s).\n", "100% 20/20 [00:06<00:00, 3.22it/s]\n", "100% 20/20 [00:02<00:00, 6.71it/s]\n", "100% 20/20 [00:21<00:00, 1.06s/it]\n", "100% 20/20 [00:04<00:00, 4.38it/s]\n", "Downloading: \"https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth\" to /content/genaaaa/models/Codeformer/codeformer-v0.1.0.pth\n", "\n", "100% 359M/359M [00:02<00:00, 184MB/s]\n", "Downloading: \"https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth\" to /content/genaaaa/repositories/CodeFormer/weights/facelib/detection_Resnet50_Final.pth\n", "\n", "100% 104M/104M [00:07<00:00, 14.4MB/s]\n", "Downloading: \"https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/parsing_parsenet.pth\" to /content/genaaaa/repositories/CodeFormer/weights/facelib/parsing_parsenet.pth\n", "\n", "100% 81.4M/81.4M [00:05<00:00, 14.9MB/s]\n", "100% 20/20 [00:03<00:00, 5.11it/s]\n", "100% 20/20 [00:03<00:00, 5.05it/s]\n", "100% 20/20 [00:03<00:00, 5.04it/s]\n", "100% 20/20 [00:03<00:00, 5.09it/s]\n", "100% 20/20 [00:03<00:00, 5.14it/s]\n", "100% 20/20 [00:03<00:00, 5.16it/s]\n", "100% 20/20 [00:03<00:00, 5.16it/s]\n", " 40% 8/20 [00:01<00:02, 5.09it/s]" ] } ] } ] }