diff --git "a/Untitled.ipynb" "b/Untitled.ipynb"
deleted file mode 100644
--- "a/Untitled.ipynb"
+++ /dev/null
@@ -1,1886 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 1,
- "id": "c69a901b-8c3f-4188-97bd-5594c4496ec5",
- "metadata": {},
- "outputs": [],
- "source": [
- "from huggingface_hub import HfApi\n",
- "api = HfApi()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "8b249adc-ccd0-4145-86ce-64509ad276cf",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "faf7b41b81e54705bae8921f2a86e9fd",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "VBox(children=(HTML(value='
"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/plain": []
- },
- "execution_count": 10,
- "metadata": {},
- "output_type": "execute_result"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "using safetensor as default\n",
- "{'checkpoint': 'checkpoints/SadTalker_V0.0.2_256.safetensors', 'dir_of_BFM_fitting': 'src/config', 'audio2pose_yaml_path': 'src/config/auido2pose.yaml', 'audio2exp_yaml_path': 'src/config/auido2exp.yaml', 'use_safetensor': True, 'mappingnet_checkpoint': 'checkpoints/mapping_00229-model.pth.tar', 'facerender_yaml': 'src/config/facerender.yaml'}\n",
- "/tmp/gradio/ee3af0f3e1518c57a03610bc7c5b9f774dd29f5c/image.png\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "landmark Det:: 100%|██████████| 1/1 [00:00<00:00, 26.62it/s]\n",
- "3DMM Extraction In Video:: 100%|██████████| 1/1 [00:00<00:00, 119.22it/s]\n",
- "mel:: 100%|██████████| 92/92 [00:00<00:00, 45445.29it/s]\n",
- "audio2exp:: 100%|██████████| 10/10 [00:00<00:00, 102.60it/s]\n",
- "Face Renderer:: 100%|██████████| 46/46 [00:05<00:00, 8.18it/s]\n",
- "Numba: Attempted to fork from a non-main thread, the TBB library may be in an invalid state in the child process.\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "The generated video is named ./results/c6e207ec-5e2b-4164-8cdd-8d13be7a51f3/image##nana_speech_cut_4sec 1-0-100.mp4\n",
- "face enhancer....\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Face Enhancer:: 0%| | 0/92 [00:00, ?it/s]Numba: Attempted to fork from a non-main thread, the TBB library may be in an invalid state in the child process.\n",
- "Face Enhancer:: 100%|██████████| 92/92 [00:17<00:00, 5.18it/s]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "The generated video is named ./results/c6e207ec-5e2b-4164-8cdd-8d13be7a51f3/image##nana_speech_cut_4sec 1-0-100_enhanced.mp4\n",
- "The generated video is named image##nana_speech_cut_4sec 1-0-100 in ./results/c6e207ec-5e2b-4164-8cdd-8d13be7a51f3\n"
- ]
- }
- ],
- "source": [
- "\n",
- " with gr.Row().style(equal_height=False):\n",
- " with gr.Column(variant='panel'):\n",
- " with gr.Tabs(elem_id=\"sadtalker_source_image\"):\n",
- " with gr.TabItem('Upload image'):\n",
- " with gr.Row():\n",
- " source_image = gr.Image(label=\"Source image\", source=\"upload\", type=\"filepath\", elem_id=\"img2img_image\").style(width=512)\n",
- "\n",
- " with gr.Tabs(elem_id=\"sadtalker_driven_audio\"):\n",
- " with gr.TabItem('Upload OR TTS'):\n",
- " with gr.Column(variant='panel'):\n",
- " driven_audio = gr.Audio(label=\"Input audio\", source=\"upload\", type=\"filepath\")\n",
- "\n",
- " with gr.Column(variant='panel'): \n",
- " with gr.Tabs(elem_id=\"sadtalker_checkbox\"):\n",
- " with gr.TabItem('Settings'):\n",
- " gr.Markdown(\"need help? please visit our [best practice page](https://github.com/OpenTalker/SadTalker/blob/main/docs/best_practice.md) for more detials\")\n",
- " with gr.Column(variant='panel'):\n",
- " # width = gr.Slider(minimum=64, elem_id=\"img2img_width\", maximum=2048, step=8, label=\"Manually Crop Width\", value=512) # img2img_width\n",
- " # height = gr.Slider(minimum=64, elem_id=\"img2img_height\", maximum=2048, step=8, label=\"Manually Crop Height\", value=512) # img2img_width\n",
- " pose_style = gr.Slider(minimum=0, maximum=46, step=1, label=\"Pose style\", value=0) # \n",
- " size_of_image = gr.Radio([256, 512], value=256, label='face model resolution', info=\"use 256/512 model?\") # \n",
- " preprocess_type = gr.Radio(['crop', 'resize','full', 'extcrop', 'extfull'], value='crop', label='preprocess', info=\"How to handle input image?\")\n",
- " is_still_mode = gr.Checkbox(label=\"Still Mode (fewer hand motion, works with preprocess `full`)\")\n",
- " batch_size = gr.Slider(label=\"batch size in generation\", step=1, maximum=10, value=2)\n",
- " enhancer = gr.Checkbox(label=\"GFPGAN as Face enhancer\")\n",
- " submit = gr.Button('Generate', elem_id=\"sadtalker_generate\", variant='primary')\n",
- " \n",
- " with gr.Tabs(elem_id=\"sadtalker_genearted\"):\n",
- " gen_video = gr.Video(label=\"Generated video\", format=\"mp4\").style(width=256)\n",
- "\n",
- " submit.click(\n",
- " fn=sad_talker.test, \n",
- " inputs=[source_image,\n",
- " driven_audio,\n",
- " preprocess_type,\n",
- " is_still_mode,\n",
- " enhancer,\n",
- " batch_size, \n",
- " size_of_image,\n",
- " pose_style\n",
- " ], \n",
- " outputs=[gen_video]\n",
- " )\n",
- "\n",
- "\n",
- "\n",
- "demo.queue().launch(share=True)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 18,
- "id": "4d898fab-53de-4acd-b65f-f31faf43f184",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Loaded the voice encoder model on cuda in 0.03 seconds.\n",
- "INFO:root:Loaded checkpoint 'checkpoint/freevc-24.pth' (iteration 3461)\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /microsoft/wavlm-large/resolve/main/config.json HTTP/1.1\" 200 0\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/tokenizer_config.json HTTP/1.1\" 200 0\n",
- "DEBUG:filelock:Attempting to acquire lock 140053383728096 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/79b212c00ecdde03c51bb791f7fdfc25f4176ff3.lock\n",
- "DEBUG:filelock:Lock 140053383728096 acquired on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/79b212c00ecdde03c51bb791f7fdfc25f4176ff3.lock\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"GET /THUDM/chatglm2-6b/resolve/main/tokenizer_config.json HTTP/1.1\" 200 244\n"
- ]
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "f5162e6e32bf42aa8a74a6db5a484dd1",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Downloading (…)okenizer_config.json: 0%| | 0.00/244 [00:00, ?B/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "DEBUG:filelock:Attempting to release lock 140053383728096 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/79b212c00ecdde03c51bb791f7fdfc25f4176ff3.lock\n",
- "DEBUG:filelock:Lock 140053383728096 released on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/79b212c00ecdde03c51bb791f7fdfc25f4176ff3.lock\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/tokenization_chatglm.py HTTP/1.1\" 200 0\n",
- "DEBUG:filelock:Attempting to acquire lock 140053383729920 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/d4ce41652736b14c5c0a9263d32ba33566adf73b.lock\n",
- "DEBUG:filelock:Lock 140053383729920 acquired on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/d4ce41652736b14c5c0a9263d32ba33566adf73b.lock\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"GET /THUDM/chatglm2-6b/resolve/main/tokenization_chatglm.py HTTP/1.1\" 200 10061\n"
- ]
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "b7448f529c574a0687bc5b763594f07a",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Downloading (…)enization_chatglm.py: 0%| | 0.00/10.1k [00:00, ?B/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "DEBUG:filelock:Attempting to release lock 140053383729920 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/d4ce41652736b14c5c0a9263d32ba33566adf73b.lock\n",
- "DEBUG:filelock:Lock 140053383729920 released on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/d4ce41652736b14c5c0a9263d32ba33566adf73b.lock\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "A new version of the following files was downloaded from https://huggingface.co/THUDM/chatglm2-6b:\n",
- "- tokenization_chatglm.py\n",
- ". Make sure to double-check they do not contain any added malicious code. To avoid downloading new versions of the code file, you can pin a revision.\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/tokenizer.model HTTP/1.1\" 302 0\n",
- "DEBUG:filelock:Attempting to acquire lock 140053382719296 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2.lock\n",
- "DEBUG:filelock:Lock 140053382719296 acquired on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2.lock\n",
- "DEBUG:urllib3.connectionpool:Resetting dropped connection: cdn-lfs.huggingface.co\n",
- "DEBUG:urllib3.connectionpool:https://cdn-lfs.huggingface.co:443 \"GET /repos/a6/9c/a69cdfd86b704ca3e2aef541707690cf6c7500f9d7f5a0dd0f7f990f31a0b5a4/e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27tokenizer.model%3B+filename%3D%22tokenizer.model%22%3B&Expires=1690051432&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTY5MDA1MTQzMn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy9hNi85Yy9hNjljZGZkODZiNzA0Y2EzZTJhZWY1NDE3MDc2OTBjZjZjNzUwMGY5ZDdmNWEwZGQwZjdmOTkwZjMxYTBiNWE0L2U3ZGM0YzM5MzQyM2I3NmU0MzczZTUxNTdkZGMzNDgwM2EwMTg5YmE5NmIyMWRkYmI0MDI2OWQzMTQ2OGE2ZjI~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qIn1dfQ__&Signature=v6PoUyqEYT1zJIWMbqgab6uMVCUTf4nGk57BpaZEh77EX3L6LPP44BLjNFMr29CyU07JeylFqtUcqVmNmGbeOHeQlfevnm0xuSoduPKRJiTvmhyg8FzLi~YOdftC-dMkn4Z7eYGMf7TVfaZS6zuPVcUgUGNZUqT4gR6VqWGZh6HNT4u~qJi~-GSVWH3~8u8~VNW1fkK34u1M1isvDXqQB7OO4l1iggMVykVHuQZT6ZUHbbXEgoGCpcIf7-uDWl1CB3HIsLVSStsvP2j043KzcgWo03jks7hXfy3lfj6yyaPgmqz4-fBEAUmyqP5rhXrPrE9lHMwo07as5BkCk8oLtA__&Key-Pair-Id=KVTP0A1DKRTAX HTTP/1.1\" 200 1018370\n"
- ]
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "8464021703304b82ab3bf21818185bb4",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Downloading tokenizer.model: 0%| | 0.00/1.02M [00:00, ?B/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "DEBUG:filelock:Attempting to release lock 140053382719296 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2.lock\n",
- "DEBUG:filelock:Lock 140053382719296 released on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2.lock\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/added_tokens.json HTTP/1.1\" 404 0\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/special_tokens_map.json HTTP/1.1\" 404 0\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/config.json HTTP/1.1\" 200 0\n",
- "DEBUG:filelock:Attempting to acquire lock 140053920529280 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/77bf76c7d44633a6b7603858077827b03531e838.lock\n",
- "DEBUG:filelock:Lock 140053920529280 acquired on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/77bf76c7d44633a6b7603858077827b03531e838.lock\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"GET /THUDM/chatglm2-6b/resolve/main/config.json HTTP/1.1\" 200 1223\n"
- ]
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "d66637b1ab2447a097917dfdcea68a88",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Downloading (…)lve/main/config.json: 0%| | 0.00/1.22k [00:00, ?B/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "DEBUG:filelock:Attempting to release lock 140053920529280 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/77bf76c7d44633a6b7603858077827b03531e838.lock\n",
- "DEBUG:filelock:Lock 140053920529280 released on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/77bf76c7d44633a6b7603858077827b03531e838.lock\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/configuration_chatglm.py HTTP/1.1\" 200 0\n",
- "DEBUG:filelock:Attempting to acquire lock 140053382716992 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/3730b4e34d8688587e88cbb75384542244a40662.lock\n",
- "DEBUG:filelock:Lock 140053382716992 acquired on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/3730b4e34d8688587e88cbb75384542244a40662.lock\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"GET /THUDM/chatglm2-6b/resolve/main/configuration_chatglm.py HTTP/1.1\" 200 2246\n"
- ]
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "ed5ce207b7154aa7938a415f1817d83e",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Downloading (…)iguration_chatglm.py: 0%| | 0.00/2.25k [00:00, ?B/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "DEBUG:filelock:Attempting to release lock 140053382716992 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/3730b4e34d8688587e88cbb75384542244a40662.lock\n",
- "DEBUG:filelock:Lock 140053382716992 released on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/3730b4e34d8688587e88cbb75384542244a40662.lock\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "A new version of the following files was downloaded from https://huggingface.co/THUDM/chatglm2-6b:\n",
- "- configuration_chatglm.py\n",
- ". Make sure to double-check they do not contain any added malicious code. To avoid downloading new versions of the code file, you can pin a revision.\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/config.json HTTP/1.1\" 200 0\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/modeling_chatglm.py HTTP/1.1\" 200 0\n",
- "DEBUG:filelock:Attempting to acquire lock 140053920528656 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/230f4c3b7c6cdc56d6ecec1043eeccfc7b0a7844.lock\n",
- "DEBUG:filelock:Lock 140053920528656 acquired on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/230f4c3b7c6cdc56d6ecec1043eeccfc7b0a7844.lock\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"GET /THUDM/chatglm2-6b/resolve/main/modeling_chatglm.py HTTP/1.1\" 200 50717\n"
- ]
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "f95f6fbc537749bc99095d61ed2d49ab",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Downloading (…)/modeling_chatglm.py: 0%| | 0.00/50.7k [00:00, ?B/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "DEBUG:filelock:Attempting to release lock 140053920528656 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/230f4c3b7c6cdc56d6ecec1043eeccfc7b0a7844.lock\n",
- "DEBUG:filelock:Lock 140053920528656 released on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/230f4c3b7c6cdc56d6ecec1043eeccfc7b0a7844.lock\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/quantization.py HTTP/1.1\" 200 0\n",
- "DEBUG:filelock:Attempting to acquire lock 140053784823120 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/cb95bfe82b203ff6a2aa962326d2c7a438d6a52f.lock\n",
- "DEBUG:filelock:Lock 140053784823120 acquired on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/cb95bfe82b203ff6a2aa962326d2c7a438d6a52f.lock\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"GET /THUDM/chatglm2-6b/resolve/main/quantization.py HTTP/1.1\" 200 14692\n"
- ]
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "6d95620e03af4d938e461b395f6bb263",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Downloading (…)main/quantization.py: 0%| | 0.00/14.7k [00:00, ?B/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "DEBUG:filelock:Attempting to release lock 140053784823120 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/cb95bfe82b203ff6a2aa962326d2c7a438d6a52f.lock\n",
- "DEBUG:filelock:Lock 140053784823120 released on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/cb95bfe82b203ff6a2aa962326d2c7a438d6a52f.lock\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "A new version of the following files was downloaded from https://huggingface.co/THUDM/chatglm2-6b:\n",
- "- quantization.py\n",
- ". Make sure to double-check they do not contain any added malicious code. To avoid downloading new versions of the code file, you can pin a revision.\n",
- "A new version of the following files was downloaded from https://huggingface.co/THUDM/chatglm2-6b:\n",
- "- modeling_chatglm.py\n",
- "- quantization.py\n",
- ". Make sure to double-check they do not contain any added malicious code. To avoid downloading new versions of the code file, you can pin a revision.\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/model.safetensors HTTP/1.1\" 404 0\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/model.safetensors.index.json HTTP/1.1\" 404 0\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/pytorch_model.bin HTTP/1.1\" 404 0\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/pytorch_model.bin.index.json HTTP/1.1\" 200 0\n",
- "DEBUG:filelock:Attempting to acquire lock 140053383728528 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/fb856bdb5fdf13d7227fa50e5e6b59b8f39d0bc7.lock\n",
- "DEBUG:filelock:Lock 140053383728528 acquired on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/fb856bdb5fdf13d7227fa50e5e6b59b8f39d0bc7.lock\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"GET /THUDM/chatglm2-6b/resolve/main/pytorch_model.bin.index.json HTTP/1.1\" 200 20438\n"
- ]
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "8c9418e21142480c9a8a72a4bba30fe6",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Downloading (…)model.bin.index.json: 0%| | 0.00/20.4k [00:00, ?B/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "DEBUG:filelock:Attempting to release lock 140053383728528 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/fb856bdb5fdf13d7227fa50e5e6b59b8f39d0bc7.lock\n",
- "DEBUG:filelock:Lock 140053383728528 released on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/fb856bdb5fdf13d7227fa50e5e6b59b8f39d0bc7.lock\n"
- ]
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "636fec89b5cc4f4dba2528f091680c96",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Downloading shards: 0%| | 0/7 [00:00, ?it/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/pytorch_model-00001-of-00007.bin HTTP/1.1\" 302 0\n",
- "DEBUG:filelock:Attempting to acquire lock 140053383729968 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/cdf1bf57d519abe11043e9121314e76bc0934993e649a9e438a4b0894f4e6ee8.lock\n",
- "DEBUG:filelock:Lock 140053383729968 acquired on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/cdf1bf57d519abe11043e9121314e76bc0934993e649a9e438a4b0894f4e6ee8.lock\n",
- "DEBUG:urllib3.connectionpool:https://cdn-lfs.huggingface.co:443 \"GET /repos/a6/9c/a69cdfd86b704ca3e2aef541707690cf6c7500f9d7f5a0dd0f7f990f31a0b5a4/cdf1bf57d519abe11043e9121314e76bc0934993e649a9e438a4b0894f4e6ee8?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00001-of-00007.bin%3B+filename%3D%22pytorch_model-00001-of-00007.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1690044476&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTY5MDA0NDQ3Nn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy9hNi85Yy9hNjljZGZkODZiNzA0Y2EzZTJhZWY1NDE3MDc2OTBjZjZjNzUwMGY5ZDdmNWEwZGQwZjdmOTkwZjMxYTBiNWE0L2NkZjFiZjU3ZDUxOWFiZTExMDQzZTkxMjEzMTRlNzZiYzA5MzQ5OTNlNjQ5YTllNDM4YTRiMDg5NGY0ZTZlZTg~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=lYCQirEjv~izqjBjF5ZpeL7PwuMH9SWeGH4v95CyueWvhpwu-Sx4-QA42Gy0hqwMAO69kBYhdXQvBUpu5ZajqcP93v0~~zkEJVCMCMiQdZuNte4yGiWb0pwHHAaA2NvqG1hpRkAPsBPiro9ko8HubnxSbsfo~K4MLRquwvuY~HOPaoqPv46sjrk-R1LezbyNVw1vKtRJvoHCokCDYlgeYn-PWUbBhCMU9g0pRESRov7lUGoGym0km61N0C4konSkrFcvGQBEWbTdNbf~nIk-NK6JfwX3UQx-wYFO6lnmFpujFKxIpWueYZC76GreBtQGROr-H~q6BMERcJKpjvyCeg__&Key-Pair-Id=KVTP0A1DKRTAX HTTP/1.1\" 200 1827780615\n"
- ]
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "920a430fffe24bdbb6da9348a8515fa3",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Downloading (…)l-00001-of-00007.bin: 0%| | 0.00/1.83G [00:00, ?B/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "DEBUG:filelock:Attempting to release lock 140053383729968 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/cdf1bf57d519abe11043e9121314e76bc0934993e649a9e438a4b0894f4e6ee8.lock\n",
- "DEBUG:filelock:Lock 140053383729968 released on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/cdf1bf57d519abe11043e9121314e76bc0934993e649a9e438a4b0894f4e6ee8.lock\n",
- "DEBUG:urllib3.connectionpool:https://huggingface.co:443 \"HEAD /THUDM/chatglm2-6b/resolve/main/pytorch_model-00002-of-00007.bin HTTP/1.1\" 302 0\n",
- "DEBUG:filelock:Attempting to acquire lock 140053383729968 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/1cd596bd15905248b20b755daf12a02a8fa963da09b59da7fdc896e17bfa518c.lock\n",
- "DEBUG:filelock:Lock 140053383729968 acquired on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/1cd596bd15905248b20b755daf12a02a8fa963da09b59da7fdc896e17bfa518c.lock\n",
- "DEBUG:urllib3.connectionpool:https://cdn-lfs.huggingface.co:443 \"GET /repos/a6/9c/a69cdfd86b704ca3e2aef541707690cf6c7500f9d7f5a0dd0f7f990f31a0b5a4/1cd596bd15905248b20b755daf12a02a8fa963da09b59da7fdc896e17bfa518c?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00002-of-00007.bin%3B+filename%3D%22pytorch_model-00002-of-00007.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1690051445&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTY5MDA1MTQ0NX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy9hNi85Yy9hNjljZGZkODZiNzA0Y2EzZTJhZWY1NDE3MDc2OTBjZjZjNzUwMGY5ZDdmNWEwZGQwZjdmOTkwZjMxYTBiNWE0LzFjZDU5NmJkMTU5MDUyNDhiMjBiNzU1ZGFmMTJhMDJhOGZhOTYzZGEwOWI1OWRhN2ZkYzg5NmUxN2JmYTUxOGM~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=GW-3lZl~8mqmK9gmOWlbrZwBNbxDNbMfrQ~AokZDFeqD9fpD~C63eQbgc8elf5uoluXDp~FSWxXtdTM~YGJgpzQ4Ymm0xiTh3l7Aw-IOgOQ93-QG96wfWQMLF22xY1iPr1NCgsea2zrnS9XDqNol3ELxEudHzvULU2Ix6AzjfDWAhPBvYvgjSivSFOtYh6AK21s0SXa5EVyL1AIPUKyFp-nlB665jWehlOKFwagVMZUYD5NJYYcibFJRlliz0zyUU051aTQsOFCcabdbM9jsX47bLXM9sce5nQ0M5fE9Au10IvELHZAaM2-LX6S-CG-rAE4nkfjFqeid98ush45eZg__&Key-Pair-Id=KVTP0A1DKRTAX HTTP/1.1\" 200 1968299005\n"
- ]
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "ae8cba83b39a4546a7ba31347bec63a2",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Downloading (…)l-00002-of-00007.bin: 0%| | 0.00/1.97G [00:00, ?B/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "DEBUG:filelock:Attempting to release lock 140053383729968 on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/1cd596bd15905248b20b755daf12a02a8fa963da09b59da7fdc896e17bfa518c.lock\n",
- "DEBUG:filelock:Lock 140053383729968 released on /root/.cache/huggingface/hub/models--THUDM--chatglm2-6b/blobs/1cd596bd15905248b20b755daf12a02a8fa963da09b59da7fdc896e17bfa518c.lock\n"
- ]
- },
- {
- "ename": "OSError",
- "evalue": "[Errno 28] No space left on device",
- "output_type": "error",
- "traceback": [
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[0;31mOSError\u001b[0m Traceback (most recent call last)",
- "Cell \u001b[0;32mIn[18], line 142\u001b[0m\n\u001b[1;32m 136\u001b[0m has_cuda \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mcuda\u001b[38;5;241m.\u001b[39mis_available()\n\u001b[1;32m 138\u001b[0m \u001b[38;5;66;03m# has_cuda = False # force cpu\u001b[39;00m\n\u001b[1;32m 141\u001b[0m model_glm \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m--> 142\u001b[0m \u001b[43mAutoModel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfrom_pretrained\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel_name\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtrust_remote_code\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mcuda()\u001b[38;5;241m.\u001b[39mhalf()\n\u001b[1;32m 143\u001b[0m ) \u001b[38;5;66;03m# 3.92G\u001b[39;00m\n\u001b[1;32m 145\u001b[0m model_glm \u001b[38;5;241m=\u001b[39m model_glm\u001b[38;5;241m.\u001b[39meval()\n\u001b[1;32m 147\u001b[0m _ \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\"\"\u001b[39m\u001b[38;5;124mOverride Chatbot.postprocess\u001b[39m\u001b[38;5;124m\"\"\"\u001b[39m\n",
- "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py:488\u001b[0m, in \u001b[0;36m_BaseAutoModelClass.from_pretrained\u001b[0;34m(cls, pretrained_model_name_or_path, *model_args, **kwargs)\u001b[0m\n\u001b[1;32m 486\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 487\u001b[0m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39mregister(config\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m, model_class, exist_ok\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[0;32m--> 488\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mmodel_class\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfrom_pretrained\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 489\u001b[0m \u001b[43m \u001b[49m\u001b[43mpretrained_model_name_or_path\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mmodel_args\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mhub_kwargs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[1;32m 490\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 491\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mtype\u001b[39m(config) \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m_model_mapping\u001b[38;5;241m.\u001b[39mkeys():\n\u001b[1;32m 492\u001b[0m model_class \u001b[38;5;241m=\u001b[39m _get_model_class(config, \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m_model_mapping)\n",
- "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/transformers/modeling_utils.py:2610\u001b[0m, in \u001b[0;36mPreTrainedModel.from_pretrained\u001b[0;34m(cls, pretrained_model_name_or_path, config, cache_dir, ignore_mismatched_sizes, force_download, local_files_only, token, revision, use_safetensors, *model_args, **kwargs)\u001b[0m\n\u001b[1;32m 2607\u001b[0m \u001b[38;5;66;03m# We'll need to download and cache each checkpoint shard if the checkpoint is sharded.\u001b[39;00m\n\u001b[1;32m 2608\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m is_sharded:\n\u001b[1;32m 2609\u001b[0m \u001b[38;5;66;03m# rsolved_archive_file becomes a list of files that point to the different checkpoint shards in this case.\u001b[39;00m\n\u001b[0;32m-> 2610\u001b[0m resolved_archive_file, sharded_metadata \u001b[38;5;241m=\u001b[39m \u001b[43mget_checkpoint_shard_files\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2611\u001b[0m \u001b[43m \u001b[49m\u001b[43mpretrained_model_name_or_path\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2612\u001b[0m \u001b[43m \u001b[49m\u001b[43mresolved_archive_file\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2613\u001b[0m \u001b[43m \u001b[49m\u001b[43mcache_dir\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcache_dir\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2614\u001b[0m \u001b[43m \u001b[49m\u001b[43mforce_download\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mforce_download\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2615\u001b[0m \u001b[43m \u001b[49m\u001b[43mproxies\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mproxies\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2616\u001b[0m \u001b[43m \u001b[49m\u001b[43mresume_download\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mresume_download\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2617\u001b[0m \u001b[43m \u001b[49m\u001b[43mlocal_files_only\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlocal_files_only\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2618\u001b[0m \u001b[43m \u001b[49m\u001b[43muse_auth_token\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtoken\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2619\u001b[0m \u001b[43m \u001b[49m\u001b[43muser_agent\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muser_agent\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2620\u001b[0m \u001b[43m \u001b[49m\u001b[43mrevision\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrevision\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2621\u001b[0m \u001b[43m \u001b[49m\u001b[43msubfolder\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msubfolder\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2622\u001b[0m \u001b[43m \u001b[49m\u001b[43m_commit_hash\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcommit_hash\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2623\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2625\u001b[0m \u001b[38;5;66;03m# load pt weights early so that we know which dtype to init the model under\u001b[39;00m\n\u001b[1;32m 2626\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m from_pt:\n",
- "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/transformers/utils/hub.py:958\u001b[0m, in \u001b[0;36mget_checkpoint_shard_files\u001b[0;34m(pretrained_model_name_or_path, index_filename, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, subfolder, _commit_hash)\u001b[0m\n\u001b[1;32m 955\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m shard_filename \u001b[38;5;129;01min\u001b[39;00m tqdm(shard_filenames, desc\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDownloading shards\u001b[39m\u001b[38;5;124m\"\u001b[39m, disable\u001b[38;5;241m=\u001b[39m\u001b[38;5;129;01mnot\u001b[39;00m show_progress_bar):\n\u001b[1;32m 956\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 957\u001b[0m \u001b[38;5;66;03m# Load from URL\u001b[39;00m\n\u001b[0;32m--> 958\u001b[0m cached_filename \u001b[38;5;241m=\u001b[39m \u001b[43mcached_file\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 959\u001b[0m \u001b[43m \u001b[49m\u001b[43mpretrained_model_name_or_path\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 960\u001b[0m \u001b[43m \u001b[49m\u001b[43mshard_filename\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 961\u001b[0m \u001b[43m \u001b[49m\u001b[43mcache_dir\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcache_dir\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 962\u001b[0m \u001b[43m \u001b[49m\u001b[43mforce_download\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mforce_download\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 963\u001b[0m \u001b[43m \u001b[49m\u001b[43mproxies\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mproxies\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 964\u001b[0m \u001b[43m \u001b[49m\u001b[43mresume_download\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mresume_download\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 965\u001b[0m \u001b[43m \u001b[49m\u001b[43mlocal_files_only\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlocal_files_only\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 966\u001b[0m \u001b[43m \u001b[49m\u001b[43muse_auth_token\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muse_auth_token\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 967\u001b[0m \u001b[43m \u001b[49m\u001b[43muser_agent\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muser_agent\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 968\u001b[0m \u001b[43m \u001b[49m\u001b[43mrevision\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrevision\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 969\u001b[0m \u001b[43m \u001b[49m\u001b[43msubfolder\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msubfolder\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 970\u001b[0m \u001b[43m \u001b[49m\u001b[43m_commit_hash\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m_commit_hash\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 971\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 972\u001b[0m \u001b[38;5;66;03m# We have already dealt with RepositoryNotFoundError and RevisionNotFoundError when getting the index, so\u001b[39;00m\n\u001b[1;32m 973\u001b[0m \u001b[38;5;66;03m# we don't have to catch them here.\u001b[39;00m\n\u001b[1;32m 974\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m EntryNotFoundError:\n",
- "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/transformers/utils/hub.py:417\u001b[0m, in \u001b[0;36mcached_file\u001b[0;34m(path_or_repo_id, filename, cache_dir, force_download, resume_download, proxies, use_auth_token, revision, local_files_only, subfolder, repo_type, user_agent, _raise_exceptions_for_missing_entries, _raise_exceptions_for_connection_errors, _commit_hash)\u001b[0m\n\u001b[1;32m 414\u001b[0m user_agent \u001b[38;5;241m=\u001b[39m http_user_agent(user_agent)\n\u001b[1;32m 415\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 416\u001b[0m \u001b[38;5;66;03m# Load from URL or cache if already cached\u001b[39;00m\n\u001b[0;32m--> 417\u001b[0m resolved_file \u001b[38;5;241m=\u001b[39m \u001b[43mhf_hub_download\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 418\u001b[0m \u001b[43m \u001b[49m\u001b[43mpath_or_repo_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 419\u001b[0m \u001b[43m \u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 420\u001b[0m \u001b[43m \u001b[49m\u001b[43msubfolder\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43msubfolder\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m==\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43msubfolder\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 421\u001b[0m \u001b[43m \u001b[49m\u001b[43mrepo_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrepo_type\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 422\u001b[0m \u001b[43m \u001b[49m\u001b[43mrevision\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrevision\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 423\u001b[0m \u001b[43m \u001b[49m\u001b[43mcache_dir\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcache_dir\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 424\u001b[0m \u001b[43m \u001b[49m\u001b[43muser_agent\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muser_agent\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 425\u001b[0m \u001b[43m \u001b[49m\u001b[43mforce_download\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mforce_download\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 426\u001b[0m \u001b[43m \u001b[49m\u001b[43mproxies\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mproxies\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 427\u001b[0m \u001b[43m \u001b[49m\u001b[43mresume_download\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mresume_download\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 428\u001b[0m \u001b[43m \u001b[49m\u001b[43muse_auth_token\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muse_auth_token\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 429\u001b[0m \u001b[43m \u001b[49m\u001b[43mlocal_files_only\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlocal_files_only\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 430\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 432\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m RepositoryNotFoundError:\n\u001b[1;32m 433\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mEnvironmentError\u001b[39;00m(\n\u001b[1;32m 434\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpath_or_repo_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m is not a local folder and is not a 
valid model identifier \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 435\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlisted on \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mhttps://huggingface.co/models\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mIf this is a private repository, make sure to \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 436\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpass a token having permission to this repo with `use_auth_token` or log in with \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 437\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m`huggingface-cli login` and pass `use_auth_token=True`.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 438\u001b[0m )\n",
- "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py:118\u001b[0m, in \u001b[0;36mvalidate_hf_hub_args.._inner_fn\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m check_use_auth_token:\n\u001b[1;32m 116\u001b[0m kwargs \u001b[38;5;241m=\u001b[39m smoothly_deprecate_use_auth_token(fn_name\u001b[38;5;241m=\u001b[39mfn\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m, has_token\u001b[38;5;241m=\u001b[39mhas_token, kwargs\u001b[38;5;241m=\u001b[39mkwargs)\n\u001b[0;32m--> 118\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
- "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/huggingface_hub/file_download.py:1364\u001b[0m, in \u001b[0;36mhf_hub_download\u001b[0;34m(repo_id, filename, subfolder, repo_type, revision, library_name, library_version, cache_dir, local_dir, local_dir_use_symlinks, user_agent, force_download, force_filename, proxies, etag_timeout, resume_download, token, local_files_only, legacy_cache_layout)\u001b[0m\n\u001b[1;32m 1361\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m temp_file_manager() \u001b[38;5;28;01mas\u001b[39;00m temp_file:\n\u001b[1;32m 1362\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdownloading \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m to \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m\"\u001b[39m, url, temp_file\u001b[38;5;241m.\u001b[39mname)\n\u001b[0;32m-> 1364\u001b[0m \u001b[43mhttp_get\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1365\u001b[0m \u001b[43m \u001b[49m\u001b[43murl_to_download\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1366\u001b[0m \u001b[43m \u001b[49m\u001b[43mtemp_file\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1367\u001b[0m \u001b[43m \u001b[49m\u001b[43mproxies\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mproxies\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1368\u001b[0m \u001b[43m \u001b[49m\u001b[43mresume_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mresume_size\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1369\u001b[0m \u001b[43m \u001b[49m\u001b[43mheaders\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mheaders\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1370\u001b[0m \u001b[43m \u001b[49m\u001b[43mexpected_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mexpected_size\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1371\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1373\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m local_dir \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 1374\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mStoring \u001b[39m\u001b[38;5;132;01m{\u001b[39;00murl\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m in cache at \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mblob_path\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n",
- "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/huggingface_hub/file_download.py:544\u001b[0m, in \u001b[0;36mhttp_get\u001b[0;34m(url, temp_file, proxies, resume_size, headers, timeout, max_retries, expected_size)\u001b[0m\n\u001b[1;32m 542\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m chunk: \u001b[38;5;66;03m# filter out keep-alive new chunks\u001b[39;00m\n\u001b[1;32m 543\u001b[0m progress\u001b[38;5;241m.\u001b[39mupdate(\u001b[38;5;28mlen\u001b[39m(chunk))\n\u001b[0;32m--> 544\u001b[0m \u001b[43mtemp_file\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwrite\u001b[49m\u001b[43m(\u001b[49m\u001b[43mchunk\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 546\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m expected_size \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m expected_size \u001b[38;5;241m!=\u001b[39m temp_file\u001b[38;5;241m.\u001b[39mtell():\n\u001b[1;32m 547\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mEnvironmentError\u001b[39;00m(\n\u001b[1;32m 548\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mConsistency check failed: file should be of size \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mexpected_size\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m but has size\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 549\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtemp_file\u001b[38;5;241m.\u001b[39mtell()\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m (\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mdisplayed_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m).\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mWe are sorry for the inconvenience. Please retry download and\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 550\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m pass `force_download=True, resume_download=False` as argument.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mIf the issue persists, please let us\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 551\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m know by opening an issue on https://github.com/huggingface/huggingface_hub.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 552\u001b[0m )\n",
- "File \u001b[0;32m/opt/conda/lib/python3.10/tempfile.py:483\u001b[0m, in \u001b[0;36m_TemporaryFileWrapper.__getattr__..func_wrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 481\u001b[0m \u001b[38;5;129m@_functools\u001b[39m\u001b[38;5;241m.\u001b[39mwraps(func)\n\u001b[1;32m 482\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mfunc_wrapper\u001b[39m(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m--> 483\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
- "\u001b[0;31mOSError\u001b[0m: [Errno 28] No space left on device"
- ]
- }
- ],
- "source": [
- "import os\n",
- "import torch\n",
- "import librosa\n",
- "import gradio as gr\n",
- "from scipy.io.wavfile import write\n",
- "from transformers import WavLMModel\n",
- "\n",
- "import utils\n",
- "from models import SynthesizerTrn\n",
- "from mel_processing import mel_spectrogram_torch\n",
- "from speaker_encoder.voice_encoder import SpeakerEncoder\n",
- "\n",
- "import time\n",
- "from textwrap import dedent\n",
- "\n",
- "import mdtex2html\n",
- "from loguru import logger\n",
- "from transformers import AutoModel, AutoTokenizer\n",
- "\n",
- "from tts_voice import tts_order_voice\n",
- "import edge_tts\n",
- "import tempfile\n",
- "import anyio\n",
- "\n",
- "import os, sys\n",
- "import gradio as gr\n",
- "from src.gradio_demo import SadTalker \n",
- "\n",
- "\n",
- "try:\n",
- " import webui # in webui\n",
- " in_webui = True\n",
- "except:\n",
- " in_webui = False\n",
- "\n",
- "\n",
- "def toggle_audio_file(choice):\n",
- " if choice == False:\n",
- " return gr.update(visible=True), gr.update(visible=False)\n",
- " else:\n",
- " return gr.update(visible=False), gr.update(visible=True)\n",
- " \n",
- "def ref_video_fn(path_of_ref_video):\n",
- " if path_of_ref_video is not None:\n",
- " return gr.update(value=True)\n",
- " else:\n",
- " return gr.update(value=False)\n",
- "\n",
- "sad_talker = SadTalker(\"checkpoints\", \"src/config\", lazy_load=True)\n",
- "\n",
- "'''\n",
- "def get_wavlm():\n",
- " os.system('gdown https://drive.google.com/uc?id=12-cB34qCTvByWT-QtOcZaqwwO21FLSqU')\n",
- " shutil.move('WavLM-Large.pt', 'wavlm')\n",
- "'''\n",
- "\n",
- "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
- "\n",
- "smodel = SpeakerEncoder('speaker_encoder/ckpt/pretrained_bak_5805000.pt')\n",
- "\n",
- "print(\"Loading FreeVC(24k)...\")\n",
- "hps = utils.get_hparams_from_file(\"configs/freevc-24.json\")\n",
- "freevc_24 = SynthesizerTrn(\n",
- " hps.data.filter_length // 2 + 1,\n",
- " hps.train.segment_size // hps.data.hop_length,\n",
- " **hps.model).to(device)\n",
- "_ = freevc_24.eval()\n",
- "_ = utils.load_checkpoint(\"checkpoint/freevc-24.pth\", freevc_24, None)\n",
- "\n",
- "print(\"Loading WavLM for content...\")\n",
- "cmodel = WavLMModel.from_pretrained(\"microsoft/wavlm-large\").to(device)\n",
- " \n",
- "def convert(model, src, tgt):\n",
- " with torch.no_grad():\n",
- " # tgt\n",
- " wav_tgt, _ = librosa.load(tgt, sr=hps.data.sampling_rate)\n",
- " wav_tgt, _ = librosa.effects.trim(wav_tgt, top_db=20)\n",
- " if model == \"FreeVC\" or model == \"FreeVC (24kHz)\":\n",
- " g_tgt = smodel.embed_utterance(wav_tgt)\n",
- " g_tgt = torch.from_numpy(g_tgt).unsqueeze(0).to(device)\n",
- " else:\n",
- " wav_tgt = torch.from_numpy(wav_tgt).unsqueeze(0).to(device)\n",
- " mel_tgt = mel_spectrogram_torch(\n",
- " wav_tgt, \n",
- " hps.data.filter_length,\n",
- " hps.data.n_mel_channels,\n",
- " hps.data.sampling_rate,\n",
- " hps.data.hop_length,\n",
- " hps.data.win_length,\n",
- " hps.data.mel_fmin,\n",
- " hps.data.mel_fmax\n",
- " )\n",
- " # src\n",
- " wav_src, _ = librosa.load(src, sr=hps.data.sampling_rate)\n",
- " wav_src = torch.from_numpy(wav_src).unsqueeze(0).to(device)\n",
- " c = cmodel(wav_src).last_hidden_state.transpose(1, 2).to(device)\n",
- " # infer\n",
- " if model == \"FreeVC\":\n",
- " audio = freevc.infer(c, g=g_tgt)\n",
- " elif model == \"FreeVC-s\":\n",
- " audio = freevc_s.infer(c, mel=mel_tgt)\n",
- " else:\n",
- " audio = freevc_24.infer(c, g=g_tgt)\n",
- " audio = audio[0][0].data.cpu().float().numpy()\n",
- " if model == \"FreeVC\" or model == \"FreeVC-s\":\n",
- " write(\"out.wav\", hps.data.sampling_rate, audio)\n",
- " else:\n",
- " write(\"out.wav\", 24000, audio)\n",
- " out = \"out.wav\"\n",
- " return out\n",
- "\n",
- "# GLM2\n",
- "\n",
- "language_dict = tts_order_voice\n",
- "\n",
- "# fix timezone in Linux\n",
- "os.environ[\"TZ\"] = \"Asia/Shanghai\"\n",
- "try:\n",
- " time.tzset() # type: ignore # pylint: disable=no-member\n",
- "except Exception:\n",
- " # Windows\n",
- " logger.warning(\"Windows, cant run time.tzset()\")\n",
- "\n",
- "# model_name = \"THUDM/chatglm2-6b\"\n",
- "model_name = \"THUDM/chatglm2-6b\"\n",
- "\n",
- "RETRY_FLAG = False\n",
- "\n",
- "tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)\n",
- "\n",
- "# model = AutoModel.from_pretrained(model_name, trust_remote_code=True).cuda()\n",
- "\n",
- "# 4/8 bit\n",
- "# model = AutoModel.from_pretrained(\"THUDM/chatglm2-6b\", trust_remote_code=True).quantize(4).cuda()\n",
- "\n",
- "has_cuda = torch.cuda.is_available()\n",
- "\n",
- "# has_cuda = False # force cpu\n",
- "\n",
- "\n",
- "model_glm = (\n",
- " AutoModel.from_pretrained(model_name, trust_remote_code=True).cuda().half()\n",
- ") # 3.92G\n",
- "\n",
- "model_glm = model_glm.eval()\n",
- "\n",
- "_ = \"\"\"Override Chatbot.postprocess\"\"\"\n",
- "\n",
- "\n",
- "def postprocess(self, y):\n",
- " if y is None:\n",
- " return []\n",
- " for i, (message, response) in enumerate(y):\n",
- " y[i] = (\n",
- " None if message is None else mdtex2html.convert((message)),\n",
- " None if response is None else mdtex2html.convert(response),\n",
- " )\n",
- " return y\n",
- "\n",
- "\n",
- "gr.Chatbot.postprocess = postprocess\n",
- "\n",
- "\n",
- "def parse_text(text):\n",
- " \"\"\"copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/\"\"\"\n",
- " lines = text.split(\"\\n\")\n",
- " lines = [line for line in lines if line != \"\"]\n",
- " count = 0\n",
- " for i, line in enumerate(lines):\n",
- " if \"```\" in line:\n",
- " count += 1\n",
- " items = line.split(\"`\")\n",
- " if count % 2 == 1:\n",
- " lines[i] = f''\n",
- " else:\n",
- " lines[i] = \"
\"\n",
- " else:\n",
- " if i > 0:\n",
- " if count % 2 == 1:\n",
- " line = line.replace(\"`\", r\"\\`\")\n",
- " line = line.replace(\"<\", \"<\")\n",
- " line = line.replace(\">\", \">\")\n",
- " line = line.replace(\" \", \" \")\n",
- " line = line.replace(\"*\", \"*\")\n",
- " line = line.replace(\"_\", \"_\")\n",
- " line = line.replace(\"-\", \"-\")\n",
- " line = line.replace(\".\", \".\")\n",
- " line = line.replace(\"!\", \"!\")\n",
- " line = line.replace(\"(\", \"(\")\n",
- " line = line.replace(\")\", \")\")\n",
- " line = line.replace(\"$\", \"$\")\n",
- " lines[i] = \"
\" + line\n",
- " text = \"\".join(lines)\n",
- " return text\n",
- "\n",
- "\n",
- "def predict(\n",
- " RETRY_FLAG, input, chatbot, max_length, top_p, temperature, history, past_key_values\n",
- "):\n",
- " try:\n",
- " chatbot.append((parse_text(input), \"\"))\n",
- " except Exception as exc:\n",
- " logger.error(exc)\n",
- " logger.debug(f\"{chatbot=}\")\n",
- " _ = \"\"\"\n",
- " if chatbot:\n",
- " chatbot[-1] = (parse_text(input), str(exc))\n",
- " yield chatbot, history, past_key_values\n",
- " # \"\"\"\n",
- " yield chatbot, history, past_key_values\n",
- "\n",
- " for response, history, past_key_values in model_glm.stream_chat(\n",
- " tokenizer,\n",
- " input,\n",
- " history,\n",
- " past_key_values=past_key_values,\n",
- " return_past_key_values=True,\n",
- " max_length=max_length,\n",
- " top_p=top_p,\n",
- " temperature=temperature,\n",
- " ):\n",
- " chatbot[-1] = (parse_text(input), parse_text(response))\n",
- " # chatbot[-1][-1] = parse_text(response)\n",
- "\n",
- " yield chatbot, history, past_key_values, parse_text(response)\n",
- "\n",
- "\n",
- "def trans_api(input, max_length=4096, top_p=0.8, temperature=0.2):\n",
- " if max_length < 10:\n",
- " max_length = 4096\n",
- " if top_p < 0.1 or top_p > 1:\n",
- " top_p = 0.85\n",
- " if temperature <= 0 or temperature > 1:\n",
- " temperature = 0.01\n",
- " try:\n",
- " res, _ = model_glm.chat(\n",
- " tokenizer,\n",
- " input,\n",
- " history=[],\n",
- " past_key_values=None,\n",
- " max_length=max_length,\n",
- " top_p=top_p,\n",
- " temperature=temperature,\n",
- " )\n",
- " # logger.debug(f\"{res=} \\n{_=}\")\n",
- " except Exception as exc:\n",
- " logger.error(f\"{exc=}\")\n",
- " res = str(exc)\n",
- "\n",
- " return res\n",
- "\n",
- "\n",
- "def reset_user_input():\n",
- " return gr.update(value=\"\")\n",
- "\n",
- "\n",
- "def reset_state():\n",
- " return [], [], None, \"\"\n",
- "\n",
- "\n",
- "# Delete last turn\n",
- "def delete_last_turn(chat, history):\n",
- " if chat and history:\n",
- " chat.pop(-1)\n",
- " history.pop(-1)\n",
- " return chat, history\n",
- "\n",
- "\n",
- "# Regenerate response\n",
- "def retry_last_answer(\n",
- " user_input, chatbot, max_length, top_p, temperature, history, past_key_values\n",
- "):\n",
- " if chatbot and history:\n",
- " # Removing the previous conversation from chat\n",
- " chatbot.pop(-1)\n",
- " # Setting up a flag to capture a retry\n",
- " RETRY_FLAG = True\n",
- " # Getting last message from user\n",
- " user_input = history[-1][0]\n",
- " # Removing bot response from the history\n",
- " history.pop(-1)\n",
- "\n",
- " yield from predict(\n",
- " RETRY_FLAG, # type: ignore\n",
- " user_input,\n",
- " chatbot,\n",
- " max_length,\n",
- " top_p,\n",
- " temperature,\n",
- " history,\n",
- " past_key_values,\n",
- " )\n",
- "\n",
- "# print\n",
- "\n",
- "def print(text):\n",
- " return text\n",
- "\n",
- "# TTS\n",
- "\n",
- "async def text_to_speech_edge(text, language_code):\n",
- " voice = language_dict[language_code]\n",
- " communicate = edge_tts.Communicate(text, voice)\n",
- " with tempfile.NamedTemporaryFile(delete=False, suffix=\".mp3\") as tmp_file:\n",
- " tmp_path = tmp_file.name\n",
- "\n",
- " await communicate.save(tmp_path)\n",
- "\n",
- " return tmp_path\n",
- "\n",
- "\n",
- "with gr.Blocks(title=\"ChatGLM2-6B-int4\", theme=gr.themes.Soft(text_size=\"sm\")) as demo:\n",
- " gr.HTML(\"\"\n",
- " \"🥳💕🎶 - ChatGLM2 + 声音克隆:和你喜欢的角色畅所欲言吧!
\"\n",
- " \"\")\n",
- " gr.Markdown(\"## 💡 - 第二代ChatGLM大语言模型 + FreeVC变声,为您打造独一无二的沉浸式对话体验,支持中英双语\")\n",
- " gr.Markdown(\"## 🌊 - 更多精彩应用,尽在[滔滔AI](http://www.talktalkai.com);滔滔AI,为爱滔滔!💕\")\n",
- " gr.Markdown(\"### ⭐ - 如果您喜欢这个程序,欢迎给我的[Github项目](https://github.com/KevinWang676/ChatGLM2-Voice-Cloning)点赞支持!\")\n",
- " with gr.Tab(\"Chat\"):\n",
- " with gr.Accordion(\"📒 相关信息\", open=False):\n",
- " _ = f\"\"\" ChatGLM2的可选参数信息:\n",
- " * Low temperature: responses will be more deterministic and focused; High temperature: responses more creative.\n",
- " * Suggested temperatures -- translation: up to 0.3; chatting: > 0.4\n",
- " * Top P controls dynamic vocabulary selection based on context.\\n\n",
- " 如果您想让ChatGLM2进行角色扮演并与之对话,请先输入恰当的提示词,如“请你扮演成动漫角色蜡笔小新并和我进行对话”;您也可以为ChatGLM2提供自定义的角色设定\\n\n",
- " 当您使用声音克隆功能时,请先在此程序的对应位置上传一段您喜欢的音频\n",
- " \"\"\"\n",
- " gr.Markdown(dedent(_))\n",
- " chatbot = gr.Chatbot(height=300)\n",
- " with gr.Row():\n",
- " with gr.Column(scale=4):\n",
- " with gr.Column(scale=12):\n",
- " user_input = gr.Textbox(\n",
- " label=\"请在此处和GLM2聊天 (按回车键即可发送)\",\n",
- " placeholder=\"聊点什么吧\",\n",
- " )\n",
- " RETRY_FLAG = gr.Checkbox(value=False, visible=False)\n",
- " with gr.Column(min_width=32, scale=1):\n",
- " with gr.Row():\n",
- " submitBtn = gr.Button(\"开始和GLM2交流吧\", variant=\"primary\")\n",
- " deleteBtn = gr.Button(\"删除最新一轮对话\", variant=\"secondary\")\n",
- " retryBtn = gr.Button(\"重新生成最新一轮对话\", variant=\"secondary\")\n",
- " \n",
- " with gr.Accordion(\"🔧 更多设置\", open=False):\n",
- " with gr.Row():\n",
- " emptyBtn = gr.Button(\"清空所有聊天记录\")\n",
- " max_length = gr.Slider(\n",
- " 0,\n",
- " 32768,\n",
- " value=8192,\n",
- " step=1.0,\n",
- " label=\"Maximum length\",\n",
- " interactive=True,\n",
- " )\n",
- " top_p = gr.Slider(\n",
- " 0, 1, value=0.85, step=0.01, label=\"Top P\", interactive=True\n",
- " )\n",
- " temperature = gr.Slider(\n",
- " 0.01, 1, value=0.95, step=0.01, label=\"Temperature\", interactive=True\n",
- " )\n",
- "\n",
- "\n",
- " with gr.Row():\n",
- " test1 = gr.Textbox(label=\"GLM2的最新回答 (可编���)\", lines = 3)\n",
- " with gr.Column():\n",
- " language = gr.Dropdown(choices=list(language_dict.keys()), value=\"普通话 (中国大陆)-Xiaoxiao-女\", label=\"请选择文本对应的语言及您喜欢的说话人\")\n",
- " tts_btn = gr.Button(\"生成对应的音频吧\", variant=\"primary\")\n",
- " output_audio = gr.Audio(type=\"filepath\", label=\"为您生成的音频\", interactive=False)\n",
- " \n",
- " tts_btn.click(text_to_speech_edge, inputs=[test1, language], outputs=[output_audio])\n",
- " \n",
- " with gr.Row():\n",
- " model_choice = gr.Dropdown(choices=[\"FreeVC\", \"FreeVC-s\", \"FreeVC (24kHz)\"], value=\"FreeVC (24kHz)\", label=\"Model\", visible=False) \n",
- " audio1 = output_audio\n",
- " audio2 = gr.Audio(label=\"请上传您喜欢的声音进行声音克隆\", type='filepath')\n",
- " clone_btn = gr.Button(\"开始AI声音克隆吧\", variant=\"primary\")\n",
- " audio_cloned = gr.Audio(label=\"为您生成的专属声音克隆音频\", type='filepath')\n",
- " \n",
- " clone_btn.click(convert, inputs=[model_choice, audio1, audio2], outputs=[audio_cloned])\n",
- " \n",
- " history = gr.State([])\n",
- " past_key_values = gr.State(None)\n",
- " \n",
- " user_input.submit(\n",
- " predict,\n",
- " [\n",
- " RETRY_FLAG,\n",
- " user_input,\n",
- " chatbot,\n",
- " max_length,\n",
- " top_p,\n",
- " temperature,\n",
- " history,\n",
- " past_key_values,\n",
- " ],\n",
- " [chatbot, history, past_key_values, test1],\n",
- " show_progress=\"full\",\n",
- " )\n",
- " submitBtn.click(\n",
- " predict,\n",
- " [\n",
- " RETRY_FLAG,\n",
- " user_input,\n",
- " chatbot,\n",
- " max_length,\n",
- " top_p,\n",
- " temperature,\n",
- " history,\n",
- " past_key_values,\n",
- " ],\n",
- " [chatbot, history, past_key_values, test1],\n",
- " show_progress=\"full\",\n",
- " api_name=\"predict\",\n",
- " )\n",
- " submitBtn.click(reset_user_input, [], [user_input])\n",
- " \n",
- " emptyBtn.click(\n",
- " reset_state, outputs=[chatbot, history, past_key_values, test1], show_progress=\"full\"\n",
- " )\n",
- "\n",
- " retryBtn.click(\n",
- " retry_last_answer,\n",
- " inputs=[\n",
- " user_input,\n",
- " chatbot,\n",
- " max_length,\n",
- " top_p,\n",
- " temperature,\n",
- " history,\n",
- " past_key_values,\n",
- " ],\n",
- " # outputs = [chatbot, history, last_user_message, user_message]\n",
- " outputs=[chatbot, history, past_key_values, test1],\n",
- " )\n",
- " deleteBtn.click(delete_last_turn, [chatbot, history], [chatbot, history])\n",
- "\n",
- " with gr.Accordion(\"📔 提示词示例\", open=False):\n",
- " etext = \"\"\"In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. \"\"\"\n",
- " examples = gr.Examples(\n",
- " examples=[\n",
- " [\"Explain the plot of Cinderella in a sentence.\"],\n",
- " [\n",
- " \"How long does it take to become proficient in French, and what are the best methods for retaining information?\"\n",
- " ],\n",
- " [\"What are some common mistakes to avoid when writing code?\"],\n",
- " [\"Build a prompt to generate a beautiful portrait of a horse\"],\n",
- " [\"Suggest four metaphors to describe the benefits of AI\"],\n",
- " [\"Write a pop song about leaving home for the sandy beaches.\"],\n",
- " [\"Write a summary demonstrating my ability to tame lions\"],\n",
- " [\"鲁迅和周树人什么关系\"],\n",
- " [\"从前有一头牛,这头牛后面有什么?\"],\n",
- " [\"正无穷大加一大于正无穷大吗?\"],\n",
- " [\"正无穷大加正无穷大大于正无穷大吗?\"],\n",
- " [\"-2的平方根等于什么\"],\n",
- " [\"树上有5只鸟,猎人开枪打死了一只。树上还��几只鸟?\"],\n",
- " [\"树上有11只鸟,猎人开枪打死了一只。树上还有几只鸟?提示:需考虑鸟可能受惊吓飞走。\"],\n",
- " [\"鲁迅和周树人什么关系 用英文回答\"],\n",
- " [\"以红楼梦的行文风格写一张委婉的请假条。不少于320字。\"],\n",
- " [f\"{etext} 翻成中文,列出3个版本\"],\n",
- " [f\"{etext} \\n 翻成中文,保留原意,但使用文学性的语言。不要写解释。列出3个版本\"],\n",
- " [\"js 判断一个数是不是质数\"],\n",
- " [\"js 实现python 的 range(10)\"],\n",
- " [\"js 实现python 的 [*(range(10)]\"],\n",
- " [\"假定 1 + 2 = 4, 试求 7 + 8\"],\n",
- " [\"Erkläre die Handlung von Cinderella in einem Satz.\"],\n",
- " [\"Erkläre die Handlung von Cinderella in einem Satz. Auf Deutsch\"],\n",
- " ],\n",
- " inputs=[user_input],\n",
- " examples_per_page=30,\n",
- " )\n",
- " \n",
- " with gr.Accordion(\"For Chat/Translation API\", open=False, visible=False):\n",
- " input_text = gr.Text()\n",
- " tr_btn = gr.Button(\"Go\", variant=\"primary\")\n",
- " out_text = gr.Text()\n",
- " tr_btn.click(\n",
- " trans_api,\n",
- " [input_text, max_length, top_p, temperature],\n",
- " out_text,\n",
- " # show_progress=\"full\",\n",
- " api_name=\"tr\",\n",
- " )\n",
- " _ = \"\"\"\n",
- " input_text.submit(\n",
- " trans_api,\n",
- " [input_text, max_length, top_p, temperature],\n",
- " out_text,\n",
- " show_progress=\"full\",\n",
- " api_name=\"tr1\",\n",
- " )\n",
- " # \"\"\"\n",
- "\n",
- " with gr.Tab(\"Video\"):\n",
- " with gr.Row().style(equal_height=False):\n",
- " with gr.Column(variant='panel'):\n",
- " with gr.Tabs(elem_id=\"sadtalker_source_image\"):\n",
- " with gr.TabItem('Upload image'):\n",
- " with gr.Row():\n",
- " source_image = gr.Image(label=\"Source image\", source=\"upload\", type=\"filepath\", elem_id=\"img2img_image\").style(width=512)\n",
- " \n",
- " with gr.Tabs(elem_id=\"sadtalker_driven_audio\"):\n",
- " with gr.TabItem('Upload OR TTS'):\n",
- " with gr.Column(variant='panel'):\n",
- " driven_audio = gr.Audio(label=\"Input audio\", source=\"upload\", type=\"filepath\")\n",
- " \n",
- " with gr.Column(variant='panel'): \n",
- " with gr.Tabs(elem_id=\"sadtalker_checkbox\"):\n",
- " with gr.TabItem('Settings'):\n",
- " gr.Markdown(\"need help? please visit our [best practice page](https://github.com/OpenTalker/SadTalker/blob/main/docs/best_practice.md) for more detials\")\n",
- " with gr.Column(variant='panel'):\n",
- " # width = gr.Slider(minimum=64, elem_id=\"img2img_width\", maximum=2048, step=8, label=\"Manually Crop Width\", value=512) # img2img_width\n",
- " # height = gr.Slider(minimum=64, elem_id=\"img2img_height\", maximum=2048, step=8, label=\"Manually Crop Height\", value=512) # img2img_width\n",
- " pose_style = gr.Slider(minimum=0, maximum=46, step=1, label=\"Pose style\", value=0) # \n",
- " size_of_image = gr.Radio([256, 512], value=256, label='face model resolution', info=\"use 256/512 model?\") # \n",
- " preprocess_type = gr.Radio(['crop', 'resize','full', 'extcrop', 'extfull'], value='crop', label='preprocess', info=\"How to handle input image?\")\n",
- " is_still_mode = gr.Checkbox(label=\"Still Mode (fewer hand motion, works with preprocess `full`)\")\n",
- " batch_size = gr.Slider(label=\"batch size in generation\", step=1, maximum=10, value=2)\n",
- " enhancer = gr.Checkbox(label=\"GFPGAN as Face enhancer\")\n",
- " submit = gr.Button('Generate', elem_id=\"sadtalker_generate\", variant='primary')\n",
- " \n",
- " with gr.Tabs(elem_id=\"sadtalker_genearted\"):\n",
- " gen_video = gr.Video(label=\"Generated video\", format=\"mp4\").style(width=256)\n",
- " \n",
- " submit.click(\n",
- " fn=sad_talker.test, \n",
- " inputs=[source_image,\n",
- " driven_audio,\n",
- " preprocess_type,\n",
- " is_still_mode,\n",
- " enhancer,\n",
- " batch_size, \n",
- " size_of_image,\n",
- " pose_style\n",
- " ], \n",
- " outputs=[gen_video]\n",
- " )\n",
- "\n",
- "\n",
- " gr.Markdown(\"### 注意❗:请不要生成会对个人以及组织造成侵害的内容,此程序仅供科研、学习及个人娱乐使用。\")\n",
- " gr.Markdown(\"💡 - 如何使用此程序:输入您对ChatGLM的提问后,依次点击“开始和GLM2交流吧”、“生成对应的音频吧”、“开始AI声音克隆吧”三个按键即可;使用声音克隆功能时,请先上传一段您喜欢的音频\")\n",
- " gr.HTML('''\n",
- " \n",
- " ''')\n",
- "\n",
- "\n",
- "demo.queue().launch(show_error=True, debug=True, share=True)\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "cba1dd99-c797-4459-9a06-c294f30275de",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.10.11"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}