slug: string (length 15-15)
content: list (length 1-129)
rawContent: string (length 1-2k)
author: dict
attachments: list (length 0-49)
mentions: list (length 0-49)
reactions: list (length 0-12)
publishedAt: string (length 24-24)
updatedAt: string (length 24-24)
commentators: list (length 0-52)
url: string (length 25-46)
totalUniqueImpressions: int64 (1-42.1k)
numComments: int64 (0-621)
958235956706194
[ { "type": "text", "value": "\"Multi-LoRA Composition for Image Generation\" introduces two new approaches for combining multiple visual elements in text-to-image generation using Low-Rank Adaptations (LoRAs)! 🎨", "raw": "\"Multi-LoRA Composition for Image Generation\" introduces two new approaches for combining multiple visual elements in text-to-image generation using Low-Rank Adaptations (LoRAs)! 🎨", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key Points:", "raw": "Key Points:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Proposes two methods - LoRA Switch and LoRA Composite - that activate/combine LoRAs during the denoising process rather than merging weights", "raw": "* Proposes two methods - LoRA Switch and LoRA Composite - that activate/combine LoRAs during the denoising process rather than merging weights", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* LoRA Switch cycles through different LoRAs at each step, while LoRA Composite averages guidance from all LoRAs simultaneously", "raw": "* LoRA Switch cycles through different LoRAs at each step, while LoRA Composite averages guidance from all LoRAs simultaneously", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2402.16843", "href": null, "resource": { "type": "paper", "id": "2402.16843", "discussionNum": null }, "url": "https://huggingface.co/papers/2402.16843", "code": null, "user": null, "label": "Multi-LoRA Composition for Image Generation (2402.16843)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Project page: ", "raw": "Project page: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://maszhongming.github.io/Multi-LoRA-Composition", "href": "https://maszhongming.github.io/Multi-LoRA-Composition", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, 
"raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Congrats to the authors for their work!", "raw": "Congrats to the authors for their work!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
"Multi-LoRA Composition for Image Generation" introduces two new approaches for combining multiple visual elements in text-to-image generation using Low-Rank Adaptations (LoRAs)! 🎨 Key Points: * Proposes two methods - LoRA Switch and LoRA Composite - that activate/combine LoRAs during the denoising process rather than merging weights * LoRA Switch cycles through different LoRAs at each step, while LoRA Composite averages guidance from all LoRAs simultaneously Paper: https://huggingface.co/papers/2402.16843 Project page: https://maszhongming.github.io/Multi-LoRA-Composition Congrats to the authors for their work!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/657217faabb25ed8aedd5e48/UUHAXeGtOnQBXFD3nYtf2.jpeg", "fullname": "Vlad Bogolin", "name": "vladbogo", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 109, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/657217faabb25ed8aedd5e48/Fo6wE1Snur5a3UW9t3nXr.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/657217faabb25ed8aedd5e48/Ch1zvbgxaz0I-X69tzVba.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/657217faabb25ed8aedd5e48/uTaHFO1rzmdKEFJakCHDv.png" } ]
[]
[ { "reaction": "👍", "users": [ "DmitryRyumin", "osanseviero", "samusenps", "mvaloatto", "femboysLover" ], "count": 5 } ]
2024-03-08T19:48:20.000Z
2024-03-08T19:48:20.421Z
[]
/posts/vladbogo/958235956706194
88
0
799239298270899
[ { "type": "text", "value": "Introducing ", "raw": "Introducing ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/davanstrien/cosmopedia_chat", "href": null, "resource": { "type": "dataset", "id": "davanstrien/cosmopedia_chat", "discussionNum": null }, "url": "https://huggingface.co/datasets/davanstrien/cosmopedia_chat", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " (v0.0.1), my first experiment using the new NousResearch Genstruct model ", "raw": " (v0.0.1), my first experiment using the new NousResearch Genstruct model ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/NousResearch/Genstruct-7B", "href": null, "resource": { "type": "model", "id": "NousResearch/Genstruct-7B", "discussionNum": null }, "url": "https://huggingface.co/NousResearch/Genstruct-7B", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This dataset uses a subset of ", "raw": "This dataset uses a subset of ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/HuggingFaceTB/cosmopedia", "href": null, "resource": { "type": "dataset", "id": "HuggingFaceTB/cosmopedia", "discussionNum": null }, "url": "https://huggingface.co/datasets/HuggingFaceTB/cosmopedia", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ", a synthetic textbook-quality dataset, and Genstruct to generate user/assistant response pairs.", "raw": ", a synthetic textbook-quality dataset, and Genstruct to generate user/assistant response pairs.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "My current results are mixed, but I'm excited to see how much work is happening around synthetic data generation in the community. Most crucial next step is working more on data filtering from cosmopedia. ", "raw": "My current results are mixed, but I'm excited to see how much work is happening around synthetic data generation in the community. Most crucial next step is working more on data filtering from cosmopedia. 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Massive thanks to ", "raw": "Massive thanks to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@euclaise", "href": null, "resource": null, "url": null, "code": null, "user": "euclaise", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@teknium", "href": null, "resource": null, "url": null, "code": null, "user": "teknium", "label": null, "lang": null }, { "type": "text", "value": " and the other NouseResearch folks for sharing this model ❤️", "raw": " and the other NouseResearch folks for sharing this model ❤️", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Introducing https://huggingface.co/datasets/davanstrien/cosmopedia_chat (v0.0.1), my first experiment using the new NousResearch Genstruct model https://huggingface.co/NousResearch/Genstruct-7B. This dataset uses a subset of https://huggingface.co/datasets/HuggingFaceTB/cosmopedia, a synthetic textbook-quality dataset, and Genstruct to generate user/assistant response pairs. My current results are mixed, but I'm excited to see how much work is happening around synthetic data generation in the community. The most crucial next step is working more on data filtering from cosmopedia. Massive thanks to @euclaise @teknium and the other NousResearch folks for sharing this model ❤️
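A rough sketch of the kind of generation loop behind a dataset like this, using plain transformers. The Genstruct grounded prompt format shown here (the [[[Title]]]/[[[Content]]]/[[[User]]] markers) is recalled from the model card and should be checked against it; the cosmopedia config and column names, truncation length, and sampling settings are assumptions.

```python
# Rough sketch (assumptions noted above): turn cosmopedia passages into
# user/assistant pairs with Genstruct-7B.
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "NousResearch/Genstruct-7B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

# Stream a small cosmopedia subset; the "stories" config and "text" column
# are assumptions about the dataset layout.
docs = load_dataset("HuggingFaceTB/cosmopedia", "stories", split="train", streaming=True)

# Genstruct-style grounded prompt; check the model card for the exact template.
template = (
    "[[[Title]]] {title}\n[[[Content]]] {content}\n\n"
    "The following is an interaction between a user and an AI assistant "
    "that is related to the above text.\n\n[[[User]]] "
)

for i, doc in enumerate(docs):
    prompt = template.format(title="Cosmopedia excerpt", content=doc["text"][:2000])
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    out = model.generate(**inputs, max_new_tokens=512, do_sample=True, temperature=0.7)
    print(tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
    if i == 2:  # just a few examples for the sketch
        break
```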
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg", "fullname": "Daniel van Strien", "name": "davanstrien", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 410, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64137e2150358a805203cbac/w9RQx8Q07UvgFyIZ3ce_k.jpeg", "fullname": "Jade", "name": "euclaise", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 89 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6317aade83d8d2fd903192d9/erOwgMXc_CZih3uMoyTAp.jpeg", "fullname": "Teknium", "name": "teknium", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 4267 } ]
[ { "reaction": "❤️", "users": [ "samusenps", "clem", "euclaise", "osanseviero" ], "count": 4 }, { "reaction": "😎", "users": [ "ZennyKenny" ], "count": 1 } ]
2024-03-08T17:59:48.000Z
2024-03-08T17:59:48.901Z
[]
/posts/davanstrien/799239298270899
43
0
506821771284732
[ { "type": "text", "value": "Create synthetic instruction datasets using open source LLM's and bonito🐟!", "raw": "Create synthetic instruction datasets using open source LLM's and bonito🐟!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "With Bonito, you can generate synthetic datasets for a wide variety of supported tasks. ", "raw": "With Bonito, you can generate synthetic datasets for a wide variety of supported tasks. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The Bonito model introduces a novel approach for conditional task generation, transforming unannotated text into task-specific training datasets to facilitate zero-shot adaptation of large language models on specialized data. ", "raw": "The Bonito model introduces a novel approach for conditional task generation, transforming unannotated text into task-specific training datasets to facilitate zero-shot adaptation of large language models on specialized data. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This methodology not only improves the adaptability of LLMs to new domains but also showcases the effectiveness of synthetic instruction tuning datasets in achieving substantial performance gains. ", "raw": "This methodology not only improves the adaptability of LLMs to new domains but also showcases the effectiveness of synthetic instruction tuning datasets in achieving substantial performance gains. 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "AutoBonito🐟: ", "raw": "AutoBonito🐟: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://colab.research.google.com/drive/1l9zh_VX0X4ylbzpGckCjH5yEflFsLW04?usp=sharing", "href": "https://colab.research.google.com/drive/1l9zh_VX0X4ylbzpGckCjH5yEflFsLW04?usp=sharing", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Original Repo: ", "raw": "Original Repo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/BatsResearch/bonito?tab=readme-ov-file", "href": "https://github.com/BatsResearch/bonito?tab=readme-ov-file", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2402.18334", "href": null, "resource": { "type": "paper", "id": "2402.18334", "discussionNum": null }, "url": "https://huggingface.co/papers/2402.18334", "code": null, "user": null, "label": "Learning to Generate Instruction Tuning Datasets for Zero-Shot Task\n Adaptation (2402.18334)", "lang": null } ]
Create synthetic instruction datasets using open-source LLMs and bonito🐟! With Bonito, you can generate synthetic datasets for a wide variety of supported tasks. The Bonito model introduces a novel approach for conditional task generation, transforming unannotated text into task-specific training datasets to facilitate zero-shot adaptation of large language models on specialized data. This methodology not only improves the adaptability of LLMs to new domains but also showcases the effectiveness of synthetic instruction tuning datasets in achieving substantial performance gains. AutoBonito🐟: https://colab.research.google.com/drive/1l9zh_VX0X4ylbzpGckCjH5yEflFsLW04?usp=sharing Original Repo: https://github.com/BatsResearch/bonito?tab=readme-ov-file Paper: https://huggingface.co/papers/2402.18334
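A minimal usage sketch along the lines of the project README, assuming the `bonito` package exposes a `Bonito` class (a vLLM-served model) with a `generate_tasks` method; the dataset name, column name, and parameter names below are taken from memory of that README and may differ, so treat them as assumptions and defer to the linked repo and Colab.

```python
# Sketch based on the bonito README (API names are assumptions; check the repo).
from bonito import Bonito
from vllm import SamplingParams
from datasets import load_dataset

# Load the Bonito model (a fine-tuned Mistral-7B served through vLLM).
bonito = Bonito("BatsResearch/bonito-v1")

# A small batch of unannotated passages to convert into tasks.
unannotated = load_dataset(
    "BatsResearch/bonito-experiment", "unannotated_contract_nli"
)["train"].select(range(10))

# Generate synthetic NLI-style instruction/response pairs from the raw text.
sampling_params = SamplingParams(max_tokens=256, top_p=0.95, temperature=0.5, n=1)
synthetic = bonito.generate_tasks(
    unannotated,
    context_col="input",
    task_type="nli",
    sampling_params=sampling_params,
)
print(synthetic)
```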
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6455cc8d679315e4ef16fbec/M6Cfifn05BUzkCFd2QDIT.png", "fullname": "Tim Dolan", "name": "macadeliccc", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 152, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6455cc8d679315e4ef16fbec/OUrpFONBFfoR0dbqQdq9F.png" } ]
[]
[ { "reaction": "❤️", "users": [ "samusenps", "pabloce", "clem", "nihalnayak", "damerajee", "jayomb", "nanyy1025" ], "count": 7 } ]
2024-03-08T17:40:28.000Z
2024-03-08T22:35:57.336Z
[ { "avatarUrl": "/avatars/793d0a07bc37dacc5b0a486e4bf11d7f.svg", "fullname": "Peter Kis", "name": "NePe", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6455cc8d679315e4ef16fbec/M6Cfifn05BUzkCFd2QDIT.png", "fullname": "Tim Dolan", "name": "macadeliccc", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 152, "isFollowing": false } ]
/posts/macadeliccc/506821771284732
830
2
395233681808448
[ { "type": "text", "value": "PixArt-Σ", "raw": "PixArt-Σ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Weak-to-Strong Training of Diffusion Transformer for 4K Text-to-Image Generation", "raw": "Weak-to-Strong Training of Diffusion Transformer for 4K Text-to-Image Generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2403.04692", "href": null, "resource": { "type": "paper", "id": "2403.04692", "discussionNum": null }, "url": "https://huggingface.co/papers/2403.04692", "code": null, "user": null, "label": "PixArt-Σ: Weak-to-Strong Training of Diffusion Transformer for 4K\n Text-to-Image Generation (2403.04692)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In this paper, we introduce PixArt-\\Sigma, a Diffusion Transformer model~(DiT) capable of directly generating images at 4K resolution. PixArt-\\Sigma represents a significant advancement over its predecessor, PixArt-\\alpha, offering images of markedly higher fidelity and improved alignment with text prompts. A key feature of PixArt-\\Sigma is its training efficiency. Leveraging the foundational pre-training of PixArt-\\alpha, it evolves from the ", "raw": "In this paper, we introduce PixArt-\\Sigma, a Diffusion Transformer model~(DiT) capable of directly generating images at 4K resolution. PixArt-\\Sigma represents a significant advancement over its predecessor, PixArt-\\alpha, offering images of markedly higher fidelity and improved alignment with text prompts. A key feature of PixArt-\\Sigma is its training efficiency. Leveraging the foundational pre-training of PixArt-\\alpha, it evolves from the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`weaker' baseline to a `", "href": null, "resource": null, "url": null, "code": "weaker' baseline to a ", "user": null, "label": null, "lang": null }, { "type": "text", "value": "stronger' model via incorporating higher quality data, a process we term \"weak-to-strong training\". The advancements in PixArt-\\Sigma are twofold: (1) High-Quality Training Data: PixArt-\\Sigma incorporates superior-quality image data, paired with more precise and detailed image captions. 
(2) Efficient Token Compression: we propose a novel attention module within the DiT framework that compresses both keys and values, significantly improving efficiency and facilitating ultra-high-resolution image generation. Thanks to these improvements, PixArt-\\Sigma achieves superior image quality and user prompt adherence capabilities with significantly smaller model size (0.6B parameters) than existing text-to-image diffusion models, such as SDXL (2.6B parameters) and SD Cascade (5.1B parameters). Moreover, PixArt-\\Sigma's capability to generate 4K images supports the creation of high-resolution posters and wallpapers, efficiently bolstering the production of high-quality visual content in industries such as film and gaming.", "raw": "stronger' model via incorporating higher quality data, a process we term \"weak-to-strong training\". The advancements in PixArt-\\Sigma are twofold: (1) High-Quality Training Data: PixArt-\\Sigma incorporates superior-quality image data, paired with more precise and detailed image captions. (2) Efficient Token Compression: we propose a novel attention module within the DiT framework that compresses both keys and values, significantly improving efficiency and facilitating ultra-high-resolution image generation. Thanks to these improvements, PixArt-\\Sigma achieves superior image quality and user prompt adherence capabilities with significantly smaller model size (0.6B parameters) than existing text-to-image diffusion models, such as SDXL (2.6B parameters) and SD Cascade (5.1B parameters). Moreover, PixArt-\\Sigma's capability to generate 4K images supports the creation of high-resolution posters and wallpapers, efficiently bolstering the production of high-quality visual content in industries such as film and gaming.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
PixArt-Σ Weak-to-Strong Training of Diffusion Transformer for 4K Text-to-Image Generation https://huggingface.co/papers/2403.04692 In this paper, we introduce PixArt-Σ, a Diffusion Transformer model (DiT) capable of directly generating images at 4K resolution. PixArt-Σ represents a significant advancement over its predecessor, PixArt-α, offering images of markedly higher fidelity and improved alignment with text prompts. A key feature of PixArt-Σ is its training efficiency. Leveraging the foundational pre-training of PixArt-α, it evolves from the 'weaker' baseline to a 'stronger' model via incorporating higher quality data, a process we term "weak-to-strong training". The advancements in PixArt-Σ are twofold: (1) High-Quality Training Data: PixArt-Σ incorporates superior-quality image data, paired with more precise and detailed image captions. (2) Efficient Token Compression: we propose a novel attention module within the DiT framework that compresses both keys and values, significantly improving efficiency and facilitating ultra-high-resolution image generation. Thanks to these improvements, PixArt-Σ achieves superior image quality and user prompt adherence capabilities with significantly smaller model size (0.6B parameters) than existing text-to-image diffusion models, such as SDXL (2.6B parameters) and SD Cascade (5.1B parameters). Moreover, PixArt-Σ's capability to generate 4K images supports the creation of high-resolution posters and wallpapers, efficiently bolstering the production of high-quality visual content in industries such as film and gaming.
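To make the key/value token-compression idea concrete, here is an illustrative PyTorch sketch of an attention block where keys and values are spatially downsampled before attention while queries keep full resolution. This is a generic reconstruction of the idea described in the abstract, not the paper's actual module; the 2x compression ratio and the use of a strided convolution are assumptions.

```python
# Illustrative key/value token compression for a DiT-style attention block.
# Queries keep full resolution; keys/values are downsampled to cut attention cost.
import torch
import torch.nn as nn

class KVCompressedAttention(nn.Module):
    def __init__(self, dim, num_heads=8, compress_ratio=2):
        super().__init__()
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
        # Strided conv over the 2D token grid shrinks the number of K/V tokens.
        self.kv_compress = nn.Conv2d(dim, dim, kernel_size=compress_ratio, stride=compress_ratio)

    def forward(self, x, h, w):
        # x: (batch, h*w, dim) sequence of image tokens laid out on an h x w grid.
        b, n, d = x.shape
        grid = x.transpose(1, 2).reshape(b, d, h, w)
        kv = self.kv_compress(grid).flatten(2).transpose(1, 2)  # fewer K/V tokens
        out, _ = self.attn(query=x, key=kv, value=kv)
        return out

# Example: a 64x64 latent grid, keys/values compressed 2x per side (4x fewer tokens).
x = torch.randn(1, 64 * 64, 256)
block = KVCompressedAttention(dim=256)
print(block(x, 64, 64).shape)  # torch.Size([1, 4096, 256])
```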
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/waM1MBY1WESf-j7TILYYa.png" } ]
[]
[ { "reaction": "❤️", "users": [ "samusenps", "clem", "mvaloatto", "victor", "thomwolf", "ianyeung", "DeathGodlike" ], "count": 7 }, { "reaction": "🔥", "users": [ "DeathGodlike" ], "count": 1 } ]
2024-03-08T16:15:43.000Z
2024-03-08T16:15:53.595Z
[]
/posts/akhaliq/395233681808448
131
0
523682484022119
[ { "type": "text", "value": "As promised, a video on the new Major TOM dataset on HF: ", "raw": "As promised, a video on the new Major TOM dataset on HF: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/KonWxQ1mCpA?si=tp3Cz6-F0exfgdx3", "href": "https://youtu.be/KonWxQ1mCpA?si=tp3Cz6-F0exfgdx3", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If you prefer audio, there is a podcast too: ", "raw": "If you prefer audio, there is a podcast too: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.satellite-image-deep-learning.com/p/major-tom-expandable-eo-datasets", "href": "https://www.satellite-image-deep-learning.com/p/major-tom-expandable-eo-datasets", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I think this is just the start for remote sensing datasets on HuggingFace..!", "raw": "I think this is just the start for remote sensing datasets on HuggingFace..!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
As promised, a video on the new Major TOM dataset on HF: https://youtu.be/KonWxQ1mCpA?si=tp3Cz6-F0exfgdx3 If you prefer audio, there is a podcast too: https://www.satellite-image-deep-learning.com/p/major-tom-expandable-eo-datasets I think this is just the start for remote sensing datasets on HuggingFace..!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1638541710084-noauth.png", "fullname": "Robin Cole", "name": "robmarkcole", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 28, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/61aa29997c693ae78a9dd1e2/v6zOeE5GosDL_33Wux4vn.jpeg" } ]
[]
[ { "reaction": "❤️", "users": [ "samusenps", "clem", "osanseviero", "simonMadec" ], "count": 4 }, { "reaction": "🤗", "users": [ "mikonvergence", "clem", "osanseviero" ], "count": 3 } ]
2024-03-08T16:13:02.000Z
2024-03-08T16:23:09.451Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1678741407493-6304c06eeb6d777a838eab63.png", "fullname": "Mikolaj Czerkawski", "name": "mikonvergence", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 25, "isFollowing": false } ]
/posts/robmarkcole/523682484022119
85
1
272975367405736
[ { "type": "text", "value": "🗄️ Massive data release on the HF Hub for 75 languages!", "raw": "🗄️ Massive data release on the HF Hub for 75 languages!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/datasets/BramVanroy/hplt_monolingual_v1_2", "href": "https://huggingface.co/datasets/BramVanroy/hplt_monolingual_v1_2", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In December of last year, HPLT (", "raw": "In December of last year, HPLT (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://hplt-project.org/", "href": "https://hplt-project.org/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ") released version 1.2 of their dataset. It covers web-crawled data of 75 languages!, in the raw format as well as deduplicated and cleaned sections. In total, we're talking about over 40TB of data! This data was already accessible via their website but I figured the accessibility could be improved by an integration with Hugging Face tooling. 🤗 So I added the dataset here to the Hugging Face hub, enabing direct use in your conventional training pipelines for LLMs or other language technologies. The data will automatically be downloaded and optimised with just one line of code:", "raw": ") released version 1.2 of their dataset. It covers web-crawled data of 75 languages!, in the raw format as well as deduplicated and cleaned sections. In total, we're talking about over 40TB of data! This data was already accessible via their website but I figured the accessibility could be improved by an integration with Hugging Face tooling. 🤗 So I added the dataset here to the Hugging Face hub, enabing direct use in your conventional training pipelines for LLMs or other language technologies. 
The data will automatically be downloaded and optimised with just one line of code:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```python\nload_dataset(\"BramVanroy/hplt_mono_v1_2\", \"nl_cleaned\")\n```", "href": null, "resource": null, "url": null, "code": "load_dataset(\"BramVanroy/hplt_mono_v1_2\", \"nl_cleaned\")", "user": null, "label": null, "lang": "python" }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Let's use this big blob of data to build something awesome in our languages! 🥳", "raw": "Let's use this big blob of data to build something awesome in our languages! 🥳", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🗄️ Massive data release on the HF Hub for 75 languages! https://huggingface.co/datasets/BramVanroy/hplt_monolingual_v1_2 In December of last year, HPLT (https://hplt-project.org/) released version 1.2 of their dataset. It covers web-crawled data for 75 languages, in raw form as well as in deduplicated and cleaned sections. In total, we're talking about over 40TB of data! This data was already accessible via their website, but I figured the accessibility could be improved by an integration with Hugging Face tooling. 🤗 So I added the dataset here to the Hugging Face hub, enabling direct use in your conventional training pipelines for LLMs or other language technologies. The data will automatically be downloaded and optimised with just one line of code: ```python load_dataset("BramVanroy/hplt_mono_v1_2", "nl_cleaned") ``` Let's use this big blob of data to build something awesome in our languages! 🥳
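Given the 40TB scale, it may also be worth noting that the same call works in streaming mode, so a subset can be inspected without downloading a full language dump. The config name below reuses the one from the post, and a default "train" split is assumed.

```python
# Stream the Dutch cleaned config instead of downloading it in full.
from datasets import load_dataset

ds = load_dataset("BramVanroy/hplt_mono_v1_2", "nl_cleaned", streaming=True)
for i, row in enumerate(ds["train"]):
    print(row)  # inspect a handful of documents
    if i == 4:
        break
```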
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1594192845975-5e1e17b6fcf41d740b6996a8.jpeg", "fullname": "Bram Vanroy", "name": "BramVanroy", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 173, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "stefan-it", "samusenps", "ajibawa-2023", "clem", "osanseviero", "ymoslem", "damerajee", "rafaelpierrehf" ], "count": 8 } ]
2024-03-08T14:47:52.000Z
2024-03-09T16:54:00.309Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6487239cca30096ea9f52115/HMte9wjKJgfcxsO-5vb_Q.jpeg", "fullname": "dame rajee", "name": "damerajee", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 11, "isFollowing": false } ]
/posts/BramVanroy/272975367405736
96
1
208953986335856
[ { "type": "text", "value": "We just released bitsandbytes==0.43.0 📦 , with these significant new additions:", "raw": "We just released bitsandbytes==0.43.0 📦 , with these significant new additions:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ 🛫 FSDP+QLoRA support (alpha release)", "raw": "‣ 🛫 FSDP+QLoRA support (alpha release)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ◦ now anyone with 2 powerful gaming GPUs can fine-tune 70B param models at home!", "raw": " ◦ now anyone with 2 powerful gaming GPUs can fine-tune 70B param models at home!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ◦ in collab with Jeremy Howard + team @ answer.ai", "raw": " ◦ in collab with Jeremy Howard + team @ answer.ai", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ◦ answer.ai blogpost: ", "raw": " ◦ answer.ai blogpost: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.answer.ai/posts/2024-03-06-fsdp-qlora.html", "href": "https://www.answer.ai/posts/2024-03-06-fsdp-qlora.html", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ◦ example repo: ", "raw": " ◦ example repo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/AnswerDotAI/fsdp_qlora/", "href": "https://github.com/AnswerDotAI/fsdp_qlora/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ 🌈⊞ Official Windows support", "raw": "‣ 🌈⊞ Official Windows support", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": 
" ◦ now via simple ", "raw": " ◦ now via simple ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`pip install bitsandbytes>=0.43.0`", "href": null, "resource": null, "url": null, "code": "pip install bitsandbytes>=0.43.0", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ 📄 Huge docs update:", "raw": "‣ 📄 Huge docs update:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ◦ ", "raw": " ◦ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/docs/bitsandbytes/main", "href": "https://huggingface.co/docs/bitsandbytes/main", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ◦ Be sure to check out the optimizers and the API docs", "raw": " ◦ Be sure to check out the optimizers and the API docs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ◦ ... even more upcoming ...", "raw": " ◦ ... 
even more upcoming ...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Under the hood there we have many other improvements, due to extensive maintenance activity, community contributions by super active + knowledgable volunteers ✨ 🚀 and the official sponsorship by Hugging Face that makes all this possible 🤗 ❤️ 🌍", "raw": "Under the hood there we have many other improvements, due to extensive maintenance activity, community contributions by super active + knowledgable volunteers ✨ 🚀 and the official sponsorship by Hugging Face that makes all this possible 🤗 ❤️ 🌍", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We would greatly appreciate any further community contributions, be it to help with refactorings, exterminating flaky tests, writing doc-strings, tutorials, new features. Don't be shy, just contact us and we see where this leads us:", "raw": "We would greatly appreciate any further community contributions, be it to help with refactorings, exterminating flaky tests, writing doc-strings, tutorials, new features. Don't be shy, just contact us and we see where this leads us:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/TimDettmers/bitsandbytes/discussions", "href": "https://github.com/TimDettmers/bitsandbytes/discussions", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Have a great weekend everyone!", "raw": "Have a great weekend everyone!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
We just released bitsandbytes==0.43.0 📦, with these significant new additions: ‣ 🛫 FSDP+QLoRA support (alpha release) ◦ now anyone with 2 powerful gaming GPUs can fine-tune 70B param models at home! ◦ in collab with Jeremy Howard + team @ answer.ai ◦ answer.ai blogpost: https://www.answer.ai/posts/2024-03-06-fsdp-qlora.html ◦ example repo: https://github.com/AnswerDotAI/fsdp_qlora/ ‣ 🌈⊞ Official Windows support ◦ now via simple `pip install bitsandbytes>=0.43.0` ‣ 📄 Huge docs update: ◦ https://huggingface.co/docs/bitsandbytes/main ◦ Be sure to check out the optimizers and the API docs ◦ ... even more upcoming ... Under the hood there are many other improvements, thanks to extensive maintenance activity, community contributions by super active + knowledgeable volunteers ✨ 🚀 and the official sponsorship by Hugging Face that makes all this possible 🤗 ❤️ 🌍 We would greatly appreciate any further community contributions, be it help with refactorings, exterminating flaky tests, writing doc-strings, tutorials, or new features. Don't be shy, just contact us and we'll see where this leads us: https://github.com/TimDettmers/bitsandbytes/discussions Have a great weekend everyone!
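For context, this is the kind of 4-bit (NF4) loading that bitsandbytes enables through transformers; a minimal sketch, with the model id chosen purely as an example (the full FSDP+QLoRA multi-GPU recipe itself lives in the linked answer.ai repo).

```python
# Minimal 4-bit (NF4) quantized load via bitsandbytes + transformers.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model_id = "mistralai/Mistral-7B-v0.1"  # example model, not tied to the release notes
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, quantization_config=bnb_config, device_map="auto"
)

inputs = tokenizer("bitsandbytes 0.43.0 adds", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0], skip_special_tokens=True))
```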
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/650e8d27463e7e33e95f1963/TH-6foaqFpCso1Y7NBEF4.png", "fullname": "Titus von Koeller", "name": "Titus-von-Koeller", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 41, "isFollowing": false }
[]
[]
[ { "reaction": "🤗", "users": [ "Titus-von-Koeller", "mdouglas", "clem", "osanseviero", "rganti", "ajibawa-2023", "julien-c", "t1u1", "ybelkada", "cstr", "tolgacangoz", "hiyouga", "danielus", "smangrul" ], "count": 14 }, { "reaction": "❤️", "users": [ "Djruffkutz", "clem", "osanseviero", "samusenps", "julien-c", "Theli", "ybelkada", "hiyouga", "smangrul" ], "count": 9 } ]
2024-03-08T13:59:26.000Z
2024-03-08T20:45:01.664Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false } ]
/posts/Titus-von-Koeller/208953986335856
206
1
175670822281132
[ { "type": "text", "value": "Speaking of the missing piece in today’s generative AI: Reasoning (or more appropriately, the proper use of Common-Sense)", "raw": "Speaking of the missing piece in today’s generative AI: Reasoning (or more appropriately, the proper use of Common-Sense)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Human Intelligence is hinged on the brain’s ability to learn vast amounts of background knowledge about the world just by passively observing it. Such common-sense information is believed to be the enabler of intelligent behavior (planning, reasoning and grounding).", "raw": "Human Intelligence is hinged on the brain’s ability to learn vast amounts of background knowledge about the world just by passively observing it. Such common-sense information is believed to be the enabler of intelligent behavior (planning, reasoning and grounding).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Unusual question: how do we actually learn common-sense knowledge?", "raw": "Unusual question: how do we actually learn common-sense knowledge?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Unusual opinion: I personally believe we haven’t fully understand how the brain learns, thus cannot get machines to mimic how we learn.", "raw": "Unusual opinion: I personally believe we haven’t fully understand how the brain learns, thus cannot get machines to mimic how we learn.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Well so far AI godfather Prof. Yann have quite a promising vision of how machines can learn world models like we humans do. Excited to share his vision after giving I-JEPA a read.", "raw": "Well so far AI godfather Prof. Yann have quite a promising vision of how machines can learn world models like we humans do. 
Excited to share his vision after giving I-JEPA a read.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I-JEPA (Image-based Joint-Embedding Predictive Architecture) is a novel approach to self-supervised learning from images. This method focuses on learning semantic image features without relying on pre-set rules based on manual data changes. Instead, I-JEPA predicts the representations of multiple target blocks within a single image using a single context block.", "raw": "I-JEPA (Image-based Joint-Embedding Predictive Architecture) is a novel approach to self-supervised learning from images. This method focuses on learning semantic image features without relying on pre-set rules based on manual data changes. Instead, I-JEPA predicts the representations of multiple target blocks within a single image using a single context block.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The I-JEPA architecture consists of a context encoder, a target encoder, and a predictor. The context encoder extracts context features from a context block, while the target encoder extracts target features from the target blocks. The predictor then uses the context features to predict the target features.", "raw": "The I-JEPA architecture consists of a context encoder, a target encoder, and a predictor. The context encoder extracts context features from a context block, while the target encoder extracts target features from the target blocks. The predictor then uses the context features to predict the target features.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "One of the main advantages of I-JEPA is that it is non-generative, meaning it does not rely on pre-set rules based on manual data changes. It also uses multi-block masking, which allows it to learn semantic representations more effectively.", "raw": "One of the main advantages of I-JEPA is that it is non-generative, meaning it does not rely on pre-set rules based on manual data changes. 
It also uses multi-block masking, which allows it to learn semantic representations more effectively.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is very promising, hopefully we can look back this one day and amuse at how we got it right.", "raw": "This is very promising, hopefully we can look back this one day and amuse at how we got it right.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2301.08243", "href": "https://arxiv.org/abs/2301.08243", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Code: ", "raw": "Code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/facebookresearch/ijepa", "href": "https://github.com/facebookresearch/ijepa", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Speaking of the missing piece in today’s generative AI: Reasoning (or more appropriately, the proper use of common sense) Human intelligence hinges on the brain’s ability to learn vast amounts of background knowledge about the world just by passively observing it. Such common-sense information is believed to be the enabler of intelligent behavior (planning, reasoning and grounding). Unusual question: how do we actually learn common-sense knowledge? Unusual opinion: I personally believe we haven’t fully understood how the brain learns, and thus cannot get machines to mimic how we learn. Well, so far AI godfather Prof. Yann LeCun has quite a promising vision of how machines can learn world models the way we humans do. Excited to share his vision after giving I-JEPA a read. I-JEPA (Image-based Joint-Embedding Predictive Architecture) is a novel approach to self-supervised learning from images. This method focuses on learning semantic image features without relying on pre-set rules based on manual data changes. Instead, I-JEPA predicts the representations of multiple target blocks within a single image using a single context block. The I-JEPA architecture consists of a context encoder, a target encoder, and a predictor. The context encoder extracts context features from a context block, while the target encoder extracts target features from the target blocks. The predictor then uses the context features to predict the target features. One of the main advantages of I-JEPA is that it is non-generative, meaning it does not rely on pre-set rules based on manual data changes. It also uses multi-block masking, which allows it to learn semantic representations more effectively. This is very promising; hopefully we can look back on this one day and be amused at how we got it right. Paper: https://arxiv.org/abs/2301.08243 Code: https://github.com/facebookresearch/ijepa
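A schematic PyTorch sketch of the training step as described above (context encoder → predictor → regression onto target-encoder features, with the target encoder kept as an exponential moving average of the context encoder). The module shapes, masking, loss choice, and EMA rate are simplified assumptions, not the released facebookresearch/ijepa code.

```python
# Schematic I-JEPA-style step: predict target-block representations from a context block.
import copy
import torch
import torch.nn.functional as F

context_encoder = torch.nn.Sequential(torch.nn.Linear(768, 512), torch.nn.GELU(), torch.nn.Linear(512, 512))
predictor = torch.nn.Sequential(torch.nn.Linear(512, 512), torch.nn.GELU(), torch.nn.Linear(512, 512))
target_encoder = copy.deepcopy(context_encoder)  # EMA copy, never trained by gradients
for p in target_encoder.parameters():
    p.requires_grad_(False)

opt = torch.optim.AdamW(list(context_encoder.parameters()) + list(predictor.parameters()), lr=1e-4)

def train_step(context_tokens, target_tokens, ema=0.996):
    # context_tokens / target_tokens: (batch, num_patches, 768) patch embeddings
    ctx = context_encoder(context_tokens).mean(dim=1)        # summary of the context block
    with torch.no_grad():
        tgt = target_encoder(target_tokens).mean(dim=1)      # representation to predict
    loss = F.mse_loss(predictor(ctx), tgt)                   # predict in representation space
    opt.zero_grad()
    loss.backward()
    opt.step()
    # EMA update of the target encoder from the context encoder
    with torch.no_grad():
        for p_t, p_c in zip(target_encoder.parameters(), context_encoder.parameters()):
            p_t.mul_(ema).add_(p_c, alpha=1 - ema)
    return loss.item()

print(train_step(torch.randn(8, 64, 768), torch.randn(8, 16, 768)))
```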
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg", "fullname": "Jaward Sesay", "name": "Jaward", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 191, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/1UmImSDP6ssP_12asIKH4.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/fmIuwqxOMUDxAX1ayX7VS.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/Q282IiI0HSk2tFMarNpIE.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/h-cgceOeFnyxmd6urPBNw.jpeg" } ]
[]
[ { "reaction": "❤️", "users": [ "Djruffkutz", "clem", "osanseviero", "samusenps" ], "count": 4 } ]
2024-03-08T12:27:18.000Z
2024-03-08T12:27:18.601Z
[]
/posts/Jaward/175670822281132
46
0
420196569261406
[ { "type": "text", "value": "People in Paris 🇫🇷 🥐", "raw": "People in Paris 🇫🇷 🥐", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Next week we'll be hosting our first Post-Deployment Data Science Meetup in Paris!", "raw": "Next week we'll be hosting our first Post-Deployment Data Science Meetup in Paris!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "My boss will be talking about Quantifying the Impact of Data Drift on Model ", "raw": "My boss will be talking about Quantifying the Impact of Data Drift on Model ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Performance. 👀", "raw": "Performance. 👀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The event is completely free, and there's only space for 50 people, so if you are interested, RSVP as soon as possible 🤗", "raw": "The event is completely free, and there's only space for 50 people, so if you are interested, RSVP as soon as possible 🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🗓️ Thursday, March 14", "raw": "🗓️ Thursday, March 14", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🕠 5:30 PM - 8:30 PM GMT+1", "raw": "🕠 5:30 PM - 8:30 PM GMT+1", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 RSVP: ", "raw": "🔗 RSVP: ", "href": null, "resource": null, "url": null, "code": null, 
"user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://lu.ma/postdeploymentparis", "href": "https://lu.ma/postdeploymentparis", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
People in Paris 🇫🇷 🥐 Next week we'll be hosting our first Post-Deployment Data Science Meetup in Paris! My boss will be talking about Quantifying the Impact of Data Drift on Model Performance. 👀 The event is completely free, and there's only space for 50 people, so if you are interested, RSVP as soon as possible 🤗 🗓️ Thursday, March 14 🕠 5:30 PM - 8:30 PM GMT+1 🔗 RSVP: https://lu.ma/postdeploymentparis
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1657144463525-629a173153a72d997d3f57d0.jpeg", "fullname": "Santiago Viquez", "name": "santiviquez", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 84, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/629a173153a72d997d3f57d0/Bo4O0V-Fgsg87dN8wHACI.jpeg" } ]
[]
[ { "reaction": "❤️", "users": [ "clem", "sbrandeis" ], "count": 2 } ]
2024-03-08T09:56:26.000Z
2024-03-08T09:56:26.308Z
[]
/posts/santiviquez/420196569261406
98
0
170929653134354
[ { "type": "text", "value": "🚀🎭🌟 New Research Alert - ICLR 2024! 🌟🎭 🚀", "raw": "🚀🎭🌟 New Research Alert - ICLR 2024! 🌟🎭 🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📄 Real3D-Portrait: One-shot Realistic 3D Talking Portrait Synthesis 🌟🚀", "raw": "📄 Real3D-Portrait: One-shot Realistic 3D Talking Portrait Synthesis 🌟🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👥 Authors: Zhenhui Ye et al.", "raw": "👥 Authors: Zhenhui Ye et al.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📅 Conference: ICLR, May 7-11, 2024 | Vienna, Austria 🇦🇹", "raw": "📅 Conference: ICLR, May 7-11, 2024 | Vienna, Austria 🇦🇹", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Paper: ", "raw": "🔗 Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2401.08503", "href": null, "resource": { "type": "paper", "id": "2401.08503", "discussionNum": null }, "url": "https://huggingface.co/papers/2401.08503", "code": null, "user": null, "label": "Real3D-Portrait: One-shot Realistic 3D Talking Portrait Synthesis (2401.08503)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Github Page: ", "raw": "🔗 Github Page: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://real3dportrait.github.io/", "href": "https://real3dportrait.github.io/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Repository: ", "raw": "🔗 Repository: ", "href": null, "resource": null, "url": null, "code": 
null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/yerfor/Real3DPortrait", "href": "https://github.com/yerfor/Real3DPortrait", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔥 Model 🤖: ", "raw": "🔥 Model 🤖: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/ameerazam08/Real3DPortrait", "href": null, "resource": { "type": "model", "id": "ameerazam08/Real3DPortrait", "discussionNum": null }, "url": "https://huggingface.co/ameerazam08/Real3DPortrait", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📚 More Papers: more cutting-edge research presented at other conferences in the ", "raw": "📚 More Papers: more cutting-edge research presented at other conferences in the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "href": null, "resource": { "type": "space", "id": "DmitryRyumin/NewEraAI-Papers", "discussionNum": null }, "url": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " curated by ", "raw": " curated by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@DmitryRyumin", "href": null, "resource": null, "url": null, "code": null, "user": "DmitryRyumin", "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🚀 Added to the Avatars Collection: ", "raw": "🚀 Added to the Avatars Collection: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "href": null, "resource": { "type": "collection", "id": "DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "discussionNum": null }, "url": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": 
null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔍 Keywords: #Real3D-Potrait #I2P #HTB-SR #A2M #Synthesis #LipSyncing #HighResolutionVideos #ICLR2024 #DeepLearning #Animation #Innovation", "raw": "🔍 Keywords: #Real3D-Potrait #I2P #HTB-SR #A2M #Synthesis #LipSyncing #HighResolutionVideos #ICLR2024 #DeepLearning #Animation #Innovation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🚀🎭🌟 New Research Alert - ICLR 2024! 🌟🎭 🚀 📄 Real3D-Portrait: One-shot Realistic 3D Talking Portrait Synthesis 🌟🚀 👥 Authors: Zhenhui Ye et al. 📅 Conference: ICLR, May 7-11, 2024 | Vienna, Austria 🇦🇹 🔗 Paper: https://huggingface.co/papers/2401.08503 🔗 Github Page: https://real3dportrait.github.io/ 🔗 Repository: https://github.com/yerfor/Real3DPortrait 🔥 Model 🤖: https://huggingface.co/ameerazam08/Real3DPortrait 📚 More Papers: more cutting-edge research presented at other conferences in the https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin 🚀 Added to the Avatars Collection: https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36 🔍 Keywords: #Real3D-Potrait #I2P #HTB-SR #A2M #Synthesis #LipSyncing #HighResolutionVideos #ICLR2024 #DeepLearning #Animation #Innovation
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/hp9r0AuATx-dqbKua1ZmN.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/KsI9WwnpF8ZZYcnFf4Lo9.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/kD1TQbt7L1_ZJTDvY1iz6.png" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/UM03vFvrA3Q3GGcjnkeA7.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/NAZ-Yequkxiv_FLM7dbHd.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/kxBkO7Fq-FFMRiXxqDapI.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/7DLY_-mQ2HcrRMCrfj1ta.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/JTBP8H8ictrrmBAadoUzj.mp4" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377 } ]
[ { "reaction": "❤️", "users": [ "DmitryRyumin", "samusenps", "clem", "osanseviero", "MiSTe-R", "Saba99" ], "count": 6 }, { "reaction": "👍", "users": [ "samusenps", "clem", "osanseviero", "DmitryRyumin", "MiSTe-R" ], "count": 5 } ]
2024-03-08T09:51:34.000Z
2024-03-08T09:51:34.799Z
[]
/posts/DmitryRyumin/170929653134354
163
0
711011176323862
[ { "type": "text", "value": " The past year has seen a rapid pace of delivery of open-to-use Large Language Models (LLMs) for Generative AI. Simultaneously, a number of service providers have bootstrapped, which provide cloud based APIs for generative inference using these LLMs. The list is long: Together.AI, Fireworks.AI, Mosaic ML (Databricks), Anyscale , Perplexity Labs... Assuming that you are fine with privacy and security issues that sending your data to a cloud hosted solution bring, these providers can help launch your Generative AI use case with little friction.", "raw": " The past year has seen a rapid pace of delivery of open-to-use Large Language Models (LLMs) for Generative AI. Simultaneously, a number of service providers have bootstrapped, which provide cloud based APIs for generative inference using these LLMs. The list is long: Together.AI, Fireworks.AI, Mosaic ML (Databricks), Anyscale , Perplexity Labs... Assuming that you are fine with privacy and security issues that sending your data to a cloud hosted solution bring, these providers can help launch your Generative AI use case with little friction.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Given that all the cloud based providers provide access to the same/similar open models, how do you distinguish between them? One way is to look at the performance vs cost tradeoff. This post talks about performance.", "raw": "Given that all the cloud based providers provide access to the same/similar open models, how do you distinguish between them? One way is to look at the performance vs cost tradeoff. This post talks about performance.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "To understand performance, one needs benchmarks and metrics. There exist a number of benchmarks and leader boards to compare solutions. But, before you start comparing data points flying across the internet, I suggest you to consider the following.", "raw": "To understand performance, one needs benchmarks and metrics. There exist a number of benchmarks and leader boards to compare solutions. 
But, before you start comparing data points flying across the internet, I suggest you to consider the following.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1) Play with your own benchmark: You need to understand the performance for your use case, not some random workload a off-the-shelf benchmark seeks to represent and its authors dreamed off. You need to build and run your own benchmark. You could either understand and re-work the publicly available benchmarks like LLMPerf, or design your own one. Contrary to wisdom, writing a benchmark grounds up isn't hard. I wrote a simple benchmark called GenAI-Bench to compare the response time of cloud providers. You can do it too.", "raw": "1) Play with your own benchmark: You need to understand the performance for your use case, not some random workload a off-the-shelf benchmark seeks to represent and its authors dreamed off. You need to build and run your own benchmark. You could either understand and re-work the publicly available benchmarks like LLMPerf, or design your own one. Contrary to wisdom, writing a benchmark grounds up isn't hard. I wrote a simple benchmark called GenAI-Bench to compare the response time of cloud providers. You can do it too.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2) Understand the metrics you care about. Is your use case a chatbot which needs a quick turnaround for the first word? Or do you need to parse a large number of documents and get a summary over-night. Correctly identifying what you care for will help you narrow down the best provider for your needs. ", "raw": "2) Understand the metrics you care about. Is your use case a chatbot which needs a quick turnaround for the first word? Or do you need to parse a large number of documents and get a summary over-night. Correctly identifying what you care for will help you narrow down the best provider for your needs. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3) Keep generating data points periodically. Cloud providers routinely update their software. Metrics change. Measure them periodically. ", "raw": "3) Keep generating data points periodically. Cloud providers routinely update their software. Metrics change. Measure them periodically. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
The past year has seen a rapid pace of delivery of open-to-use Large Language Models (LLMs) for Generative AI. Simultaneously, a number of service providers have sprung up that provide cloud-based APIs for generative inference using these LLMs. The list is long: Together.AI, Fireworks.AI, Mosaic ML (Databricks), Anyscale, Perplexity Labs... Assuming that you are fine with the privacy and security issues that sending your data to a cloud-hosted solution brings, these providers can help launch your Generative AI use case with little friction. Given that all the cloud-based providers offer access to the same or similar open models, how do you distinguish between them? One way is to look at the performance vs. cost tradeoff. This post talks about performance. To understand performance, one needs benchmarks and metrics. There exist a number of benchmarks and leaderboards to compare solutions. But, before you start comparing data points flying across the internet, I suggest you consider the following. 1) Play with your own benchmark: You need to understand the performance for your use case, not some random workload an off-the-shelf benchmark seeks to represent and its authors dreamed up. You need to build and run your own benchmark. You could either understand and re-work publicly available benchmarks like LLMPerf, or design your own. Contrary to popular wisdom, writing a benchmark from the ground up isn't hard. I wrote a simple benchmark called GenAI-Bench to compare the response times of cloud providers. You can do it too (see the sketch below). 2) Understand the metrics you care about: Is your use case a chatbot which needs a quick turnaround for the first word? Or do you need to parse a large number of documents and get a summary overnight? Correctly identifying what you care about will help you narrow down the best provider for your needs. 3) Keep generating data points periodically: Cloud providers routinely update their software. Metrics change. Measure them periodically.
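In the spirit of point 1, here is a minimal latency probe, not the GenAI-Bench code itself. It assumes the provider exposes an OpenAI-compatible streaming /chat/completions endpoint; the base URL, model name, and API key are placeholders you would need to fill in.

```python
# Minimal latency probe for a cloud inference provider (illustrative sketch only).
# Assumptions: OpenAI-compatible streaming endpoint; BASE_URL, MODEL, API_KEY are placeholders.
import json
import time
import requests

BASE_URL = "https://api.example-provider.com/v1"   # placeholder
MODEL = "some-open-model"                           # placeholder
API_KEY = "sk-..."                                  # placeholder

def measure_once(prompt: str) -> dict:
    payload = {
        "model": MODEL,
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 256,
        "stream": True,
    }
    headers = {"Authorization": f"Bearer {API_KEY}"}
    start = time.perf_counter()
    first_chunk_at = None
    with requests.post(f"{BASE_URL}/chat/completions",
                       json=payload, headers=headers, stream=True, timeout=120) as r:
        r.raise_for_status()
        for line in r.iter_lines():
            if line and first_chunk_at is None:
                first_chunk_at = time.perf_counter()   # time to first streamed chunk
    end = time.perf_counter()
    return {
        "time_to_first_chunk_s": round(first_chunk_at - start, 3),
        "total_time_s": round(end - start, 3),
    }

if __name__ == "__main__":
    # Repeat a few times and at different hours: provider latency drifts over the day.
    for _ in range(3):
        print(json.dumps(measure_once("Summarize the plot of Hamlet in two sentences.")))
```

Run it a few times per day, per provider and per model, and keep the raw numbers: time to first chunk matters for chatbots, total time matters for overnight batch summarization.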
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63b72b906fc56e43c3bf8f30/Tl_H7W8WjJT677f81A9W-.jpeg", "fullname": "Amitabha Banerjee", "name": "hiamitabha", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "samusenps", "victor", "osanseviero", "antiven0m" ], "count": 4 }, { "reaction": "❤️", "users": [ "samusenps", "rohanp24", "clem", "osanseviero" ], "count": 4 }, { "reaction": "🧠", "users": [ "carson-together" ], "count": 1 } ]
2024-03-08T07:41:07.000Z
2024-03-08T07:41:07.004Z
[]
/posts/hiamitabha/711011176323862
35
0
791362158889035
[ { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "After the Supervised Fine-Tuning (SFT) phase, we observed a notable degradation in the instruction-following capabilities of the LLaVA Multi-Modal Large Language Model (MM-LLM). To address this issue, we introduced a 6K-entry VQA preference dataset and employed Direct Preference Optimization (DPO), alongside testing other algorithms such as Rejection Sampling and SteerLM, to enhance instruction-following proficiency. Our methodology not only fully restored the language following capabilities of LLaVa on the MT-Bench but also outperformed LLaVA-RLHF and Vicuna. Additionally, our approach extended to visual VQA tasks, as demonstrated by significant performance improvements on MM-Vet and LLaVa-Bench. An interesting observation was that, compared to models using distilled SFT, our method showed substantial out-of-distribution improvements. ", "raw": "After the Supervised Fine-Tuning (SFT) phase, we observed a notable degradation in the instruction-following capabilities of the LLaVA Multi-Modal Large Language Model (MM-LLM). To address this issue, we introduced a 6K-entry VQA preference dataset and employed Direct Preference Optimization (DPO), alongside testing other algorithms such as Rejection Sampling and SteerLM, to enhance instruction-following proficiency. Our methodology not only fully restored the language following capabilities of LLaVa on the MT-Bench but also outperformed LLaVA-RLHF and Vicuna. Additionally, our approach extended to visual VQA tasks, as demonstrated by significant performance improvements on MM-Vet and LLaVa-Bench. An interesting observation was that, compared to models using distilled SFT, our method showed substantial out-of-distribution improvements. 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2402.10884", "href": "https://arxiv.org/abs/2402.10884", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model available", "raw": "Model available", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/alexshengzhili/llava-v1.5-13b-dpo", "href": null, "resource": { "type": "model", "id": "alexshengzhili/llava-v1.5-13b-dpo", "discussionNum": null }, "url": "https://huggingface.co/alexshengzhili/llava-v1.5-13b-dpo", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "GitHub: ", "raw": "GitHub: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/findalexli/mllm-dpo/edit/main/README.MD", "href": "https://github.com/findalexli/mllm-dpo/edit/main/README.MD", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
After the Supervised Fine-Tuning (SFT) phase, we observed a notable degradation in the instruction-following capabilities of the LLaVA Multi-Modal Large Language Model (MM-LLM). To address this issue, we introduced a 6K-entry VQA preference dataset and employed Direct Preference Optimization (DPO), alongside testing other algorithms such as Rejection Sampling and SteerLM, to enhance instruction-following proficiency. Our methodology not only fully restored the language-following capabilities of LLaVA on MT-Bench but also outperformed LLaVA-RLHF and Vicuna. Additionally, our approach extended to visual VQA tasks, as demonstrated by significant performance improvements on MM-Vet and LLaVA-Bench. An interesting observation was that, compared to models using distilled SFT, our method showed substantial out-of-distribution improvements. Paper: https://arxiv.org/abs/2402.10884 Model available: https://huggingface.co/alexshengzhili/llava-v1.5-13b-dpo GitHub: https://github.com/findalexli/mllm-dpo/edit/main/README.MD
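For readers unfamiliar with DPO, the objective being optimized is the standard preference loss of Rafailov et al.; the sketch below only illustrates that loss and is not the authors' multimodal training code. In their setting the log-probabilities would come from the policy and a frozen reference MM-LLM scored over chosen/rejected VQA answers.

```python
# The core DPO objective, shown to make the training signal concrete (illustrative only).
import torch
import torch.nn.functional as F

def dpo_loss(policy_chosen_logps, policy_rejected_logps,
             ref_chosen_logps, ref_rejected_logps, beta=0.1):
    # Implicit rewards are log-ratios of the policy against the frozen reference model.
    chosen_rewards = beta * (policy_chosen_logps - ref_chosen_logps)
    rejected_rewards = beta * (policy_rejected_logps - ref_rejected_logps)
    # Maximize the probability that the preferred answer beats the rejected one.
    return -F.logsigmoid(chosen_rewards - rejected_rewards).mean()

# Toy usage with fake per-example sequence log-probabilities:
loss = dpo_loss(torch.tensor([-12.3, -9.8]), torch.tensor([-15.1, -11.0]),
                torch.tensor([-13.0, -10.2]), torch.tensor([-14.8, -10.9]))
print(loss.item())
```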
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/no-auth/wthru065DlrO99caaTL2R.png", "fullname": "shengzhi alex li", "name": "alexshengzhili", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/638913002a897944ea5bd2ab/b7LSAIgEpa0I2SHTdDUJq.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/638913002a897944ea5bd2ab/XpoDgVjr22JAKItWSWE3W.png" } ]
[]
[ { "reaction": "❤️", "users": [ "samusenps", "osanseviero", "cstr" ], "count": 3 } ]
2024-03-08T03:24:47.000Z
2024-04-13T02:18:47.142Z
[]
/posts/alexshengzhili/791362158889035
255
0
576838864748336
[ { "type": "text", "value": "Dear music lovers 🕺, ", "raw": "Dear music lovers 🕺, ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "MusicLang Space is now live: ", "raw": "MusicLang Space is now live: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/musiclang/README", "href": null, "resource": { "type": "space", "id": "musiclang/README", "discussionNum": null }, "url": "https://huggingface.co/spaces/musiclang/README", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "MusicLang is a controllable model for music generation: ", "raw": "MusicLang is a controllable model for music generation: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> 🦙 Discover the LLAMA2 architecture, trained from scratch for symbolic music generation, ensuring exceptional quality;", "raw": "> 🦙 Discover the LLAMA2 architecture, trained from scratch for symbolic music generation, ensuring exceptional quality;", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> 👨‍🎨 Unleash your creativity by extending an existing music, or create new ones from scratch;", "raw": "> 👨‍🎨 Unleash your creativity by extending an existing music, or create new ones from scratch;", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> 🤖 Integrate MusicLang into your applications, with an inference optimized for CPUs written in C, other integrations and optimizations coming soon.", "raw": "> 🤖 Integrate MusicLang into your applications, with an inference optimized for CPUs written in C, other integrations and optimizations coming soon.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", 
"href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In the space, you’ll find :", "raw": "In the space, you’ll find :", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1️⃣ MusicLang foundation model: our fondation model for creating and generating original midi soundtracks ", "raw": "1️⃣ MusicLang foundation model: our fondation model for creating and generating original midi soundtracks ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/musiclang/musiclang-v2", "href": null, "resource": { "type": "model", "id": "musiclang/musiclang-v2", "discussionNum": null }, "url": "https://huggingface.co/musiclang/musiclang-v2", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ";", "raw": ";", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2️⃣ MusicLang predict: our AI prediction api of the MusicLang package ", "raw": "2️⃣ MusicLang predict: our AI prediction api of the MusicLang package ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/musiclang/musiclang_predict?tab=readme-ov-file", "href": "https://github.com/musiclang/musiclang_predict?tab=readme-ov-file", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ";", "raw": ";", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3️⃣ MusicLang Language:a new language for tonal music. This language allows composers to load, write, transform and predict symbolic music in a simple, condensed and high level manner ", "raw": "3️⃣ MusicLang Language:a new language for tonal music. 
This language allows composers to load, write, transform and predict symbolic music in a simple, condensed and high level manner ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/MusicLang/musiclang", "href": "https://github.com/MusicLang/musiclang", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ";", "raw": ";", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4️⃣ MusicLang Demo Space: ", "raw": "4️⃣ MusicLang Demo Space: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/musiclang/musiclang-predict", "href": null, "resource": { "type": "space", "id": "musiclang/musiclang-predict", "discussionNum": null }, "url": "https://huggingface.co/spaces/musiclang/musiclang-predict", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "5️⃣ Our Colab: ", "raw": "5️⃣ Our Colab: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://colab.research.google.com/drive/1MA2mek826c05BjbWk2nRkVv2rW7kIU_S?usp=sharing", "href": "https://colab.research.google.com/drive/1MA2mek826c05BjbWk2nRkVv2rW7kIU_S?usp=sharing", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Help us share the future of music composition! Spread the word, show your support by adding a star or contribute to our project. ⭐️✨", "raw": "Help us share the future of music composition! Spread the word, show your support by adding a star or contribute to our project. 
⭐️✨", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Music Sounds Definitely Better with You 🎶 🖤", "raw": "Music Sounds Definitely Better with You 🎶 🖤", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "cc ", "raw": "cc ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@floriangardin", "href": null, "resource": null, "url": null, "code": null, "user": "floriangardin", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@MehdiLeZ", "href": null, "resource": null, "url": null, "code": null, "user": "MehdiLeZ", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@reach-vb", "href": null, "resource": null, "url": null, "code": null, "user": "reach-vb", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thanks a lot, ", "raw": "Thanks a lot, ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The MusicLang team ❤️", "raw": "The MusicLang team ❤️", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Dear music lovers 🕺, MusicLang Space is now live: https://huggingface.co/spaces/musiclang/README MusicLang is a controllable model for music generation: > 🦙 Discover the LLAMA2 architecture, trained from scratch for symbolic music generation, ensuring exceptional quality; > 👨‍🎨 Unleash your creativity by extending an existing piece of music, or creating new ones from scratch; > 🤖 Integrate MusicLang into your applications, with inference optimized for CPUs and written in C; other integrations and optimizations are coming soon. In the space, you’ll find: 1️⃣ MusicLang foundation model: our foundation model for creating and generating original MIDI soundtracks https://huggingface.co/musiclang/musiclang-v2; 2️⃣ MusicLang predict: the AI prediction API of the MusicLang package https://github.com/musiclang/musiclang_predict?tab=readme-ov-file; 3️⃣ MusicLang Language: a new language for tonal music. This language allows composers to load, write, transform and predict symbolic music in a simple, condensed and high-level manner https://github.com/MusicLang/musiclang; 4️⃣ MusicLang Demo Space: https://huggingface.co/spaces/musiclang/musiclang-predict 5️⃣ Our Colab: https://colab.research.google.com/drive/1MA2mek826c05BjbWk2nRkVv2rW7kIU_S?usp=sharing Help us share the future of music composition! Spread the word, show your support by adding a star, or contribute to our project. ⭐️✨ Music Sounds Definitely Better with You 🎶 🖤 cc @floriangardin @MehdiLeZ @reach-vb Thanks a lot, The MusicLang team ❤️
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6544b4a04200ae379ea69302/-OnV6YPGaTUyBKOJiq4WH.png", "fullname": "Mehdi Zatar", "name": "MehdiLeZ", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 30, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6544b4a04200ae379ea69302/OQ3RzzSFcyBw59qrjhWyz.qt" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6329d2d3cf7d40df9b3ac5b3/yf32wwK_h6fqQYstU44CF.jpeg", "fullname": "Florian GARDIN", "name": "floriangardin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 14 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6544b4a04200ae379ea69302/-OnV6YPGaTUyBKOJiq4WH.png", "fullname": "Mehdi Zatar", "name": "MehdiLeZ", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 30 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1655385361868-61b85ce86eb1f2c5e6233736.jpeg", "fullname": "Vaibhav Srivastav", "name": "reach-vb", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 460 } ]
[ { "reaction": "❤️", "users": [ "floriangardin", "reach-vb", "samusenps", "Bils", "kramp", "fffiloni", "clem", "osanseviero", "victor", "MehdiLeZ", "asigalov61", "RadMann" ], "count": 12 }, { "reaction": "🤯", "users": [ "reach-vb", "clem", "osanseviero" ], "count": 3 }, { "reaction": "🤗", "users": [ "reach-vb", "clem" ], "count": 2 }, { "reaction": "👍", "users": [ "goodgame", "asigalov61" ], "count": 2 } ]
2024-03-07T18:28:09.000Z
2024-04-29T16:03:26.668Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1655385361868-61b85ce86eb1f2c5e6233736.jpeg", "fullname": "Vaibhav Srivastav", "name": "reach-vb", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 460, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6544b4a04200ae379ea69302/-OnV6YPGaTUyBKOJiq4WH.png", "fullname": "Mehdi Zatar", "name": "MehdiLeZ", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 30, "isFollowing": false }, { "avatarUrl": "/avatars/2bf1b3dcf5265ce38c6c79f5d04fb3de.svg", "fullname": "t1u1", "name": "t1u1", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f57ea2d3f32f12a3c0692e6/b-9GG2p--smCameUPeCBN.jpeg", "fullname": "Alex", "name": "asigalov61", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 65, "isFollowing": false } ]
/posts/MehdiLeZ/576838864748336
121
8
711093324398310
[ { "type": "text", "value": "GaLore", "raw": "GaLore", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Memory-Efficient LLM Training by Gradient Low-Rank Projection", "raw": "Memory-Efficient LLM Training by Gradient Low-Rank Projection", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2403.03507", "href": null, "resource": { "type": "paper", "id": "2403.03507", "discussionNum": null }, "url": "https://huggingface.co/papers/2403.03507", "code": null, "user": null, "label": "GaLore: Memory-Efficient LLM Training by Gradient Low-Rank Projection (2403.03507)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Training Large Language Models (LLMs) presents significant memory challenges, predominantly due to the growing size of weights and optimizer states. Common memory-reduction approaches, such as low-rank adaptation (LoRA), add a trainable low-rank matrix to the frozen pre-trained weight in each layer, reducing trainable parameters and optimizer states. However, such approaches typically underperform training with full-rank weights in both pre-training and fine-tuning stages since they limit the parameter search to a low-rank subspace and alter the training dynamics, and further, may require full-rank warm start. In this work, we propose Gradient Low-Rank Projection (GaLore), a training strategy that allows full-parameter learning but is more memory-efficient than common low-rank adaptation methods such as LoRA. Our approach reduces memory usage by up to 65.5% in optimizer states while maintaining both efficiency and performance for pre-training on LLaMA 1B and 7B architectures with C4 dataset with up to 19.7B tokens, and on fine-tuning RoBERTa on GLUE tasks. Our 8-bit GaLore further reduces optimizer memory by up to 82.5% and total training memory by 63.3%, compared to a BF16 baseline. Notably, we demonstrate, for the first time, the feasibility of pre-training a 7B model on consumer GPUs with 24GB memory (e.g., NVIDIA RTX 4090) without model parallel, checkpointing, or offloading strategies.", "raw": "Training Large Language Models (LLMs) presents significant memory challenges, predominantly due to the growing size of weights and optimizer states. Common memory-reduction approaches, such as low-rank adaptation (LoRA), add a trainable low-rank matrix to the frozen pre-trained weight in each layer, reducing trainable parameters and optimizer states. 
However, such approaches typically underperform training with full-rank weights in both pre-training and fine-tuning stages since they limit the parameter search to a low-rank subspace and alter the training dynamics, and further, may require full-rank warm start. In this work, we propose Gradient Low-Rank Projection (GaLore), a training strategy that allows full-parameter learning but is more memory-efficient than common low-rank adaptation methods such as LoRA. Our approach reduces memory usage by up to 65.5% in optimizer states while maintaining both efficiency and performance for pre-training on LLaMA 1B and 7B architectures with C4 dataset with up to 19.7B tokens, and on fine-tuning RoBERTa on GLUE tasks. Our 8-bit GaLore further reduces optimizer memory by up to 82.5% and total training memory by 63.3%, compared to a BF16 baseline. Notably, we demonstrate, for the first time, the feasibility of pre-training a 7B model on consumer GPUs with 24GB memory (e.g., NVIDIA RTX 4090) without model parallel, checkpointing, or offloading strategies.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
GaLore Memory-Efficient LLM Training by Gradient Low-Rank Projection https://huggingface.co/papers/2403.03507 Training Large Language Models (LLMs) presents significant memory challenges, predominantly due to the growing size of weights and optimizer states. Common memory-reduction approaches, such as low-rank adaptation (LoRA), add a trainable low-rank matrix to the frozen pre-trained weight in each layer, reducing trainable parameters and optimizer states. However, such approaches typically underperform training with full-rank weights in both pre-training and fine-tuning stages since they limit the parameter search to a low-rank subspace and alter the training dynamics, and further, may require full-rank warm start. In this work, we propose Gradient Low-Rank Projection (GaLore), a training strategy that allows full-parameter learning but is more memory-efficient than common low-rank adaptation methods such as LoRA. Our approach reduces memory usage by up to 65.5% in optimizer states while maintaining both efficiency and performance for pre-training on LLaMA 1B and 7B architectures with C4 dataset with up to 19.7B tokens, and on fine-tuning RoBERTa on GLUE tasks. Our 8-bit GaLore further reduces optimizer memory by up to 82.5% and total training memory by 63.3%, compared to a BF16 baseline. Notably, we demonstrate, for the first time, the feasibility of pre-training a 7B model on consumer GPUs with 24GB memory (e.g., NVIDIA RTX 4090) without model parallel, checkpointing, or offloading strategies.
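To make the abstract's core idea tangible, here is a toy illustration of gradient low-rank projection, not the official GaLore optimizer: the gradient of a 2-D weight is projected onto a rank-r subspace, the optimizer state lives in that small space, and the update is projected back to full rank. The rank, the heavy-ball update rule, and the refresh interval are arbitrary choices made for the sketch.

```python
# Toy gradient low-rank projection (illustrative sketch, not the official GaLore code).
import torch

torch.manual_seed(0)
m, n, r, lr = 512, 512, 8, 1e-2
W = torch.randn(m, n, requires_grad=True)
momentum = torch.zeros(r, n)                      # optimizer state is r x n, not m x n

def refresh_projection(grad, rank):
    # Top-r left singular vectors of the gradient define the subspace (refreshed periodically).
    U, _, _ = torch.linalg.svd(grad, full_matrices=False)
    return U[:, :rank]                            # (m, r)

for step in range(200):
    loss = ((W @ torch.randn(n, 1)) ** 2).mean()  # stand-in objective
    loss.backward()
    with torch.no_grad():
        if step % 50 == 0:
            P = refresh_projection(W.grad, r)     # periodic subspace refresh
        low_rank_grad = P.T @ W.grad              # (r, n): compressed gradient
        momentum.mul_(0.9).add_(low_rank_grad)    # heavy-ball update in the small space
        W -= lr * (P @ momentum)                  # project the update back to (m, n)
        W.grad = None
```

The memory saving comes from the optimizer state being r x n instead of m x n; GaLore itself applies this per layer with Adam statistics and periodic subspace refreshes, which is where the reported 65.5% and 82.5% reductions come from.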
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/vYB994agWqC6aKu3XjtK6.png" } ]
[]
[ { "reaction": "🤯", "users": [ "osanseviero", "notsahil", "andysalerno", "nofreewill", "femboysLover", "OneLoneTurnip", "boapps", "arcdyn", "megataro", "bytesizedllm", "clem", "jthk-HF", "rganti", "shing3232", "mrm8488" ], "count": 15 }, { "reaction": "👍", "users": [ "malhajar", "clem", "goodgame", "Jason233", "mathiasn1", "mrm8488", "deepkyu" ], "count": 7 } ]
2024-03-07T15:54:09.000Z
2024-03-14T23:11:23.207Z
[]
/posts/akhaliq/711093324398310
323
1
335711068632455
[ { "type": "text", "value": "BioT5: Enriching Cross-modal Integration in Biology with Chemical Knowledge and Natural Language Associations", "raw": "BioT5: Enriching Cross-modal Integration in Biology with Chemical Knowledge and Natural Language Associations", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "BioT5 achieves superior performance on various biological tasks by integrating natural language. See more details at:", "raw": "BioT5 achieves superior performance on various biological tasks by integrating natural language. See more details at:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2310.07276", "href": null, "resource": { "type": "paper", "id": "2310.07276", "discussionNum": null }, "url": "https://huggingface.co/papers/2310.07276", "code": null, "user": null, "label": "BioT5: Enriching Cross-modal Integration in Biology with Chemical\n Knowledge and Natural Language Associations (2310.07276)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Code: ", "raw": "Code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/QizhiPei/BioT5", "href": "https://github.com/QizhiPei/BioT5", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model (base): ", "raw": "Model (base): ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/QizhiPei/biot5-base", "href": null, "resource": { "type": "model", "id": "QizhiPei/biot5-base", "discussionNum": null }, "url": "https://huggingface.co/QizhiPei/biot5-base", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model (molecule captioning): ", "raw": "Model (molecule captioning): ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/QizhiPei/biot5-base-mol2text", "href": 
null, "resource": { "type": "model", "id": "QizhiPei/biot5-base-mol2text", "discussionNum": null }, "url": "https://huggingface.co/QizhiPei/biot5-base-mol2text", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model (Text-based Molecule Design): ", "raw": "Model (Text-based Molecule Design): ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/QizhiPei/biot5-base-text2mol", "href": null, "resource": { "type": "model", "id": "QizhiPei/biot5-base-text2mol", "discussionNum": null }, "url": "https://huggingface.co/QizhiPei/biot5-base-text2mol", "code": null, "user": null, "label": null, "lang": null } ]
BioT5: Enriching Cross-modal Integration in Biology with Chemical Knowledge and Natural Language Associations BioT5 achieves superior performance on various biological tasks by integrating natural language. See more details at: Paper: https://huggingface.co/papers/2310.07276 Code: https://github.com/QizhiPei/BioT5 Model (base): https://huggingface.co/QizhiPei/biot5-base Model (molecule captioning): https://huggingface.co/QizhiPei/biot5-base-mol2text Model (Text-based Molecule Design): https://huggingface.co/QizhiPei/biot5-base-text2mol
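For readers who want to try the released checkpoints, below is a minimal sketch of loading the text-based molecule design model with the standard 🤗 Transformers seq2seq classes. It assumes the BioT5 checkpoints load with the generic auto classes (BioT5 is T5-based); the exact task prefix and molecule representation (BioT5 works with SELFIES strings) are defined in the official repo, so the prompt here is only a hypothetical placeholder.

```python
# Minimal sketch, assuming the BioT5 checkpoints load with the generic seq2seq classes.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "QizhiPei/biot5-base-text2mol"  # text-based molecule design checkpoint from the post
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Hypothetical prompt: the real task prefix / SELFIES formatting is documented in the BioT5 repo.
prompt = "Generate a molecule that matches the description: a colorless volatile liquid used as a solvent."
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```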
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6397f6081323f19c578f142e/it7FYYKjlLX8wSsMLm8EO.jpeg", "fullname": "QizhiPei", "name": "QizhiPei", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 11, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "osanseviero", "victor", "katielink", "samusenps", "Tonic", "QizhiPei", "clem" ], "count": 7 }, { "reaction": "🤗", "users": [ "Tonic" ], "count": 1 } ]
2024-03-07T13:44:24.000Z
2024-03-07T14:55:20.776Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1656519120848-6201c0129dab2e6e083d023c.jpeg", "fullname": "Katie Link", "name": "katielink", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2157, "isFollowing": false } ]
/posts/QizhiPei/335711068632455
309
1
546194818555981
[ { "type": "text", "value": "Check out quantized weights from ISTA-DAS Lab directly in their organisation page: ", "raw": "Check out quantized weights from ISTA-DAS Lab directly in their organisation page: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/ISTA-DASLab", "href": "https://huggingface.co/ISTA-DASLab", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ! With official weights of AQLM (for 2bit quantization) & QMoE (1-bit MoE quantization)", "raw": " ! With official weights of AQLM (for 2bit quantization) & QMoE (1-bit MoE quantization)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read more about these techniques below:", "raw": "Read more about these techniques below:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "AQLM paper: ", "raw": "AQLM paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://hf.co/papers/2401.06118", "href": null, "resource": { "type": "paper", "id": "2401.06118", "discussionNum": null }, "url": "https://hf.co/papers/2401.06118", "code": null, "user": null, "label": "Extreme Compression of Large Language Models via Additive Quantization (2401.06118)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "QMoE: ", "raw": "QMoE: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2310.16795", "href": null, "resource": { "type": "paper", "id": "2310.16795", "discussionNum": null }, "url": "https://huggingface.co/papers/2310.16795", "code": null, "user": null, "label": "QMoE: Practical Sub-1-Bit Compression of Trillion-Parameter Models (2310.16795)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Some useful links below:", "raw": "Some useful links below:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": 
"new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "AQLM repo: ", "raw": "AQLM repo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/Vahe1994/AQLM", "href": "https://github.com/Vahe1994/AQLM", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How to use AQLM & transformers: ", "raw": "How to use AQLM & transformers: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/docs/transformers/quantization#aqlm", "href": "https://huggingface.co/docs/transformers/quantization#aqlm", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How to use AQLM & PEFT: ", "raw": "How to use AQLM & PEFT: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/docs/peft/developer_guides/quantization#aqlm-quantizaion", "href": "https://huggingface.co/docs/peft/developer_guides/quantization#aqlm-quantizaion", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Great work from ", "raw": "Great work from ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@BlackSamorez", "href": null, "resource": null, "url": null, "code": null, "user": "BlackSamorez", "label": null, "lang": null }, { "type": "text", "value": " and team !", "raw": " and team !", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Check out quantized weights from ISTA-DAS Lab directly on their organisation page: https://huggingface.co/ISTA-DASLab ! With official weights for AQLM (2-bit quantization) & QMoE (1-bit MoE quantization) Read more about these techniques below: AQLM paper: https://hf.co/papers/2401.06118 QMoE: https://huggingface.co/papers/2310.16795 Some useful links below: AQLM repo: https://github.com/Vahe1994/AQLM How to use AQLM & transformers: https://huggingface.co/docs/transformers/quantization#aqlm How to use AQLM & PEFT: https://huggingface.co/docs/peft/developer_guides/quantization#aqlm-quantizaion Great work from @BlackSamorez and team!
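As a quick illustration of the transformers integration linked above, here is a minimal sketch of loading an AQLM-quantized checkpoint. It assumes the `aqlm` package is installed (e.g. `pip install aqlm[gpu]`) alongside a recent transformers release; the repo id below is only illustrative, so pick an actual AQLM checkpoint from the ISTA-DASLab organisation page.

```python
# Minimal sketch: loading an AQLM-quantized model through transformers.
# Assumes `aqlm` is installed (pip install aqlm[gpu]) and a recent transformers release;
# device_map="auto" additionally needs `accelerate`.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Illustrative repo id -- browse https://huggingface.co/ISTA-DASLab for real AQLM checkpoints.
model_id = "ISTA-DASLab/Llama-2-7b-AQLM-2Bit-1x16-hf"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

inputs = tokenizer("Extreme quantization makes it possible to", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```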
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648631057413-noauth.png", "fullname": "Younes Belkada", "name": "ybelkada", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 417, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/623753b5eddd7763adc9346a/rcpQAKZNrkn1-tMtraQBX.jpeg", "fullname": "Andrei Panferov", "name": "BlackSamorez", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 33 } ]
[ { "reaction": "❤️", "users": [ "alielfilali01", "marcsun13", "osanseviero", "samusenps", "FM-1976", "mayank-mishra", "clem", "goodgame", "dillfrescott" ], "count": 9 }, { "reaction": "👍", "users": [ "goodgame", "dillfrescott" ], "count": 2 } ]
2024-03-07T13:37:20.000Z
2024-03-07T13:42:37.767Z
[]
/posts/ybelkada/546194818555981
2,687
0
577391544681818
[ { "type": "text", "value": "🚀 Major Update: OpenLLM Turkish Benchmarks & Leaderboard Launch! 🚀", "raw": "🚀 Major Update: OpenLLM Turkish Benchmarks & Leaderboard Launch! 🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Exciting news for the Hugging Face community! I'm thrilled to announce the launch of my fully translated OpenLLM Benchmarks in Turkish, accompanied by my innovative leaderboard, ready to highlight the capabilities of Turkish language models. This marks a landmark achievement in supporting and advancing Turkish AI research.", "raw": "Exciting news for the Hugging Face community! I'm thrilled to announce the launch of my fully translated OpenLLM Benchmarks in Turkish, accompanied by my innovative leaderboard, ready to highlight the capabilities of Turkish language models. This marks a landmark achievement in supporting and advancing Turkish AI research.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What’s New:", "raw": "What’s New:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📚 Complete OpenLLM Benchmarks in Turkish: Dive into my comprehensive suite of benchmarks, now available for thorough evaluation of Turkish LLMs.", "raw": "📚 Complete OpenLLM Benchmarks in Turkish: Dive into my comprehensive suite of benchmarks, now available for thorough evaluation of Turkish LLMs.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📈 Live Leaderboard: Explore my live leaderboard showcasing the progress and excellence in Turkish language AI. (Note: Current evaluations are conducted manually but are consistently updated.)", "raw": "📈 Live Leaderboard: Explore my live leaderboard showcasing the progress and excellence in Turkish language AI. 
(Note: Current evaluations are conducted manually but are consistently updated.)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Partnership Invitation:", "raw": "Partnership Invitation:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🤝 Join My Automation Mission: I'm on the lookout for partners to help transition from manual to automated leaderboard evaluations. Your support can catalyze real-time, streamlined assessments, pushing Turkish LLMs to new heights.", "raw": "🤝 Join My Automation Mission: I'm on the lookout for partners to help transition from manual to automated leaderboard evaluations. Your support can catalyze real-time, streamlined assessments, pushing Turkish LLMs to new heights.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key Resources:", "raw": "Key Resources:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📚 Explore the Turkish OpenLLM Collection: (", "raw": "📚 Explore the Turkish OpenLLM Collection: (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/malhajar/openllmturkishleadboard-datasets-65e5854490a87c0f2670ec18", "href": null, "resource": { "type": "collection", "id": "malhajar/openllmturkishleadboard-datasets-65e5854490a87c0f2670ec18", "discussionNum": null }, "url": "https://huggingface.co/collections/malhajar/openllmturkishleadboard-datasets-65e5854490a87c0f2670ec18", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ")", "raw": ")", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🏆 Discover the Leaderboard: (", "raw": "🏆 Discover the Leaderboard: (", "href": null, "resource": null, "url": null, "code": null, 
"user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/malhajar/OpenLLMTurkishLeaderboard", "href": null, "resource": { "type": "space", "id": "malhajar/OpenLLMTurkishLeaderboard", "discussionNum": null }, "url": "https://huggingface.co/spaces/malhajar/OpenLLMTurkishLeaderboard", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ")", "raw": ")", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Get Involved:", "raw": "Get Involved:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💡 Share Your Models: Contribute to the burgeoning field of Turkish AI, showcasing your work and contributing to the collective progress.", "raw": "💡 Share Your Models: Contribute to the burgeoning field of Turkish AI, showcasing your work and contributing to the collective progress.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Let's unite to propel Turkish AI forward and set a precedent for the global community. Stay tuned as I plan to expand these efforts to other languages, further enriching the AI ecosystem!", "raw": "Let's unite to propel Turkish AI forward and set a precedent for the global community. Stay tuned as I plan to expand these efforts to other languages, further enriching the AI ecosystem!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Join this groundbreaking endeavor and let’s shape the future of AI together! 🌐", "raw": "Join this groundbreaking endeavor and let’s shape the future of AI together! 
🌐", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "#TurkishLLM #AI #MachineLearning #LanguageModels #OpenLLM #HuggingFace", "raw": "#TurkishLLM #AI #MachineLearning #LanguageModels #OpenLLM #HuggingFace", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🚀 Major Update: OpenLLM Turkish Benchmarks & Leaderboard Launch! 🚀 Exciting news for the Hugging Face community! I'm thrilled to announce the launch of my fully translated OpenLLM Benchmarks in Turkish, accompanied by my innovative leaderboard, ready to highlight the capabilities of Turkish language models. This marks a landmark achievement in supporting and advancing Turkish AI research. What’s New: 📚 Complete OpenLLM Benchmarks in Turkish: Dive into my comprehensive suite of benchmarks, now available for thorough evaluation of Turkish LLMs. 📈 Live Leaderboard: Explore my live leaderboard showcasing the progress and excellence in Turkish language AI. (Note: Current evaluations are conducted manually but are consistently updated.) Partnership Invitation: 🤝 Join My Automation Mission: I'm on the lookout for partners to help transition from manual to automated leaderboard evaluations. Your support can catalyze real-time, streamlined assessments, pushing Turkish LLMs to new heights. Key Resources: 📚 Explore the Turkish OpenLLM Collection: (https://huggingface.co/collections/malhajar/openllmturkishleadboard-datasets-65e5854490a87c0f2670ec18) 🏆 Discover the Leaderboard: (https://huggingface.co/spaces/malhajar/OpenLLMTurkishLeaderboard) Get Involved: 💡 Share Your Models: Contribute to the burgeoning field of Turkish AI, showcasing your work and contributing to the collective progress. Let's unite to propel Turkish AI forward and set a precedent for the global community. Stay tuned as I plan to expand these efforts to other languages, further enriching the AI ecosystem! Join this groundbreaking endeavor and let’s shape the future of AI together! 🌐 #TurkishLLM #AI #MachineLearning #LanguageModels #OpenLLM #HuggingFace
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/639c5c448a34ed9a404a956b/jcypw-eh7JzKHTffd0N9l.jpeg", "fullname": "Mohamad Alhajar", "name": "malhajar", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 91, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "badayvedat", "osanseviero", "clefourrier", "Wauplin", "uisikdag", "samusenps", "comarproject", "thomwolf", "clem", "Yhyu13", "ahmet27", "mohammedbriman" ], "count": 12 }, { "reaction": "👍", "users": [ "kyo-takano", "victor", "Wauplin", "comarproject", "thomwolf", "HamzaMuaz", "ahmet27" ], "count": 7 }, { "reaction": "🤗", "users": [ "Wauplin", "comarproject", "thomwolf", "clem" ], "count": 4 } ]
2024-03-07T12:45:36.000Z
2024-04-29T16:03:26.667Z
[]
/posts/malhajar/577391544681818
3,754
1
154394413736486
[ { "type": "inline_code", "value": null, "raw": "`mamba`", "href": null, "resource": null, "url": null, "code": "mamba", "user": null, "label": null, "lang": null }, { "type": "text", "value": " is now available in transformers. Thanks to ", "raw": " is now available in transformers. Thanks to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@tridao", "href": null, "resource": null, "url": null, "code": null, "user": "tridao", "label": null, "lang": null }, { "type": "text", "value": " and ", "raw": " and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@albertgu", "href": null, "resource": null, "url": null, "code": null, "user": "albertgu", "label": null, "lang": null }, { "type": "text", "value": " for this brilliant model! 🚀 and the amazing ", "raw": " for this brilliant model! 🚀 and the amazing ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`mamba-ssm`", "href": null, "resource": null, "url": null, "code": "mamba-ssm", "user": null, "label": null, "lang": null }, { "type": "text", "value": " kernels powering this!", "raw": " kernels powering this!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Checkout the collection here:", "raw": "Checkout the collection here:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/state-spaces/transformers-compatible-mamba-65e7b40ab87e5297e45ae406", "href": null, "resource": { "type": "collection", "id": "state-spaces/transformers-compatible-mamba-65e7b40ab87e5297e45ae406", "discussionNum": null }, "url": "https://huggingface.co/collections/state-spaces/transformers-compatible-mamba-65e7b40ab87e5297e45ae406", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
`mamba` is now available in transformers. Thanks to @tridao and @albertgu for this brilliant model! 🚀 and the amazing `mamba-ssm` kernels powering this! Check out the collection here: https://huggingface.co/collections/state-spaces/transformers-compatible-mamba-65e7b40ab87e5297e45ae406
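A minimal usage sketch for the transformers-compatible checkpoints is shown below. The repo id follows the collection's naming scheme and should be double-checked there; without the optional `mamba-ssm` and `causal-conv1d` kernels, transformers falls back to a slower pure-PyTorch path.

```python
# Minimal sketch: running a transformers-compatible mamba checkpoint.
# Install `mamba-ssm` and `causal-conv1d` for the fast kernels; otherwise a slower fallback is used.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "state-spaces/mamba-130m-hf"  # illustrative size; see the linked collection for all checkpoints
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("State-space models such as Mamba", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=30)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```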
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674683851722-62441cb7456803e95009a08f.jpeg", "fullname": "Arthur Zucker", "name": "ArthurZ", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 294, "isFollowing": false }
[]
[ { "avatarUrl": "/avatars/a293aac4aa53800ad7dd08b28bd6b6f5.svg", "fullname": "Albert Gu", "name": "albertgu", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 45 }, { "avatarUrl": "/avatars/dbc009451865435bf290791beadc4723.svg", "fullname": "Tri Dao", "name": "tridao", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 72 } ]
[ { "reaction": "🤝", "users": [ "lysandre", "osanseviero", "susnato", "victor", "ZetangForward", "samusenps", "chansung", "tomaarsen", "ceyda", "zjr2000" ], "count": 10 }, { "reaction": "❤️", "users": [ "samusenps", "clem", "javiermartinezcebrian", "comarproject", "imohammad12", "zjr2000", "Na0s" ], "count": 7 } ]
2024-03-07T10:07:55.000Z
2024-03-19T22:21:01.166Z
[ { "avatarUrl": "/avatars/d5d3424127f7d20f37ec8df08de06966.svg", "fullname": "Zhichao Yang", "name": "whaleloops", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674683851722-62441cb7456803e95009a08f.jpeg", "fullname": "Arthur Zucker", "name": "ArthurZ", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 294, "isFollowing": false }, { "avatarUrl": "/avatars/e5b9cc41377f6a06c56e4a1bd59978bc.svg", "fullname": "FilipV", "name": "PheelaV", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "/avatars/318ab95ff14c9f825a92f280d04f756e.svg", "fullname": "Filip Reka", "name": "filiphand", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/ArthurZ/154394413736486
3,802
5
681723324028999
[ { "type": "text", "value": "Anchor Large Language Models: Up to 99% KV cache reduction!", "raw": "Anchor Large Language Models: Up to 99% KV cache reduction!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "paper: ", "raw": "paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/pdf/2402.07616.pdf", "href": "https://arxiv.org/pdf/2402.07616.pdf", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Anchor Large Language Models: Up to 99% KV cache reduction! paper: https://arxiv.org/pdf/2402.07616.pdf
{ "avatarUrl": "/avatars/6b6b550d96be4a6473e2ccf74df438f7.svg", "fullname": "Jianhuipang", "name": "pangjh3", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "osanseviero", "damerajee", "samusenps", "pangjh3", "clem", "Excursions", "Tonic" ], "count": 7 }, { "reaction": "👍", "users": [ "FM-1976", "Tonic" ], "count": 2 } ]
2024-03-07T09:37:05.000Z
2024-05-23T11:03:50.356Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false } ]
/posts/pangjh3/681723324028999
23
1
704473068071462
[ { "type": "text", "value": "LLM “Patchnization”", "raw": "LLM “Patchnization”", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Minimal, clean code for video/image \"patchnization\" - a process commonly used in tokenizing visual data for use in a Transformer encoder. ", "raw": "Minimal, clean code for video/image \"patchnization\" - a process commonly used in tokenizing visual data for use in a Transformer encoder. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Code: ", "raw": "Code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/Jaykef/min-patchnizer", "href": "https://github.com/Jaykef/min-patchnizer", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The code above, first extracts still images (frames) from a video, splits the image frames into smaller fixed-size patches, linearly embeds each of them, adds position embeddings and then saves the resulting sequence of vectors for use in a Vision Transformer encoder. I tried training the resulting sequence vectors with Karpathy's minbpe and it took ~2173s per frame to tokenize. The whole \"patchnization\" took ~77.4s for a 20s video on my M2 Air.", "raw": "The code above, first extracts still images (frames) from a video, splits the image frames into smaller fixed-size patches, linearly embeds each of them, adds position embeddings and then saves the resulting sequence of vectors for use in a Vision Transformer encoder. I tried training the resulting sequence vectors with Karpathy's minbpe and it took ~2173s per frame to tokenize. 
The whole \"patchnization\" took ~77.4s for a 20s video on my M2 Air.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The files in the repo work as follows:", "raw": "The files in the repo work as follows:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. patchnizer.py: Holds code for simple implementation of the three stages involved (extract_image_frames from video, reduce image_frames_to_patches of fixed sizes 16x16 pixels, then linearly_embed_patches into a 1D vector sequence with additional position embeddings.", "raw": "1. patchnizer.py: Holds code for simple implementation of the three stages involved (extract_image_frames from video, reduce image_frames_to_patches of fixed sizes 16x16 pixels, then linearly_embed_patches into a 1D vector sequence with additional position embeddings.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. patchnize.py: performs the whole process with custom configs (patch_size, created dirs, video - I am using the \"dogs playing in snow\" video by sora).", "raw": "2. patchnize.py: performs the whole process with custom configs (patch_size, created dirs, video - I am using the \"dogs playing in snow\" video by sora).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. train.py: Trains the resulting one-dimensional vector sequence (linear_patch_embeddings + position_embeddings) on Karpathy's minbpe (a minimal implementation of the byte-pair encoding algorithm).", "raw": "3. 
train.py: Trains the resulting one-dimensional vector sequence (linear_patch_embeddings + position_embeddings) on Karpathy's minbpe (a minimal implementation of the byte-pair encoding algorithm).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. check.py: Checks if the patch embeddings match the original image patches by recovering the image frames from their corresponding image patches.", "raw": "4. check.py: Checks if the patch embeddings match the original image patches by recovering the image frames from their corresponding image patches.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The Patchnizer class has three stubs:", "raw": "The Patchnizer class has three stubs:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- extract_image_frames() which chops the video (20sec) into 60 frames (i.e each frame is ~0.33 secs) each of size 1280x720 pixels (original video dims).", "raw": "- extract_image_frames() which chops the video (20sec) into 60 frames (i.e each frame is ~0.33 secs) each of size 1280x720 pixels (original video dims).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- image_frames_to_patches() which grids each image frame into 16x16 pixels tiles. This makes each frame has a total of 3600 image patches (i.e 80 rows by 45 columns).", "raw": "- image_frames_to_patches() which grids each image frame into 16x16 pixels tiles. This makes each frame has a total of 3600 image patches (i.e 80 rows by 45 columns).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- linearly_embed_patches() turns the image patches into patch embeddings (a long string of integers for each image patch) then adds a position embedding for each patch.", "raw": "- linearly_embed_patches() turns the image patches into patch embeddings (a long string of integers for each image patch) then adds a position embedding for each patch.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
LLM “Patchnization” Minimal, clean code for video/image "patchnization" - a process commonly used in tokenizing visual data for use in a Transformer encoder. Code: https://github.com/Jaykef/min-patchnizer The code above first extracts still images (frames) from a video, splits the image frames into smaller fixed-size patches, linearly embeds each of them, adds position embeddings, and then saves the resulting sequence of vectors for use in a Vision Transformer encoder. I tried training the resulting sequence vectors with Karpathy's minbpe and it took ~2173s per frame to tokenize. The whole "patchnization" took ~77.4s for a 20s video on my M2 Air. The files in the repo work as follows: 1. patchnizer.py: Holds code for a simple implementation of the three stages involved (extract_image_frames from the video, reduce image_frames_to_patches of fixed size 16x16 pixels, then linearly_embed_patches into a 1D vector sequence with additional position embeddings). 2. patchnize.py: Performs the whole process with custom configs (patch_size, created dirs, video - I am using the "dogs playing in snow" video by Sora). 3. train.py: Trains the resulting one-dimensional vector sequence (linear_patch_embeddings + position_embeddings) on Karpathy's minbpe (a minimal implementation of the byte-pair encoding algorithm). 4. check.py: Checks whether the patch embeddings match the original image patches by recovering the image frames from their corresponding image patches. The Patchnizer class has three stubs: - extract_image_frames(), which chops the video (20 sec) into 60 frames (i.e. each frame is ~0.33 secs), each of size 1280x720 pixels (original video dims). - image_frames_to_patches(), which grids each image frame into 16x16 pixel tiles. This means each frame has a total of 3600 image patches (i.e. 80 rows by 45 columns). - linearly_embed_patches(), which turns the image patches into patch embeddings (a long string of integers for each image patch) and then adds a position embedding for each patch.
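To make the middle stage concrete, here is a small sketch (not the repository's actual code) of gridding one 1280x720 frame into 16x16 patches and flattening each patch into a vector, with a naive positional index standing in for the learned linear and position embeddings.

```python
# Sketch only (not min-patchnizer's implementation): split one frame into 16x16 patches
# and build flat per-patch vectors, with a simple additive position index.
import numpy as np

def frame_to_patches(frame: np.ndarray, patch: int = 16) -> np.ndarray:
    """frame: (H, W, C) array with H and W divisible by `patch`. Returns (num_patches, patch*patch*C)."""
    h, w, c = frame.shape
    rows, cols = h // patch, w // patch
    # (rows, patch, cols, patch, C) -> (rows, cols, patch, patch, C) -> (rows*cols, patch*patch*C)
    return (
        frame.reshape(rows, patch, cols, patch, c)
             .transpose(0, 2, 1, 3, 4)
             .reshape(rows * cols, patch * patch * c)
    )

frame = np.random.randint(0, 256, (720, 1280, 3), dtype=np.uint8)  # one 1280x720 RGB frame
patches = frame_to_patches(frame)                  # (3600, 768): a 45x80 grid of 16x16x3 patches
positions = np.arange(patches.shape[0])[:, None]   # naive positional index per patch
embedded = patches.astype(np.float32) + positions  # stand-in for linear projection + position embedding
print(patches.shape, embedded.shape)
```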
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg", "fullname": "Jaward Sesay", "name": "Jaward", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 191, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/99pRjjNuGmNZG77R-gh13.qt" } ]
[]
[ { "reaction": "❤️", "users": [ "osanseviero", "femboysLover", "samusenps", "clem", "Lwasinam", "whooray", "cahlen" ], "count": 7 } ]
2024-03-07T07:20:09.000Z
2024-03-07T07:20:09.480Z
[]
/posts/Jaward/704473068071462
52
0
758016962866206
[ { "type": "text", "value": "Hello fellow huggers!", "raw": "Hello fellow huggers!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hello fellow huggers!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b9df9b22e5b0fdd501a113/i2yTGbK7pFnw9YLwZ7elp.jpeg", "fullname": "Akhil B", "name": "hakunamatata1997", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 4, "isFollowing": false }
[]
[]
[ { "reaction": "🤗", "users": [ "Stopwolf", "julien-c", "samusenps", "clem", "dillfrescott" ], "count": 5 } ]
2024-03-07T05:35:26.000Z
2024-03-07T08:51:09.342Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg", "fullname": "Feynman Innovations", "name": "ajibawa-2023", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 138, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false } ]
/posts/hakunamatata1997/758016962866206
36
2
136441359023688
[ { "type": "text", "value": "Released updated weights for moondream2 today, with significantly improved benchmark scores!", "raw": "Released updated weights for moondream2 today, with significantly improved benchmark scores!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/vikhyatk/moondream2", "href": null, "resource": { "type": "space", "id": "vikhyatk/moondream2", "discussionNum": null }, "url": "https://huggingface.co/spaces/vikhyatk/moondream2", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/vikhyatk/moondream2", "href": null, "resource": { "type": "model", "id": "vikhyatk/moondream2", "discussionNum": null }, "url": "https://huggingface.co/vikhyatk/moondream2", "code": null, "user": null, "label": null, "lang": null } ]
Released updated weights for moondream2 today, with significantly improved benchmark scores! https://huggingface.co/spaces/vikhyatk/moondream2 https://huggingface.co/vikhyatk/moondream2
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63117568fa95534e218da163/8h9zN8aKRxPLBnXW7sqY9.jpeg", "fullname": "Vik Korrapati", "name": "vikhyatk", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 375, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63117568fa95534e218da163/lJZKCI3MVC380WPMvTcji.png" } ]
[]
[ { "reaction": "❤️", "users": [ "Sylvestre", "osanseviero", "victor", "dim", "samusenps", "Dlbk", "clem", "Lewdiculous", "Tom-Neverwinter", "not-lain", "MoonRide" ], "count": 11 }, { "reaction": "👍", "users": [ "ajibawa-2023", "clem", "Norod78", "Tom-Neverwinter", "not-lain" ], "count": 5 } ]
2024-03-07T04:50:51.000Z
2024-03-07T08:28:19.967Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg", "fullname": "Feynman Innovations", "name": "ajibawa-2023", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 138, "isFollowing": false } ]
/posts/vikhyatk/136441359023688
179
1
123362113265640
[ { "type": "text", "value": "🚀 Introducing UltraTextbooks v2: The Ultimate Educational NLP Dataset! 📚", "raw": "🚀 Introducing UltraTextbooks v2: The Ultimate Educational NLP Dataset! 📚", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I've expanded the dataset to include an even wider range of high-quality textbooks, with a special focus on machine learning, mathematics, and coding. 💻🧮", "raw": "I've expanded the dataset to include an even wider range of high-quality textbooks, with a special focus on machine learning, mathematics, and coding. 💻🧮", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "With over 3 million examples and 6 GB of data, UltraTextbooks v2 is your go-to resource for training advanced language models and developing cutting-edge educational applications. 🎓", "raw": "With over 3 million examples and 6 GB of data, UltraTextbooks v2 is your go-to resource for training advanced language models and developing cutting-edge educational applications. 🎓", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Explore the dataset on Hugging Face and unlock the power of AI in education! 🔓", "raw": "Explore the dataset on Hugging Face and unlock the power of AI in education! 🔓", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/Locutusque/UltraTextbooks-2.0", "href": null, "resource": { "type": "dataset", "id": "Locutusque/UltraTextbooks-2.0", "discussionNum": null }, "url": "https://huggingface.co/datasets/Locutusque/UltraTextbooks-2.0", "code": null, "user": null, "label": null, "lang": null } ]
🚀 Introducing UltraTextbooks v2: The Ultimate Educational NLP Dataset! 📚 I've expanded the dataset to include an even wider range of high-quality textbooks, with a special focus on machine learning, mathematics, and coding. 💻🧮 With over 3 million examples and 6 GB of data, UltraTextbooks v2 is your go-to resource for training advanced language models and developing cutting-edge educational applications. 🎓 Explore the dataset on Hugging Face and unlock the power of AI in education! 🔓 https://huggingface.co/datasets/Locutusque/UltraTextbooks-2.0
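For anyone who wants to inspect the data before committing to the full ~6 GB download, a minimal streaming sketch with the 🤗 datasets library is below; the "train" split name is an assumption, so verify it on the dataset card.

```python
# Minimal sketch: stream a few records instead of downloading the full ~6 GB dataset.
# The "train" split name is an assumption -- check the dataset card for the actual configuration.
from datasets import load_dataset

ds = load_dataset("Locutusque/UltraTextbooks-2.0", split="train", streaming=True)
for i, example in enumerate(ds):
    print(example)  # inspect the record schema from the first few rows
    if i == 2:
        break
```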
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/YeFyz1AZVcCRsyNHHtwJG.jpeg", "fullname": "Sebastian Gabarain", "name": "Locutusque", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 180, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "osanseviero", "Felladrin", "clem", "samusenps", "Jjjjjo" ], "count": 5 } ]
2024-03-07T04:18:53.000Z
2024-03-07T04:19:17.070Z
[]
/posts/Locutusque/123362113265640
256
0
337420060295579
[ { "type": "text", "value": "Empower yourself with knowledge and skills. The power of knowledge is the key for advancement. Realize that without discovering your weaknesses, it would be hard to improve yourself. No one is perfect, no one knows everything, but we all have the freedom to learn and discover our true potential. It's important to surround yourself with positivity and people who help you push forward to improve. Lifelong learning is a crucial key of success.", "raw": "Empower yourself with knowledge and skills. The power of knowledge is the key for advancement. Realize that without discovering your weaknesses, it would be hard to improve yourself. No one is perfect, no one knows everything, but we all have the freedom to learn and discover our true potential. It's important to surround yourself with positivity and people who help you push forward to improve. Lifelong learning is a crucial key of success.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "#LifelongLearning #Success #Enthusiasm ", "raw": "#LifelongLearning #Success #Enthusiasm ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Empower yourself with knowledge and skills. The power of knowledge is the key for advancement. Realize that without discovering your weaknesses, it would be hard to improve yourself. No one is perfect, no one knows everything, but we all have the freedom to learn and discover our true potential. It's important to surround yourself with positivity and people who help you push forward to improve. Lifelong learning is a crucial key of success. #LifelongLearning #Success #Enthusiasm
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647280de0c2b5fdaf1f49b87/KosE59r9VBXocSnoQEcxw.jpeg", "fullname": "Michael Shenoda", "name": "mshenoda", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "samusenps", "osanseviero" ], "count": 2 }, { "reaction": "👍", "users": [ "samusenps" ], "count": 1 } ]
2024-03-06T22:13:28.000Z
2024-03-07T03:34:02.078Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6538119803519fddb4a17e10/ffJMkdx-rM7VvLTCM6ri_.jpeg", "fullname": "samusenps", "name": "samusenps", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 91, "isFollowing": false } ]
/posts/mshenoda/337420060295579
256
1
712785447418194
[ { "type": "text", "value": "Hi everyone,", "raw": "Hi everyone,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I'd like to share our free GPT-4 chatbot: ", "raw": "I'd like to share our free GPT-4 chatbot: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/yuntian-deng/ChatGPT4", "href": null, "resource": { "type": "space", "id": "yuntian-deng/ChatGPT4", "discussionNum": null }, "url": "https://huggingface.co/spaces/yuntian-deng/ChatGPT4", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ". Data collected from it will be shared back with the community in future releases of the WildChat dataset: ", "raw": ". Data collected from it will be shared back with the community in future releases of the WildChat dataset: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/allenai/WildChat", "href": null, "resource": { "type": "dataset", "id": "allenai/WildChat", "discussionNum": null }, "url": "https://huggingface.co/datasets/allenai/WildChat", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ". Please help us reach 1 million conversations!", "raw": ". Please help us reach 1 million conversations!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thanks,", "raw": "Thanks,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Yuntian", "raw": "Yuntian", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hi everyone, I'd like to share our free GPT-4 chatbot: https://huggingface.co/spaces/yuntian-deng/ChatGPT4. Data collected from it will be shared back with the community in future releases of the WildChat dataset: https://huggingface.co/datasets/allenai/WildChat. Please help us reach 1 million conversations! Thanks, Yuntian
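For reference, released WildChat data can be pulled with the 🤗 datasets library as sketched below. This assumes the default configuration and a "train" split, and access may require accepting the dataset's terms on its Hub page and being logged in.

```python
# Minimal sketch: load the released WildChat conversations with 🤗 datasets.
# Assumes the default config and a "train" split; the dataset may be gated,
# so accepting its terms on the Hub and `huggingface-cli login` may be required first.
from datasets import load_dataset

wildchat = load_dataset("allenai/WildChat", split="train")
print(wildchat[0])  # one conversation record
```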
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63081e15a670ed10f9d44229/w1b9uq-9774bMMgJbSPsS.jpeg", "fullname": "Yuntian Deng", "name": "yuntian-deng", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 196, "isFollowing": false }
[]
[]
[ { "reaction": "🤗", "users": [ "yuntian-deng", "iDrops", "jinghan23", "ajibawa-2023", "osanseviero", "samusenps", "Dlbk", "Tonic", "Lewdiculous", "psy-taha", "artghoul", "RobertRoss", "asp933", "IdleIdiot", "SwedMlite", "Maria200035", "LinJuan" ], "count": 17 }, { "reaction": "❤️", "users": [ "artghoul", "Aerialyn", "lololololololokok", "caiquemoa", "Johanbb" ], "count": 5 }, { "reaction": "🔥", "users": [ "psy-taha", "artghoul", "hasi243" ], "count": 3 } ]
2024-03-06T22:10:56.000Z
2024-11-22T12:33:03.587Z
[ { "avatarUrl": "/avatars/8670c8b42b82685ceccac483a760edab.svg", "fullname": "Hellyc Huang", "name": "Hellyc01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "/avatars/2df385a1a554f2ce0da90ac9f2427d44.svg", "fullname": "nhan nguyen", "name": "nhannguyen26", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "/avatars/ebe9d1e21e3e56178e5db5acbb54d1ae.svg", "fullname": "Ockert Slabbert", "name": "Ocks", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "/avatars/c75047b7649e851e1bf6411794881b3b.svg", "fullname": "Johh West", "name": "Smokez17", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/yuntian-deng/712785447418194
11,729
4
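For readers who want to explore the WildChat releases mentioned in the post above, a minimal sketch using the `datasets` library is shown below. The split name is an assumption, and the dataset card on the Hub is the authoritative reference for the actual splits, schema, and any access terms.

```python
# Minimal sketch: loading the WildChat dataset referenced above.
# Assumptions: the `datasets` library is installed and a "train" split exists;
# check the dataset card for the actual splits and any access terms.
from datasets import load_dataset

wildchat = load_dataset("allenai/WildChat", split="train")
print(wildchat)            # number of rows and column names
print(wildchat[0].keys())  # fields of one logged conversation
```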
286795551282908
[ { "type": "text", "value": "The \"Design2Code: How Far Are We From Automating Front-End Engineering\" paper presents a benchmark for multimodal large language models (LLMs) aimed at automating front-end web development by translating webpage designs (screenshots) into code. This task evaluates the models' ability to recreate webpages that are visually and structurally similar to the original designs.", "raw": "The \"Design2Code: How Far Are We From Automating Front-End Engineering\" paper presents a benchmark for multimodal large language models (LLMs) aimed at automating front-end web development by translating webpage designs (screenshots) into code. This task evaluates the models' ability to recreate webpages that are visually and structurally similar to the original designs.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key Points:", "raw": "Key Points:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Introduces the Design2Code task and benchmark for converting webpage screenshots into code, aiming to automate front-end web development.", "raw": "* Introduces the Design2Code task and benchmark for converting webpage screenshots into code, aiming to automate front-end web development.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Evaluates multimodal LLMs using comprehensive metrics for visual similarity and element matching.", "raw": "* Evaluates multimodal LLMs using comprehensive metrics for visual similarity and element matching.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* GPT-4V outperforms other models in terms of visual resemblance and content accuracy, with generated webpages often preferred over the original references.", "raw": "* GPT-4V outperforms other models in terms of visual resemblance and content accuracy, with generated webpages often preferred over the original references.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", 
"value": null, "raw": "https://huggingface.co/papers/2403.03163", "href": null, "resource": { "type": "paper", "id": "2403.03163", "discussionNum": null }, "url": "https://huggingface.co/papers/2403.03163", "code": null, "user": null, "label": "Design2Code: How Far Are We From Automating Front-End Engineering? (2403.03163)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Project page: ", "raw": "Project page: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://salt-nlp.github.io/Design2Code/", "href": "https://salt-nlp.github.io/Design2Code/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset: ", "raw": "Dataset: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/SALT-NLP/Design2Code", "href": null, "resource": { "type": "dataset", "id": "SALT-NLP/Design2Code", "discussionNum": null }, "url": "https://huggingface.co/datasets/SALT-NLP/Design2Code", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Congrats to the authors for their work!", "raw": "Congrats to the authors for their work!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
The "Design2Code: How Far Are We From Automating Front-End Engineering" paper presents a benchmark for multimodal large language models (LLMs) aimed at automating front-end web development by translating webpage designs (screenshots) into code. This task evaluates the models' ability to recreate webpages that are visually and structurally similar to the original designs. Key Points: * Introduces the Design2Code task and benchmark for converting webpage screenshots into code, aiming to automate front-end web development. * Evaluates multimodal LLMs using comprehensive metrics for visual similarity and element matching. * GPT-4V outperforms other models in terms of visual resemblance and content accuracy, with generated webpages often preferred over the original references. Paper: https://huggingface.co/papers/2403.03163 Project page: https://salt-nlp.github.io/Design2Code/ Dataset: https://huggingface.co/datasets/SALT-NLP/Design2Code Congrats to the authors for their work!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/657217faabb25ed8aedd5e48/UUHAXeGtOnQBXFD3nYtf2.jpeg", "fullname": "Vlad Bogolin", "name": "vladbogo", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 109, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "osanseviero", "samusenps" ], "count": 2 }, { "reaction": "👍", "users": [ "samusenps" ], "count": 1 } ]
2024-03-06T22:10:01.000Z
2024-03-06T22:11:54.727Z
[]
/posts/vladbogo/286795551282908
37
0
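A hedged sketch of pulling the Design2Code benchmark linked in the post above for local inspection. The split name and the exact column layout are assumptions, so consult the dataset card before relying on them.

```python
# Minimal sketch: loading the Design2Code benchmark for inspection.
# The split name ("train") and the column schema are assumptions; the dataset
# card on the Hub is the authoritative reference.
from datasets import load_dataset

design2code = load_dataset("SALT-NLP/Design2Code", split="train")
print(design2code.column_names)  # inspect the schema before use
print(design2code[0])            # one real-world webpage test case
```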
704777926081097
[ { "type": "text", "value": "Hello world! ", "raw": "Hello world! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hello world!
{ "avatarUrl": "/avatars/d4947439475dc81f2c9e9304382b6257.svg", "fullname": "Qun Gao", "name": "qgao007", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }
[]
[]
[ { "reaction": "🤗", "users": [ "samusenps", "QizhiPei" ], "count": 2 } ]
2024-03-06T22:07:41.000Z
2024-03-08T11:13:16.308Z
[ { "avatarUrl": "/avatars/9fe1518c4b2d12e36733650bb0c87932.svg", "fullname": "Flouga Droi", "name": "flofloga", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/qgao007/704777926081097
31
1
625621795187857
[ { "type": "text", "value": "huggingface? X(formerly known as twitter)! ", "raw": "huggingface? X(formerly known as twitter)! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Yufan here, CS PhD student from UC San Diego. ", "raw": "Yufan here, CS PhD student from UC San Diego. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "LLMs as ML algorithms? see my latest work MetaTree: ", "raw": "LLMs as ML algorithms? see my latest work MetaTree: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2402.03774", "href": null, "resource": { "type": "paper", "id": "2402.03774", "discussionNum": null }, "url": "https://huggingface.co/papers/2402.03774", "code": null, "user": null, "label": "Learning a Decision Tree Algorithm with Transformers (2402.03774)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read more:", "raw": "Read more:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://evanzhuang.github.io/", "href": "https://evanzhuang.github.io/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://twitter.com/yufan_zhuang", "href": "https://twitter.com/yufan_zhuang", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
huggingface? X(formerly known as twitter)! Yufan here, CS PhD student from UC San Diego. LLMs as ML algorithms? see my latest work MetaTree: https://huggingface.co/papers/2402.03774 Read more: https://evanzhuang.github.io/ https://twitter.com/yufan_zhuang
{ "avatarUrl": "/avatars/baa624d417b0b905e82127dc66346478.svg", "fullname": "Yufan Zhuang", "name": "yzhuang", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 8, "isFollowing": false }
[]
[]
[ { "reaction": "🤗", "users": [ "yzhuang", "qnguyen3", "osanseviero", "victor", "samusenps", "shing3232", "ZennyKenny" ], "count": 7 } ]
2024-03-06T22:00:21.000Z
2024-03-06T22:00:21.397Z
[]
/posts/yzhuang/625621795187857
407
0
515989665788135
[ { "type": "text", "value": "Wow! Hello world. How's ECCV submissions going?", "raw": "Wow! Hello world. How's ECCV submissions going?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Wow! Hello world. How's ECCV submissions going?
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626a9b5205fe1cb65720e00e/hyWcWn_8jVZsu1Yc5Z0R8.png", "fullname": "Bill Psomas", "name": "billpsomas", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }
[]
[]
[ { "reaction": "🤗", "users": [ "osanseviero" ], "count": 1 } ]
2024-03-06T21:37:59.000Z
2024-03-06T21:37:59.215Z
[]
/posts/billpsomas/515989665788135
37
0
369453933037543
[ { "type": "text", "value": "Hi everyone! ", "raw": "Hi everyone! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Looking forward to engaging with you all 🤗", "raw": "Looking forward to engaging with you all 🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hi everyone! Looking forward to engaging with you all 🤗
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1664378156457-noauth.png", "fullname": "Marko Vidrih", "name": "MarkoVidrih", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 10, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "Rexhaif", "Puyush", "Gigahardglob" ], "count": 3 } ]
2024-03-06T21:32:06.000Z
2024-03-25T23:15:29.396Z
[ { "avatarUrl": "/avatars/b238e7bb6d0dca0c8b9a5b7142d742d8.svg", "fullname": "Puyush Gupta", "name": "Puyush", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 4, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1664378156457-noauth.png", "fullname": "Marko Vidrih", "name": "MarkoVidrih", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 10, "isFollowing": false } ]
/posts/MarkoVidrih/369453933037543
526
6
844012818399912
[ { "type": "text", "value": "Hello world! 🔥", "raw": "Hello world! 🔥", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hello world! 🔥
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5e4318d616b09a31220980d6/24rMJ_vPh3gW9ZEmj64xr.png", "fullname": "Manuel Romero", "name": "mrm8488", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2200, "isFollowing": false }
[]
[]
[ { "reaction": "🤗", "users": [ "radames", "DmitryRyumin", "weizhey", "osanseviero", "stefan-it", "apol", "taesiri", "ZennyKenny", "den0620", "samusenps", "edeani", "rahmanansari", "iv7dev", "Nacholmo", "erickdp", "EddyGiusepe", "jbilcke-hf", "leegao19", "Noomam" ], "count": 19 }, { "reaction": "👍", "users": [ "rtscott2001", "edeani", "CreitinGameplays", "jbilcke-hf" ], "count": 4 }, { "reaction": "🤝", "users": [ "junaid1993", "jbilcke-hf" ], "count": 2 } ]
2024-03-06T20:39:40.000Z
2024-03-06T20:39:40.862Z
[]
/posts/mrm8488/844012818399912
2,207
0
289347657662488
[ { "type": "text", "value": "Sharing our paper and library for building LLM agent. The library is less than 1K code lines!", "raw": "Sharing our paper and library for building LLM agent. The library is less than 1K code lines!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/SalesforceAIResearch/AgentLite", "href": "https://github.com/SalesforceAIResearch/AgentLite", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2402.15538", "href": "https://arxiv.org/abs/2402.15538", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Sharing our paper and library for building LLM agent. The library is less than 1K code lines! https://github.com/SalesforceAIResearch/AgentLite https://arxiv.org/abs/2402.15538
{ "avatarUrl": "/avatars/89f118f880cce3d01658b123bbbf4402.svg", "fullname": "Zhiwei Liu", "name": "jimzhiwei", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "clem", "osanseviero", "taesiri", "qgao007", "victor", "samusenps", "kumarh1982", "Hanyu66", "theArif" ], "count": 9 } ]
2024-03-06T20:33:27.000Z
2024-03-06T20:33:27.402Z
[]
/posts/jimzhiwei/289347657662488
18
0
170404494417782
[ { "type": "text", "value": "🤗", "raw": "🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🤗
{ "avatarUrl": "/avatars/bbfffad6f86fae9851ddfea1d328a9cd.svg", "fullname": "Artan Salihu", "name": "Artan", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }
[]
[]
[ { "reaction": "🤗", "users": [ "osanseviero", "Suparious", "samusenps", "dashfunnydashdash", "ZennyKenny", "thomwolf", "Lewdiculous", "victor" ], "count": 8 }, { "reaction": "🤝", "users": [ "victor" ], "count": 1 } ]
2024-03-06T20:27:33.000Z
2024-03-06T20:27:33.202Z
[]
/posts/Artan/170404494417782
18
0
529779874653554
[ { "type": "text", "value": "👋 I'm Sam", "raw": "👋 I'm Sam", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I use ML and HPC to accelerate scientific discovery @ Argonne National Laboratory*", "raw": "I use ML and HPC to accelerate scientific discovery @ Argonne National Laboratory*", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://samforeman.me", "href": "https://samforeman.me", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://twitter.com/saforem2", "href": "https://twitter.com/saforem2", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/saforem2", "href": "https://github.com/saforem2", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* ", "raw": "* ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://alcf.anl.gov/about/people/sam-foreman", "href": "https://alcf.anl.gov/about/people/sam-foreman", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
👋 I'm Sam I use ML and HPC to accelerate scientific discovery @ Argonne National Laboratory* https://samforeman.me https://twitter.com/saforem2 https://github.com/saforem2 * https://alcf.anl.gov/about/people/sam-foreman
{ "avatarUrl": "/avatars/460be23a3e2eb8d344633c3dfdab8a18.svg", "fullname": "Sam Foreman", "name": "samforeman", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 7, "isFollowing": false }
[]
[]
[ { "reaction": "🤗", "users": [ "osanseviero", "tomaarsen", "victor", "medmac01", "mozayed" ], "count": 5 } ]
2024-03-06T20:18:59.000Z
2024-03-06T20:18:59.967Z
[]
/posts/samforeman/529779874653554
37
0
773812352717275
[ { "type": "text", "value": "Hello world!", "raw": "Hello world!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hello world!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1630793941366-noauth.png", "fullname": "Jay Wang", "name": "xiaohk", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 6, "isFollowing": false }
[]
[]
[ { "reaction": "🤗", "users": [ "weizhey", "osanseviero", "tomaarsen", "MarinaraSpaghetti", "samusenps", "polodealvarado", "Lewdiculous", "HaiderSultanArc" ], "count": 8 } ]
2024-03-06T20:08:30.000Z
2024-03-06T20:08:30.772Z
[]
/posts/xiaohk/773812352717275
42
0
107447901609269
[ { "type": "text", "value": "Hi everyone,", "raw": "Hi everyone,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I'd like to share our project on open-type Named Entity Recognition (NER). Our model uses a transformer encoder (BERT-like), making the computation overhead very minimal compared to use of LLMs. I've developed a demo that runs on CPU on Google Colab.", "raw": "I'd like to share our project on open-type Named Entity Recognition (NER). Our model uses a transformer encoder (BERT-like), making the computation overhead very minimal compared to use of LLMs. I've developed a demo that runs on CPU on Google Colab.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Colab Demo: ", "raw": "Colab Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://colab.research.google.com/drive/1mhalKWzmfSTqMnR0wQBZvt9-ktTsATHB?usp=sharing", "href": "https://colab.research.google.com/drive/1mhalKWzmfSTqMnR0wQBZvt9-ktTsATHB?usp=sharing", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Code: ", "raw": "Code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/urchade/GLiNER", "href": "https://github.com/urchade/GLiNER", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2311.08526", "href": "https://arxiv.org/abs/2311.08526", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hi everyone, I'd like to share our project on open-type Named Entity Recognition (NER). Our model uses a transformer encoder (BERT-like), making the computation overhead very minimal compared to use of LLMs. I've developed a demo that runs on CPU on Google Colab. Colab Demo: https://colab.research.google.com/drive/1mhalKWzmfSTqMnR0wQBZvt9-ktTsATHB?usp=sharing Code: https://github.com/urchade/GLiNER Paper: https://arxiv.org/abs/2311.08526
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62111fdbe1d974ee5bcbfa27/YUzX6lBvW8pbxDorx1kgV.png", "fullname": "Urchade Zaratiana", "name": "urchade", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 149, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62111fdbe1d974ee5bcbfa27/Jqys92J-BfM06rAsM67Kr.png" } ]
[]
[ { "reaction": "❤️", "users": [ "clem", "NePe", "sgarbi", "osanseviero", "samusenps", "Tanvir1337", "medmac01", "merve", "ruman1", "awinml", "maortal", "Antoine", "imihalcea", "richgjacksonaz", "Bailefan", "eek" ], "count": 16 }, { "reaction": "🔥", "users": [ "shahasim", "nicodecoker", "Hildeberto" ], "count": 3 } ]
2024-03-06T20:03:13.000Z
2024-04-09T21:21:37.866Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6317233cc92fd6fee317e030/cJHSvvimr1kqgQfHOjO5n.png", "fullname": "Tom Aarsen", "name": "tomaarsen", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1060, "isFollowing": false }, { "avatarUrl": "/avatars/ae146a62c56a71e51818a620b344d99e.svg", "fullname": "Mitesh Yadav", "name": "Myadav", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62111fdbe1d974ee5bcbfa27/YUzX6lBvW8pbxDorx1kgV.png", "fullname": "Urchade Zaratiana", "name": "urchade", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 149, "isFollowing": false }, { "avatarUrl": "/avatars/bfcfd60cef1e214cd79997e5aa4debca.svg", "fullname": "adam field", "name": "adamvf", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false } ]
/posts/urchade/107447901609269
1,234
8
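A minimal usage sketch for the open-type NER model described in the post above. The checkpoint id "urchade/gliner_base" and the exact method names are assumptions based on the call pattern documented in the linked repository; check https://github.com/urchade/GLiNER for the current API.

```python
# Hedged sketch: open-type NER with GLiNER (encoder-based, runs on CPU).
# The checkpoint "urchade/gliner_base" and the predict_entities signature
# are assumptions; verify against the repository README.
from gliner import GLiNER

model = GLiNER.from_pretrained("urchade/gliner_base")

text = "Cristiano Ronaldo joined Al Nassr in Riyadh in January 2023."
labels = ["person", "organization", "location", "date"]  # open label set

entities = model.predict_entities(text, labels, threshold=0.5)
for ent in entities:
    print(ent["text"], "->", ent["label"])
```

Because the entity types are passed at inference time, the same checkpoint can be reused for new label sets without retraining, which is the point of the open-type setup.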
643116669090778
[ { "type": "text", "value": "DeepLearning.AI just announced a new short course: Open Source Models with Hugging Face 🤗, taught by Hugging Face's own Maria Khalusova, Marc Sun and Younes Belkada! ", "raw": "DeepLearning.AI just announced a new short course: Open Source Models with Hugging Face 🤗, taught by Hugging Face's own Maria Khalusova, Marc Sun and Younes Belkada! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "As many of you already know, Hugging Face has been a game changer by letting developers quickly grab any of hundreds of thousands of already-trained open source models to assemble into new applications. This course teaches you best practices for building this way, including how to search and choose among models. ", "raw": "As many of you already know, Hugging Face has been a game changer by letting developers quickly grab any of hundreds of thousands of already-trained open source models to assemble into new applications. This course teaches you best practices for building this way, including how to search and choose among models. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You'll learn to use the Transformers library and walk through multiple models for text, audio, and image processing, including zero-shot image segmentation, zero-shot audio classification, and speech recognition. You'll also learn to use multimodal models for visual question answering, image search, and image captioning. Finally, you’ll learn how to demo what you build locally, on the cloud, or via an API using Gradio and Hugging Face Spaces. ", "raw": "You'll learn to use the Transformers library and walk through multiple models for text, audio, and image processing, including zero-shot image segmentation, zero-shot audio classification, and speech recognition. You'll also learn to use multimodal models for visual question answering, image search, and image captioning. Finally, you’ll learn how to demo what you build locally, on the cloud, or via an API using Gradio and Hugging Face Spaces. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thank you very much to Hugging Face's wonderful team for working with us on this. ", "raw": "Thank you very much to Hugging Face's wonderful team for working with us on this. 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can sign up for the course here: ", "raw": "You can sign up for the course here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.deeplearning.ai/short-courses/open-source-models-hugging-face/", "href": "https://www.deeplearning.ai/short-courses/open-source-models-hugging-face/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
DeepLearning.AI just announced a new short course: Open Source Models with Hugging Face 🤗, taught by Hugging Face's own Maria Khalusova, Marc Sun and Younes Belkada! As many of you already know, Hugging Face has been a game changer by letting developers quickly grab any of hundreds of thousands of already-trained open source models to assemble into new applications. This course teaches you best practices for building this way, including how to search and choose among models. You'll learn to use the Transformers library and walk through multiple models for text, audio, and image processing, including zero-shot image segmentation, zero-shot audio classification, and speech recognition. You'll also learn to use multimodal models for visual question answering, image search, and image captioning. Finally, you’ll learn how to demo what you build locally, on the cloud, or via an API using Gradio and Hugging Face Spaces. Thank you very much to Hugging Face's wonderful team for working with us on this. You can sign up for the course here: https://www.deeplearning.ai/short-courses/open-source-models-hugging-face/
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6467fcf0946476c5d2194c14/zAy7PYR3HkC9NWcpZw8X1.png", "fullname": "Andrew Ng", "name": "andrewyng", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 178, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6467fcf0946476c5d2194c14/8S8jMNwCioZ7-sQM1xJsp.png" } ]
[]
[ { "reaction": "👍", "users": [ "osanseviero", "MariaK", "clem", "abidlabs", "Wauplin", "thomwolf", "Jofthomas", "kramp", "julien-c", "Violette", "philschmid", "VictorSanh", "lhoestq", "victor", "DmitryRyumin", "brunatrevelin", "m-ric", "loubnabnl", "samusenps", "leegao19", "radames", "P3rry", "JAkinyemi", "qgao007", "mvaloatto", "jeffboudier", "ybelkada", "hysts", "neerajjulka", "lysandre", "linoyts", "muhtasham", "madroid", "marcsun13", "keerekeerweere", "OfferL", "MarinaraSpaghetti", "bulentsiyah", "dijvar", "pankaj-munde", "medmac01", "beenakurian" ], "count": 42 }, { "reaction": "❤️", "users": [ "clem", "abidlabs", "Wauplin", "thomwolf", "osanseviero", "Jofthomas", "julien-c", "Violette", "philschmid", "lhoestq", "victor", "katielink", "loubnabnl", "samusenps", "rowbradley", "hmb", "andrewrreed", "radames", "rtscott2001", "yjernite", "jeffboudier", "ybelkada", "lysandre", "muhtasham", "madroid", "marcsun13", "quibblerquery", "dimbyTa", "mayacinka" ], "count": 29 }, { "reaction": "🤯", "users": [ "osanseviero", "clem", "abidlabs", "Wauplin", "thomwolf", "Jofthomas", "julien-c", "Violette", "philschmid", "VictorSanh", "lhoestq", "victor", "brunatrevelin", "m-ric", "loubnabnl", "radames", "ybelkada", "lysandre", "muhtasham", "marcsun13" ], "count": 20 } ]
2024-03-06T17:23:00.000Z
2024-03-06T17:25:36.683Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1621947938344-noauth.png", "fullname": "Abubakar Abid", "name": "abidlabs", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 487, "isFollowing": false } ]
/posts/andrewyng/643116669090778
420
1
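As a taste of the workflow the course above covers (pick an open model from the Hub, run it through a `transformers` pipeline, demo it with Gradio), here is a hedged sketch. The Whisper checkpoint is an illustrative choice, not necessarily the one used in the lessons.

```python
# Hedged sketch: speech recognition with an open Hub model plus a Gradio demo.
# "openai/whisper-tiny" is an illustrative checkpoint; any ASR model from the
# Hub should slot into the same pipeline call.
import gradio as gr
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")

def transcribe(audio_path: str) -> str:
    # The pipeline accepts a path to an audio file and returns a dict with "text"
    return asr(audio_path)["text"]

demo = gr.Interface(fn=transcribe, inputs=gr.Audio(type="filepath"), outputs="text")
demo.launch()  # runs locally; pushing to a Hugging Face Space hosts the same app
```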
586633084598443
[ { "type": "text", "value": "Design2Code", "raw": "Design2Code", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How Far Are We From Automating Front-End Engineering?", "raw": "How Far Are We From Automating Front-End Engineering?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2403.03163", "href": null, "resource": { "type": "paper", "id": "2403.03163", "discussionNum": null }, "url": "https://huggingface.co/papers/2403.03163", "code": null, "user": null, "label": "Design2Code: How Far Are We From Automating Front-End Engineering? (2403.03163)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Generative AI has made rapid advancements in recent years, achieving unprecedented capabilities in multimodal understanding and code generation. This can enable a new paradigm of front-end development, in which multimodal LLMs might directly convert visual designs into code implementations. In this work, we formalize this as a Design2Code task and conduct comprehensive benchmarking. Specifically, we manually curate a benchmark of 484 diverse real-world webpages as test cases and develop a set of automatic evaluation metrics to assess how well current multimodal LLMs can generate the code implementations that directly render into the given reference webpages, given the screenshots as input. We also complement automatic metrics with comprehensive human evaluations. We develop a suite of multimodal prompting methods and show their effectiveness on GPT-4V and Gemini Pro Vision. We further finetune an open-source Design2Code-18B model that successfully matches the performance of Gemini Pro Vision. Both human evaluation and automatic metrics show that GPT-4V performs the best on this task compared to other models. Moreover, annotators think GPT-4V generated webpages can replace the original reference webpages in 49% of cases in terms of visual appearance and content; and perhaps surprisingly, in 64% of cases GPT-4V generated webpages are considered better than the original reference webpages. 
Our fine-grained break-down metrics indicate that open-source models mostly lag in recalling visual elements from the input webpages and in generating correct layout designs, while aspects like text content and coloring can be drastically improved with proper finetuning.", "raw": "Generative AI has made rapid advancements in recent years, achieving unprecedented capabilities in multimodal understanding and code generation. This can enable a new paradigm of front-end development, in which multimodal LLMs might directly convert visual designs into code implementations. In this work, we formalize this as a Design2Code task and conduct comprehensive benchmarking. Specifically, we manually curate a benchmark of 484 diverse real-world webpages as test cases and develop a set of automatic evaluation metrics to assess how well current multimodal LLMs can generate the code implementations that directly render into the given reference webpages, given the screenshots as input. We also complement automatic metrics with comprehensive human evaluations. We develop a suite of multimodal prompting methods and show their effectiveness on GPT-4V and Gemini Pro Vision. We further finetune an open-source Design2Code-18B model that successfully matches the performance of Gemini Pro Vision. Both human evaluation and automatic metrics show that GPT-4V performs the best on this task compared to other models. Moreover, annotators think GPT-4V generated webpages can replace the original reference webpages in 49% of cases in terms of visual appearance and content; and perhaps surprisingly, in 64% of cases GPT-4V generated webpages are considered better than the original reference webpages. Our fine-grained break-down metrics indicate that open-source models mostly lag in recalling visual elements from the input webpages and in generating correct layout designs, while aspects like text content and coloring can be drastically improved with proper finetuning.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Design2Code How Far Are We From Automating Front-End Engineering? https://huggingface.co/papers/2403.03163 Generative AI has made rapid advancements in recent years, achieving unprecedented capabilities in multimodal understanding and code generation. This can enable a new paradigm of front-end development, in which multimodal LLMs might directly convert visual designs into code implementations. In this work, we formalize this as a Design2Code task and conduct comprehensive benchmarking. Specifically, we manually curate a benchmark of 484 diverse real-world webpages as test cases and develop a set of automatic evaluation metrics to assess how well current multimodal LLMs can generate the code implementations that directly render into the given reference webpages, given the screenshots as input. We also complement automatic metrics with comprehensive human evaluations. We develop a suite of multimodal prompting methods and show their effectiveness on GPT-4V and Gemini Pro Vision. We further finetune an open-source Design2Code-18B model that successfully matches the performance of Gemini Pro Vision. Both human evaluation and automatic metrics show that GPT-4V performs the best on this task compared to other models. Moreover, annotators think GPT-4V generated webpages can replace the original reference webpages in 49% of cases in terms of visual appearance and content; and perhaps surprisingly, in 64% of cases GPT-4V generated webpages are considered better than the original reference webpages. Our fine-grained break-down metrics indicate that open-source models mostly lag in recalling visual elements from the input webpages and in generating correct layout designs, while aspects like text content and coloring can be drastically improved with proper finetuning.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/fAQ2f7bO2wmqhQmGB5zZD.png" } ]
[]
[ { "reaction": "❤️", "users": [ "victor", "DavidVivancos", "hmb", "vladbogo", "clem", "TheSaifurRahman", "sgarbi", "krishnapraveen", "samusenps", "jnopareboateng", "seanmiranda", "julien-c", "awacke1", "marcelovicentegc", "Ananze" ], "count": 15 } ]
2024-03-06T16:03:50.000Z
2024-03-13T02:18:00.434Z
[ { "avatarUrl": "/avatars/8f5c8843cb98c7590b776a2a9c98d7e7.svg", "fullname": "KAKOU", "name": "Ananze", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1671537650254-noauth.jpeg", "fullname": "David Vivancos", "name": "DavidVivancos", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 27, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63c3490c6e6561b339e3bbec/99e-dpy7_V-CnfplrchYl.jpeg", "fullname": "hannah", "name": "hmb", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 28, "isFollowing": false }, { "avatarUrl": "/avatars/20accb6d5780bae134e8b266068c4eae.svg", "fullname": "krishna praveen", "name": "krishnapraveen", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false } ]
/posts/akhaliq/586633084598443
108
4
602953440406884
[ { "type": "text", "value": "Hey, it took some time but I finally moved out and got internet back, so here I am again!", "raw": "Hey, it took some time but I finally moved out and got internet back, so here I am again!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A lot of things to get updated on, I will try to reply to each of you ASAP.", "raw": "A lot of things to get updated on, I will try to reply to each of you ASAP.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "See you soon!", "raw": "See you soon!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hey, it took some time but I finally moved out and got internet back, so here I am again! A lot of things to get updated on, I will try to reply to each of you ASAP. See you soon!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63ab1241ad514ca8d1430003/d-43TcOxG-zqAbzrH2m7H.png", "fullname": "Undi", "name": "Undi95", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 3311, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "kawaiibastard", "RUcrittingME", "Ainonake", "MarinaraSpaghetti", "victor", "okeksama", "papercanteen111", "saishf", "WaefreBeorn", "eevee32x", "antiven0m", "010O11", "damerajee", "CarlosIABn", "samusenps", "AtonMountlook", "krisshen", "Zeldazackman", "Diavator", "Adriato", "wath5", "ConquestofElysium", "jarjarbinks", "SGNghiax", "SerialKicked", "ClaudioItaly", "WilsonWan", "IHaBiS", "locke12", "WeForgot", "Shadowplague", "Tillx85" ], "count": 32 }, { "reaction": "👍", "users": [ "mayurninama", "bluuwhale", "ZZnake", "rywiz", "Diavator", "cpham9000", "xpgx1", "AtonMountlook", "frgthzjmk" ], "count": 9 }, { "reaction": "🤗", "users": [ "WaefreBeorn", "eevee32x", "damerajee", "Diavator" ], "count": 4 }, { "reaction": "🔥", "users": [ "ggnick" ], "count": 1 } ]
2024-03-06T15:46:03.000Z
2024-03-15T21:46:52.326Z
[ { "avatarUrl": "/avatars/92fafa8b0d8ec55e718bcc3ec8fc3fe5.svg", "fullname": "Arena", "name": "ClaudioItaly", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 7, "isFollowing": false } ]
/posts/Undi95/602953440406884
13,451
1
562021819495752
[ { "type": "text", "value": "hey there folks new Yi model just came out , and i had a gradio interface ready since their last releases.", "raw": "hey there folks new Yi model just came out , and i had a gradio interface ready since their last releases.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "it's just a base model but you can check it out here : ", "raw": "it's just a base model but you can check it out here : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Tonic/Yi-9B", "href": null, "resource": { "type": "space", "id": "Tonic/Yi-9B", "discussionNum": null }, "url": "https://huggingface.co/spaces/Tonic/Yi-9B", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "cant wait to fine tune this one 🤗🚀", "raw": "cant wait to fine tune this one 🤗🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
hey there folks new Yi model just came out , and i had a gradio interface ready since their last releases. it's just a base model but you can check it out here : https://huggingface.co/spaces/Tonic/Yi-9B cant wait to fine tune this one 🤗🚀
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }
[]
[]
[ { "reaction": "🤝", "users": [ "victor", "osanseviero", "clem" ], "count": 3 } ]
2024-03-06T13:27:25.000Z
2024-03-06T13:27:25.918Z
[]
/posts/Tonic/562021819495752
35
0
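For readers who prefer plain `transformers` over the Space linked above, a hedged sketch of running a Yi base checkpoint follows. The model id "01-ai/Yi-9B" is inferred from the Space name, and since it is a base model it only does text continuation, not chat.

```python
# Hedged sketch: plain text continuation with a Yi base model.
# The checkpoint id "01-ai/Yi-9B" is inferred from the Space name above and
# is an assumption; base models continue text rather than follow instructions.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "01-ai/Yi-9B"  # assumed checkpoint id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

prompt = "Open-source language models are useful because"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```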
649809048078385
[ { "type": "text", "value": "You can now use gpt4all.io to instantly search, download, and chat with models hosted on huggingface!", "raw": "You can now use gpt4all.io to instantly search, download, and chat with models hosted on huggingface!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
You can now use gpt4all.io to instantly search, download, and chat with models hosted on huggingface!
{ "avatarUrl": "/avatars/68b9f9a8a5d7b3b951932527deab0275.svg", "fullname": "Brandon Duderstadt", "name": "bstadt", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 10, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6421f12aeaad1bcb28b22bb4/JYZr2IdoWqRMa53Qzl81v.gif" } ]
[]
[ { "reaction": "👍", "users": [ "osanseviero", "victor", "diegotluz", "samusenps", "ybelkada", "Tonic", "pcuenq", "ajibawa-2023", "davanstrien", "clem", "zpn", "EtienneDosSantos", "Nymbo" ], "count": 13 }, { "reaction": "❤️", "users": [ "osanseviero", "samusenps", "ybelkada", "alielfilali01", "pcuenq", "clem", "Hugorowan", "zpn", "PeepDaSlan9", "Nymbo" ], "count": 10 } ]
2024-03-05T22:13:50.000Z
2024-03-06T03:02:04.745Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648631057413-noauth.png", "fullname": "Younes Belkada", "name": "ybelkada", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 417, "isFollowing": false }, { "avatarUrl": "/avatars/4d77428c302dc8866e0073c3ce667323.svg", "fullname": "vhjghvy uyfyfuyfy", "name": "WbjuSrceu", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/bstadt/649809048078385
47
2
733100901107547
[ { "type": "text", "value": "🚀😈🌟 New Research Alert - CVPR 2024! 🌟😈 🚀", "raw": "🚀😈🌟 New Research Alert - CVPR 2024! 🌟😈 🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📄 Title: SyncTalk: The Devil 😈 is in the Synchronization for Talking Head Synthesis 🌟🚀", "raw": "📄 Title: SyncTalk: The Devil 😈 is in the Synchronization for Talking Head Synthesis 🌟🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📝 Description: SyncTalk synthesizes synchronized talking head videos, employing tri-plane hash representations to maintain subject identity. It can generate synchronized lip movements, facial expressions, and stable head poses, and restores hair details to create high-resolution videos.", "raw": "📝 Description: SyncTalk synthesizes synchronized talking head videos, employing tri-plane hash representations to maintain subject identity. It can generate synchronized lip movements, facial expressions, and stable head poses, and restores hair details to create high-resolution videos.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👥 Authors: Ziqiao Peng et al.", "raw": "👥 Authors: Ziqiao Peng et al.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸", "raw": "📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Paper: ", "raw": "🔗 Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2311.17590", "href": null, "resource": { "type": "paper", "id": "2311.17590", "discussionNum": null }, "url": "https://huggingface.co/papers/2311.17590", "code": null, "user": null, "label": "SyncTalk: 
The Devil is in the Synchronization for Talking Head Synthesis (2311.17590)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Github Page: ", "raw": "🔗 Github Page: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://ziqiaopeng.github.io/synctalk", "href": "https://ziqiaopeng.github.io/synctalk", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Repository: ", "raw": "🔗 Repository: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/ZiqiaoPeng/SyncTalk", "href": "https://github.com/ZiqiaoPeng/SyncTalk", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📚 More Papers: more cutting-edge research presented at other conferences in the ", "raw": "📚 More Papers: more cutting-edge research presented at other conferences in the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "href": null, "resource": { "type": "space", "id": "DmitryRyumin/NewEraAI-Papers", "discussionNum": null }, "url": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " curated by ", "raw": " curated by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@DmitryRyumin", "href": null, "resource": null, "url": null, "code": null, "user": "DmitryRyumin", "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🚀 Added to the Avatars Collection: ", "raw": "🚀 Added to the Avatars Collection: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "href": null, "resource": { "type": "collection", "id": "DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "discussionNum": null }, "url": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "code": null, "user": null, 
"label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔍 Keywords: #TalkingHeads #Synthesis #TriPlaneHash #FacialExpressions #LipSyncing #HighResolutionVideos #CVPR2024 #DeepLearning #Animation #Innovation", "raw": "🔍 Keywords: #TalkingHeads #Synthesis #TriPlaneHash #FacialExpressions #LipSyncing #HighResolutionVideos #CVPR2024 #DeepLearning #Animation #Innovation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🚀😈🌟 New Research Alert - CVPR 2024! 🌟😈 🚀 📄 Title: SyncTalk: The Devil 😈 is in the Synchronization for Talking Head Synthesis 🌟🚀 📝 Description: SyncTalk synthesizes synchronized talking head videos, employing tri-plane hash representations to maintain subject identity. It can generate synchronized lip movements, facial expressions, and stable head poses, and restore hair details to create high-resolution videos. 👥 Authors: Ziqiao Peng et al. 📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸 🔗 Paper: https://huggingface.co/papers/2311.17590 🔗 Github Page: https://ziqiaopeng.github.io/synctalk 🔗 Repository: https://github.com/ZiqiaoPeng/SyncTalk 📚 More Papers: more cutting-edge research presented at other conferences in the https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin 🚀 Added to the Avatars Collection: https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36 🔍 Keywords: #TalkingHeads #Synthesis #TriPlaneHash #FacialExpressions #LipSyncing #HighResolutionVideos #CVPR2024 #DeepLearning #Animation #Innovation
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/XDVLZECN-Xyr525ieQL1j.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/3pCj3NFSJtgmWEXHOfBSV.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/hMQKzQbkiWqqMqGeNpJjl.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/qQHOdGXI4UgHzfgyIbbMQ.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377 } ]
[ { "reaction": "👍", "users": [ "DmitryRyumin", "osanseviero", "samusenps", "minhdang", "Tonic", "takeraparterer", "brainhome", "MiSTe-R", "JasonZJ", "victor", "vladbogo", "clem", "dashfunnydashdash", "lvalue" ], "count": 14 }, { "reaction": "❤️", "users": [ "clem", "samusenps" ], "count": 2 } ]
2024-03-05T21:31:24.000Z
2024-03-05T21:31:54.206Z
[]
/posts/DmitryRyumin/733100901107547
128
0
674644082063278
[ { "type": "text", "value": "Diaries of Open Source. Part 2. Open Source is going brrrrr", "raw": "Diaries of Open Source. Part 2. Open Source is going brrrrr", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🚀The European Space Agency releases MajorTOM, a dataset of earth observation covering half the earth. The dataset has 2.5 trillion pixels! Congrats ", "raw": "🚀The European Space Agency releases MajorTOM, a dataset of earth observation covering half the earth. The dataset has 2.5 trillion pixels! Congrats ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@aliFrancis", "href": null, "resource": null, "url": null, "code": null, "user": "aliFrancis", "label": null, "lang": null }, { "type": "text", "value": " and ", "raw": " and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@mikonvergence", "href": null, "resource": null, "url": null, "code": null, "user": "mikonvergence", "label": null, "lang": null }, { "type": "text", "value": " !", "raw": " !", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset: ", "raw": "Dataset: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/Major-TOM/Core-S2L2A", "href": null, "resource": { "type": "dataset", "id": "Major-TOM/Core-S2L2A", "discussionNum": null }, "url": "https://huggingface.co/datasets/Major-TOM/Core-S2L2A", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Viewer: ", "raw": "Viewer: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Major-TOM/MajorTOM-Core-Viewer", "href": null, "resource": { "type": "space", "id": "Major-TOM/MajorTOM-Core-Viewer", "discussionNum": null }, "url": "https://huggingface.co/spaces/Major-TOM/MajorTOM-Core-Viewer", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🍞Re-ranking models by MixedBreadAI, with very high quality, Apache 2 license, and easy to use!", "raw": "🍞Re-ranking models by MixedBreadAI, with very high quality, Apache 2 license, and easy to use!", "href": null, "resource": null, "url": 
null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Models: ", "raw": "Models: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/models?other=reranker&sort=trending&search=mixedbread-ai", "href": "https://huggingface.co/models?other=reranker&sort=trending&search=mixedbread-ai", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Blog: ", "raw": "Blog: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.mixedbread.ai/blog/mxbai-rerank-v1", "href": "https://www.mixedbread.ai/blog/mxbai-rerank-v1", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🧊StabilityAI and TripoAI release TripoSR, a super-fast MIT-licensed image-to-3D model!", "raw": "🧊StabilityAI and TripoAI release TripoSR, a super-fast MIT-licensed image-to-3D model!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model: ", "raw": "Model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/stabilityai/TripoSR", "href": null, "resource": { "type": "model", "id": "stabilityai/TripoSR", "discussionNum": null }, "url": "https://huggingface.co/stabilityai/TripoSR", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Demo: ", "raw": "Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/stabilityai/TripoSR", "href": null, "resource": { "type": "space", "id": "stabilityai/TripoSR", "discussionNum": null }, "url": "https://huggingface.co/spaces/stabilityai/TripoSR", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🤝Together AI and HazyResearch release Based ", "raw": "🤝Together AI and HazyResearch release Based ", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Models and datasets: ", "raw": "Models and datasets: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/hazyresearch/based-65d77fb76f9c813c8b94339c", "href": null, "resource": { "type": "collection", "id": "hazyresearch/based-65d77fb76f9c813c8b94339c", "discussionNum": null }, "url": "https://huggingface.co/collections/hazyresearch/based-65d77fb76f9c813c8b94339c", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "GH repo: ", "raw": "GH repo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/HazyResearch/based", "href": "https://github.com/HazyResearch/based", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🌊LaVague: an open-source pipeline to turn natural language into browser actions! It can run locally with ", "raw": "🌊LaVague: an open-source pipeline to turn natural language into browser actions! 
It can run locally with ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1", "href": null, "resource": { "type": "model", "id": "HuggingFaceH4/zephyr-7b-gemma-v0.1", "discussionNum": null }, "url": "https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read more about it at ", "raw": "Read more about it at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/posts/dhuynh95/717319217106504", "href": "https://huggingface.co/posts/dhuynh95/717319217106504", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🏆Berkeley Function-Calling Leaderboard ", "raw": "🏆Berkeley Function-Calling Leaderboard ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read about it: ", "raw": "Read about it: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://gorilla.cs.berkeley.edu/blogs/8_berkeley_function_calling_leaderboard.html", "href": "https://gorilla.cs.berkeley.edu/blogs/8_berkeley_function_calling_leaderboard.html", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Leaderboard: ", "raw": "Leaderboard: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://gorilla.cs.berkeley.edu/leaderboard.html", "href": "https://gorilla.cs.berkeley.edu/leaderboard.html", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🐬Sailor-Chat: chat models built on top of OpenOrca and ", "raw": "🐬Sailor-Chat: chat models built on top of OpenOrca and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@sarahooker", "href": null, "resource": null, "url": null, "code": null, "user": "sarahooker", "label": null, "lang": null }, { 
"type": "text", "value": " CohereForAI Aya project. They can be used for South-East Asia languages such as Indonesian, Thai, Vietnamese, Malay and Lao!", "raw": " CohereForAI Aya project. They can be used for South-East Asia languages such as Indonesian, Thai, Vietnamese, Malay and Lao!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Models: ", "raw": "Models: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/sail/sailor-language-models-65e19a749f978976f1959825", "href": null, "resource": { "type": "collection", "id": "sail/sailor-language-models-65e19a749f978976f1959825", "discussionNum": null }, "url": "https://huggingface.co/collections/sail/sailor-language-models-65e19a749f978976f1959825", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Demo: ", "raw": "Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/sail/Sailor-7B-Chat", "href": null, "resource": { "type": "space", "id": "sail/Sailor-7B-Chat", "discussionNum": null }, "url": "https://huggingface.co/spaces/sail/Sailor-7B-Chat", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🤗Arabic-OpenHermes-2.5: OpenHermes dataset translated to Arabic ", "raw": "🤗Arabic-OpenHermes-2.5: OpenHermes dataset translated to Arabic ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/2A2I/Arabic-OpenHermes-2.5", "href": null, "resource": { "type": "dataset", "id": "2A2I/Arabic-OpenHermes-2.5", "discussionNum": null }, "url": "https://huggingface.co/datasets/2A2I/Arabic-OpenHermes-2.5", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "See the previous part here ", "raw": "See the previous part here ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/posts/osanseviero/622788932781684", "href": "https://huggingface.co/posts/osanseviero/622788932781684", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Diaries of Open Source. Part 2. Open Source is going brrrrr 🚀The European Space Agency releases MajorTOM, a dataset of earth observation covering half the earth. The dataset has 2.5 trillion pixels! Congrats @aliFrancis and @mikonvergence ! Dataset: https://huggingface.co/datasets/Major-TOM/Core-S2L2A Viewer: https://huggingface.co/spaces/Major-TOM/MajorTOM-Core-Viewer 🍞Re-ranking models by MixedBreadAI, with very high quality, Apache 2 license, and easy to use (see the usage sketch below)! Models: https://huggingface.co/models?other=reranker&sort=trending&search=mixedbread-ai Blog: https://www.mixedbread.ai/blog/mxbai-rerank-v1 🧊StabilityAI and TripoAI release TripoSR, a super-fast MIT-licensed image-to-3D model! Model: https://huggingface.co/stabilityai/TripoSR Demo: https://huggingface.co/spaces/stabilityai/TripoSR 🤝Together AI and HazyResearch release Based Models and datasets: https://huggingface.co/collections/hazyresearch/based-65d77fb76f9c813c8b94339c GH repo: https://github.com/HazyResearch/based 🌊LaVague: an open-source pipeline to turn natural language into browser actions! It can run locally with https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1 Read more about it at https://huggingface.co/posts/dhuynh95/717319217106504 🏆Berkeley Function-Calling Leaderboard Read about it: https://gorilla.cs.berkeley.edu/blogs/8_berkeley_function_calling_leaderboard.html Leaderboard: https://gorilla.cs.berkeley.edu/leaderboard.html 🐬Sailor-Chat: chat models built on top of OpenOrca and @sarahooker's CohereForAI Aya project. They can be used for South-East Asian languages such as Indonesian, Thai, Vietnamese, Malay and Lao! Models: https://huggingface.co/collections/sail/sailor-language-models-65e19a749f978976f1959825 Demo: https://huggingface.co/spaces/sail/Sailor-7B-Chat 🤗Arabic-OpenHermes-2.5: OpenHermes dataset translated to Arabic https://huggingface.co/datasets/2A2I/Arabic-OpenHermes-2.5 See the previous part here https://huggingface.co/posts/osanseviero/622788932781684
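One way to try the MixedBread rerankers mentioned above is through the sentence-transformers CrossEncoder interface. This is a minimal sketch, not an official example: the model id is taken from the linked mxbai-rerank-v1 blog post and should be double-checked on the Hub, and the query/documents are made up for illustration.

```python
# Hedged sketch: rerank a few documents against a query with a cross-encoder.
# Model id assumed from the mxbai-rerank-v1 blog post; verify it on the Hub.
from sentence_transformers import CrossEncoder

model = CrossEncoder("mixedbread-ai/mxbai-rerank-base-v1")

query = "Who wrote 'To Kill a Mockingbird'?"
docs = [
    "Harper Lee wrote the novel 'To Kill a Mockingbird' in 1960.",
    "The mockingbird is the state bird of Texas.",
]

# Score each (query, document) pair; higher score = more relevant.
scores = model.predict([(query, d) for d in docs])
ranked = sorted(zip(docs, scores), key=lambda x: x[1], reverse=True)
print(ranked)
```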
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/635011301d81beb8e2455ee9/NyDIbzavucEIyFDHnaAv0.jpeg", "fullname": "Alistair Francis", "name": "aliFrancis", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 21 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1678741407493-6304c06eeb6d777a838eab63.png", "fullname": "Mikolaj Czerkawski", "name": "mikonvergence", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 25 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63434eb76f59b79da07dbddf/BEwmVjqPNYlqmutXG0G6e.jpeg", "fullname": "Sara Hooker", "name": "sarahooker", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 81 } ]
[ { "reaction": "👍", "users": [ "mrm8488", "mikonvergence", "peterschmidt85", "DmitryRyumin", "yjernite", "SivilTaram", "zirui3", "ajibawa-2023", "davanstrien", "vladbogo", "clem", "irenesolaiman", "mvaloatto", "Jose7juanFdz", "kramp", "juliuslipp" ], "count": 16 }, { "reaction": "❤️", "users": [ "kshitizkhanal7", "samusenps", "SivilTaram", "clefourrier", "lucabaggi", "clem", "irenesolaiman", "nisten" ], "count": 8 }, { "reaction": "🤗", "users": [ "yjernite", "Tonic", "Zmu", "clem", "irenesolaiman" ], "count": 5 } ]
2024-03-05T21:09:32.000Z
2024-03-06T13:06:25.932Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6271a90e7b9f120adb3adff1/-WeBn2fA9Z1KHuXfHeBJp.png", "fullname": "Kshitiz Khanal", "name": "kshitizkhanal7", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 8, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1644340617257-noauth.png", "fullname": "Clémentine Fourrier", "name": "clefourrier", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 459, "isFollowing": false } ]
/posts/osanseviero/674644082063278
56
3
176719659310924
[ { "type": "text", "value": "AtomoVideo", "raw": "AtomoVideo", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "High Fidelity Image-to-Video Generation", "raw": "High Fidelity Image-to-Video Generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2403.01800", "href": null, "resource": { "type": "paper", "id": "2403.01800", "discussionNum": null }, "url": "https://huggingface.co/papers/2403.01800", "code": null, "user": null, "label": "AtomoVideo: High Fidelity Image-to-Video Generation (2403.01800)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Recently, video generation has achieved significant rapid development based on superior text-to-image generation techniques. In this work, we propose a high fidelity framework for image-to-video generation, named AtomoVideo. Based on multi-granularity image injection, we achieve higher fidelity of the generated video to the given image. In addition, thanks to high quality datasets and training strategies, we achieve greater motion intensity while maintaining superior temporal consistency and stability. Our architecture extends flexibly to the video frame prediction task, enabling long sequence prediction through iterative generation. Furthermore, due to the design of adapter training, our approach can be well combined with existing personalised models and controllable modules. ", "raw": "Recently, video generation has achieved significant rapid development based on superior text-to-image generation techniques. In this work, we propose a high fidelity framework for image-to-video generation, named AtomoVideo. Based on multi-granularity image injection, we achieve higher fidelity of the generated video to the given image. In addition, thanks to high quality datasets and training strategies, we achieve greater motion intensity while maintaining superior temporal consistency and stability. Our architecture extends flexibly to the video frame prediction task, enabling long sequence prediction through iterative generation. Furthermore, due to the design of adapter training, our approach can be well combined with existing personalised models and controllable modules. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
AtomoVideo High Fidelity Image-to-Video Generation https://huggingface.co/papers/2403.01800 Recently, video generation has achieved significant rapid development based on superior text-to-image generation techniques. In this work, we propose a high fidelity framework for image-to-video generation, named AtomoVideo. Based on multi-granularity image injection, we achieve higher fidelity of the generated video to the given image. In addition, thanks to high quality datasets and training strategies, we achieve greater motion intensity while maintaining superior temporal consistency and stability. Our architecture extends flexibly to the video frame prediction task, enabling long sequence prediction through iterative generation. Furthermore, due to the design of adapter training, our approach can be well combined with existing personalised models and controllable modules.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/5cE6pdITEmEKePBHiF67_.png" } ]
[]
[ { "reaction": "❤️", "users": [ "clem", "Finding-new-code5", "MexIvanov" ], "count": 3 } ]
2024-03-05T19:39:06.000Z
2024-03-07T04:06:51.168Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/OMEqajG_I9VemRa-NndDs.png", "fullname": "Michael bollox", "name": "MichaelBoll", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "/avatars/3dd3d263ebbaa818afe15ecde812cd34.svg", "fullname": "Emre Bucak", "name": "ebucak", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/akhaliq/176719659310924
100
2
594802261431700
[ { "type": "text", "value": "We are thrilled to announce the release of the OmniACT dataset! This revolutionary dataset and benchmark focuses on pushing the limits of how virtual agents can facilitate the automation of our computer tasks. Imagine less clicking and typing, and more observation as your computer takes care of tasks such as organizing schedules or arranging travel arrangements on its own.", "raw": "We are thrilled to announce the release of the OmniACT dataset! This revolutionary dataset and benchmark focuses on pushing the limits of how virtual agents can facilitate the automation of our computer tasks. Imagine less clicking and typing, and more observation as your computer takes care of tasks such as organizing schedules or arranging travel arrangements on its own.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check it out ➡️ [OmniACT Dataset on Hugging Face](", "raw": "Check it out ➡️ [OmniACT Dataset on Hugging Face](", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/Writer/omniact", "href": null, "resource": { "type": "dataset", "id": "Writer/omniact", "discussionNum": null }, "url": "https://huggingface.co/datasets/Writer/omniact", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ")", "raw": ")", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For a deep dive, here’s the paper: [OmniACT Paper](", "raw": "For a deep dive, here’s the paper: [OmniACT Paper](", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2402.17553", "href": "https://arxiv.org/abs/2402.17553", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ")", "raw": ")", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
We are thrilled to announce the release of the OmniACT dataset! This revolutionary dataset and benchmark focuses on pushing the limits of how virtual agents can facilitate the automation of our computer tasks. Imagine less clicking and typing, and more observation as your computer takes care of tasks such as organizing schedules or arranging travel arrangements on its own. Check it out ➡️ [OmniACT Dataset on Hugging Face](https://huggingface.co/datasets/Writer/omniact) For a deep dive, here’s the paper: [OmniACT Paper](https://arxiv.org/abs/2402.17553)
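To take a quick look at the data, it can be pulled with the datasets library. This is a generic loading sketch only: the split names and fields are not taken from the post, so check the dataset card / viewer for the actual schema.

```python
# Quick peek at the OmniACT dataset (generic sketch; split/field names
# should be confirmed on the dataset card).
from datasets import load_dataset

ds = load_dataset("Writer/omniact")
print(ds)                         # available splits and their sizes

first_split = next(iter(ds.values()))
print(first_split[0])             # one raw example record
```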
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60cd486d723acf5eb46fe8d3/Z1bD1kjvZ0QAOjZna41Xr.jpeg", "fullname": "Waseem AlShikh", "name": "wassemgtk", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 23, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60cd486d723acf5eb46fe8d3/P6RWMssEhyXaxygsYhzaD.png" } ]
[]
[ { "reaction": "❤️", "users": [ "melisa", "osanseviero", "victor", "nbroad", "samusenps", "jeffboudier", "julien-c", "kiranr", "lhoestq", "pierrci", "nikjain", "clem", "NERDDISCO", "bk-ignite", "medmac01", "thomwolf", "sbrandeis" ], "count": 17 }, { "reaction": "🤯", "users": [ "femboysLover", "victor", "M1cler", "julien-c", "lhoestq", "clem", "bk-ignite", "medmac01", "thomwolf" ], "count": 9 } ]
2024-03-05T16:15:29.000Z
2024-03-05T16:23:55.143Z
[]
/posts/wassemgtk/594802261431700
218
0
124549441585969
[ { "type": "text", "value": "🔥 New multimodal leaderboard on the hub: ConTextual!", "raw": "🔥 New multimodal leaderboard on the hub: ConTextual!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Many situations require models to parse images containing text: maps, web pages, real world pictures, memes, ... 🖼️", "raw": "Many situations require models to parse images containing text: maps, web pages, real world pictures, memes, ... 🖼️", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "So how do you evaluate performance on this task?", "raw": "So how do you evaluate performance on this task?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The ConTextual team introduced a brand new dataset of instructions and images, to test LMM (large multimodal models) reasoning capabilities, and an associated leaderboard (with a private test set).", "raw": "The ConTextual team introduced a brand new dataset of instructions and images, to test LMM (large multimodal models) reasoning capabilities, and an associated leaderboard (with a private test set).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is super exciting imo because it has the potential to be a good benchmark both for multimodal models and for assistants' vision capabilities, thanks to the instructions in the dataset.", "raw": "This is super exciting imo because it has the potential to be a good benchmark both for multimodal models and for assistants' vision capabilities, thanks to the instructions in the dataset.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Congrats to ", "raw": "Congrats to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@rohan598", "href": 
null, "resource": null, "url": null, "code": null, "user": "rohan598", "label": null, "lang": null }, { "type": "text", "value": ", ", "raw": ", ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@hbXNov", "href": null, "resource": null, "url": null, "code": null, "user": "hbXNov", "label": null, "lang": null }, { "type": "text", "value": ", ", "raw": ", ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@kaiweichang", "href": null, "resource": null, "url": null, "code": null, "user": "kaiweichang", "label": null, "lang": null }, { "type": "text", "value": " and ", "raw": " and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@violetpeng", "href": null, "resource": null, "url": null, "code": null, "user": "violetpeng", "label": null, "lang": null }, { "type": "text", "value": " !!", "raw": " !!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Learn more in the blog: ", "raw": "Learn more in the blog: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/leaderboard-contextual", "href": "https://huggingface.co/blog/leaderboard-contextual", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Leaderboard: ", "raw": "Leaderboard: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/ucla-contextual/contextual_leaderboard", "href": null, "resource": { "type": "space", "id": "ucla-contextual/contextual_leaderboard", "discussionNum": null }, "url": "https://huggingface.co/spaces/ucla-contextual/contextual_leaderboard", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🔥 New multimodal leaderboard on the hub: ConTextual! Many situations require models to parse images containing text: maps, web pages, real world pictures, memes, ... 🖼️ So how do you evaluate performance on this task? The ConTextual team introduced a brand new dataset of instructions and images, to test LMM (large multimodal models) reasoning capabilities, and an associated leaderboard (with a private test set). This is super exciting imo because it has the potential to be a good benchmark both for multimodal models and for assistants' vision capabilities, thanks to the instructions in the dataset. Congrats to @rohan598, @hbXNov, @kaiweichang and @violetpeng !! Learn more in the blog: https://huggingface.co/blog/leaderboard-contextual Leaderboard: https://huggingface.co/spaces/ucla-contextual/contextual_leaderboard
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1644340617257-noauth.png", "fullname": "Clémentine Fourrier", "name": "clefourrier", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 459, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6202a599216215a22221dea9/ju2JBh4mB8nXLWWchbY8I.png" } ]
[ { "avatarUrl": "/avatars/8a89e040dc331b7a83d9a704c4fc29d2.svg", "fullname": "Hritik Bansal", "name": "hbXNov", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1622653364258-noauth.jpeg", "fullname": "Kai-Wei Chang", "name": "kaiweichang", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 4 }, { "avatarUrl": "/avatars/422234f3ceeb7f95fe41b25eb8442485.svg", "fullname": "Rohan Wadhawan", "name": "rohan598", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 4 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64c13de54cbd12e168f09a58/Aw5BaNzf6uuwqQ333OQ5y.png", "fullname": "NANYUN (Violet) PENG", "name": "violetpeng", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2 } ]
[ { "reaction": "👍", "users": [ "vladbogo", "BrigitteTousi", "Kukedlc", "osanseviero", "victor", "VictorSanh", "samusenps", "JasonZJ", "mvaloatto", "rohan598", "puneetjindalisb", "whitew1994" ], "count": 12 } ]
2024-03-05T15:59:24.000Z
2024-03-05T15:59:35.240Z
[]
/posts/clefourrier/124549441585969
2,374
0
659843073397506
[ { "type": "text", "value": "VisionLLaMA is a new vision transformer architecture that adapts the successful LLaMA language model design for vision tasks. By integrating components like rotary positional embeddings, SwiGLU activation, and LayerNorm from LLaMA, VisionLLaMA achieves very promising performance across various vision tasks, including image generation, classification, semantic segmentation, and object detection.", "raw": "VisionLLaMA is a new vision transformer architecture that adapts the successful LLaMA language model design for vision tasks. By integrating components like rotary positional embeddings, SwiGLU activation, and LayerNorm from LLaMA, VisionLLaMA achieves very promising performance across various vision tasks, including image generation, classification, semantic segmentation, and object detection.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Keypoints:", "raw": "Keypoints:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Outperforms state-of-the-art vision transformers like DiT, SiT, DeiT3, and Swin on multiple benchmarks and tasks.", "raw": "* Outperforms state-of-the-art vision transformers like DiT, SiT, DeiT3, and Swin on multiple benchmarks and tasks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Leverages Auto-Scaled 2D Rotary Positional Embeddings (AS2DRoPE) to handle variable input resolutions efficiently.", "raw": "* Leverages Auto-Scaled 2D Rotary Positional Embeddings (AS2DRoPE) to handle variable input resolutions efficiently.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Serves as a powerful, unified modeling framework for vision generation and understanding tasks.", "raw": "* Serves as a powerful, unified modeling framework for vision generation and understanding tasks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2403.00522", "href": null, "resource": 
{ "type": "paper", "id": "2403.00522", "discussionNum": null }, "url": "https://huggingface.co/papers/2403.00522", "code": null, "user": null, "label": "VisionLLaMA: A Unified LLaMA Interface for Vision Tasks (2403.00522)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "GitHub repo: ", "raw": "GitHub repo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/Meituan-AutoML/VisionLLaMA", "href": "https://github.com/Meituan-AutoML/VisionLLaMA", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Congrats to the authors for their work!", "raw": "Congrats to the authors for their work!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
VisionLLaMA is a new vision transformer architecture that adapts the successful LLaMA language model design for vision tasks. By integrating components like rotary positional embeddings, SwiGLU activation, and LayerNorm from LLaMA, VisionLLaMA achieves very promising performance across various vision tasks, including image generation, classification, semantic segmentation, and object detection. Keypoints: * Outperforms state-of-the-art vision transformers like DiT, SiT, DeiT3, and Swin on multiple benchmarks and tasks. * Leverages Auto-Scaled 2D Rotary Positional Embeddings (AS2DRoPE) to handle variable input resolutions efficiently. * Serves as a powerful, unified modeling framework for vision generation and understanding tasks. Paper: https://huggingface.co/papers/2403.00522 GitHub repo: https://github.com/Meituan-AutoML/VisionLLaMA Congrats to the authors for their work!
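For intuition, here is a rough, hypothetical sketch of the general idea behind AS2DRoPE (2D rotary embeddings plus coordinate rescaling), not the authors' implementation: standard RoPE is applied independently along the x and y patch coordinates, each using half of the per-head channels, and the coordinates of larger input grids are rescaled to an assumed anchor grid size so the frequencies stay in the range seen during training. All shapes, names, and the anchor value below are illustrative assumptions.

```python
# Illustrative sketch of 2D rotary embeddings with position auto-scaling.
# This paraphrases the concept; it is not the VisionLLaMA code.
import torch

def rope_1d(pos, dim, base=10000.0):
    # Standard 1D RoPE angles for one coordinate axis: (num_pos, dim/2).
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    return torch.outer(pos.float(), inv_freq)

def as2d_rope_angles(h, w, head_dim, anchor=224 // 16):
    # "Auto-scaling": map grid coordinates back onto the anchor (training) grid
    # so higher-resolution inputs reuse the learned frequency range.
    ys = torch.arange(h) * (anchor / h)
    xs = torch.arange(w) * (anchor / w)
    # Half of the rotated channels encode y, the other half encode x.
    ang_y = rope_1d(ys, head_dim // 2)                    # (h, head_dim/4)
    ang_x = rope_1d(xs, head_dim // 2)                    # (w, head_dim/4)
    ang = torch.cat(
        [ang_y[:, None, :].expand(h, w, -1),
         ang_x[None, :, :].expand(h, w, -1)], dim=-1)     # (h, w, head_dim/2)
    return ang.flatten(0, 1)                              # (h*w, head_dim/2)

def apply_rope(x, ang):
    # x: (tokens, head_dim); rotate even/odd channel pairs by the angles.
    cos, sin = ang.cos(), ang.sin()
    x1, x2 = x[..., 0::2], x[..., 1::2]
    return torch.stack([x1 * cos - x2 * sin,
                        x1 * sin + x2 * cos], dim=-1).flatten(-2)
```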
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/657217faabb25ed8aedd5e48/UUHAXeGtOnQBXFD3nYtf2.jpeg", "fullname": "Vlad Bogolin", "name": "vladbogo", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 109, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "clem", "jucamohedano", "osanseviero", "victor", "samusenps", "sgarbi", "MexIvanov", "trysem" ], "count": 8 } ]
2024-03-05T15:32:23.000Z
2024-03-05T15:32:23.041Z
[]
/posts/vladbogo/659843073397506
105
0
950358996719386
[ { "type": "text", "value": "Why does returning timestamps help Whisper reduce hallucinations? 🧐", "raw": "Why does returning timestamps help Whisper reduce hallucinations? 🧐", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Empirically, most practitioners have found that setting ", "raw": "Empirically, most practitioners have found that setting ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`return_timestamps=True`", "href": null, "resource": null, "url": null, "code": "return_timestamps=True", "user": null, "label": null, "lang": null }, { "type": "text", "value": " helps reduce hallucinations, particularly when doing long-form evaluation with Transformers’ “chunked” algorithm. ", "raw": " helps reduce hallucinations, particularly when doing long-form evaluation with Transformers’ “chunked” algorithm. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "But why does this work?..", "raw": "But why does this work?..", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "My interpretation is that forcing the model to predict timestamps is contradictory to hallucinations. Suppose you have the transcription:", "raw": "My interpretation is that forcing the model to predict timestamps is contradictory to hallucinations. Suppose you have the transcription:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```markdown\nThe cat sat on the on the on the mat.\n```", "href": null, "resource": null, "url": null, "code": "The cat sat on the on the on the mat.", "user": null, "label": null, "lang": "markdown" }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Where we have a repeated hallucination for “on the”. If we ask the model to predict timestamps, then the “on the” has to contribute to the overall segment-level timing, e.g.:", "raw": "Where we have a repeated hallucination for “on the”. 
If we ask the model to predict timestamps, then the “on the” has to contribute to the overall segment-level timing, e.g.:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```markdown\n<|0.00|> The cat sat on the on the on the mat.<|5.02|>\n```", "href": null, "resource": null, "url": null, "code": "<|0.00|> The cat sat on the on the on the mat.<|5.02|>", "user": null, "label": null, "lang": "markdown" }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "However, it’s impossible to fit 3 copies of “on the” within the time allocation given to the segment, so the probability for this hallucinatory sequence becomes lower, and the model actually predicts the correct transcription with highest probability:", "raw": "However, it’s impossible to fit 3 copies of “on the” within the time allocation given to the segment, so the probability for this hallucinatory sequence becomes lower, and the model actually predicts the correct transcription with highest probability:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```markdown\n<|0.00|> The cat sat on the mat.<|5.02|>\n```", "href": null, "resource": null, "url": null, "code": "<|0.00|> The cat sat on the mat.<|5.02|>", "user": null, "label": null, "lang": "markdown" }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In this sense, the end timestamp is of the opposite of the initial timestamp constraint they describe in Section 4.5 of the paper ", "raw": "In this sense, the end timestamp is of the opposite of the initial timestamp constraint they describe in Section 4.5 of the paper ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2212.04356", "href": null, "resource": { "type": "paper", "id": "2212.04356", "discussionNum": null }, "url": "https://huggingface.co/papers/2212.04356", "code": null, "user": null, "label": "Robust Speech Recognition via Large-Scale Weak Supervision (2212.04356)", "lang": null }, { "type": "text", "value": " → it helps the model remove extra words at the end of the sequence (rather than the initial timestamp which helps when the model ignores words at the start), but the overall principle is the same (using timestamps to improve the probability of more realistic sequences).", "raw": " → it helps the model remove extra words at the end of the sequence (rather than the initial timestamp which helps when the model ignores words at the start), but the overall principle is the same (using timestamps to improve the probability of more realistic sequences).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Leaving it open to you: why do you think timestamps reduces Whisper hallucinations?", "raw": "Leaving it open to you: why do you think timestamps reduces Whisper hallucinations?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Why does returning timestamps help Whisper reduce hallucinations? 🧐 Empirically, most practitioners have found that setting `return_timestamps=True` helps reduce hallucinations, particularly when doing long-form evaluation with Transformers’ “chunked” algorithm. But why does this work?.. My interpretation is that forcing the model to predict timestamps is contradictory to hallucinations. Suppose you have the transcription: ```markdown The cat sat on the on the on the mat. ``` Where we have a repeated hallucination for “on the”. If we ask the model to predict timestamps, then the “on the” has to contribute to the overall segment-level timing, e.g.: ```markdown <|0.00|> The cat sat on the on the on the mat.<|5.02|> ``` However, it’s impossible to fit 3 copies of “on the” within the time allocation given to the segment, so the probability for this hallucinatory sequence becomes lower, and the model actually predicts the correct transcription with highest probability: ```markdown <|0.00|> The cat sat on the mat.<|5.02|> ``` In this sense, the end timestamp is the opposite of the initial timestamp constraint they describe in Section 4.5 of the paper https://huggingface.co/papers/2212.04356 → it helps the model remove extra words at the end of the sequence (rather than the initial timestamp which helps when the model ignores words at the start), but the overall principle is the same (using timestamps to improve the probability of more realistic sequences). Leaving it open to you: why do you think timestamps reduce Whisper hallucinations?
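For reference, a minimal sketch of the setup being discussed: the Transformers ASR pipeline with the chunked long-form algorithm and `return_timestamps=True`. The checkpoint and audio file below are illustrative placeholders, not taken from the post.

```python
# Minimal sketch: chunked long-form transcription with timestamps enabled.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-large-v3",  # any Whisper checkpoint; placeholder choice
    chunk_length_s=30,                # enables the "chunked" long-form algorithm
)

# return_timestamps=True makes the model predict segment-level timestamps,
# the setting discussed above.
result = asr("audio.mp3", return_timestamps=True)

print(result["text"])    # full transcription
print(result["chunks"])  # list of {"timestamp": (start, end), "text": ...} segments
```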
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1653243468328-61f91cf54a8e5a275b2b3e7c.jpeg", "fullname": "Sanchit Gandhi", "name": "sanchit-gandhi", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 565, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "clem", "abellion", "vladbogo", "daedaluswave", "osanseviero", "samusenps", "Csplk", "DRRRRDFFD", "on1onmangoes", "sanchit-gandhi", "jayzhuo", "michaelsymph", "EBalykov", "Mutturaj", "DattaKiran", "OjciecTadeusz", "ericlaycock", "dkakaie", "WIldOni", "ayoalfonso", "dzlandis", "peterciank" ], "count": 22 }, { "reaction": "👍", "users": [ "emirvmendoza", "oyemade", "EBalykov", "Mutturaj" ], "count": 4 }, { "reaction": "🤗", "users": [ "nss-ysasaki" ], "count": 1 }, { "reaction": "👀", "users": [ "Wok" ], "count": 1 }, { "reaction": "➕", "users": [ "Mutturaj" ], "count": 1 } ]
2024-03-05T15:02:19.000Z
2024-11-12T07:09:16.240Z
[ { "avatarUrl": "/avatars/13fbba68d1d4d29afa0d2ac1a24faa28.svg", "fullname": "Raj Gothi", "name": "RajGothi", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1653243468328-61f91cf54a8e5a275b2b3e7c.jpeg", "fullname": "Sanchit Gandhi", "name": "sanchit-gandhi", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 565, "isFollowing": false }, { "avatarUrl": "/avatars/c83664b8b11a1e72a41866e98e377426.svg", "fullname": "Sai", "name": "Chandrahema", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/670203bca5a482f20ad7aa47/Dec9xTwFkCeSLti_9tYPO.png", "fullname": "Danielle", "name": "Sin2pi", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }, { "avatarUrl": "/avatars/ec2b0fa1dca861fdf3e41b33cf27bbc3.svg", "fullname": "Regent Yung", "name": "regentyung", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64cdcbef244a7de1132247f4/dEezgF4LW9lqxAgvmc2oH.jpeg", "fullname": "Wadi Doank", "name": "wadidoank", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/sanchit-gandhi/950358996719386
12,882
6
551840295310141
[ { "type": "text", "value": "What if you could casually access your remote GPU in HF Spaces from the comfort of your local VSCode 🤯", "raw": "What if you could casually access your remote GPU in HF Spaces from the comfort of your local VSCode 🤯", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
What if you could casually access your remote GPU in HF Spaces from the comfort of your local VSCode 🤯
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5dd96eb166059660ed1ee413/nM-ivJyqs1ftp35Mi27Hs.png" } ]
[]
[ { "reaction": "👍", "users": [ "ajibawa-2023", "Gabriel", "victor", "mvaloatto", "medmac01", "clem", "BrigitteTousi", "musfiqdehan", "osanseviero", "asusevski", "samusenps", "cnmoro", "mathiasn1", "JackCloudman", "Nadav-Timor", "Felladrin", "muhtasham", "dwidlee", "peterschroedl", "not-lain", "radames" ], "count": 21 }, { "reaction": "🤯", "users": [ "fffiloni", "taufiqdp", "medmac01", "natolambert", "clem", "BrigitteTousi", "umuthopeyildirim", "osanseviero", "vtiyyal1", "xxXWarMachineRoXxx", "muhtasham", "peterschroedl", "kramp", "not-lain", "seyf1elislam", "radames" ], "count": 16 }, { "reaction": "🤗", "users": [ "tuanlda78202", "medmac01", "clem", "BrigitteTousi", "muhtasham", "peterschroedl", "not-lain" ], "count": 7 } ]
2024-03-05T14:16:38.000Z
2024-03-18T11:12:14.428Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg", "fullname": "Feynman Innovations", "name": "ajibawa-2023", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 138, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/628e5f90a9a3c754c1f7c88f/iWqMY_l6dalrgRaJZWbK3.png", "fullname": "Nathan Lambert", "name": "natolambert", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 122, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1652197428055-625a6e0c535747b1a15be2de.jpeg", "fullname": "Md. Musfiqur Rahaman", "name": "musfiqdehan", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1651847561574-5fcaabed246881afd5b00167.jpeg", "fullname": "Muhtasham Oblokulov", "name": "muhtasham", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 43, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6468ce47e134d050a58aa89c/ApFcPlOzgI6Cjr0SYPpk6.png", "fullname": "Yağız Çalık", "name": "Weyaxi", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 272, "isFollowing": false } ]
/posts/julien-c/551840295310141
265
8
506341332427467
[ { "type": "text", "value": "Huge new dataset for remote sensing on HF: Major TOM", "raw": "Huge new dataset for remote sensing on HF: Major TOM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "More info in this thread ", "raw": "More info in this thread ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://x.com/mikonvergence/status/1764913256102031402?s=20", "href": "https://x.com/mikonvergence/status/1764913256102031402?s=20", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Note: video coming soon ", "raw": "Note: video coming soon ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Huge new dataset for remote sensing on HF: Major TOM More info in this thread https://x.com/mikonvergence/status/1764913256102031402?s=20 Note: video coming soon
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1638541710084-noauth.png", "fullname": "Robin Cole", "name": "robmarkcole", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 28, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "mikonvergence", "abbasm2", "clem", "osanseviero", "samusenps", "dillfrescott" ], "count": 6 }, { "reaction": "🤗", "users": [ "taufiqdp", "clem", "osanseviero", "dillfrescott" ], "count": 4 } ]
2024-03-05T13:59:32.000Z
2024-03-05T20:10:15.335Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868, "isFollowing": false } ]
/posts/robmarkcole/506341332427467
85
1
380906232971746
[ { "type": "text", "value": "🧠  𝗖𝗟𝗘𝗔𝗥: 𝗳𝗶𝗿𝘀𝘁 𝗺𝘂𝗹𝘁𝗶𝗺𝗼𝗱𝗮𝗹 𝗯𝗲𝗻𝗰𝗵𝗺𝗮𝗿𝗸 𝘁𝗼 𝗺𝗮𝗸𝗲 𝗺𝗼𝗱𝗲𝗹𝘀 𝗳𝗼𝗿𝗴𝗲𝘁 𝘄𝗵𝗮𝘁 𝘄𝗲 𝘄𝗮𝗻𝘁 𝘁𝗵𝗲𝗺 𝘁𝗼 𝗳𝗼𝗿𝗴𝗲𝘁", "raw": "🧠  𝗖𝗟𝗘𝗔𝗥: 𝗳𝗶𝗿𝘀𝘁 𝗺𝘂𝗹𝘁𝗶𝗺𝗼𝗱𝗮𝗹 𝗯𝗲𝗻𝗰𝗵𝗺𝗮𝗿𝗸 𝘁𝗼 𝗺𝗮𝗸𝗲 𝗺𝗼𝗱𝗲𝗹𝘀 𝗳𝗼𝗿𝗴𝗲𝘁 𝘄𝗵𝗮𝘁 𝘄𝗲 𝘄𝗮𝗻𝘁 𝘁𝗵𝗲𝗺 𝘁𝗼 𝗳𝗼𝗿𝗴𝗲𝘁", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "With privacy concerns rising, we sometimes need our models to \"forget\" specific information - like a person's data - while keeping everything else intact. Researchers just released CLEAR, the first benchmark to test how well this works with both text and images.", "raw": "With privacy concerns rising, we sometimes need our models to \"forget\" specific information - like a person's data - while keeping everything else intact. Researchers just released CLEAR, the first benchmark to test how well this works with both text and images.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "❌ Bad news: Current methods either fail to truly forget or end up forgetting way too much. It's like trying to remove a single ingredient from a baked cake!", "raw": "❌ Bad news: Current methods either fail to truly forget or end up forgetting way too much. 
It's like trying to remove a single ingredient from a baked cake!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✨ But there's hope: Adding simple mathematical constraints (L1 regularization) during the forgetting process significantly improves results.", "raw": "✨ But there's hope: Adding simple mathematical constraints (L1 regularization) during the forgetting process significantly improves results.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🎯 Key insights:", "raw": "🎯 Key insights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✅ The benchmark tests forgetting on 200 fictional personas", "raw": "✅ The benchmark tests forgetting on 200 fictional personas", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ 3,770 visual Q&A pairs", "raw": "‣ 3,770 visual Q&A pairs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ 4,000 textual Q&A pairs", "raw": "‣ 4,000 textual Q&A pairs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ Additional real-world tests", "raw": "‣ Additional real-world tests", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🛑 Most current forgetting methods don't work well with both text and images", "raw": "🛑 Most current forgetting methods don't work well with both text and images", "href": null, "resource": null, "url": 
null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ They either remember what they should forget", "raw": "‣ They either remember what they should forget", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ Or they forget too much unrelated information", "raw": "‣ Or they forget too much unrelated information", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✨ Simple mathematical constraints work surprisingly well", "raw": "✨ Simple mathematical constraints work surprisingly well", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ L1 regularization prevents excessive forgetting", "raw": "‣ L1 regularization prevents excessive forgetting", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ Works especially well with the LLMU method", "raw": "‣ Works especially well with the LLMU method", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👉 Read the full paper here: ", "raw": "👉 Read the full paper here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2410.18057", "href": null, "resource": { "type": "paper", "id": "2410.18057", "discussionNum": null }, "url": "https://huggingface.co/papers/2410.18057", "code": null, "user": null, "label": "CLEAR: Character Unlearning in Textual and Visual Modalities (2410.18057)", "lang": null } ]
🧠  𝗖𝗟𝗘𝗔𝗥: 𝗳𝗶𝗿𝘀𝘁 𝗺𝘂𝗹𝘁𝗶𝗺𝗼𝗱𝗮𝗹 𝗯𝗲𝗻𝗰𝗵𝗺𝗮𝗿𝗸 𝘁𝗼 𝗺𝗮𝗸𝗲 𝗺𝗼𝗱𝗲𝗹𝘀 𝗳𝗼𝗿𝗴𝗲𝘁 𝘄𝗵𝗮𝘁 𝘄𝗲 𝘄𝗮𝗻𝘁 𝘁𝗵𝗲𝗺 𝘁𝗼 𝗳𝗼𝗿𝗴𝗲𝘁 With privacy concerns rising, we sometimes need our models to "forget" specific information - like a person's data - while keeping everything else intact. Researchers just released CLEAR, the first benchmark to test how well this works with both text and images. ❌ Bad news: Current methods either fail to truly forget or end up forgetting way too much. It's like trying to remove a single ingredient from a baked cake! ✨ But there's hope: Adding simple mathematical constraints (L1 regularization) during the forgetting process significantly improves results. 🎯 Key insights: ✅ The benchmark tests forgetting on 200 fictional personas ‣ 3,770 visual Q&A pairs ‣ 4,000 textual Q&A pairs ‣ Additional real-world tests 🛑 Most current forgetting methods don't work well with both text and images ‣ They either remember what they should forget ‣ Or they forget too much unrelated information ✨ Simple mathematical constraints work surprisingly well ‣ L1 regularization prevents excessive forgetting ‣ Works especially well with the LLMU method 👉 Read the full paper here: https://huggingface.co/papers/2410.18057
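To make the L1 point concrete, here is a rough sketch of how such a penalty can be attached to an unlearning objective. This is my own illustration of the general principle, not the exact recipe from the CLEAR paper, and the function and variable names are hypothetical.

```python
# Hypothetical sketch: adding an L1 penalty to an unlearning loss to limit
# collateral forgetting (illustrates the general idea only, not the paper's exact setup).
import torch

def l1_regularized_unlearning_loss(
    base_unlearning_loss: torch.Tensor,   # loss from any unlearning method, e.g. LLMU
    trainable_params,                     # iterable of the parameters being updated
    l1_lambda: float = 1e-4,
) -> torch.Tensor:
    # The L1 term discourages large, sweeping weight updates, which are what
    # tend to erase unrelated (retain-set) knowledge during forgetting.
    l1_penalty = sum(p.abs().sum() for p in trainable_params)
    return base_unlearning_loss + l1_lambda * l1_penalty

# Usage inside a training step:
#   loss = l1_regularized_unlearning_loss(forget_loss, model.parameters())
#   loss.backward(); optimizer.step()
```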
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/j7qbmAMixW9v9FdqonSWK.png" } ]
[]
[ { "reaction": "👀", "users": [ "John6666", "CYGDEN", "xpgx1" ], "count": 3 } ]
2024-11-02T14:52:11.000Z
2024-11-02T14:52:11.476Z
[]
/posts/m-ric/380906232971746
1,540
0
563565199854269
[ { "type": "text", "value": "Last Week in Medical AI: Top Research ", "raw": "Last Week in Medical AI: Top Research ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Papers/Models", "raw": "Papers/Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 🔥", "raw": " 🔥", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🏅 (October 26 - November 2, 2024)", "raw": "🏅 (October 26 - November 2, 2024)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🏅 Medical AI Paper of the Week:", "raw": "🏅 Medical AI Paper of the Week:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Google Presents MDAgents: An Adaptive Collaboration of LLMs for Medical Decision-Making", "raw": "Google Presents MDAgents: An Adaptive Collaboration of LLMs for Medical Decision-Making", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Medical LLM & Other Models:", "raw": "Medical LLM & Other Models:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Matchmaker: Schema Matching with LLMs", "raw": "- Matchmaker: Schema Matching with LLMs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- UltraMedical: Specialized Biomedical Models", "raw": "- UltraMedical: Specialized Biomedical Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ZALM3: Vision-Language Medical Dialogue", "raw": "- ZALM3: Vision-Language Medical Dialogue", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": 
null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- EchoFM: Echocardiogram Foundation Model", "raw": "- EchoFM: Echocardiogram Foundation Model", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Frameworks and Methodologies:", "raw": "Frameworks and Methodologies:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- FEDKIM: Federated Medical Knowledge Injection", "raw": "- FEDKIM: Federated Medical Knowledge Injection", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Flex-MoE: Flexible Modality Combination", "raw": "- Flex-MoE: Flexible Modality Combination", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MAISI: Synthetic Medical Imaging", "raw": "- MAISI: Synthetic Medical Imaging", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Cough-E: Edge Privacy Detection", "raw": "- Cough-E: Edge Privacy Detection", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MassSpecGym: Molecule Identification", "raw": "- MassSpecGym: Molecule Identification", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Medical LLM Applications:", "raw": "Medical LLM Applications:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- DiaMond: Multi-Modal Dementia Diagnosis", "raw": "- DiaMond: Multi-Modal 
Dementia Diagnosis", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- LLM-Forest: Health Data Imputation", "raw": "- LLM-Forest: Health Data Imputation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Medical Multimodal Visual Grounding", "raw": "- Medical Multimodal Visual Grounding", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Clinical Evidence Synthesis with LLMs", "raw": "- Clinical Evidence Synthesis with LLMs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Medical LLMs & Benchmarks:", "raw": "Medical LLMs & Benchmarks:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Histopathology Models Beyond H&E", "raw": "- Histopathology Models Beyond H&E", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- LLMs in Mental Health Counseling", "raw": "- LLMs in Mental Health Counseling", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Medical Dataset Reuse Analysis", "raw": "- Medical Dataset Reuse Analysis", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "AI in Healthcare Ethics:", "raw": "AI in Healthcare Ethics:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- LLMs in 
Medical Education", "raw": "- LLMs in Medical Education", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Medical Exam Question Generation", "raw": "- Medical Exam Question Generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Clinical Knowledge Graph Integration", "raw": "- Clinical Knowledge Graph Integration", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Now you can watch and listen to the latest Medical AI papers daily on our YouTube and Spotify channels as well!", "raw": "Now you can watch and listen to the latest Medical AI papers daily on our YouTube and Spotify channels as well!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Full Thread: ", "raw": "- Full Thread: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://x.com/OpenlifesciAI/status/1852685220912464066", "href": "https://x.com/OpenlifesciAI/status/1852685220912464066", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- YouTube: ", "raw": "- YouTube: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/3O3xjaMCXHI", "href": "https://youtu.be/3O3xjaMCXHI", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Spotify: ", "raw": "- Spotify: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://open.spotify.com/episode/05trbTbtVZcfI7ycA5Z3Tt?si=706b74626f714aa1", "href": "https://open.spotify.com/episode/05trbTbtVZcfI7ycA5Z3Tt?si=706b74626f714aa1", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Last Week in Medical AI: Top Research Papers/Models 🔥 🏅 (October 26 - November 2, 2024) 🏅 Medical AI Paper of the Week: Google Presents MDAgents: An Adaptive Collaboration of LLMs for Medical Decision-Making Medical LLM & Other Models: - Matchmaker: Schema Matching with LLMs - UltraMedical: Specialized Biomedical Models - ZALM3: Vision-Language Medical Dialogue - EchoFM: Echocardiogram Foundation Model Frameworks and Methodologies: - FEDKIM: Federated Medical Knowledge Injection - Flex-MoE: Flexible Modality Combination - MAISI: Synthetic Medical Imaging - Cough-E: Edge Privacy Detection - MassSpecGym: Molecule Identification Medical LLM Applications: - DiaMond: Multi-Modal Dementia Diagnosis - LLM-Forest: Health Data Imputation - Medical Multimodal Visual Grounding - Clinical Evidence Synthesis with LLMs Medical LLMs & Benchmarks: - Histopathology Models Beyond H&E - LLMs in Mental Health Counseling - Medical Dataset Reuse Analysis AI in Healthcare Ethics: - LLMs in Medical Education - Medical Exam Question Generation - Clinical Knowledge Graph Integration Now you can watch and listen to the latest Medical AI papers daily on our YouTube and Spotify channels as well! - Full Thread: https://x.com/OpenlifesciAI/status/1852685220912464066 - YouTube: https://youtu.be/3O3xjaMCXHI - Spotify: https://open.spotify.com/episode/05trbTbtVZcfI7ycA5Z3Tt?si=706b74626f714aa1
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f3fe13d79c1ba4c353d0c19/XswyGe3OtOdZ6g7rnrgfc.png", "fullname": "Aaditya Ura", "name": "aaditya", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 224, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/2eNxNujX6o5OTPRIiyeFQ.jpeg" } ]
[]
[ { "reaction": "❤️", "users": [ "aaditya", "Sri-Vigneshwar-DJ", "AtAndDev", "akahana", "BayesTensor", "models4world", "OpenlifeScience", "CYGDEN", "Minbyul" ], "count": 9 }, { "reaction": "🧠", "users": [ "aaditya", "John6666", "AtAndDev", "BayesTensor", "models4world", "OpenlifeScience", "CYGDEN", "Minbyul" ], "count": 8 }, { "reaction": "🤗", "users": [ "aaditya", "AtAndDev", "BayesTensor", "models4world", "OpenlifeScience", "CYGDEN", "Minbyul" ], "count": 7 }, { "reaction": "🚀", "users": [ "aaditya", "AtAndDev", "BayesTensor", "models4world", "OpenlifeScience", "CYGDEN", "Minbyul" ], "count": 7 }, { "reaction": "🔥", "users": [ "aaditya", "AtAndDev", "BayesTensor", "models4world", "OpenlifeScience", "CYGDEN", "Minbyul" ], "count": 7 }, { "reaction": "👍", "users": [ "aaditya", "AtAndDev", "OpenlifeScience", "CYGDEN", "Minbyul" ], "count": 5 } ]
2024-11-02T13:35:24.000Z
2024-11-03T08:09:23.689Z
[]
/posts/aaditya/563565199854269
2,708
0
861259083059023
[ { "type": "text", "value": "📢 If you're aimed at processing complex dependencies in spreadsheet data with LLM Chain-of-Thought technique, then this update might be valuable for you 💎", "raw": "📢 If you're aimed at processing complex dependencies in spreadsheet data with LLM Chain-of-Thought technique, then this update might be valuable for you 💎", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The updated 📦 bulk-chain-0.24.1 which is aimed at iterative processing of CSV/JSONL data with no-string dependencies from third party LLM frameworks is out 🎉", "raw": "The updated 📦 bulk-chain-0.24.1 which is aimed at iterative processing of CSV/JSONL data with no-string dependencies from third party LLM frameworks is out 🎉", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📦: ", "raw": "📦: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://pypi.org/project/bulk-chain/0.24.1/", "href": "https://pypi.org/project/bulk-chain/0.24.1/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🌟: ", "raw": "🌟: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/bulk-chain", "href": "https://github.com/nicolay-r/bulk-chain", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📘: ", "raw": "📘: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/bulk-chain/issues/26", "href": "https://github.com/nicolay-r/bulk-chain/issues/26", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The key feature of bulk-chain is SQLite caching that saves your time ⏰️ and money 💵 by guarantee no-data-lost, which is important once using the remote LLM providers such as OpenAI, ReplicateIO, OpenRouter, etc.", "raw": 
"The key feature of bulk-chain is SQLite caching that saves your time ⏰️ and money 💵 by guarantee no-data-lost, which is important once using the remote LLM providers such as OpenAI, ReplicateIO, OpenRouter, etc.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔧 This release has the following updates:", "raw": "🔧 This release has the following updates:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✅ Improved stability for various header conditions and the related support from SQLite", "raw": "✅ Improved stability for various header conditions and the related support from SQLite", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✅ Manual setup for ID column / assigning the ID", "raw": "✅ Manual setup for ID column / assigning the ID", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✅ Make CSV-related setups dynamic, that refers to the related Python 📦 csv package.", "raw": "✅ Make CSV-related setups dynamic, that refers to the related Python 📦 csv package.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Quick start on GoogleColab:", "raw": "Quick start on GoogleColab:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📙: ", "raw": "📙: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://colab.research.google.com/github/nicolay-r/bulk-chain/blob/master/bulk_chain_tutorial.ipynb", "href": "https://colab.research.google.com/github/nicolay-r/bulk-chain/blob/master/bulk_chain_tutorial.ipynb", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", 
"href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Below is an example of the three simple steps in pictures:", "raw": "Below is an example of the three simple steps in pictures:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. ⬇️ Package installation ", "raw": "1. ⬇️ Package installation ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. ✍️ Declaring schema", "raw": "2. ✍️ Declaring schema", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. 🚀 Launching inference for your data with Replicate and 🤖 ", "raw": "3. 🚀 Launching inference for your data with Replicate and 🤖 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/meta-llama/Llama-3.1-405B", "href": null, "resource": { "type": "model", "id": "meta-llama/Llama-3.1-405B", "discussionNum": null }, "url": "https://huggingface.co/meta-llama/Llama-3.1-405B", "code": null, "user": null, "label": null, "lang": null } ]
📢 If you're looking to process complex dependencies in spreadsheet data with the LLM Chain-of-Thought technique, then this update might be valuable for you 💎 The updated 📦 bulk-chain-0.24.1, aimed at iterative processing of CSV/JSONL data with no-strings dependencies on third-party LLM frameworks, is out 🎉 📦: https://pypi.org/project/bulk-chain/0.24.1/ 🌟: https://github.com/nicolay-r/bulk-chain 📘: https://github.com/nicolay-r/bulk-chain/issues/26 The key feature of bulk-chain is SQLite caching, which saves your time ⏰️ and money 💵 by guaranteeing no data is lost, which is important when using remote LLM providers such as OpenAI, ReplicateIO, OpenRouter, etc. 🔧 This release has the following updates: ✅ Improved stability for various header conditions and the related SQLite support ✅ Manual setup for the ID column / assigning the ID ✅ CSV-related setups made dynamic, deferring to the related Python 📦 csv package. Quick start on Google Colab: 📙: https://colab.research.google.com/github/nicolay-r/bulk-chain/blob/master/bulk_chain_tutorial.ipynb Below is an example of the three simple steps in pictures: 1. ⬇️ Package installation 2. ✍️ Declaring the schema 3. 🚀 Launching inference for your data with Replicate and 🤖 https://huggingface.co/meta-llama/Llama-3.1-405B
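The post doesn't spell out bulk-chain's API, so rather than guess at it, here is a hypothetical, self-contained sketch of the idea it credits: persisting each record's LLM output to SQLite as soon as it arrives, so an interrupted run over CSV/JSONL data can resume without re-querying (and re-paying) the provider. All names below are made up for illustration and are not bulk-chain's actual interface.

```python
# Hypothetical sketch of SQLite-backed caching for per-row LLM inference
# (illustrates the "no data lost" idea; this is NOT bulk-chain's actual API).
import sqlite3

def cached_bulk_infer(db_path: str, rows: list[dict], id_col: str, call_llm) -> None:
    con = sqlite3.connect(db_path)
    con.execute("CREATE TABLE IF NOT EXISTS results (id TEXT PRIMARY KEY, output TEXT)")
    done = {r[0] for r in con.execute("SELECT id FROM results")}
    for row in rows:
        rid = str(row[id_col])
        if rid in done:              # already computed and paid for: skip on resume
            continue
        output = call_llm(row)       # remote provider call (OpenAI, Replicate, OpenRouter, ...)
        con.execute("INSERT INTO results (id, output) VALUES (?, ?)", (rid, output))
        con.commit()                 # persist immediately so a crash loses nothing
    con.close()
```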
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/tn6ZouW8I2JfVs1PGO9m6.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/6i15rnl5WrdGeM5Cj3AU6.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/DHdg63uc6NBJ091mbKdqT.png" } ]
[]
[ { "reaction": "👀", "users": [ "John6666", "Sri-Vigneshwar-DJ", "EquinoxElahin" ], "count": 3 }, { "reaction": "🔥", "users": [ "prithivMLmods", "kaykyramos" ], "count": 2 } ]
2024-11-02T13:00:17.000Z
2024-11-02T13:06:40.115Z
[]
/posts/nicolay-r/861259083059023
1,832
0
568860377306586
[ { "type": "text", "value": "hi everyone, i have just uploaded my first fine tuned model, but serverless inference client is'nt available, its built with transformer architecture and is just a fine tuned llama 8b instruct. does anyone know how to make serverless inference available on a model?", "raw": "hi everyone, i have just uploaded my first fine tuned model, but serverless inference client is'nt available, its built with transformer architecture and is just a fine tuned llama 8b instruct. does anyone know how to make serverless inference available on a model?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hi everyone, I have just uploaded my first fine-tuned model, but the serverless inference client isn't available. It's built with the transformer architecture and is just a fine-tuned Llama 8B Instruct. Does anyone know how to make serverless inference available on a model?
{ "avatarUrl": "/avatars/7be1913712fdd1ffe75967ed19007720.svg", "fullname": "stock mining", "name": "automatedstockminingorg", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 10, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "SujanKarki", "John6666", "Sri-Vigneshwar-DJ", "luigi12345", "Clausss" ], "count": 5 }, { "reaction": "😔", "users": [ "AtAndDev" ], "count": 1 } ]
2024-11-02T07:40:09.000Z
2024-11-04T04:39:24.779Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false }, { "avatarUrl": "/avatars/7be1913712fdd1ffe75967ed19007720.svg", "fullname": "stock mining", "name": "automatedstockminingorg", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 10, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65a76f3cac3a06f3e8bdf9f5/hYTFOYj1Pca0ZOugSE42o.jpeg", "fullname": "Pankaj Singh", "name": "Pankaj8922", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64408cd43e0374802e19f454/o8PPWigydaYlqgKO5tTFX.png", "fullname": "Darrel Bryan", "name": "ZeroXClem", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 14, "isFollowing": false } ]
/posts/automatedstockminingorg/568860377306586
1,754
6
792961882663632
[ { "type": "text", "value": "Hey guys. ", "raw": "Hey guys. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is my first post here on huggingface. I'm glad to be a part of this amazing community!", "raw": "This is my first post here on huggingface. I'm glad to be a part of this amazing community!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hey guys. This is my first post here on huggingface. I'm glad to be a part of this amazing community!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/637251142f98dcc049b349de/kkRLjyaO55_nFrTNWRZFQ.jpeg", "fullname": "Haghiri", "name": "Muhammadreza", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 26, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "Cryptororaa", "codersboutique", "John6666", "BaskarR", "victor", "cfahlgren1", "Niansuh", "nyuuzyou", "asaduzzaman319" ], "count": 9 }, { "reaction": "🤝", "users": [ "codersboutique", "John6666", "MuizIsCool" ], "count": 3 }, { "reaction": "🤗", "users": [ "Smorty100" ], "count": 1 } ]
2024-11-01T23:24:50.000Z
2024-11-02T16:15:08.182Z
[ { "avatarUrl": "/avatars/f14249f9f4a2fdd967f629df05c7a87e.svg", "fullname": "Coders Boutique", "name": "codersboutique", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false } ]
/posts/Muhammadreza/792961882663632
2,577
2
525160027304949
[ { "type": "text", "value": "We just released a paper (NeuZip) that compresses VRAM in a lossless manner to run larger models. This should be particularly useful when VRAM is insufficient during training/inference. Specifically, we look inside each floating number and find that the exponents are highly compressible (as shown in the figure below).", "raw": "We just released a paper (NeuZip) that compresses VRAM in a lossless manner to run larger models. This should be particularly useful when VRAM is insufficient during training/inference. Specifically, we look inside each floating number and find that the exponents are highly compressible (as shown in the figure below).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read more about the work at ", "raw": "Read more about the work at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2410.20650", "href": null, "resource": { "type": "paper", "id": "2410.20650", "discussionNum": null }, "url": "https://huggingface.co/papers/2410.20650", "code": null, "user": null, "label": "NeuZip: Memory-Efficient Training and Inference with Dynamic Compression\n of Neural Networks (2410.20650)", "lang": null } ]
We just released a paper (NeuZip) that compresses VRAM in a lossless manner to run larger models. This should be particularly useful when VRAM is insufficient during training/inference. Specifically, we look inside each floating-point number and find that the exponents are highly compressible (as shown in the figure below). Read more about the work at https://huggingface.co/papers/2410.20650
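A quick way to sanity-check the compressibility claim (my own illustration, not the authors' code): measure the empirical entropy of the 8 exponent bits. The sketch below uses float32 for simplicity; bfloat16 has the same 8-bit exponent field, so the argument carries over.

```python
# Illustration only (not the NeuZip implementation): estimate how many bits of
# entropy the 8-bit exponent field of floating-point weights actually carries.
import numpy as np

def exponent_entropy_bits(x: np.ndarray) -> float:
    bits = x.astype(np.float32).ravel().view(np.uint32)
    exponents = (bits >> 23) & 0xFF                 # float32: 1 sign, 8 exponent, 23 mantissa bits
    counts = np.bincount(exponents.astype(np.int64), minlength=256)
    p = counts[counts > 0] / counts.sum()
    return float(-(p * np.log2(p)).sum())           # Shannon entropy in bits (max 8)

w = np.random.randn(1_000_000)                      # stand-in for real model weights
print(f"~{exponent_entropy_bits(w):.2f} bits of entropy per 8-bit exponent")
```

For Gaussian-like weights this typically comes out at only a few bits, which is the headroom a lossless entropy coder can exploit.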
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62d5fc0a34e211202759496f/Exk5Xz6Ibzw3FF9md1e3C.jpeg", "fullname": "Yongchang Hao", "name": "yongchanghao", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 12, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62d5fc0a34e211202759496f/LOc2LEWsgZBhinZu3AI8W.png" } ]
[]
[ { "reaction": "🔥", "users": [ "gatorand", "John6666", "AtAndDev", "Viewegger", "spooner2", "YaTharThShaRma999", "prithivMLmods", "amphealy", "Jonatandb", "aaditya", "Kasnol", "maywell", "OrigamiDream", "Norod78", "Chief-Inspector", "victor", "yongchanghao", "jgitsolutions" ], "count": 18 }, { "reaction": "👀", "users": [ "ijohn07", "jgitsolutions", "qmpzqmpz" ], "count": 3 } ]
2024-11-01T21:54:07.000Z
2024-11-01T21:54:07.754Z
[]
/posts/yongchanghao/525160027304949
3,724
0
896561565033687
[ { "type": "text", "value": "Did you guys know that if you try to link a prepaid card to huggingface it won't work, but then if you press the button again it links anyway? Then you can lock the card (deny any charges), and get resources for free? You're welcome :P", "raw": "Did you guys know that if you try to link a prepaid card to huggingface it won't work, but then if you press the button again it links anyway? Then you can lock the card (deny any charges), and get resources for free? You're welcome :P", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Did you guys know that if you try to link a prepaid card to huggingface it won't work, but then if you press the button again it links anyway? Then you can lock the card (deny any charges), and get resources for free? You're welcome :P
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png", "fullname": "Noa Roggendorff", "name": "nroggendorff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 141, "isFollowing": false }
[]
[]
[ { "reaction": "🤯", "users": [ "John6666", "1tbfree", "AtAndDev", "CYGDEN", "not-lain", "victor", "ngxson", "karthickspk08" ], "count": 8 }, { "reaction": "🤝", "users": [ "gatorand", "John6666", "AtAndDev", "CYGDEN" ], "count": 4 } ]
2024-11-01T20:48:54.000Z
2024-11-02T10:32:54.052Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png", "fullname": "Noa Roggendorff", "name": "nroggendorff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 141, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false }, { "avatarUrl": "/avatars/eb93f56f56b5aad0b294d2d3d4d9eddf.svg", "fullname": "Qkasriel", "name": "qkasriel", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 4, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false } ]
/posts/nroggendorff/896561565033687
1,815
4
558044235307225
[ { "type": "text", "value": "How would you like to be able to run AI Agents locally from your computer, for $0? Does this sound like a pipe dream? It is reality. Note: I am of the personal opinion that agent-based technology is still 'not quite ready for primetime'. That has not stopped FAANG from flooding you with agent-based products though. So, if you want to buy their marketing, here is what they are offering you, for free. ", "raw": "How would you like to be able to run AI Agents locally from your computer, for $0? Does this sound like a pipe dream? It is reality. Note: I am of the personal opinion that agent-based technology is still 'not quite ready for primetime'. That has not stopped FAANG from flooding you with agent-based products though. So, if you want to buy their marketing, here is what they are offering you, for free. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/aV3F5fqHyqc", "href": "https://youtu.be/aV3F5fqHyqc", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
How would you like to be able to run AI Agents locally from your computer, for $0? Does this sound like a pipe dream? It is reality. Note: I am of the personal opinion that agent-based technology is still 'not quite ready for primetime'. That has not stopped FAANG from flooding you with agent-based products though. So, if you want to buy their marketing, here is what they are offering you, for free. https://youtu.be/aV3F5fqHyqc
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64274b69ba6cef0a6ebb0fd6/MQTOxsaFui2QUxj4px-b2.jpeg" } ]
[]
[ { "reaction": "👀", "users": [ "John6666", "some-one-m", "Sri-Vigneshwar-DJ" ], "count": 3 } ]
2024-11-01T19:11:26.000Z
2024-11-02T05:58:12.853Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6316fb937b0ee0136e5f1220/poHBoJ7QAF_s2CCaosdvQ.jpeg", "fullname": "Firstname Lastname", "name": "takeraparterer", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 29, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false } ]
/posts/TuringsSolutions/558044235307225
1,114
6
325931788344240
[ { "type": "text", "value": "🎙️ \"We need digital sobriety.\" ", "raw": "🎙️ \"We need digital sobriety.\" ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@sasha", "href": null, "resource": null, "url": null, "code": null, "user": "sasha", "label": null, "lang": null }, { "type": "text", "value": " challenges Big Tech's race for nuclear energy on BBC AI Decoded. Instead of pursuing more power, shouldn't we first ask if we really need AI everywhere?", "raw": " challenges Big Tech's race for nuclear energy on BBC AI Decoded. Instead of pursuing more power, shouldn't we first ask if we really need AI everywhere?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Such an eye-opening chat! Check it out here: ", "raw": "Such an eye-opening chat! Check it out here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.youtube.com/watch?v=3wAduy52mGc", "href": "https://www.youtube.com/watch?v=3wAduy52mGc", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🎙️ "We need digital sobriety." @sasha challenges Big Tech's race for nuclear energy on BBC AI Decoded. Instead of pursuing more power, shouldn't we first ask if we really need AI everywhere? Such an eye-opening chat! Check it out here: https://www.youtube.com/watch?v=3wAduy52mGc
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/FdvvXxzaTz3POPM0p6b7a.mp4" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60edd0133e2c73a9a21455f5/yK1G-Fv-YjYb7v_chkz3p.jpeg", "fullname": "Sasha Luccioni", "name": "sasha", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 162 } ]
[ { "reaction": "😔", "users": [ "gatorand", "oneiroid" ], "count": 2 }, { "reaction": "👀", "users": [ "John6666", "Sri-Vigneshwar-DJ" ], "count": 2 }, { "reaction": "🚀", "users": [ "wneopi" ], "count": 1 } ]
2024-11-01T18:45:43.000Z
2024-11-01T18:45:43.607Z
[]
/posts/fdaudens/325931788344240
1,194
0
999461581189859
[ { "type": "text", "value": "> Oasis: First Real-Time Video Game Without a Game Engine! 🎮", "raw": "> Oasis: First Real-Time Video Game Without a Game Engine! 🎮", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "DecartAI & Etched just released Oasis - a fully AI-generated video game running at 20 FPS (frames per second). The model takes keyboard inputs and generates everything - physics, rules, graphics - on the fly, without any game engine.", "raw": "DecartAI & Etched just released Oasis - a fully AI-generated video game running at 20 FPS (frames per second). The model takes keyboard inputs and generates everything - physics, rules, graphics - on the fly, without any game engine.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚡️ What makes this special? Current text-to-video models (Mochi-1, Sora, Kling) generate about 1 frame every 10-20 seconds (that's the kind of device I had to play LoL back in the day, thus my low rankings). Oasis is 200 times faster, making it the first playable AI-generated game.", "raw": "⚡️ What makes this special? Current text-to-video models (Mochi-1, Sora, Kling) generate about 1 frame every 10-20 seconds (that's the kind of device I had to play LoL back in the day, thus my low rankings). Oasis is 200 times faster, making it the first playable AI-generated game.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚙️ Under the hood, it uses a vision transformer to encode space and a diffusion model to generate frames. The secret sauce is \"dynamic noising\" - a technique that keeps the video stable between frames.", "raw": "⚙️ Under the hood, it uses a vision transformer to encode space and a diffusion model to generate frames. 
The secret sauce is \"dynamic noising\" - a technique that keeps the video stable between frames.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key insights:", "raw": "Key insights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚡️ Generates 20 FPS, vs 0.2 FPS for other DIT-based video models", "raw": "⚡️ Generates 20 FPS, vs 0.2 FPS for other DIT-based video models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ The specialized hardware Sohu developed by Etched allows to handle 10x more player than H100", "raw": "‣ The specialized hardware Sohu developed by Etched allows to handle 10x more player than H100", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🎮 Features real game mechanics", "raw": "🎮 Features real game mechanics", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ Movement, jumping, item management", "raw": "‣ Movement, jumping, item management", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ Physics and lighting", "raw": "‣ Physics and lighting", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ Procedurally generated worlds", "raw": "‣ Procedurally generated worlds", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚠️ Current limitations", 
"raw": "⚠️ Current limitations", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ Blurry graphics at a distance", "raw": "‣ Blurry graphics at a distance", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ Objects sometimes change appearance", "raw": "‣ Objects sometimes change appearance", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "‣ Memory issues in long sessions", "raw": "‣ Memory issues in long sessions", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Try it yourself, the playable demo is impressive! 👉 ", "raw": "Try it yourself, the playable demo is impressive! 👉 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://oasis.decart.ai/welcome", "href": "https://oasis.decart.ai/welcome", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Code 👉 ", "raw": "Code 👉 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/etched-ai/open-oasis", "href": "https://github.com/etched-ai/open-oasis", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read it in full 👉 ", "raw": "Read it in full 👉 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://oasis-model.github.io/", "href": "https://oasis-model.github.io/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
> Oasis: First Real-Time Video Game Without a Game Engine! 🎮 DecartAI & Etched just released Oasis - a fully AI-generated video game running at 20 FPS (frames per second). The model takes keyboard inputs and generates everything - physics, rules, graphics - on the fly, without any game engine. ⚡️ What makes this special? Current text-to-video models (Mochi-1, Sora, Kling) generate about 1 frame every 10-20 seconds (roughly the frame rate I got playing LoL back in the day, hence my low rankings). Oasis is 200 times faster, making it the first playable AI-generated game. ⚙️ Under the hood, it uses a vision transformer to encode space and a diffusion model to generate frames. The secret sauce is "dynamic noising" - a technique that keeps the video stable between frames. Key insights: ⚡️ Generates 20 FPS, vs 0.2 FPS for other DiT-based video models ‣ The specialized Sohu hardware developed by Etched can handle 10x more players than an H100 🎮 Features real game mechanics ‣ Movement, jumping, item management ‣ Physics and lighting ‣ Procedurally generated worlds ⚠️ Current limitations ‣ Blurry graphics at a distance ‣ Objects sometimes change appearance ‣ Memory issues in long sessions Try it yourself, the playable demo is impressive! 👉 https://oasis.decart.ai/welcome Code 👉 https://github.com/etched-ai/open-oasis Read it in full 👉 https://oasis-model.github.io/
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/VEMIsJt0_PymgUM5jpSj2.mp4" } ]
[]
[ { "reaction": "🔥", "users": [ "YaTharThShaRma999", "John6666", "AtAndDev", "nicolay-r", "Clausss", "nyuuzyou", "johnpaulbin" ], "count": 7 }, { "reaction": "🧠", "users": [ "Tonic", "andricx" ], "count": 2 }, { "reaction": "🚀", "users": [ "Tonic", "andricx" ], "count": 2 }, { "reaction": "❤️", "users": [ "Tonic", "andricx" ], "count": 2 }, { "reaction": "😎", "users": [ "Tonic", "andricx" ], "count": 2 } ]
2024-11-01T18:08:38.000Z
2024-11-01T18:08:38.625Z
[]
/posts/m-ric/999461581189859
2,342
0
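The Oasis post above only names "dynamic noising" without details, so the following is a loose sketch of one plausible reading: noise-augmenting the previously generated context frames before conditioning on them, so that the model's own small errors do not accumulate over a long rollout. `denoise_next_frame` is a hypothetical stand-in for the diffusion model; nothing here is taken from the Oasis code.

```python
# A loose, hypothetical sketch of noise-augmented autoregressive frame
# generation -- one possible interpretation of "dynamic noising", not the
# actual Oasis implementation.
import torch

def rollout(denoise_next_frame, first_frame: torch.Tensor, steps: int,
            context_noise: float = 0.1) -> torch.Tensor:
    frames = [first_frame]
    for _ in range(steps):
        context = torch.stack(frames[-4:])                            # last few generated frames
        noisy_context = context + context_noise * torch.randn_like(context)
        frames.append(denoise_next_frame(noisy_context))              # condition on the noised context
    return torch.stack(frames)
```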
730234069278304
[ { "type": "text", "value": "The dataset of all Italian laws just released ", "raw": "The dataset of all Italian laws just released ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/mii-llm/gazzetta-ufficiale", "href": null, "resource": { "type": "dataset", "id": "mii-llm/gazzetta-ufficiale", "discussionNum": null }, "url": "https://huggingface.co/datasets/mii-llm/gazzetta-ufficiale", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Kudos to ", "raw": "Kudos to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@efederici", "href": null, "resource": null, "url": null, "code": null, "user": "efederici", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@mferraretto", "href": null, "resource": null, "url": null, "code": null, "user": "mferraretto", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@z-uo", "href": null, "resource": null, "url": null, "code": null, "user": "z-uo", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
The dataset of all Italian laws has just been released: https://huggingface.co/datasets/mii-llm/gazzetta-ufficiale Kudos to @efederici @mferraretto @z-uo
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fef4eb7770b06e11c2c6381/1NMdigjCGtn0yvQZSi5NJ.png", "fullname": "Alessandro Ercolani", "name": "giux78", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 44, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/612246596d9ce900691744d2/9DlHVQDqblKz7QPTA6nDa.jpeg", "fullname": "Edoardo Federici", "name": "efederici", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 28 }, { "avatarUrl": "/avatars/24921b3a2600e145e6fc968164b25b9c.svg", "fullname": "Mattia Ferraretto", "name": "mferraretto", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 7 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1635161073467-61669cac1fa2bc2e548738c6.jpeg", "fullname": "Nicola Landro", "name": "z-uo", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 9 } ]
[ { "reaction": "❤️", "users": [ "clem", "giux78", "samusenps", "mferraretto", "PereLluis13", "efederici", "clefourrier", "ajibawa-2023" ], "count": 8 }, { "reaction": "🤯", "users": [ "lucabaggi", "clem", "giux78" ], "count": 3 } ]
2024-03-05T12:54:58.000Z
2024-03-05T12:54:58.816Z
[]
/posts/giux78/730234069278304
28
0
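If you want to peek at the corpus announced above, a minimal sketch with the `datasets` library could look like this; the split name and record layout are assumptions, so check the dataset card for the actual schema.

```python
# Minimal sketch: stream a few records from the Gazzetta Ufficiale dataset.
# Split name and field layout are assumptions -- see the dataset card.
from datasets import load_dataset

ds = load_dataset("mii-llm/gazzetta-ufficiale", split="train", streaming=True)
for i, law in enumerate(ds):
    print(law)   # each record should be one published document
    if i == 2:
        break
```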
419340718470762
[ { "type": "text", "value": "The Stable Diffusion 3 research paper broken down, including some overlooked details! 📝", "raw": "The Stable Diffusion 3 research paper broken down, including some overlooked details! 📝", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model", "raw": "Model", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📏 2 base model variants mentioned: 2B and 8B sizes", "raw": "📏 2 base model variants mentioned: 2B and 8B sizes", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📐 New architecture in all abstraction levels: ", "raw": "📐 New architecture in all abstraction levels: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 🔽 UNet; ⬆️ Multimodal Diffusion Transformer, bye cross attention 👋", "raw": "- 🔽 UNet; ⬆️ Multimodal Diffusion Transformer, bye cross attention 👋", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 🆕 Rectified flows for the diffusion process", "raw": "- 🆕 Rectified flows for the diffusion process", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 🧩 Still a Latent Diffusion Model", "raw": "- 🧩 Still a Latent Diffusion Model", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📄 3 text-encoders: 2 CLIPs, one T5-XXL; plug-and-play: removing the larger one maintains competitiveness", "raw": "📄 3 text-encoders: 2 CLIPs, one T5-XXL; plug-and-play: removing the larger one maintains competitiveness", "href": null, "resource": null, "url": null, "code": null, "user": 
null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🗃️ Dataset was deduplicated with SSCD which helped with memorization (no more details about the dataset tho) ", "raw": "🗃️ Dataset was deduplicated with SSCD which helped with memorization (no more details about the dataset tho) ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Variants", "raw": "Variants", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔁 A DPO fine-tuned model showed great improvement in prompt understanding and aesthetics", "raw": "🔁 A DPO fine-tuned model showed great improvement in prompt understanding and aesthetics", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✏️ An Instruct Edit 2B model was trained, and learned how to do text-replacement ", "raw": "✏️ An Instruct Edit 2B model was trained, and learned how to do text-replacement ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Results", "raw": "Results", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✅ State of the art in automated evals for composition and prompt understanding ", "raw": "✅ State of the art in automated evals for composition and prompt understanding ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✅ Best win rate in human preference evaluation for prompt understanding, aesthetics and typography (missing some details on how many participants and the design of the experiment)", "raw": "✅ Best win rate in human preference evaluation for prompt understanding, aesthetics and typography (missing some details on how many 
participants and the design of the experiment)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://stabilityai-public-packages.s3.us-west-2.amazonaws.com/Stable+Diffusion+3+Paper.pdf", "href": "https://stabilityai-public-packages.s3.us-west-2.amazonaws.com/Stable+Diffusion+3+Paper.pdf", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
The Stable Diffusion 3 research paper broken down, including some overlooked details! 📝 Model 📏 2 base model variants mentioned: 2B and 8B sizes 📐 New architecture in all abstraction levels: - 🔽 UNet; ⬆️ Multimodal Diffusion Transformer, bye cross attention 👋 - 🆕 Rectified flows for the diffusion process - 🧩 Still a Latent Diffusion Model 📄 3 text-encoders: 2 CLIPs, one T5-XXL; plug-and-play: removing the larger one maintains competitiveness 🗃️ Dataset was deduplicated with SSCD which helped with memorization (no more details about the dataset tho) Variants 🔁 A DPO fine-tuned model showed great improvement in prompt understanding and aesthetics ✏️ An Instruct Edit 2B model was trained, and learned how to do text-replacement Results ✅ State of the art in automated evals for composition and prompt understanding ✅ Best win rate in human preference evaluation for prompt understanding, aesthetics and typography (missing some details on how many participants and the design of the experiment) Paper: https://stabilityai-public-packages.s3.us-west-2.amazonaws.com/Stable+Diffusion+3+Paper.pdf
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1649143001781-624bebf604abc7ebb01789af.jpeg", "fullname": "Apolinário from multimodal AI art", "name": "multimodalart", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 3177, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "osanseviero", "fffiloni", "victor", "mvaloatto", "clem", "BrigitteTousi", "WaveCut", "EtienneDosSantos", "samusenps", "Tonic", "chethan62", "Eungbean", "MrForExample", "Obhee", "GPT007" ], "count": 15 }, { "reaction": "❤️", "users": [ "clem", "BrigitteTousi", "samusenps", "leegao19", "Tonic", "Bils", "gonduras" ], "count": 7 } ]
2024-03-05T11:13:22.000Z
2024-03-08T04:56:36.756Z
[ { "avatarUrl": "/avatars/ce597d8d2640c726473dd85ae8c5cdc7.svg", "fullname": "Lee Gao", "name": "leegao19", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1661165878439-noauth.jpeg", "fullname": "Valeriy Selitskiy", "name": "WaveCut", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 15, "isFollowing": false }, { "avatarUrl": "/avatars/38495afec0e0dc4c23308bb517bbc409.svg", "fullname": "NibbyNobNibber", "name": "MoveScores18", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false } ]
/posts/multimodalart/419340718470762
15,354
3
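For readers unfamiliar with the "rectified flows" mentioned in the breakdown above, here is a minimal sketch of the training objective: interpolate linearly between data and noise, and have the network regress the constant velocity along that line. `model` is a hypothetical velocity predictor over image-shaped latents; this is not the SD3 training code.

```python
# Minimal rectified-flow (flow matching) loss sketch, assuming x0 is a batch
# of image-shaped latents (B, C, H, W) and `model` predicts a velocity field.
import torch
import torch.nn.functional as F

def rectified_flow_loss(model, x0: torch.Tensor) -> torch.Tensor:
    noise = torch.randn_like(x0)                                  # x1 ~ N(0, I)
    t = torch.rand(x0.shape[0], device=x0.device).view(-1, 1, 1, 1)
    xt = (1.0 - t) * x0 + t * noise                               # straight-line interpolant
    target_velocity = noise - x0                                  # d(xt)/dt is constant along the line
    pred = model(xt, t.flatten())                                 # model regresses the velocity
    return F.mse_loss(pred, target_velocity)
```

The appeal of this objective is that sampling can follow nearly straight paths, which is part of why it pairs well with the transformer backbone described in the post.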
293058125194160
[ { "type": "text", "value": "🗺 Major TOM: Expandable Datasets for Earth Observation", "raw": "🗺 Major TOM: Expandable Datasets for Earth Observation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🚨 RECORD-BREAKING EO DATASET: the largest ever ML-ready Sentinel-2 dataset! It covers almost every single point on Earth captured by the Copernicus Sentinel-2 satellite. ", "raw": "🚨 RECORD-BREAKING EO DATASET: the largest ever ML-ready Sentinel-2 dataset! It covers almost every single point on Earth captured by the Copernicus Sentinel-2 satellite. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@mikonvergence", "href": null, "resource": null, "url": null, "code": null, "user": "mikonvergence", "label": null, "lang": null }, { "type": "text", "value": " and I are thrilled to finally announce the release of ", "raw": " and I are thrilled to finally announce the release of ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/Major-TOM/Core-S2L2A", "href": null, "resource": { "type": "dataset", "id": "Major-TOM/Core-S2L2A", "discussionNum": null }, "url": "https://huggingface.co/datasets/Major-TOM/Core-S2L2A", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " and ", "raw": " and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/Major-TOM/Core-S2L1C", "href": null, "resource": { "type": "dataset", "id": "Major-TOM/Core-S2L1C", "discussionNum": null }, "url": "https://huggingface.co/datasets/Major-TOM/Core-S2L1C", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🌍 About half of the entire planet is covered. That's 2,245,886 patches of 1068 x 1068 pixels, available in both L1C and L2A. At 10 m resolution, we've got 256 million square km with over 2.5 trillion pixels. It's all yours with a few lines of code. See the paper linked below 🔽 for more info!", "raw": "🌍 About half of the entire planet is covered. That's 2,245,886 patches of 1068 x 1068 pixels, available in both L1C and L2A. At 10 m resolution, we've got 256 million square km with over 2.5 trillion pixels. It's all yours with a few lines of code. 
See the paper linked below 🔽 for more info!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🧱 And this is just the beginning. We are currently preparing more datasets from different satellites for the Major TOM org. TOM stands for Terrestrial Observation Metaset - a simple set of rules for building an ecosystem of ML-ready EO datasets, which can be seamlessly combined as if they were Lego bricks.", "raw": "🧱 And this is just the beginning. We are currently preparing more datasets from different satellites for the Major TOM org. TOM stands for Terrestrial Observation Metaset - a simple set of rules for building an ecosystem of ML-ready EO datasets, which can be seamlessly combined as if they were Lego bricks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🚴‍♀️ Want to take the dataset for a spin? We have a viewer app on spaces that lets you go anywhere on Earth and shows you the data, if its available ", "raw": "🚴‍♀️ Want to take the dataset for a spin? We have a viewer app on spaces that lets you go anywhere on Earth and shows you the data, if its available ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Major-TOM/MajorTOM-Core-Viewer", "href": null, "resource": { "type": "space", "id": "Major-TOM/MajorTOM-Core-Viewer", "discussionNum": null }, "url": "https://huggingface.co/spaces/Major-TOM/MajorTOM-Core-Viewer", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📰 Preprint paper: ", "raw": "📰 Preprint paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2402.12095", "href": null, "resource": { "type": "paper", "id": "2402.12095", "discussionNum": null }, "url": "https://huggingface.co/papers/2402.12095", "code": null, "user": null, "label": "Major TOM: Expandable Datasets for Earth Observation (2402.12095)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💻 Colab example: ", "raw": "💻 Colab example: ", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://colab.research.google.com/github/ESA-PhiLab/Major-TOM/blob/main/03-Filtering-in-Colab.ipynb", "href": "https://colab.research.google.com/github/ESA-PhiLab/Major-TOM/blob/main/03-Filtering-in-Colab.ipynb", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thank you to the amazing 🤗Hugging Face team for the support on this one! ", "raw": "Thank you to the amazing 🤗Hugging Face team for the support on this one! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@osanseviero", "href": null, "resource": null, "url": null, "code": null, "user": "osanseviero", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@lhoestq", "href": null, "resource": null, "url": null, "code": null, "user": "lhoestq", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@BrigitteTousi", "href": null, "resource": null, "url": null, "code": null, "user": "BrigitteTousi", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🗺 Major TOM: Expandable Datasets for Earth Observation 🚨 RECORD-BREAKING EO DATASET: the largest ever ML-ready Sentinel-2 dataset! It covers almost every single point on Earth captured by the Copernicus Sentinel-2 satellite. @mikonvergence and I are thrilled to finally announce the release of https://huggingface.co/datasets/Major-TOM/Core-S2L2A and https://huggingface.co/datasets/Major-TOM/Core-S2L1C 🌍 About half of the entire planet is covered. That's 2,245,886 patches of 1068 x 1068 pixels, available in both L1C and L2A. At 10 m resolution, we've got 256 million square km with over 2.5 trillion pixels. It's all yours with a few lines of code. See the paper linked below 🔽 for more info! 🧱 And this is just the beginning. We are currently preparing more datasets from different satellites for the Major TOM org. TOM stands for Terrestrial Observation Metaset - a simple set of rules for building an ecosystem of ML-ready EO datasets, which can be seamlessly combined as if they were Lego bricks. 🚴‍♀️ Want to take the dataset for a spin? We have a viewer app on Spaces that lets you go anywhere on Earth and shows you the data, if it's available https://huggingface.co/spaces/Major-TOM/MajorTOM-Core-Viewer 📰 Preprint paper: https://huggingface.co/papers/2402.12095 💻 Colab example: https://colab.research.google.com/github/ESA-PhiLab/Major-TOM/blob/main/03-Filtering-in-Colab.ipynb Thank you to the amazing 🤗Hugging Face team for the support on this one! @osanseviero @lhoestq @BrigitteTousi
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/635011301d81beb8e2455ee9/NyDIbzavucEIyFDHnaAv0.jpeg", "fullname": "Alistair Francis", "name": "aliFrancis", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 21, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63691c3eda9b693c2730b2a2/hBtKpgo3_9003MWCGkw5d.png", "fullname": "Brigitte Tousignant", "name": "BrigitteTousi", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 136 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1594214747713-5e9ecfc04957053f60648a3e.png", "fullname": "Quentin Lhoest", "name": "lhoestq", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 196 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1678741407493-6304c06eeb6d777a838eab63.png", "fullname": "Mikolaj Czerkawski", "name": "mikonvergence", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 25 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868 } ]
[ { "reaction": "🤗", "users": [ "mikonvergence", "lhoestq", "osanseviero", "julien-c", "taufiqdp", "mvaloatto", "clem", "BrigitteTousi", "nicolasdec", "kramp", "csaybar" ], "count": 11 }, { "reaction": "❤️", "users": [ "lhoestq", "osanseviero", "julien-c", "clem", "BrigitteTousi", "nicolasdec", "mrapplegate", "samusenps", "brunosan" ], "count": 9 }, { "reaction": "🤯", "users": [ "lhoestq", "julien-c", "clem", "BrigitteTousi" ], "count": 4 } ]
2024-03-05T10:29:49.000Z
2024-03-05T12:03:28.023Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false } ]
/posts/aliFrancis/293058125194160
173
1
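As a complement to the Colab notebook linked in the post above, here is a minimal streaming sketch with the `datasets` library for pulling a couple of Core-S2L2A samples without downloading the full archive; the split name is an assumption, and the printed keys are only there to inspect the schema rather than assume it.

```python
# Minimal sketch: stream a few Sentinel-2 L2A patches from Major TOM.
# The "train" split name is an assumption -- check the dataset card.
from datasets import load_dataset

ds = load_dataset("Major-TOM/Core-S2L2A", split="train", streaming=True)
for i, patch in enumerate(ds):
    print(sorted(patch.keys()))   # inspect the available bands/metadata fields
    if i == 2:
        break
```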
707376120439385
[ { "type": "text", "value": "Just released moondream2 - a small 1.8B parameter vision language model. Now fully open source (Apache 2.0) so you can use it without restrictions on commercial use!", "raw": "Just released moondream2 - a small 1.8B parameter vision language model. Now fully open source (Apache 2.0) so you can use it without restrictions on commercial use!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/vikhyatk/moondream2", "href": null, "resource": { "type": "model", "id": "vikhyatk/moondream2", "discussionNum": null }, "url": "https://huggingface.co/vikhyatk/moondream2", "code": null, "user": null, "label": null, "lang": null } ]
Just released moondream2 - a small 1.8B parameter vision language model. Now fully open source (Apache 2.0) so you can use it without restrictions on commercial use! https://huggingface.co/vikhyatk/moondream2
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63117568fa95534e218da163/8h9zN8aKRxPLBnXW7sqY9.jpeg", "fullname": "Vik Korrapati", "name": "vikhyatk", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 375, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "osanseviero", "mrfakename", "andysalerno", "natolambert", "ajibawa-2023", "Vigilence", "vignesh-fynd", "Tonic", "Csplk", "DmitryRyumin", "Chris4K", "chansung", "Dlbk", "Yoben", "fffiloni", "Kukedlc", "taufiqdp", "clem", "ostris", "samusenps", "saikatkumardey", "multimodalart", "veravira", "euclaise", "twenkid", "sbrandeis" ], "count": 26 }, { "reaction": "👍", "users": [ "osanseviero", "andysalerno", "OsnNos", "Vigilence", "vignesh-fynd", "Tonic", "kramp", "Dlbk", "sourceoftruthdata", "clem", "t1u1", "damerajee" ], "count": 12 }, { "reaction": "🤝", "users": [ "vignesh-fynd", "Tonic", "clem" ], "count": 3 }, { "reaction": "🤯", "users": [ "marc-es", "Redox3275" ], "count": 2 } ]
2024-03-04T23:52:23.000Z
2024-04-14T01:42:32.022Z
[ { "avatarUrl": "/avatars/9af57ddd73fc94d02499d54478d0cfeb.svg", "fullname": "Brian King", "name": "iamrobotbear", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg", "fullname": "Feynman Innovations", "name": "ajibawa-2023", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 138, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669354809260-noauth.jpeg", "fullname": "Prajapati", "name": "vignesh-fynd", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "/avatars/b2725bb163fa15d6c5856121780d52eb.svg", "fullname": "Ci Splunk", "name": "Csplk", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 43, "isFollowing": false }, { "avatarUrl": "/avatars/bf23598ce566558e0306d3bcc8727c2e.svg", "fullname": "Marc Revert", "name": "marc-es", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "/avatars/425196786918bf65bbf823279c0db78d.svg", "fullname": "Laird Foret", "name": "lforet", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "/avatars/483f8c0538f6f2c59c5b6a972152c261.svg", "fullname": "shivani", "name": "shivaniy", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "/avatars/20103539a6c2118aa24ab6f8449a0f7a.svg", "fullname": "PLX", "name": "aleph65", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false } ]
/posts/vikhyatk/707376120439385
162
8
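A short usage sketch for the model announced above, based on the usual `transformers` remote-code pattern; the `encode_image` / `answer_question` helpers are recalled from the model card of that period and may have changed since, so treat them as assumptions and check the card before relying on them.

```python
# Illustrative sketch only -- helper method names follow the moondream2 model
# card of the time and may differ in current revisions.
from transformers import AutoModelForCausalLM, AutoTokenizer
from PIL import Image

model_id = "vikhyatk/moondream2"
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)

image = Image.open("example.jpg")           # any local image (placeholder path)
enc_image = model.encode_image(image)       # assumed remote-code helper
print(model.answer_question(enc_image, "Describe this image.", tokenizer))
```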
635360328098616
[ { "type": "text", "value": "🚀🎬🌟 New Research Alert - CVPR 2024! 🌟🎬 🚀", "raw": "🚀🎬🌟 New Research Alert - CVPR 2024! 🌟🎬 🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📄 Title: GaussianAvatar: Towards Realistic Human Avatar Modeling from a Single Video via Animatable 3D Gaussians 🌟🚀", "raw": "📄 Title: GaussianAvatar: Towards Realistic Human Avatar Modeling from a Single Video via Animatable 3D Gaussians 🌟🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👥 Authors: Liangxiao Hu et al.", "raw": "👥 Authors: Liangxiao Hu et al.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸", "raw": "📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Paper: ", "raw": "🔗 Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2312.02134", "href": null, "resource": { "type": "paper", "id": "2312.02134", "discussionNum": null }, "url": "https://huggingface.co/papers/2312.02134", "code": null, "user": null, "label": "GaussianAvatar: Towards Realistic Human Avatar Modeling from a Single\n Video via Animatable 3D Gaussians (2312.02134)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Github Page: ", "raw": "🔗 Github Page: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huliangxiao.github.io/GaussianAvatar", "href": "https://huliangxiao.github.io/GaussianAvatar", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, 
"label": null, "lang": null }, { "type": "text", "value": "🔗 Repository: ", "raw": "🔗 Repository: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/huliangxiao/GaussianAvatar", "href": "https://github.com/huliangxiao/GaussianAvatar", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Video: ", "raw": "🔗 Video: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.youtube.com/watch?v=a4g8Z9nCF-k", "href": "https://www.youtube.com/watch?v=a4g8Z9nCF-k", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📚 More Papers: more cutting-edge research presented at other conferences in the ", "raw": "📚 More Papers: more cutting-edge research presented at other conferences in the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "href": null, "resource": { "type": "space", "id": "DmitryRyumin/NewEraAI-Papers", "discussionNum": null }, "url": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " curated by ", "raw": " curated by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@DmitryRyumin", "href": null, "resource": null, "url": null, "code": null, "user": "DmitryRyumin", "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🚀 Added to the Avatars Collection: ", "raw": "🚀 Added to the Avatars Collection: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "href": null, "resource": { "type": "collection", "id": "DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "discussionNum": null }, "url": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", 
"value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔍 Keywords: #GaussianAvatar #3DGaussians #HumanAvatarModeling #PoseDependentAppearance #DynamicAppearanceModeling #MotionEstimation #MonocularSettings #AppearanceQuality #RenderingEfficiency #CVPR2024 #DeepLearning #Animation #Innovation", "raw": "🔍 Keywords: #GaussianAvatar #3DGaussians #HumanAvatarModeling #PoseDependentAppearance #DynamicAppearanceModeling #MotionEstimation #MonocularSettings #AppearanceQuality #RenderingEfficiency #CVPR2024 #DeepLearning #Animation #Innovation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🚀🎬🌟 New Research Alert - CVPR 2024! 🌟🎬 🚀 📄 Title: GaussianAvatar: Towards Realistic Human Avatar Modeling from a Single Video via Animatable 3D Gaussians 🌟🚀 👥 Authors: Liangxiao Hu et al. 📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸 🔗 Paper: https://huggingface.co/papers/2312.02134 🔗 Github Page: https://huliangxiao.github.io/GaussianAvatar 🔗 Repository: https://github.com/huliangxiao/GaussianAvatar 🔗 Video: https://www.youtube.com/watch?v=a4g8Z9nCF-k 📚 More Papers: more cutting-edge research presented at other conferences in the https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin 🚀 Added to the Avatars Collection: https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36 🔍 Keywords: #GaussianAvatar #3DGaussians #HumanAvatarModeling #PoseDependentAppearance #DynamicAppearanceModeling #MotionEstimation #MonocularSettings #AppearanceQuality #RenderingEfficiency #CVPR2024 #DeepLearning #Animation #Innovation
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/Ok2JQmPCENof6NbfjRko3.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/QTD-3Ou3o1BAViHTZejF5.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/01ea8l_JJvYc955P7s-v-.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/FR9-MHrG0sy1JM0whWKyM.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/Ejq5yxHQRF0vm_UBjhkgN.gif" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/jp6mPkMx3c0wOEdUysFm6.mp4" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377 } ]
[ { "reaction": "❤️", "users": [ "DmitryRyumin", "vladbogo", "mickdarling", "clem", "MiSTe-R", "StrangeSX", "szymonrucinski", "Tonic", "sbrandeis" ], "count": 9 } ]
2024-03-04T20:41:51.000Z
2024-03-04T22:20:21.797Z
[]
/posts/DmitryRyumin/635360328098616
92
0
717319217106504
[ { "type": "text", "value": "🌊 Released #LaVague, fullly open-source AI pipeline to turn natural language into browser actions!", "raw": "🌊 Released #LaVague, fullly open-source AI pipeline to turn natural language into browser actions!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In less than 150 lines of code (RAG with local embedding + Zephyr-7b-Gemma locally or Mixtral on HF Inference API), it generates #Selenium code from user query. In this GIF you can see it follow user instructions to command a browser to browse HF website! ", "raw": "In less than 150 lines of code (RAG with local embedding + Zephyr-7b-Gemma locally or Mixtral on HF Inference API), it generates #Selenium code from user query. In this GIF you can see it follow user instructions to command a browser to browse HF website! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Try it on Colab: colab.research.google.com/github/dhuynh95/LaVague/blob/main/LaVague.ipynb", "raw": "Try it on Colab: colab.research.google.com/github/dhuynh95/LaVague/blob/main/LaVague.ipynb", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "GitHub: github.com/dhuynh95/LaVague", "raw": "GitHub: github.com/dhuynh95/LaVague", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Pretty exciting how it becomes possible to create an AI assistant that could perform actions for us, such as logging on gov accounts, fill forms, or pull personal information!", "raw": "Pretty exciting how it becomes possible to create an AI assistant that could perform actions for us, such as logging on gov accounts, fill forms, or pull personal information!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It was quite fun to hack in the weekend using open-source tools, from ", "raw": "It was quite fun to 
hack in the weekend using open-source tools, from ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@huggingface", "href": null, "resource": null, "url": null, "code": null, "user": "huggingface", "label": null, "lang": null }, { "type": "text", "value": " local embedding with transformers for local inference or HF Inference API, to RAG with @llama_index, through ", "raw": " local embedding with transformers for local inference or HF Inference API, to RAG with @llama_index, through ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@MistralAI", "href": null, "resource": null, "url": null, "code": null, "user": "MistralAI", "label": null, "lang": null }, { "type": "text", "value": " Mixtral model! ", "raw": " Mixtral model! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Some challenges: to make it run on Colab for the #GPU Poors, I first resorted to ", "raw": "Some challenges: to make it run on Colab for the #GPU Poors, I first resorted to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@huggingface", "href": null, "resource": null, "url": null, "code": null, "user": "huggingface", "label": null, "lang": null }, { "type": "text", "value": " Inference API with Mixtral as it was the only model good enough (gemma-7b did not make it and refused to produce code). But after some experimentations, I managed to make it work a local Zephyr-7b-Gemma so that people could run this assistant fully locally! ", "raw": " Inference API with Mixtral as it was the only model good enough (gemma-7b did not make it and refused to produce code). But after some experimentations, I managed to make it work a local Zephyr-7b-Gemma so that people could run this assistant fully locally! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Because I used an off-the-shelf model, I had to improve performance with few-shot learning and Chain Of Thought, which managed to generate appropriate code! ", "raw": "Because I used an off-the-shelf model, I had to improve performance with few-shot learning and Chain Of Thought, which managed to generate appropriate code! 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I hope this project will herald a new dawn where transparent, private and local AI assistants help automate menial but critical tasks, such as helping fill taxes, book accomodation, or research information for us.", "raw": "I hope this project will herald a new dawn where transparent, private and local AI assistants help automate menial but critical tasks, such as helping fill taxes, book accomodation, or research information for us.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🌊 Released #LaVague, fully open-source AI pipeline to turn natural language into browser actions! In less than 150 lines of code (RAG with local embedding + Zephyr-7b-Gemma locally or Mixtral on HF Inference API), it generates #Selenium code from a user query. In this GIF you can see it follow user instructions to command a browser to browse the HF website! Try it on Colab: colab.research.google.com/github/dhuynh95/LaVague/blob/main/LaVague.ipynb GitHub: github.com/dhuynh95/LaVague Pretty exciting how it becomes possible to create an AI assistant that could perform actions for us, such as logging into government accounts, filling forms, or pulling personal information! It was quite fun to hack on over the weekend using open-source tools, from @huggingface local embedding with transformers for local inference or HF Inference API, to RAG with @llama_index, through the @MistralAI Mixtral model! Some challenges: to make it run on Colab for the #GPU Poors, I first resorted to the @huggingface Inference API with Mixtral as it was the only model good enough (gemma-7b did not make it and refused to produce code). But after some experimentation, I managed to make it work with a local Zephyr-7b-Gemma so that people could run this assistant fully locally! Because I used an off-the-shelf model, I had to improve performance with few-shot learning and Chain of Thought, which managed to generate appropriate code! I hope this project will herald a new dawn where transparent, private and local AI assistants help automate menial but critical tasks, such as helping fill taxes, book accommodation, or research information for us.
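For readers who want a feel for how such a pipeline hangs together, here is a minimal, hypothetical sketch of the core step: few-shot prompting an instruction-tuned model to emit Selenium code for some retrieved HTML context. The model ID, prompt format, and helper names are illustrative assumptions, not LaVague's actual implementation (the real project adds retrieval over the live page source and a local Zephyr-7b-Gemma option).

```python
# Hypothetical sketch of the "natural language -> Selenium code" step.
# Assumes Mixtral via the HF Inference API; in LaVague the HTML snippets
# would come from a RAG step over the page source.
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

FEW_SHOT = '''You write Python Selenium code operating on a driver named `driver`.

# Instruction: Click the "Sign in" button
# Code:
driver.find_element(By.XPATH, "//button[contains(text(), 'Sign in')]").click()
'''

def generate_selenium_code(instruction: str, html_snippets: list) -> str:
    # Build a few-shot prompt: examples, retrieved HTML, then the new instruction.
    prompt = (
        FEW_SHOT
        + "\n# Relevant HTML:\n"
        + "\n".join(html_snippets)
        + f"\n# Instruction: {instruction}\n# Code:\n"
    )
    return client.text_generation(prompt, max_new_tokens=256, temperature=0.1)

generated = generate_selenium_code(
    "Go to the Models page", ["<a href='/models'>Models</a>"]
)
print(generated)  # inspect the generated code before running it against a real browser
```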
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1661497922734-62f4ac43567dbf9a39f75474.jpeg", "fullname": "Daniel Huynh", "name": "dhuynh95", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 75, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62f4ac43567dbf9a39f75474/zdDUsmHDVnf56HBg4fRQK.gif" } ]
[]
[ { "reaction": "👍", "users": [ "osanseviero", "kramp", "ejsellers", "vladbogo", "mexicanamerican", "0xKrypt0r", "Dlbk", "qnguyen3", "julien-c", "pranay-j", "victor", "diwank", "iamrobotbear", "samusenps", "bruceunx", "George-Blaze", "superLk", "timnon" ], "count": 18 }, { "reaction": "❤️", "users": [ "osanseviero", "VictorSanh", "Guilherme34", "Dlbk", "julien-c", "victor", "clem", "diwank", "bonsaielectric", "iamrobotbear", "samusenps", "z3ymd", "kgawron", "bruceunx", "Tavi31", "superLk", "sbrandeis" ], "count": 17 }, { "reaction": "🤯", "users": [ "fffiloni", "victor", "clem", "iamrobotbear", "Tavi31" ], "count": 5 } ]
2024-03-04T17:42:22.000Z
2024-06-21T07:58:51.493Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/657fbd30a575d54a1ea75442/X-PDAOj-OwNGbnZ2WHpfe.jpeg", "fullname": "bruce hu", "name": "bruceunx", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1661497922734-62f4ac43567dbf9a39f75474.jpeg", "fullname": "Daniel Huynh", "name": "dhuynh95", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 75, "isFollowing": false }, { "avatarUrl": "/avatars/9af57ddd73fc94d02499d54478d0cfeb.svg", "fullname": "Brian King", "name": "iamrobotbear", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }, { "avatarUrl": "/avatars/6556f74fc6c3d314cd05ded359a5fd5f.svg", "fullname": "Shiv Kumar", "name": "Shivkumar27", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false } ]
/posts/dhuynh95/717319217106504
417
5
487271582713321
[ { "type": "text", "value": "Understand research papers easier with automatically generated Q&As by LLM (Gemini 1.0 Pro). For this purpose, I have built two projects.", "raw": "Understand research papers easier with automatically generated Q&As by LLM (Gemini 1.0 Pro). For this purpose, I have built two projects.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- [Auto Paper Analysis](", "raw": "- [Auto Paper Analysis](", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/deep-diver/auto-paper-analysis", "href": "https://github.com/deep-diver/auto-paper-analysis", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ") let you generate QAs on a list of papers. The paper list could be specified either from Hugging Face's Daily Papers or in a set of raw arXiv IDs. Then the generated QA dataset could be pushed to the Hugging Face Dataset. Refer to the attached image.", "raw": ") let you generate QAs on a list of papers. The paper list could be specified either from Hugging Face's Daily Papers or in a set of raw arXiv IDs. Then the generated QA dataset could be pushed to the Hugging Face Dataset. Refer to the attached image.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- [PaperQA Space application](", "raw": "- [PaperQA Space application](", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/chansung/paper_qa", "href": null, "resource": { "type": "space", "id": "chansung/paper_qa", "discussionNum": null }, "url": "https://huggingface.co/spaces/chansung/paper_qa", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ") shows how to interact with the generated QA dataset. Search the paper by keyword or date, then understand it with the QAs (in ELI5 and technical versions). Check out the attached video, or visit the space directly.", "raw": ") shows how to interact with the generated QA dataset. Search the paper by keyword or date, then understand it with the QAs (in ELI5 and technical versions). 
Check out the attached video, or visit the space directly.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is a baby step for the automated paper analysis (summarization) to easily consume the exploding information in the field of AI. In the next phase, I am gonna need spend my time to enhance prompt engineering, UI/UX (such as Like/Dislike system), ...", "raw": "This is a baby step for the automated paper analysis (summarization) to easily consume the exploding information in the field of AI. In the next phase, I am gonna need spend my time to enhance prompt engineering, UI/UX (such as Like/Dislike system), ...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "However, in the meantime, I hope this project could be helpful for someone who struggles on understanding papers (new papers comes out even when I did finish reading a paper from yesterday yet,,)!", "raw": "However, in the meantime, I hope this project could be helpful for someone who struggles on understanding papers (new papers comes out even when I did finish reading a paper from yesterday yet,,)!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Also, any suggestion to improve this, please let me know :) ", "raw": "Also, any suggestion to improve this, please let me know :) ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Understand research papers more easily with automatically generated Q&As from an LLM (Gemini 1.0 Pro). For this purpose, I have built two projects. - [Auto Paper Analysis](https://github.com/deep-diver/auto-paper-analysis) lets you generate QAs for a list of papers. The paper list can be specified either from Hugging Face's Daily Papers or as a set of raw arXiv IDs. The generated QA dataset can then be pushed to the Hugging Face Hub as a dataset. Refer to the attached image. - [PaperQA Space application](https://huggingface.co/spaces/chansung/paper_qa) shows how to interact with the generated QA dataset. Search for a paper by keyword or date, then understand it with the QAs (in ELI5 and technical versions). Check out the attached video, or visit the space directly. This is a baby step toward automated paper analysis (summarization) that makes the exploding amount of information in the field of AI easier to consume. In the next phase, I will spend my time enhancing the prompt engineering and the UI/UX (such as a Like/Dislike system), ... In the meantime, I hope this project can be helpful for anyone who struggles to keep up with papers (new papers come out before I have even finished reading yesterday's)! Also, if you have any suggestions to improve this, please let me know :)
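As a rough illustration of the first project's workflow (not its actual code), the sketch below asks Gemini 1.0 Pro for a Q&A pair about a paper abstract and pushes the result to the Hub. The dataset repo name, arXiv ID, and prompt are placeholders.

```python
# Hypothetical sketch: generate a QA pair for one paper with Gemini 1.0 Pro
# and push it to the Hugging Face Hub. Repo name, arXiv ID, and prompt are placeholders.
import google.generativeai as genai
from datasets import Dataset

genai.configure(api_key="YOUR_GOOGLE_API_KEY")
model = genai.GenerativeModel("gemini-1.0-pro")

abstract = "…paper abstract text…"
prompt = (
    "Write one question a reader might ask about this paper and answer it "
    "twice: once as an ELI5 answer, once technically.\n\n" + abstract
)
qa_text = model.generate_content(prompt).text

ds = Dataset.from_list([{"arxiv_id": "0000.00000", "qa": qa_text}])
ds.push_to_hub("your-username/paper-qa-demo")  # requires `huggingface-cli login`
```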
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659971187637-60d3b57ad7b174177faabd6e.jpeg", "fullname": "chansung park", "name": "chansung", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2695, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60d3b57ad7b174177faabd6e/No8xFeVw7R-JeLXARDnX7.png" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/60d3b57ad7b174177faabd6e/mUGuUF4lN1gYGk4nSzd9g.qt" } ]
[]
[ { "reaction": "❤️", "users": [ "chansung", "Kukedlc", "medmac01", "vladbogo", "arcdyn", "Lewdiculous", "pranay-j", "Srulikbd", "samusenps", "bh-lee", "akhaliq", "boapps", "genaiLLM", "sbrandeis" ], "count": 14 }, { "reaction": "👍", "users": [ "acho98", "akhaliq" ], "count": 2 } ]
2024-03-04T17:38:31.000Z
2024-03-04T19:26:33.488Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/640603e2c3ab325efa94bc4a/jBLC7JH2dBAkDHYzFXZmr.jpeg", "fullname": "Mohammed Machrouh", "name": "medmac01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 30, "isFollowing": false } ]
/posts/chansung/487271582713321
68
1
589177903810003
[ { "type": "text", "value": "VisionLLaMA", "raw": "VisionLLaMA", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A Unified LLaMA Interface for Vision Tasks", "raw": "A Unified LLaMA Interface for Vision Tasks", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2403.00522", "href": null, "resource": { "type": "paper", "id": "2403.00522", "discussionNum": null }, "url": "https://huggingface.co/papers/2403.00522", "code": null, "user": null, "label": "VisionLLaMA: A Unified LLaMA Interface for Vision Tasks (2403.00522)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Large language models are built on top of a transformer-based architecture to process textual inputs. For example, the LLaMA stands out among many open-source implementations. Can the same transformer be used to process 2D images? In this paper, we answer this question by unveiling a LLaMA-like vision transformer in plain and pyramid forms, termed VisionLLaMA, which is tailored for this purpose. VisionLLaMA is a unified and generic modelling framework for solving most vision tasks. We extensively evaluate its effectiveness using typical pre-training paradigms in a good portion of downstream tasks of image perception and especially image generation. In many cases, VisionLLaMA have exhibited substantial gains over the previous state-of-the-art vision transformers. We believe that VisionLLaMA can serve as a strong new baseline model for vision generation and understanding.", "raw": "Large language models are built on top of a transformer-based architecture to process textual inputs. For example, the LLaMA stands out among many open-source implementations. Can the same transformer be used to process 2D images? In this paper, we answer this question by unveiling a LLaMA-like vision transformer in plain and pyramid forms, termed VisionLLaMA, which is tailored for this purpose. VisionLLaMA is a unified and generic modelling framework for solving most vision tasks. We extensively evaluate its effectiveness using typical pre-training paradigms in a good portion of downstream tasks of image perception and especially image generation. In many cases, VisionLLaMA have exhibited substantial gains over the previous state-of-the-art vision transformers. 
We believe that VisionLLaMA can serve as a strong new baseline model for vision generation and understanding.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
VisionLLaMA A Unified LLaMA Interface for Vision Tasks https://huggingface.co/papers/2403.00522 Large language models are built on top of a transformer-based architecture to process textual inputs. For example, LLaMA stands out among many open-source implementations. Can the same transformer be used to process 2D images? In this paper, we answer this question by unveiling a LLaMA-like vision transformer in plain and pyramid forms, termed VisionLLaMA, which is tailored for this purpose. VisionLLaMA is a unified and generic modelling framework for solving most vision tasks. We extensively evaluate its effectiveness using typical pre-training paradigms in a good portion of downstream tasks of image perception and especially image generation. In many cases, VisionLLaMA has exhibited substantial gains over previous state-of-the-art vision transformers. We believe that VisionLLaMA can serve as a strong new baseline model for vision generation and understanding.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/_s-yutykNBHaXUFbfAcHJ.png" } ]
[]
[ { "reaction": "❤️", "users": [ "osanseviero", "clem", "Kukedlc", "hasangoni", "vladbogo", "notune", "Yoben", "mvaloatto", "TD788432", "samusenps", "Abecid", "jsfs11", "Ryukijano", "sprime01", "researchase", "sbrandeis" ], "count": 16 } ]
2024-03-04T15:34:12.000Z
2024-03-04T15:34:12.087Z
[]
/posts/akhaliq/589177903810003
218
0
309061623352456
[ { "type": "text", "value": "Today, we're launching an effort to empower the community to build impactful datasets collectively. ", "raw": "Today, we're launching an effort to empower the community to build impactful datasets collectively. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Good data is essential for the open-source AI community. Recently, Argilla and Hugging Face launched Data is Better Together. In less than two weeks, over 350 people ranked over 10k prompts. ", "raw": "Good data is essential for the open-source AI community. Recently, Argilla and Hugging Face launched Data is Better Together. In less than two weeks, over 350 people ranked over 10k prompts. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Today, we're shifting our focus to help support other community efforts to create datasets using Argilla and Hugging Face Spaces. This workflow means anyone with a Hugging Face account can contribute to a dataset in less than a minute. We want to hear from anyone with ideas for creating important datasets as a community. This could include things like:", "raw": "Today, we're shifting our focus to help support other community efforts to create datasets using Argilla and Hugging Face Spaces. This workflow means anyone with a Hugging Face account can contribute to a dataset in less than a minute. We want to hear from anyone with ideas for creating important datasets as a community. 
This could include things like:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Creating preference data for a language that lacks high-quality preference datasets.", "raw": "- Creating preference data for a language that lacks high-quality preference datasets.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Building evaluation datasets for a new domain.", "raw": "- Building evaluation datasets for a new domain.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Developing a dataset for a new task. ", "raw": "- Developing a dataset for a new task. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If you would like to get involved, join us in the ", "raw": "If you would like to get involved, join us in the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`#data-is-better-together`", "href": null, "resource": null, "url": null, "code": "#data-is-better-together", "user": null, "label": null, "lang": null }, { "type": "text", "value": " Discord channel: ", "raw": " Discord channel: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://discord.com/channels/879548962464493619/1205128865735770142", "href": "https://discord.com/channels/879548962464493619/1205128865735770142", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ". ", "raw": ". 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can read more in this blog post from ", "raw": "You can read more in this blog post from ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@dvilasuero", "href": null, "resource": null, "url": null, "code": null, "user": "dvilasuero", "label": null, "lang": null }, { "type": "text", "value": " and I: ", "raw": " and I: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/community-datasets", "href": "https://huggingface.co/blog/community-datasets", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Today, we're launching an effort to empower the community to build impactful datasets collectively. Good data is essential for the open-source AI community. Recently, Argilla and Hugging Face launched Data is Better Together. In less than two weeks, over 350 people ranked over 10k prompts. Today, we're shifting our focus to help support other community efforts to create datasets using Argilla and Hugging Face Spaces. This workflow means anyone with a Hugging Face account can contribute to a dataset in less than a minute. We want to hear from anyone with ideas for creating important datasets as a community. This could include things like: - Creating preference data for a language that lacks high-quality preference datasets. - Building evaluation datasets for a new domain. - Developing a dataset for a new task. If you would like to get involved, join us in the `#data-is-better-together` Discord channel: https://discord.com/channels/879548962464493619/1205128865735770142. You can read more in this blog post from @dvilasuero and me: https://huggingface.co/blog/community-datasets
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg", "fullname": "Daniel van Strien", "name": "davanstrien", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 410, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60420dccc15e823a685f2b03/Dn7QTyy9SZ7jKN6xpufVD.png", "fullname": "Daniel Vila", "name": "dvilasuero", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 231 } ]
[ { "reaction": "❤️", "users": [ "osanseviero", "clem", "Kukedlc", "ajibawa-2023", "danielus", "julien-c", "samusenps", "dillfrescott" ], "count": 8 }, { "reaction": "🤗", "users": [ "osanseviero", "julien-c", "dillfrescott" ], "count": 3 } ]
2024-03-04T14:51:04.000Z
2024-03-04T14:52:26.181Z
[]
/posts/davanstrien/309061623352456
24
0
776409626801382
[ { "type": "text", "value": "🚨 Now you can run Starcoder- 2 models locally on your Mac M1 Pro Apple Silicon with 16GB memory! 🧑🏽‍💻 ⚡️✨", "raw": "🚨 Now you can run Starcoder- 2 models locally on your Mac M1 Pro Apple Silicon with 16GB memory! 🧑🏽‍💻 ⚡️✨", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Below is the UX with Twinny extension using ", "raw": "Below is the UX with Twinny extension using ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/bigcode/starcoder2-3b", "href": null, "resource": { "type": "model", "id": "bigcode/starcoder2-3b", "discussionNum": null }, "url": "https://huggingface.co/bigcode/starcoder2-3b", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " for FIM and ", "raw": " for FIM and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf", "href": null, "resource": { "type": "model", "id": "codellama/CodeLlama-7b-Instruct-hf", "discussionNum": null }, "url": "https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " for chat. Dev tools is showing the prompt being sent to ollama server.", "raw": " for chat. 
Dev tools is showing the prompt being sent to ollama server.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Starcoder-2 is now supported in ", "raw": "Starcoder-2 is now supported in ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`llama.cpp`", "href": null, "resource": null, "url": null, "code": "llama.cpp", "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/ggerganov/llama.cpp/pull/5795", "href": "https://github.com/ggerganov/llama.cpp/pull/5795", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "!", "raw": "!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\ncd llama.cpp\npython convert-hf-to-gguf.py ../starcoder2-3b/ --outfile models/starcoder2-3b.gguf --outtype \"f16\"\n./quantize models/starcoder2-3b.gguf models/starcoder2-3b-Q4_K_M.gguf Q4_K_M\n```", "href": null, "resource": null, "url": null, "code": "cd llama.cpp\npython convert-hf-to-gguf.py ../starcoder2-3b/ --outfile models/starcoder2-3b.gguf --outtype \"f16\"\n./quantize models/starcoder2-3b.gguf models/starcoder2-3b-Q4_K_M.gguf Q4_K_M", "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For more details, please go through the following tweet thread: ", "raw": "For more details, please go through the following tweet thread: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://x.com/sourab_m/status/1764583139798823235?s=20", "href": "https://x.com/sourab_m/status/1764583139798823235?s=20", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🚨 Now you can run Starcoder-2 models locally on your Mac M1 Pro Apple Silicon with 16GB memory! 🧑🏽‍💻 ⚡️✨ Below is the UX with the Twinny extension using https://huggingface.co/bigcode/starcoder2-3b for FIM and https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf for chat. Dev tools shows the prompt being sent to the ollama server. Starcoder-2 is now supported in `llama.cpp` https://github.com/ggerganov/llama.cpp/pull/5795!
```
cd llama.cpp
python convert-hf-to-gguf.py ../starcoder2-3b/ --outfile models/starcoder2-3b.gguf --outtype "f16"
./quantize models/starcoder2-3b.gguf models/starcoder2-3b-Q4_K_M.gguf Q4_K_M
```
For more details, please go through the following tweet thread: https://x.com/sourab_m/status/1764583139798823235?s=20
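If you'd rather drive the quantized model from Python than from the llama.cpp CLI, a minimal sketch with the llama-cpp-python bindings might look like the following. It assumes your installed build already includes the StarCoder-2 support from the PR above, and the FIM token names follow the StarCoder family convention, so check them against the model's tokenizer config.

```python
# Minimal sketch: fill-in-the-middle completion with the quantized GGUF,
# via llama-cpp-python. Paths and FIM token names are assumptions to verify.
from llama_cpp import Llama

llm = Llama(model_path="models/starcoder2-3b-Q4_K_M.gguf", n_ctx=4096)

prefix = "def fibonacci(n):\n    "
suffix = "\n    return a\n"
prompt = f"<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>"

out = llm(prompt, max_tokens=64, temperature=0.2, stop=["<file_sep>"])
print(out["choices"][0]["text"])  # the code the model proposes for the middle
```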
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1638132956881-5fca176d1d7a08cb34d79d5d.jpeg", "fullname": "Sourab Mangrulkar", "name": "smangrul", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 224, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/5fca176d1d7a08cb34d79d5d/cCCaoF_DsGWlLQbI6Wm1I.mp4" } ]
[]
[ { "reaction": "🤯", "users": [ "osanseviero", "lvwerra", "clem", "Kukedlc", "notune", "jshuadvd", "codito", "rjmacarthy" ], "count": 8 }, { "reaction": "❤️", "users": [ "clem", "Kukedlc", "Yoben", "BrigitteTousi", "rjmacarthy" ], "count": 5 }, { "reaction": "🤝", "users": [ "clem" ], "count": 1 } ]
2024-03-04T11:37:31.000Z
2024-03-05T05:40:45.478Z
[]
/posts/smangrul/776409626801382
669
0
409511202209405
[ { "type": "text", "value": "🔍 Today's pick in Interpretability & Analysis of LMs: AtP*: An efficient and scalable method for localizing LLM behaviour to components by J. Kramár T. Lieberum R. Shah ", "raw": "🔍 Today's pick in Interpretability & Analysis of LMs: AtP*: An efficient and scalable method for localizing LLM behaviour to components by J. Kramár T. Lieberum R. Shah ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@NeelNanda", "href": null, "resource": null, "url": null, "code": null, "user": "NeelNanda", "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The attribution patching method (AtP) can provide fast and effective approximations of activation patching, requiring only two forward passes and one backward pass to estimate the contribution of all network components for a given prompt pair.", "raw": "The attribution patching method (AtP) can provide fast and effective approximations of activation patching, requiring only two forward passes and one backward pass to estimate the contribution of all network components for a given prompt pair.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "While previous work highlighted the effectiveness of attribution patching, authors identify two settings leading to false negatives using AtP:", "raw": "While previous work highlighted the effectiveness of attribution patching, authors identify two settings leading to false negatives using AtP:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- When estimating the contribution of pre-activation components, if clean and noise inputs don’t lie in the same activation region, the first-order gradient approximation provided by the gradient leads to large errors (Fig 3).", "raw": "- When estimating the contribution of pre-activation components, if clean and noise inputs don’t lie in the same activation region, the first-order gradient approximation provided by the gradient leads to large errors (Fig 3).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- When the sum of direct and indirect effects is close to 0, even small approximation errors introduced by nonlinearities can greatly 
affect the estimated contribution.", "raw": "- When the sum of direct and indirect effects is close to 0, even small approximation errors introduced by nonlinearities can greatly affect the estimated contribution.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Authors propose two changes to the AtP method to mitigate such issues:", "raw": "Authors propose two changes to the AtP method to mitigate such issues:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- recomputing the attention softmax for the selected component, and then taking a linear approximation to the remaining part of the model (QK Fix)", "raw": "- recomputing the attention softmax for the selected component, and then taking a linear approximation to the remaining part of the model (QK Fix)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Iteratively zeroing gradients at layers contributing to the indirect effects causing cancellation (GradDrop)", "raw": "- Iteratively zeroing gradients at layers contributing to the indirect effects causing cancellation (GradDrop)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "AtP and AtP* are compared across several patching settings for Pythia models, finding them effective while much less computationally expensive than other approaches. A new methodology is also proposed to estimate the magnitude of AtP* false negatives given a set of samples and desired confidence levels.", "raw": "AtP and AtP* are compared across several patching settings for Pythia models, finding them effective while much less computationally expensive than other approaches. 
A new methodology is also proposed to estimate the magnitude of AtP* false negatives given a set of samples and desired confidence levels.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📄 Paper: ", "raw": "📄 Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2403.00745", "href": null, "resource": { "type": "paper", "id": "2403.00745", "discussionNum": null }, "url": "https://huggingface.co/papers/2403.00745", "code": null, "user": null, "label": "AtP*: An efficient and scalable method for localizing LLM behaviour to\n components (2403.00745)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔍 All daily picks: ", "raw": "🔍 All daily picks: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/gsarti/daily-picks-in-interpretability-and-analysis-of-lms-65ae3339949c5675d25de2f9", "href": null, "resource": { "type": "collection", "id": "gsarti/daily-picks-in-interpretability-and-analysis-of-lms-65ae3339949c5675d25de2f9", "discussionNum": null }, "url": "https://huggingface.co/collections/gsarti/daily-picks-in-interpretability-and-analysis-of-lms-65ae3339949c5675d25de2f9", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🔍 Today's pick in Interpretability & Analysis of LMs: AtP*: An efficient and scalable method for localizing LLM behaviour to components by J. Kramár, T. Lieberum, R. Shah, @NeelNanda The attribution patching method (AtP) can provide fast and effective approximations of activation patching, requiring only two forward passes and one backward pass to estimate the contribution of all network components for a given prompt pair. While previous work highlighted the effectiveness of attribution patching, the authors identify two settings leading to false negatives when using AtP: - When estimating the contribution of pre-activation components, if the clean and noise inputs don't lie in the same activation region, the first-order approximation provided by the gradient leads to large errors (Fig 3). - When the sum of direct and indirect effects is close to 0, even small approximation errors introduced by nonlinearities can greatly affect the estimated contribution. The authors propose two changes to the AtP method to mitigate these issues: - recomputing the attention softmax for the selected component and then taking a linear approximation to the remaining part of the model (QK Fix) - iteratively zeroing gradients at layers contributing to the indirect effects causing cancellation (GradDrop) AtP and AtP* are compared across several patching settings for Pythia models and found to be effective while much less computationally expensive than other approaches. A new methodology is also proposed to estimate the magnitude of AtP* false negatives given a set of samples and desired confidence levels. 📄 Paper: https://huggingface.co/papers/2403.00745 🔍 All daily picks: https://huggingface.co/collections/gsarti/daily-picks-in-interpretability-and-analysis-of-lms-65ae3339949c5675d25de2f9
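To make the "two forward passes plus one backward pass" recipe concrete, here is a tiny, self-contained PyTorch sketch of the basic AtP estimate on a toy MLP. It illustrates the general first-order approximation only; it is not the paper's code and omits the QK Fix and GradDrop refinements.

```python
# Toy illustration of attribution patching (AtP): estimate the effect of patching
# clean activations into the noise run with the first-order approximation
# (a_clean - a_noise) · d(metric)/d(a), using 2 forward passes and 1 backward pass.
import torch
import torch.nn as nn

torch.manual_seed(0)
model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))
x_clean = torch.randn(1, 8)   # stand-in for the "clean" prompt
x_noise = torch.randn(1, 8)   # stand-in for the "noise" (corrupted) prompt

acts, grads = {}, {}

def save(name):
    def hook(_module, _inputs, output):
        acts[name] = output
        if output.requires_grad:
            # Capture d(metric)/d(activation) when backward() is called.
            output.register_hook(lambda g: grads.__setitem__(name, g.detach()))
    return hook

handles = [model[0].register_forward_hook(save("pre_act")),
           model[1].register_forward_hook(save("post_act"))]

# Forward + backward on the noise prompt: activations and their gradients.
model(x_noise).sum().backward()
noise_acts = {k: v.detach() for k, v in acts.items()}

# Forward on the clean prompt: activations only (no backward pass needed).
model(x_clean)
clean_acts = {k: v.detach() for k, v in acts.items()}

# First-order AtP estimate per component.
for name, grad in grads.items():
    delta = clean_acts[name] - noise_acts[name]
    print(f"{name}: estimated contribution {float((delta * grad).sum()):+.4f}")

for h in handles:
    h.remove()
```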
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1670231290373-5e7749883d77a72421292d07.jpeg", "fullname": "Gabriele Sarti", "name": "gsarti", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5e7749883d77a72421292d07/S-aySiHq6JZoJ6oXvltYJ.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5e7749883d77a72421292d07/jTI-e_CqmtFA9VpWWGiWG.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5e7749883d77a72421292d07/MyVAypcB03fZCTwkL-xPh.png" } ]
[ { "avatarUrl": "/avatars/6d5cd2261163308b82341c1ce28984d1.svg", "fullname": "Neel Nanda", "name": "NeelNanda", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 40 } ]
[ { "reaction": "❤️", "users": [ "osanseviero", "NeelNanda", "clem", "Kukedlc", "julien-c", "nirmalendu01" ], "count": 6 }, { "reaction": "🤯", "users": [ "Kukedlc", "sbrandeis" ], "count": 2 } ]
2024-03-04T08:46:37.000Z
2024-04-11T02:57:52.967Z
[ { "avatarUrl": "/avatars/d364246314535c44cacd87a8e773525a.svg", "fullname": "nirmalendu prakash", "name": "nirmalendu01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1670231290373-5e7749883d77a72421292d07.jpeg", "fullname": "Gabriele Sarti", "name": "gsarti", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 205, "isFollowing": false } ]
/posts/gsarti/409511202209405
42
5
624385094363079
[ { "type": "text", "value": "Panda-70M is a new large-scale video dataset comprising 70 million high-quality video clips, each paired with textual captions, designed to be used as pre-training for video understanding tasks.", "raw": "Panda-70M is a new large-scale video dataset comprising 70 million high-quality video clips, each paired with textual captions, designed to be used as pre-training for video understanding tasks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key Points:", "raw": "Key Points:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Automatic Caption Generation: Utilizes an automatic pipeline with multiple cross-modality teacher models to generate captions for video clips.", "raw": "* Automatic Caption Generation: Utilizes an automatic pipeline with multiple cross-modality teacher models to generate captions for video clips.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Fine-tuned Caption Selection: Employs a fine-tuned retrieval model to select the most appropriate caption from multiple candidates for each video clip.", "raw": "* Fine-tuned Caption Selection: Employs a fine-tuned retrieval model to select the most appropriate caption from multiple candidates for each video clip.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Improved Performance: Pre-training on Panda-70M shows significant performance gains in video captioning, text-video retrieval, and text-driven video generation.", "raw": "* Improved Performance: Pre-training on Panda-70M shows significant performance gains in video captioning, text-video retrieval, and text-driven video generation.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2402.19479", "href": null, "resource": { "type": "paper", "id": "2402.19479", "discussionNum": null }, "url": "https://huggingface.co/papers/2402.19479", "code": null, "user": null, 
"label": "Panda-70M: Captioning 70M Videos with Multiple Cross-Modality Teachers (2402.19479)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Project page: ", "raw": "Project page: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://snap-research.github.io/Panda-70M/", "href": "https://snap-research.github.io/Panda-70M/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Code: ", "raw": "Code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/snap-research/Panda-70M", "href": "https://github.com/snap-research/Panda-70M", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Congrats to the authors ", "raw": "Congrats to the authors ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@tschen", "href": null, "resource": null, "url": null, "code": null, "user": "tschen", "label": null, "lang": null }, { "type": "text", "value": ", ", "raw": ", ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@aliaksandr-siarohin", "href": null, "resource": null, "url": null, "code": null, "user": "aliaksandr-siarohin", "label": null, "lang": null }, { "type": "text", "value": " et al. for their work!", "raw": " et al. for their work!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Panda-70M is a new large-scale video dataset comprising 70 million high-quality video clips, each paired with textual captions, designed to be used as pre-training for video understanding tasks. Key Points: * Automatic Caption Generation: Utilizes an automatic pipeline with multiple cross-modality teacher models to generate captions for video clips. * Fine-tuned Caption Selection: Employs a fine-tuned retrieval model to select the most appropriate caption from multiple candidates for each video clip. * Improved Performance: Pre-training on Panda-70M shows significant performance gains in video captioning, text-video retrieval, and text-driven video generation. Paper: https://huggingface.co/papers/2402.19479 Project page: https://snap-research.github.io/Panda-70M/ Code: https://github.com/snap-research/Panda-70M Congrats to the authors @tschen, @aliaksandr-siarohin et al. for their work!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/657217faabb25ed8aedd5e48/UUHAXeGtOnQBXFD3nYtf2.jpeg", "fullname": "Vlad Bogolin", "name": "vladbogo", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 109, "isFollowing": false }
[]
[ { "avatarUrl": "/avatars/76f933cd549f10e5e2db379de235d304.svg", "fullname": "Aliaksandr Siarohin", "name": "aliaksandr-siarohin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 }, { "avatarUrl": "/avatars/5b394a72f277aa6c886b21b477010ab0.svg", "fullname": "Tsai-Shien Chen", "name": "tschen", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null } ]
[ { "reaction": "👍", "users": [ "toanvu", "upleader", "ajibawa-2023", "clem", "Kernel", "osanseviero", "dkgee", "ankur296", "ChavyvAkvar", "tschen" ], "count": 10 }, { "reaction": "❤️", "users": [ "clem", "osanseviero", "tschen", "nisten", "sbrandeis" ], "count": 5 } ]
2024-03-03T23:22:05.000Z
2024-03-04T03:27:23.310Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/9-X8NlUt6iTeeFy4UoDJZ.png", "fullname": "Lincoln Yin", "name": "upleader", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/vladbogo/624385094363079
125
1
622788932781684
[ { "type": "text", "value": "Diaries of Open Source. Part 1.", "raw": "Diaries of Open Source. Part 1.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What a week! Here are some of the exciting Open Source releases of the week!", "raw": "What a week! Here are some of the exciting Open Source releases of the week!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. BigCode releases The Stack v2 and StarCoder 2", "raw": "1. BigCode releases The Stack v2 and StarCoder 2", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Resources in ", "raw": "Resources in ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/posts/loubnabnl/596860170283496", "href": "https://huggingface.co/posts/loubnabnl/596860170283496", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Blog ", "raw": "Blog ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/starcoder2", "href": "https://huggingface.co/blog/starcoder2", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Collection: ", "raw": "Collection: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/bigcode/starcoder2-65de6da6e87db3383572be1a", "href": null, "resource": { "type": "collection", "id": "bigcode/starcoder2-65de6da6e87db3383572be1a", "discussionNum": null }, "url": "https://huggingface.co/collections/bigcode/starcoder2-65de6da6e87db3383572be1a", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. 
Playground v2.5, a very powerful new text-to-image model", "raw": "2. Playground v2.5, a very powerful new text-to-image model", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model: ", "raw": "Model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic", "href": null, "resource": { "type": "model", "id": "playgroundai/playground-v2.5-1024px-aesthetic", "discussionNum": null }, "url": "https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Demo: ", "raw": "Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/playgroundai/playground-v2.5", "href": null, "resource": { "type": "space", "id": "playgroundai/playground-v2.5", "discussionNum": null }, "url": "https://huggingface.co/spaces/playgroundai/playground-v2.5", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Blog: ", "raw": "Blog: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://playground.com/blog/playground-v2-5", "href": "https://playground.com/blog/playground-v2-5", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3.Evo: DNA foundation models ", "raw": "3.Evo: DNA foundation models ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Blog: ", "raw": "Blog: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arcinstitute.org/news/blog/evo", "href": "https://arcinstitute.org/news/blog/evo", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Models: ", "raw": "Models: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": 
"https://huggingface.co/togethercomputer/evo-1-131k-base", "href": null, "resource": { "type": "model", "id": "togethercomputer/evo-1-131k-base", "discussionNum": null }, "url": "https://huggingface.co/togethercomputer/evo-1-131k-base", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. OpenHermesPreferences: a dataset of ~1 million AI Preferences ", "raw": "4. OpenHermesPreferences: a dataset of ~1 million AI Preferences ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/argilla/OpenHermesPreferences", "href": null, "resource": { "type": "dataset", "id": "argilla/OpenHermesPreferences", "discussionNum": null }, "url": "https://huggingface.co/datasets/argilla/OpenHermesPreferences", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "5. SpeechBrain 1.0: a toolkit with hundreds of recipes and pretrained models for audio-related tasks, such as speech recognition, diarization, and enhancement. New major release!", "raw": "5. SpeechBrain 1.0: a toolkit with hundreds of recipes and pretrained models for audio-related tasks, such as speech recognition, diarization, and enhancement. New major release!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "HF repos: ", "raw": "HF repos: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/speechbrain", "href": "https://huggingface.co/speechbrain", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Website: ", "raw": "Website: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://speechbrain.github.io/", "href": "https://speechbrain.github.io/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "6. Tower: a suite of Llama-based multilingual translation models ", "raw": "6. 
Tower: a suite of Llama-based multilingual translation models ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/Unbabel/tower-659eaedfe36e6dd29eb1805c", "href": null, "resource": { "type": "collection", "id": "Unbabel/tower-659eaedfe36e6dd29eb1805c", "discussionNum": null }, "url": "https://huggingface.co/collections/Unbabel/tower-659eaedfe36e6dd29eb1805c", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "7. AllenAI releases OLMo-7B-Instruct ", "raw": "7. AllenAI releases OLMo-7B-Instruct ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/allenai/olmo-suite-65aeaae8fe5b6b2122b46778", "href": null, "resource": { "type": "collection", "id": "allenai/olmo-suite-65aeaae8fe5b6b2122b46778", "discussionNum": null }, "url": "https://huggingface.co/collections/allenai/olmo-suite-65aeaae8fe5b6b2122b46778", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "8. DIBT - A crowdsourced effort to human-rate prompts. Its 10k prompts dataset is released https://huggingface.co/datasets/DIBT/10k_prompts_ranked", "raw": "8. DIBT - A crowdsourced effort to human-rate prompts. Its 10k prompts dataset is released https://huggingface.co/datasets/DIBT/10k_prompts_ranked", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "9. ChatMusician: A Llama 2 fine-tuned model for music generation ", "raw": "9. 
ChatMusician: A Llama 2 fine-tuned model for music generation ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/m-a-p/ChatMusician", "href": null, "resource": { "type": "model", "id": "m-a-p/ChatMusician", "discussionNum": null }, "url": "https://huggingface.co/m-a-p/ChatMusician", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "10. Bonito, a model that converts data into synthetic instruction datasets", "raw": "10. Bonito, a model that converts data into synthetic instruction datasets", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "GitHub: ", "raw": "GitHub: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/BatsResearch/bonito", "href": "https://github.com/BatsResearch/bonito", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model: ", "raw": "Model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/BatsResearch/bonito-v1", "href": null, "resource": { "type": "model", "id": "BatsResearch/bonito-v1", "discussionNum": null }, "url": "https://huggingface.co/BatsResearch/bonito-v1", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2402.18334", "href": null, "resource": { "type": "paper", "id": "2402.18334", "discussionNum": null }, "url": "https://huggingface.co/papers/2402.18334", "code": null, "user": null, "label": "Learning to Generate Instruction Tuning Datasets for Zero-Shot Task\n Adaptation (2402.18334)", "lang": null } ]
Diaries of Open Source. Part 1. What a week! Here are some of the exciting Open Source releases of the week! 1. BigCode releases The Stack v2 and StarCoder 2 Resources in https://huggingface.co/posts/loubnabnl/596860170283496 Blog https://huggingface.co/blog/starcoder2 Collection: https://huggingface.co/collections/bigcode/starcoder2-65de6da6e87db3383572be1a 2. Playground v2.5, a very powerful new text-to-image model Model: https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic Demo: https://huggingface.co/spaces/playgroundai/playground-v2.5 Blog: https://playground.com/blog/playground-v2-5 3.Evo: DNA foundation models Blog: https://arcinstitute.org/news/blog/evo Models: https://huggingface.co/togethercomputer/evo-1-131k-base 4. OpenHermesPreferences: a dataset of ~1 million AI Preferences https://huggingface.co/datasets/argilla/OpenHermesPreferences 5. SpeechBrain 1.0: a toolkit with hundreds of recipes and pretrained models for audio-related tasks, such as speech recognition, diarization, and enhancement. New major release! HF repos: https://huggingface.co/speechbrain Website: https://speechbrain.github.io/ 6. Tower: a suite of Llama-based multilingual translation models https://huggingface.co/collections/Unbabel/tower-659eaedfe36e6dd29eb1805c 7. AllenAI releases OLMo-7B-Instruct https://huggingface.co/collections/allenai/olmo-suite-65aeaae8fe5b6b2122b46778 8. DIBT - A crowdsourced effort to human-rate prompts. Its 10k prompts dataset is released https://huggingface.co/datasets/DIBT/10k_prompts_ranked 9. ChatMusician: A Llama 2 fine-tuned model for music generation https://huggingface.co/m-a-p/ChatMusician 10. Bonito, a model that converts data into synthetic instruction datasets GitHub: https://github.com/BatsResearch/bonito Model: https://huggingface.co/BatsResearch/bonito-v1 Paper: https://huggingface.co/papers/2402.18334
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "DmitryRyumin", "vladbogo", "clem", "chansung", "Kernel", "kramp", "tofuCheng", "tomaarsen", "lvwerra", "dkgee", "not-lain", "julien-c", "mvaloatto" ], "count": 13 }, { "reaction": "👍", "users": [ "dkgee", "not-lain" ], "count": 2 } ]
2024-03-03T20:44:37.000Z
2024-03-07T18:15:44.937Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg", "fullname": "Feynman Innovations", "name": "ajibawa-2023", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 138, "isFollowing": false }, { "avatarUrl": "/avatars/a266430889baaf550e21de57995e4da2.svg", "fullname": "Harpinder Singh", "name": "SinghCoder", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/osanseviero/622788932781684
35
3
578997477674932
[ { "type": "text", "value": "🚀💃🌟 New Research Alert - CVPR 2024! 🌟🕺 🚀", "raw": "🚀💃🌟 New Research Alert - CVPR 2024! 🌟🕺 🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📄 Title: MagicAnimate: Temporally Consistent Human Image Animation using Diffusion Model 🌟🚀", "raw": "📄 Title: MagicAnimate: Temporally Consistent Human Image Animation using Diffusion Model 🌟🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👥 Authors: ", "raw": "👥 Authors: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@junhao910323", "href": null, "resource": null, "url": null, "code": null, "user": "junhao910323", "label": null, "lang": null }, { "type": "text", "value": ", ", "raw": ", ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@hansyan", "href": null, "resource": null, "url": null, "code": null, "user": "hansyan", "label": null, "lang": null }, { "type": "text", "value": " et al.", "raw": " et al.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸", "raw": "📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🤗 Demo: ", "raw": "🤗 Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/zcxu-eric/magicanimate", "href": null, "resource": { "type": "space", "id": "zcxu-eric/magicanimate", "discussionNum": null }, "url": "https://huggingface.co/spaces/zcxu-eric/magicanimate", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Paper: ", "raw": "🔗 
Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2311.16498", "href": null, "resource": { "type": "paper", "id": "2311.16498", "discussionNum": null }, "url": "https://huggingface.co/papers/2311.16498", "code": null, "user": null, "label": "MagicAnimate: Temporally Consistent Human Image Animation using\n Diffusion Model (2311.16498)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Github Page: ", "raw": "🔗 Github Page: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://showlab.github.io/magicanimate/", "href": "https://showlab.github.io/magicanimate/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Repository: ", "raw": "🔗 Repository: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/magic-research/magic-animate", "href": "https://github.com/magic-research/magic-animate", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔥 Model 🤖: ", "raw": "🔥 Model 🤖: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/zcxu-eric/MagicAnimate", "href": null, "resource": { "type": "model", "id": "zcxu-eric/MagicAnimate", "discussionNum": null }, "url": "https://huggingface.co/zcxu-eric/MagicAnimate", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📚 More Papers: more cutting-edge research presented at other conferences in the ", "raw": "📚 More Papers: more cutting-edge research presented at other conferences in the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "href": null, "resource": { "type": "space", "id": "DmitryRyumin/NewEraAI-Papers", "discussionNum": null }, "url": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " curated by ", "raw": " curated by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", 
"value": null, "raw": "@DmitryRyumin", "href": null, "resource": null, "url": null, "code": null, "user": "DmitryRyumin", "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🚀 Added to the Avatars Collection: ", "raw": "🚀 Added to the Avatars Collection: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "href": null, "resource": { "type": "collection", "id": "DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "discussionNum": null }, "url": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔍 Keywords: #MagicAnimate #DiffusionModel #HumanImageAnimation #CVPR2024 #Diffusion #DeepLearning #Innovation", "raw": "🔍 Keywords: #MagicAnimate #DiffusionModel #HumanImageAnimation #CVPR2024 #Diffusion #DeepLearning #Innovation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🚀💃🌟 New Research Alert - CVPR 2024! 🌟🕺 🚀 📄 Title: MagicAnimate: Temporally Consistent Human Image Animation using Diffusion Model 🌟🚀 👥 Authors: @junhao910323, @hansyan et al. 📅 Conference: CVPR, Jun 17-21, 2024 | Seattle WA, USA 🇺🇸 🤗 Demo: https://huggingface.co/spaces/zcxu-eric/magicanimate 🔗 Paper: https://huggingface.co/papers/2311.16498 🔗 Github Page: https://showlab.github.io/magicanimate/ 🔗 Repository: https://github.com/magic-research/magic-animate 🔥 Model 🤖: https://huggingface.co/zcxu-eric/MagicAnimate 📚 More Papers: more cutting-edge research presented at other conferences in the https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin 🚀 Added to the Avatars Collection: https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36 🔍 Keywords: #MagicAnimate #DiffusionModel #HumanImageAnimation #CVPR2024 #Diffusion #DeepLearning #Innovation
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/7DKOYAxINZyPdSkevWGAR.jpeg" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/n3axN1rjqqFUDatLLmq0E.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/RKI9gr48QlSVyzXMbFJSA.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/-vHa2Nvregj4vlzk95jP-.mp4" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377 }, { "avatarUrl": "/avatars/39a6fd3b9717c142b73be8c81c69ebe5.svg", "fullname": "PeRFlow", "name": "hansyan", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 20 }, { "avatarUrl": "/avatars/0ec3c55d445264d43c0430f9edf88bf8.svg", "fullname": "Jun Hao Liew", "name": "junhao910323", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 } ]
[ { "reaction": "❤️", "users": [ "DmitryRyumin", "musfiqdehan", "pkedzia", "vladbogo", "osanseviero", "victor", "toanvu", "clem", "samusenps", "sbrandeis" ], "count": 10 } ]
2024-03-02T16:04:49.000Z
2024-03-03T20:20:36.781Z
[]
/posts/DmitryRyumin/578997477674932
55
132
915843619688362
[ { "type": "text", "value": "\"What Evidence Do Language Models Find Convincing?\" is a new paper that explores what types of evidence and argumentation techniques language models find convincing when presented with ambiguous, open-domain questions that have conflicting answers online. ", "raw": "\"What Evidence Do Language Models Find Convincing?\" is a new paper that explores what types of evidence and argumentation techniques language models find convincing when presented with ambiguous, open-domain questions that have conflicting answers online. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Keypoints:", "raw": "Keypoints:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Dataset: It introduces \"ConflictingQA,\" a dataset of controversial questions and real-world evidence paragraphs supporting both \"yes\" and \"no\" answers.", "raw": "* Dataset: It introduces \"ConflictingQA,\" a dataset of controversial questions and real-world evidence paragraphs supporting both \"yes\" and \"no\" answers.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Convincingness Metric: It uses the \"paragraph win rate\" - when shown two conflicting paragraphs, this measures how often a model predicts the answer that aligns with a given paragraph's stance.", "raw": "* Convincingness Metric: It uses the \"paragraph win rate\" - when shown two conflicting paragraphs, this measures how often a model predicts the answer that aligns with a given paragraph's stance.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "* Current models rely on the relevance of the content to the query, while largely ignoring stylistic features such as whether a text contains scientific references or if it is written with a neutral tone.", "raw": "* Current models rely on the relevance of the content to the query, while largely ignoring stylistic features such as whether a text contains scientific references or if it is written with a neutral tone.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Congrats to the authors for their work!", "raw": "Congrats 
to the authors for their work!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2402.11782", "href": null, "resource": { "type": "paper", "id": "2402.11782", "discussionNum": null }, "url": "https://huggingface.co/papers/2402.11782", "code": null, "user": null, "label": "What Evidence Do Language Models Find Convincing? (2402.11782)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Code: ", "raw": "Code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/AlexWan0/rag-convincingness", "href": "https://github.com/AlexWan0/rag-convincingness", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
"What Evidence Do Language Models Find Convincing?" is a new paper that explores what types of evidence and argumentation techniques language models find convincing when presented with ambiguous, open-domain questions that have conflicting answers online. Keypoints: * Dataset: It introduces "ConflictingQA," a dataset of controversial questions and real-world evidence paragraphs supporting both "yes" and "no" answers. * Convincingness Metric: It uses the "paragraph win rate" - when shown two conflicting paragraphs, this measures how often a model predicts the answer that aligns with a given paragraph's stance. * Current models rely on the relevance of the content to the query, while largely ignoring stylistic features such as whether a text contains scientific references or if it is written with a neutral tone. Congrats to the authors for their work! Paper: https://huggingface.co/papers/2402.11782 Code: https://github.com/AlexWan0/rag-convincingness
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/657217faabb25ed8aedd5e48/UUHAXeGtOnQBXFD3nYtf2.jpeg", "fullname": "Vlad Bogolin", "name": "vladbogo", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 109, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/657217faabb25ed8aedd5e48/HmgUtTGFf_2GO5OgaexXd.png" } ]
[]
[ { "reaction": "❤️", "users": [ "samusenps", "dillfrescott", "osanseviero", "gsarti", "victor", "clem", "Kukedlc", "victorshenbr", "sbrandeis" ], "count": 9 } ]
2024-03-01T23:58:43.000Z
2024-03-02T00:00:05.328Z
[]
/posts/vladbogo/915843619688362
39
0
758614858352623
[ { "type": "text", "value": "Introducing: Zephyr Gemma!", "raw": "Introducing: Zephyr Gemma!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The community has struggled to do a good preference-tune of Gemma, so the amazing ", "raw": "The community has struggled to do a good preference-tune of Gemma, so the amazing ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@lewtun", "href": null, "resource": null, "url": null, "code": null, "user": "lewtun", "label": null, "lang": null }, { "type": "text", "value": " and ", "raw": " and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@philschmid", "href": null, "resource": null, "url": null, "code": null, "user": "philschmid", "label": null, "lang": null }, { "type": "text", "value": " built an open-source recipe and trained a model to help people get started. ", "raw": " built an open-source recipe and trained a model to help people get started. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Handbook: ", "raw": "Handbook: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/huggingface/alignment-handbook/blob/main/recipes/zephyr-7b-gemma/README.md", "href": "https://github.com/huggingface/alignment-handbook/blob/main/recipes/zephyr-7b-gemma/README.md", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model: ", "raw": "Model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1", "href": null, "resource": { "type": "model", "id": "HuggingFaceH4/zephyr-7b-gemma-v0.1", "discussionNum": null }, "url": "https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Demo: ", "raw": "Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/HuggingFaceH4/zephyr-7b-gemma-chat", "href": null, "resource": { "type": "space", "id": 
"HuggingFaceH4/zephyr-7b-gemma-chat", "discussionNum": null }, "url": "https://huggingface.co/spaces/HuggingFaceH4/zephyr-7b-gemma-chat", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Some interesting details", "raw": "Some interesting details", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Fine-tuned on DEITA and DPOed with Argilla DPO dataset", "raw": "- Fine-tuned on DEITA and DPOed with Argilla DPO dataset", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Very strong MT Bench results (7.81), better than Zephyr Beta (mistral based) and Gemma Instruct", "raw": "- Very strong MT Bench results (7.81), better than Zephyr Beta (mistral based) and Gemma Instruct", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Can run locally with tools such as llama.cpp on a Mac", "raw": "- Can run locally with tools such as llama.cpp on a Mac", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Not so good AGIEval results compared to mistral-based tunes", "raw": "- Not so good AGIEval results compared to mistral-based tunes", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- All training code is open-sourced", "raw": "- All training code is open-sourced", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Trained for 105 minutes on 8x H100", "raw": "- Trained for 105 minutes on 8x H100", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- No system message", "raw": "- No system message", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, 
"raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Big kudos to the team! Super exciting to see a good fine-tune for Gemma", "raw": "Big kudos to the team! Super exciting to see a good fine-tune for Gemma", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Introducing: Zephyr Gemma! The community has struggled to do a good preference-tune of Gemma, so the amazing @lewtun and @philschmid built an open-source recipe and trained a model to help people get started. Handbook: https://github.com/huggingface/alignment-handbook/blob/main/recipes/zephyr-7b-gemma/README.md Model: https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1 Demo: https://huggingface.co/spaces/HuggingFaceH4/zephyr-7b-gemma-chat Some interesting details - Fine-tuned on DEITA and DPOed with Argilla DPO dataset - Very strong MT Bench results (7.81), better than Zephyr Beta (mistral based) and Gemma Instruct - Can run locally with tools such as llama.cpp on a Mac - Not so good AGIEval results compared to mistral-based tunes - All training code is open-sourced - Trained for 105 minutes on 8x H100 - No system message Big kudos to the team! Super exciting to see a good fine-tune for Gemma
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1594651707950-noauth.jpeg", "fullname": "Lewis Tunstall", "name": "lewtun", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 678 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1624629516652-5ff5d596f244529b3ec0fb89.png", "fullname": "Philipp Schmid", "name": "philschmid", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 657 } ]
[ { "reaction": "🤗", "users": [ "yjernite", "vladbogo", "clem", "samusenps", "dkyazze", "taufiqdp", "pdina" ], "count": 7 }, { "reaction": "❤️", "users": [ "clem", "samusenps", "ajibawa-2023", "giux78" ], "count": 4 }, { "reaction": "🤝", "users": [ "clem" ], "count": 1 } ]
2024-03-01T21:05:38.000Z
2024-03-02T04:23:06.672Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg", "fullname": "Feynman Innovations", "name": "ajibawa-2023", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 138, "isFollowing": false } ]
/posts/osanseviero/758614858352623
33
1
451935956279259
[ { "type": "text", "value": "It feels awkward having my first post sharing my stuff, but this is a weekend project that I really enjoyed working on. I'd love to meet more people interested in random ideas like this.", "raw": "It feels awkward having my first post sharing my stuff, but this is a weekend project that I really enjoyed working on. I'd love to meet more people interested in random ideas like this.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A hard part of building AI applications is choosing which model to use. What if we don’t have to? What if we can predict the best model for any prompt?", "raw": "A hard part of building AI applications is choosing which model to use. What if we don’t have to? What if we can predict the best model for any prompt?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Predictive human preference aims to predict which model users might prefer for a specific query.", "raw": "Predictive human preference aims to predict which model users might prefer for a specific query.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huyenchip.com/2024/02/28/predictive-human-preference.html", "href": "https://huyenchip.com/2024/02/28/predictive-human-preference.html", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "One use case is model routing. If we know in advance that for a prompt, users will prefer Claude Instant’s response over GPT-4, and Claude Instant is cheaper/faster than GPT-4, we can route this prompt to Claude Instant. Model routing has the potential to increase response quality while reducing costs and latency.", "raw": "One use case is model routing. If we know in advance that for a prompt, users will prefer Claude Instant’s response over GPT-4, and Claude Instant is cheaper/faster than GPT-4, we can route this prompt to Claude Instant. 
Model routing has the potential to increase response quality while reducing costs and latency.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "One pattern is that for simple prompts, weak models can do (nearly) as well as strong models. For more challenging prompts, however, users are more likely to prefer stronger models. Here’s a visualization of predicted human preference for an easy prompt (“hello, how are you?”) and a challenging prompt (“Explain why Planc length …”).", "raw": "One pattern is that for simple prompts, weak models can do (nearly) as well as strong models. For more challenging prompts, however, users are more likely to prefer stronger models. Here’s a visualization of predicted human preference for an easy prompt (“hello, how are you?”) and a challenging prompt (“Explain why Planc length …”).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Preference predictors make it possible to create leaderboards unique to any prompt and domain.", "raw": "Preference predictors make it possible to create leaderboards unique to any prompt and domain.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
It feels awkward having my first post sharing my stuff, but this is a weekend project that I really enjoyed working on. I'd love to meet more people interested in random ideas like this. A hard part of building AI applications is choosing which model to use. What if we don’t have to? What if we can predict the best model for any prompt? Predictive human preference aims to predict which model users might prefer for a specific query. https://huyenchip.com/2024/02/28/predictive-human-preference.html One use case is model routing. If we know in advance that for a prompt, users will prefer Claude Instant’s response over GPT-4, and Claude Instant is cheaper/faster than GPT-4, we can route this prompt to Claude Instant. Model routing has the potential to increase response quality while reducing costs and latency. One pattern is that for simple prompts, weak models can do (nearly) as well as strong models. For more challenging prompts, however, users are more likely to prefer stronger models. Here’s a visualization of predicted human preference for an easy prompt (“hello, how are you?”) and a challenging prompt (“Explain why Planc length …”). Preference predictors make it possible to create leaderboards unique to any prompt and domain.
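A minimal sketch of the prompt-level routing idea described in the post above. The toy prompts, preference labels, model names, and the TF-IDF + logistic-regression predictor are all illustrative assumptions, not the predictor from the linked blog post.

```python
# Sketch: route each prompt to the cheapest model users are predicted to prefer.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

# Hypothetical training data: each prompt is labeled with the cheapest model
# whose response users still preferred in pairwise comparisons.
prompts = [
    "hello, how are you?",
    "what's the capital of France?",
    "thanks, that was helpful!",
    "explain why the Planck length is considered the smallest meaningful length",
    "prove that there are infinitely many prime numbers",
    "derive the time complexity of mergesort from its recurrence",
]
preferred_model = [
    "claude-instant", "claude-instant", "claude-instant",
    "gpt-4", "gpt-4", "gpt-4",
]

# A tiny text classifier stands in for the preference predictor.
router = make_pipeline(TfidfVectorizer(), LogisticRegression(max_iter=1000))
router.fit(prompts, preferred_model)

def route(prompt: str) -> str:
    """Return the model predicted to satisfy users at the lowest cost/latency."""
    return router.predict([prompt])[0]

print(route("hi there!"))                    # expected: the cheaper model
print(route("explain general relativity"))   # expected: the stronger model
```

The same predictor could also rank many models per prompt, which is what makes prompt- and domain-specific leaderboards possible.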
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6011db60aa32e5620759af6d/sjrkUXJA_EZAtxtdAhkJR.jpeg", "fullname": "Chip Huyen", "name": "chiphuyen", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 98, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6011db60aa32e5620759af6d/GwD6iB-CXrbo2BTDuKJGR.png" } ]
[]
[ { "reaction": "❤️", "users": [ "clem", "Nganx", "andrewrreed", "vladbogo", "osanseviero", "loubnabnl", "EmilyWitko", "ankity09", "samusenps", "callmesan", "bedestaz", "ChavyvAkvar", "dortorwu2", "gsarti", "Shaheer-ipynb", "aotrih", "ntdas", "dark-pen", "julien-c", "kgourgou", "pcuenq", "valeriiakuka", "sbrandeis" ], "count": 23 }, { "reaction": "🤗", "users": [ "andrewrreed", "mvaloatto", "kramp", "julien-c", "kgourgou", "gigant", "Bkarine" ], "count": 7 }, { "reaction": "👍", "users": [ "FremyCompany", "derek-thomas" ], "count": 2 } ]
2024-03-01T16:19:43.000Z
2024-03-05T12:15:57.401Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61d375fd733d3a83ecd1bba9/oIXwvvs1-HaCnJXMCZgkc.jpeg", "fullname": "Andrew Reed", "name": "andrewrreed", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 106, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868, "isFollowing": false } ]
/posts/chiphuyen/451935956279259
195
3
835174267036764
[ { "type": "text", "value": "👷🏽‍♀️📚🔨 Announcing the Foundation Model Development Cheatsheet!", "raw": "👷🏽‍♀️📚🔨 Announcing the Foundation Model Development Cheatsheet!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "My first 🤗Post🤗 ever to announce the release of a fantastic collaborative resource to support model developers across the full development stack: The FM Development Cheatsheet available here: ", "raw": "My first 🤗Post🤗 ever to announce the release of a fantastic collaborative resource to support model developers across the full development stack: The FM Development Cheatsheet available here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://fmcheatsheet.org/", "href": "https://fmcheatsheet.org/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The cheatsheet is a growing database of the many crucial resources coming from open research and development efforts to support the responsible development of models. This new resource highlights essential yet often underutilized tools in order to make it as easy as possible for developers to adopt best practices, covering among other aspects:", "raw": "The cheatsheet is a growing database of the many crucial resources coming from open research and development efforts to support the responsible development of models. 
This new resource highlights essential yet often underutilized tools in order to make it as easy as possible for developers to adopt best practices, covering among other aspects:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🧑🏼‍🤝‍🧑🏼 data selection, curation, and governance;", "raw": "🧑🏼‍🤝‍🧑🏼 data selection, curation, and governance;", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📖 accurate and limitations-aware documentation;", "raw": "📖 accurate and limitations-aware documentation;", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚡ energy efficiency throughout the training phase;", "raw": "⚡ energy efficiency throughout the training phase;", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📊 thorough capability assessments and risk evaluations;", "raw": "📊 thorough capability assessments and risk evaluations;", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🌏 environmentally and socially conscious deployment strategies.", "raw": "🌏 environmentally and socially conscious deployment strategies.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We strongly encourage developers working on creating and improving models to make full use of the tools listed here, and to help keep the resource up to date by adding the resources that you yourself have developed or found useful in your own practice 🤗", "raw": "We strongly encourage developers working on creating and improving models to make full use of the tools listed here, and to help keep the resource up to date by adding the resources that you yourself have developed or found useful in your own practice 🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, 
"label": null, "lang": null }, { "type": "text", "value": "Congrats to all the participants in this effort for the release! Read more about it from:", "raw": "Congrats to all the participants in this effort for the release! Read more about it from:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Shayne", "href": null, "resource": null, "url": null, "code": null, "user": "Shayne", "label": null, "lang": null }, { "type": "text", "value": " - ", "raw": " - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://twitter.com/ShayneRedford/status/1763215814860186005", "href": "https://twitter.com/ShayneRedford/status/1763215814860186005", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@hails", "href": null, "resource": null, "url": null, "code": null, "user": "hails", "label": null, "lang": null }, { "type": "text", "value": " and ", "raw": " and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@stellaathena", "href": null, "resource": null, "url": null, "code": null, "user": "stellaathena", "label": null, "lang": null }, { "type": "text", "value": " - ", "raw": " - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://blog.eleuther.ai/fm-dev-cheatsheet/", "href": "https://blog.eleuther.ai/fm-dev-cheatsheet/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@alon-albalak", "href": null, "resource": null, "url": null, "code": null, "user": "alon-albalak", "label": null, "lang": null }, { "type": "text", "value": " - ", "raw": " - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "http://nlp.cs.ucsb.edu/blog/a-new-guide-for-the-responsible-development-of-foundation-models.html", "href": "http://nlp.cs.ucsb.edu/blog/a-new-guide-for-the-responsible-development-of-foundation-models.html", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And also to ", "raw": "And also to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@gabrielilharco", "href": null, "resource": null, "url": null, "code": null, "user": 
"gabrielilharco", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@sayashk", "href": null, "resource": null, "url": null, "code": null, "user": "sayashk", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@kklyman", "href": null, "resource": null, "url": null, "code": null, "user": "kklyman", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@kylel", "href": null, "resource": null, "url": null, "code": null, "user": "kylel", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@mbrauh", "href": null, "resource": null, "url": null, "code": null, "user": "mbrauh", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@fauxneticien", "href": null, "resource": null, "url": null, "code": null, "user": "fauxneticien", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@avi-skowron", "href": null, "resource": null, "url": null, "code": null, "user": "avi-skowron", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Bertievidgen", "href": null, "resource": null, "url": null, "code": null, "user": "Bertievidgen", "label": null, "lang": null }, { "type": "text", "value": " Laura Weidinger, Arvind Narayanan, ", "raw": " Laura Weidinger, Arvind Narayanan, ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@VictorSanh", "href": null, "resource": null, "url": null, "code": null, "user": "VictorSanh", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Davlan", "href": null, "resource": null, "url": null, "code": null, "user": "Davlan", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@percyliang", "href": null, "resource": null, "url": null, "code": null, "user": "percyliang", "label": null, "lang": null }, { "type": "text", "value": " Rishi Bommasani, ", "raw": " Rishi Bommasani, ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@breakend", "href": null, "resource": null, "url": null, 
"code": null, "user": "breakend", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@sasha", "href": null, "resource": null, "url": null, "code": null, "user": "sasha", "label": null, "lang": null }, { "type": "text", "value": " 🔥", "raw": " 🔥", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
👷🏽‍♀️📚🔨 Announcing the Foundation Model Development Cheatsheet! My first 🤗Post🤗 ever to announce the release of a fantastic collaborative resource to support model developers across the full development stack: The FM Development Cheatsheet available here: https://fmcheatsheet.org/ The cheatsheet is a growing database of the many crucial resources coming from open research and development efforts to support the responsible development of models. This new resource highlights essential yet often underutilized tools in order to make it as easy as possible for developers to adopt best practices, covering among other aspects: 🧑🏼‍🤝‍🧑🏼 data selection, curation, and governance; 📖 accurate and limitations-aware documentation; ⚡ energy efficiency throughout the training phase; 📊 thorough capability assessments and risk evaluations; 🌏 environmentally and socially conscious deployment strategies. We strongly encourage developers working on creating and improving models to make full use of the tools listed here, and to help keep the resource up to date by adding the resources that you yourself have developed or found useful in your own practice 🤗 Congrats to all the participants in this effort for the release! Read more about it from: @Shayne - https://twitter.com/ShayneRedford/status/1763215814860186005 @hails and @stellaathena - https://blog.eleuther.ai/fm-dev-cheatsheet/ @alon-albalak - http://nlp.cs.ucsb.edu/blog/a-new-guide-for-the-responsible-development-of-foundation-models.html And also to @gabrielilharco @sayashk @kklyman @kylel @mbrauh @fauxneticien @avi-skowron @Bertievidgen Laura Weidinger, Arvind Narayanan, @VictorSanh @Davlan @percyliang Rishi Bommasani, @breakend @sasha 🔥
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1594144055859-5ee3a7cd2a3eae3cbdad1305.jpeg", "fullname": "Yacine Jernite", "name": "yjernite", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 151, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5ee3a7cd2a3eae3cbdad1305/IiaUmlBy1YvD7h9wdVpEW.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/611a7ec4289467cafea62d13/pck-0fmPQkoU7yzh6-WoL.jpeg", "fullname": "Alon Albalak", "name": "alon-albalak", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 7 }, { "avatarUrl": "/avatars/edcfcd9cfb03286d670e6c5743efef6a.svg", "fullname": "Aviya Skowron", "name": "avi-skowron", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 4 }, { "avatarUrl": "/avatars/4a2e3091d546d79383f99d2f837ebbde.svg", "fullname": "Bertie Vidgen", "name": "Bertievidgen", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1644543485859-617aafa1ff3db6021d069787.jpeg", "fullname": "Peter Henderson", "name": "breakend", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 13 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1620642852901-5fcc1929563427b03e9af259.jpeg", "fullname": "David Adelani", "name": "Davlan", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 63 }, { "avatarUrl": "/avatars/57cb6e5bf7912bbcb4162b4b9d99388d.svg", "fullname": "Nay San", "name": "fauxneticien", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2 }, { "avatarUrl": "/avatars/d53dee91892cccfd0c4d7353ffb67cbf.svg", "fullname": "Gabriel Ilharco", "name": "gabrielilharco", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 12 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669665010552-62895a0215aeee85756062c4.jpeg", "fullname": "Hailey Schoelkopf", "name": "hails", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 56 }, { "avatarUrl": "/avatars/0dc892cccb8c71a1ad1c1a1801ee9772.svg", "fullname": "Kevin Klyman", "name": "kklyman", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 4 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1584459920518-noauth.jpeg", "fullname": "Kyle Lo", "name": "kylel", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 25 }, { "avatarUrl": "/avatars/1a5f7b3fff050ceaaddf8b48aa645dc4.svg", "fullname": "Maribeth", "name": "mbrauh", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 }, { "avatarUrl": "/avatars/1fb8c80b60f21f65a0a027319101f236.svg", "fullname": "Percy Liang", "name": "percyliang", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 7 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60edd0133e2c73a9a21455f5/yK1G-Fv-YjYb7v_chkz3p.jpeg", "fullname": "Sasha Luccioni", "name": "sasha", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 162 }, { "avatarUrl": "/avatars/bca882c027d17f7feba837baae71ec2d.svg", "fullname": "Sayash Kapoor", "name": "sayashk", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2 }, { "avatarUrl": "/avatars/a1cf1ef1fd442c36ed65c68e51919fed.svg", "fullname": "Shayne Longpre", "name": "Shayne", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 8 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60347d3660e3dd96631c9093/B3fuZer5N04tZIAYrLnz4.jpeg", "fullname": "Stella Biderman", "name": "stellaathena", "type": "user", "isPro": false, "isHf": false, "isMod": false, 
"followerCount": 2002 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1619623771844-5ecea265968f6028e0559fa5.jpeg", "fullname": "Victor Sanh", "name": "VictorSanh", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 206 } ]
[ { "reaction": "❤️", "users": [ "VictorSanh", "hails", "mohammedbriman", "Shayne", "kklyman", "pietrolesci", "osanseviero", "sayhan", "poliva", "loubnabnl", "yongzx", "evdcush", "alon-albalak", "boapps", "mbrauh", "Epiculous", "BrigitteTousi", "mathiasn1", "sasha", "de-Rodrigo", "alielfilali01" ], "count": 21 }, { "reaction": "🤗", "users": [ "hails", "mohammedbriman", "Shayne", "kklyman", "mvaloatto", "osanseviero", "evdcush", "alon-albalak", "Epiculous", "BrigitteTousi" ], "count": 10 }, { "reaction": "🤯", "users": [ "VictorSanh", "Epiculous", "BrigitteTousi" ], "count": 3 }, { "reaction": "👍", "users": [ "MexIvanov", "q-allen" ], "count": 2 } ]
2024-03-01T14:37:40.000Z
2024-03-05T13:44:17.647Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64adfd277b5ff762771e4571/HVmAmGOtBPYd72492XGar.png", "fullname": "Epiculous", "name": "Epiculous", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 131, "isFollowing": false } ]
/posts/yjernite/835174267036764
762
1
526166388235014
[ { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2402.16459", "href": null, "resource": { "type": "paper", "id": "2402.16459", "discussionNum": null }, "url": "https://huggingface.co/papers/2402.16459", "code": null, "user": null, "label": "Defending LLMs against Jailbreaking Attacks via Backtranslation (2402.16459)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "**Defending LLMs against Jailbreaking Attacks via Backtranslation**", "raw": "**Defending LLMs against Jailbreaking Attacks via Backtranslation**", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I really love this! Its a really innovative way to get robust defense against jailbreaking. Its not cheap, 2-3 calls per user request. But for some use-cases it can be worth it!", "raw": "I really love this! Its a really innovative way to get robust defense against jailbreaking. Its not cheap, 2-3 calls per user request. But for some use-cases it can be worth it!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
https://huggingface.co/papers/2402.16459 **Defending LLMs against Jailbreaking Attacks via Backtranslation** I really love this! It's a really innovative way to get robust defense against jailbreaking. It's not cheap, 2-3 calls per user request. But for some use-cases it can be worth it!
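A hedged sketch of how the backtranslation defense from the paper above could look in practice: generate a response, ask the model to infer ("backtranslate") the prompt that likely produced it, and refuse if the model refuses that inferred prompt. The `call_llm` stub and the refusal markers are hypothetical placeholders, not a real API; the extra calls are where the 2-3 calls per request mentioned above come from.

```python
def call_llm(prompt: str) -> str:
    # Placeholder: swap in your actual chat-completion call here.
    return "I'm sorry, I can't help with that." if "bomb" in prompt.lower() else "Sure, here you go: ..."

REFUSAL_MARKERS = ("i'm sorry", "i cannot", "i can't")

def is_refusal(text: str) -> bool:
    return any(marker in text.lower() for marker in REFUSAL_MARKERS)

def guarded_generate(user_prompt: str) -> str:
    response = call_llm(user_prompt)            # first call: normal generation
    if is_refusal(response):
        return response                         # already refused, nothing more to check
    inferred_prompt = call_llm(
        "Guess the single user prompt that most likely produced this response:\n"
        + response
    )                                           # extra call: backtranslate the response
    probe = call_llm(inferred_prompt)           # extra call: does the model refuse the inferred prompt?
    if is_refusal(probe):
        return "I'm sorry, I can't help with that."
    return response

print(guarded_generate("Tell me a joke about penguins."))
```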
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/638eb5f949de7ae552dd6211/mJkQJGpn9tXV37N2VLFCh.jpeg", "fullname": "Derek Thomas", "name": "derek-thomas", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 95, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "osanseviero", "samusenps" ], "count": 2 } ]
2024-03-01T07:40:31.000Z
2024-03-01T07:41:37.525Z
[]
/posts/derek-thomas/526166388235014
819
0
100154753019152
[ { "type": "text", "value": "8 Spaces Of The Week is nice, but 840 is even better! 🔥", "raw": "8 Spaces Of The Week is nice, but 840 is even better! 🔥", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here is the complete library of ALL Spaces featured by Hugging Face since October 2021:", "raw": "Here is the complete library of ALL Spaces featured by Hugging Face since October 2021:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "All Spaces Of The Week - ", "raw": "All Spaces Of The Week - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/mvaloatto/ASOTW", "href": null, "resource": { "type": "space", "id": "mvaloatto/ASOTW", "discussionNum": null }, "url": "https://huggingface.co/spaces/mvaloatto/ASOTW", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "-", "raw": "-", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A special mention goes to ", "raw": "A special mention goes to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@osanseviero", "href": null, "resource": null, "url": null, "code": null, "user": "osanseviero", "label": null, "lang": null }, { "type": "text", "value": ", whose collection inspired me to design this dedicated Space. Another shoutout to ", "raw": ", whose collection inspired me to design this dedicated Space. Another shoutout to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@victor", "href": null, "resource": null, "url": null, "code": null, "user": "victor", "label": null, "lang": null }, { "type": "text", "value": ", whose intricately designed Spaces cards motivated me to step up my CSS game :) I plan to release additional features in the future. In the meantime, suggestions are welcome!", "raw": ", whose intricately designed Spaces cards motivated me to step up my CSS game :) I plan to release additional features in the future. 
In the meantime, suggestions are welcome!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
8 Spaces Of The Week is nice, but 840 is even better! 🔥 Here is the complete library of ALL Spaces featured by Hugging Face since October 2021: All Spaces Of The Week - https://huggingface.co/spaces/mvaloatto/ASOTW - A special mention goes to @osanseviero, whose collection inspired me to design this dedicated Space. Another shoutout to @victor, whose intricately designed Spaces cards motivated me to step up my CSS game :) I plan to release additional features in the future. In the meantime, suggestions are welcome!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63893d4c184615e463aa24b8/S1flsX_26OF6ZJBVcPlaf.jpeg", "fullname": "Matt Valoatto", "name": "mvaloatto", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 56, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63893d4c184615e463aa24b8/_Wewm63ZSmGxTOmljUIgW.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2607 } ]
[ { "reaction": "❤️", "users": [ "osanseviero", "vladbogo", "hysts", "victor", "kramp", "clem", "samusenps", "fffiloni", "nbroad", "Nymbo" ], "count": 10 } ]
2024-02-29T19:16:07.000Z
2024-03-01T11:04:05.336Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63893d4c184615e463aa24b8/S1flsX_26OF6ZJBVcPlaf.jpeg", "fullname": "Matt Valoatto", "name": "mvaloatto", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 56, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1643012094339-61914f536d34e827404ceb99.jpeg", "fullname": "hysts", "name": "hysts", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 2521, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg", "fullname": "Feynman Innovations", "name": "ajibawa-2023", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 138, "isFollowing": false } ]
/posts/mvaloatto/100154753019152
232
4
192152819814553
[ { "type": "text", "value": "An increasing number of engineers and researchers are developing foundational models. Navigating the tools, resources, codebases, and best practices guides is daunting for new contributors.", "raw": "An increasing number of engineers and researchers are developing foundational models. Navigating the tools, resources, codebases, and best practices guides is daunting for new contributors.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Introducing the Foundation Model Development Cheatsheet, a succinct guide with 250+ resources & tools for:", "raw": "Introducing the Foundation Model Development Cheatsheet, a succinct guide with 250+ resources & tools for:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📖 sourcing data", "raw": "📖 sourcing data", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔍 documenting & audits", "raw": "🔍 documenting & audits", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🌍 environmental impact", "raw": "🌍 environmental impact", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🥊 risks & harms eval", "raw": "🥊 risks & harms eval", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🎮 release & monitoring", "raw": "🎮 release & monitoring", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://fmcheatsheet.org/", "href": "https://fmcheatsheet.org/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { 
"type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👐 What tools & resources should appear in that cheatsheet? Contributions encouraged!", "raw": "👐 What tools & resources should appear in that cheatsheet? Contributions encouraged!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is the result of a large collaboration between many organizations promoting open-science, and spearheaded by ", "raw": "This is the result of a large collaboration between many organizations promoting open-science, and spearheaded by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Shayne", "href": null, "resource": null, "url": null, "code": null, "user": "Shayne", "label": null, "lang": null }, { "type": "text", "value": " 🔥", "raw": " 🔥", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
An increasing number of engineers and researchers are developing foundational models. Navigating the tools, resources, codebases, and best practices guides is daunting for new contributors. Introducing the Foundation Model Development Cheatsheet, a succinct guide with 250+ resources & tools for: 📖 sourcing data 🔍 documenting & audits 🌍 environmental impact 🥊 risks & harms eval 🎮 release & monitoring https://fmcheatsheet.org/ 👐 What tools & resources should appear in that cheatsheet? Contributions encouraged! This is the result of a large collaboration between many organizations promoting open-science, and spearheaded by @Shayne 🔥
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1619623771844-5ecea265968f6028e0559fa5.jpeg", "fullname": "Victor Sanh", "name": "VictorSanh", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 206, "isFollowing": false }
[]
[ { "avatarUrl": "/avatars/a1cf1ef1fd442c36ed65c68e51919fed.svg", "fullname": "Shayne Longpre", "name": "Shayne", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 8 } ]
[ { "reaction": "👍", "users": [ "Shayne", "osanseviero", "kirch", "mayacinka", "mvaloatto", "ajibawa-2023", "FranzNo", "yjernite", "erikab", "samusenps", "dkyazze", "sbarman25", "victor", "lixinjie" ], "count": 14 }, { "reaction": "❤️", "users": [ "yjernite", "samusenps", "Felladrin" ], "count": 3 } ]
2024-02-29T19:00:06.000Z
2024-03-01T05:24:52.238Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1661096595351-626c59ca030a6e7363b94dad.jpeg", "fullname": "Michael Kirchner", "name": "kirch", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 44, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg", "fullname": "Feynman Innovations", "name": "ajibawa-2023", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 138, "isFollowing": false } ]
/posts/VictorSanh/192152819814553
35
2
327860918354895
[ { "type": "text", "value": "Training a SOTA code LLM with a fully transparent library (nanotron) built from scratch -> Done ✅", "raw": "Training a SOTA code LLM with a fully transparent library (nanotron) built from scratch -> Done ✅", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⭐ BlogPost: ", "raw": "⭐ BlogPost: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/starcoder2", "href": "https://huggingface.co/blog/starcoder2", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 StarCoder2 collection: ", "raw": "🔗 StarCoder2 collection: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/bigcode/starcoder2-65de6da6e87db3383572be1a", "href": null, "resource": { "type": "collection", "id": "bigcode/starcoder2-65de6da6e87db3383572be1a", "discussionNum": null }, "url": "https://huggingface.co/collections/bigcode/starcoder2-65de6da6e87db3383572be1a", "code": null, "user": null, "label": null, "lang": null } ]
Training a SOTA code LLM with a fully transparent library (nanotron) built from scratch -> Done ✅ ⭐ BlogPost: https://huggingface.co/blog/starcoder2 🔗 StarCoder2 collection: https://huggingface.co/collections/bigcode/starcoder2-65de6da6e87db3383572be1a
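For readers who just want to try a checkpoint from the collection above, here is a quick usage sketch (not taken from the blog post). The checkpoint id and generation settings are assumptions, and a recent transformers release with StarCoder2 support is required.

```python
# Load a StarCoder2 checkpoint and complete a small code snippet.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "bigcode/starcoder2-3b"  # assumed id from the StarCoder2 collection
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.bfloat16)

prompt = "def fibonacci(n):"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```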
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1652134289581-5ff8c9f4b2035d9a81a859f7.jpeg", "fullname": "Nouamane Tazi", "name": "nouamanetazi", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 123, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "clem", "alielfilali01", "medmac01", "drFarid", "osanseviero", "ajibawa-2023", "loubnabnl", "giux78", "samusenps", "nouamanetazi", "victor", "vtiyyal1", "Yasbok", "neuralink" ], "count": 14 }, { "reaction": "🤗", "users": [ "alielfilali01", "medmac01", "osanseviero", "samusenps" ], "count": 4 }, { "reaction": "👍", "users": [ "clem", "Eyel", "samusenps" ], "count": 3 }, { "reaction": "🤝", "users": [ "clem" ], "count": 1 } ]
2024-02-29T14:34:35.000Z
2024-02-29T14:34:35.581Z
[]
/posts/nouamanetazi/327860918354895
363
0
175483096750639
[ { "type": "text", "value": "🙋🏻‍♂️ hey there folks ,", "raw": "🙋🏻‍♂️ hey there folks ,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Star coder came out and it's really fascinating in more ways than one !", "raw": "Star coder came out and it's really fascinating in more ways than one !", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "first off it codes well already. but secondly it's reported to \"know\" 101 programming languages !", "raw": "first off it codes well already. but secondly it's reported to \"know\" 101 programming languages !", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "that actually means it's ripe for fine tunes, so if you're like me you've been bookmarking cool datasets and cant wait to get started !", "raw": "that actually means it's ripe for fine tunes, so if you're like me you've been bookmarking cool datasets and cant wait to get started !", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "that said , here's a cool demo where you can try it out now : ", "raw": "that said , here's a cool demo where you can try it out now : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Tonic/starcoder2", "href": null, "resource": { "type": "space", "id": "Tonic/starcoder2", "discussionNum": null }, "url": "https://huggingface.co/spaces/Tonic/starcoder2", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "turns out it can program a T5 demo using gradio ! ", "raw": "turns out it can program a T5 demo using gradio ! 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🙋🏻‍♂️ hey there folks , Star coder came out and it's really fascinating in more ways than one ! first off it codes well already. but secondly it's reported to "know" 101 programming languages ! that actually means it's ripe for fine tunes, so if you're like me you've been bookmarking cool datasets and cant wait to get started ! that said , here's a cool demo where you can try it out now : https://huggingface.co/spaces/Tonic/starcoder2 turns out it can program a T5 demo using gradio !
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62a3bb1cd0d8c2c2169f0b88/n3xoRIHRhTu5P__kDhcEq.png" } ]
[]
[ { "reaction": "🤯", "users": [ "victor", "osanseviero", "kgourgou", "clem", "fffiloni" ], "count": 5 }, { "reaction": "👍", "users": [ "mvaloatto", "ajibawa-2023", "clem", "goncharenko", "yxxsgdmn" ], "count": 5 } ]
2024-02-29T10:19:43.000Z
2024-02-29T10:19:43.587Z
[]
/posts/Tonic/175483096750639
27
0
475713755264649
[ { "type": "text", "value": "Where I work, we are obsessed with what happens to a model's performance after it has been deployed. We call this post-deployment data science.", "raw": "Where I work, we are obsessed with what happens to a model's performance after it has been deployed. We call this post-deployment data science.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Let me tell you about a post-deployment data science algorithm that we recently developed to measure the impact of Concept Drift on a model's performance.", "raw": "Let me tell you about a post-deployment data science algorithm that we recently developed to measure the impact of Concept Drift on a model's performance.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How can we detect Concept Drift? 🤔", "raw": "How can we detect Concept Drift? 🤔", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "All ML models are designed to do one thing: learning a probability distribution in the form of P(y|X). In other words, they try to learn how to model an outcome 'y' given the input variables 'X'. 🧠", "raw": "All ML models are designed to do one thing: learning a probability distribution in the form of P(y|X). In other words, they try to learn how to model an outcome 'y' given the input variables 'X'. 🧠", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This probability distribution, P(y|X), is also called Concept. Therefore, if the Concept changes, the model may become invalid.", "raw": "This probability distribution, P(y|X), is also called Concept. 
Therefore, if the Concept changes, the model may become invalid.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "❓But how do we know if there is a new Concept in our data?", "raw": "❓But how do we know if there is a new Concept in our data?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "❓Or, more important, how do we measure if the new Concept is affecting the model's performance?", "raw": "❓Or, more important, how do we measure if the new Concept is affecting the model's performance?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💡 We came up with a clever solution where the main ingredients are a reference dataset, one where the model's performance is known, and a dataset with the latest data we would like to monitor.", "raw": "💡 We came up with a clever solution where the main ingredients are a reference dataset, one where the model's performance is known, and a dataset with the latest data we would like to monitor.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👣 Step-by-Step solution:", "raw": "👣 Step-by-Step solution:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1️⃣ We start by training an internal model on a chunk of the latest data. ➡️ This allows us to learn the new possible Concept presented in the data.", "raw": "1️⃣ We start by training an internal model on a chunk of the latest data. 
➡️ This allows us to learn the new possible Concept presented in the data.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2️⃣ Next, we use the internal model to make predictions on the reference dataset.", "raw": "2️⃣ Next, we use the internal model to make predictions on the reference dataset.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3️⃣ We then estimate the model's performance on the reference dataset, assuming the model's predictions on the monitoring data as ground truth.", "raw": "3️⃣ We then estimate the model's performance on the reference dataset, assuming the model's predictions on the monitoring data as ground truth.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4️⃣ If the estimated performance of the internal model and the actual monitored model are very different, we then say that there has been a Concept Drift.", "raw": "4️⃣ If the estimated performance of the internal model and the actual monitored model are very different, we then say that there has been a Concept Drift.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "To quantify how this Concept impacts performance, we subtract the actual model's performance on reference from the estimated performance and report a delta of the performance metric. ➡️ This is what the plot below shows. The change of the F1-score due to Concept drift! 🚨", "raw": "To quantify how this Concept impacts performance, we subtract the actual model's performance on reference from the estimated performance and report a delta of the performance metric. ➡️ This is what the plot below shows. The change of the F1-score due to Concept drift! 
🚨", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This process is repeated for every new chunk of data that we get. 🔁", "raw": "This process is repeated for every new chunk of data that we get. 🔁", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Where I work, we are obsessed with what happens to a model's performance after it has been deployed. We call this post-deployment data science. Let me tell you about a post-deployment data science algorithm that we recently developed to measure the impact of Concept Drift on a model's performance. How can we detect Concept Drift? 🤔 All ML models are designed to do one thing: learn a probability distribution in the form of P(y|X). In other words, they try to learn how to model an outcome 'y' given the input variables 'X'. 🧠 This probability distribution, P(y|X), is also called the Concept. Therefore, if the Concept changes, the model may become invalid. ❓But how do we know if there is a new Concept in our data? ❓Or, more importantly, how do we measure whether the new Concept is affecting the model's performance? 💡 We came up with a clever solution where the main ingredients are a reference dataset, one where the model's performance is known, and a dataset with the latest data we would like to monitor. 👣 Step-by-Step solution: 1️⃣ We start by training an internal model on a chunk of the latest data. ➡️ This allows us to learn the possible new Concept present in the data. 2️⃣ Next, we use the internal model to make predictions on the reference dataset. 3️⃣ We then estimate the model's performance on the reference dataset, taking the model's predictions on the monitoring data as ground truth. 4️⃣ If the estimated performance of the internal model and the actual monitored model are very different, we say that there has been a Concept Drift. To quantify how this new Concept impacts performance, we subtract the actual model's performance on reference from the estimated performance and report a delta of the performance metric. ➡️ This is what the plot below shows: the change in F1-score due to Concept Drift! 🚨 This process is repeated for every new chunk of data that we get. 🔁
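For readers who prefer code, here is a minimal sketch of the chunk-wise check described above, assuming a scikit-learn-style monitored model, an F1-scored classification task, and labelled data in the latest chunk; the helper name and the choice of internal model are illustrative assumptions, not the exact production algorithm.

```python
# Minimal sketch of the concept-drift delta described in the post.
# The classifier used for the internal model and the F1 metric are
# illustrative choices; swap in whatever fits your monitored model.
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score

def concept_drift_delta(monitored_model, X_ref, y_ref, X_chunk, y_chunk):
    # 1) Train an internal model on the latest chunk to capture the
    #    (possibly new) Concept P(y|X) present in that data.
    internal_model = RandomForestClassifier().fit(X_chunk, y_chunk)

    # 2) Let the internal model predict on the reference dataset; these
    #    predictions act as proxy labels under the new Concept.
    proxy_labels = internal_model.predict(X_ref)

    # 3) Estimated performance: how the monitored model would score on the
    #    reference inputs if the new Concept were the ground truth.
    estimated_f1 = f1_score(proxy_labels, monitored_model.predict(X_ref))

    # 4) Actual performance on reference, where the true labels are known.
    actual_f1 = f1_score(y_ref, monitored_model.predict(X_ref))

    # A large delta signals a performance impact from Concept Drift.
    return estimated_f1 - actual_f1
```

In a monitoring loop, a function like this would be called once per new chunk and the resulting deltas plotted over time, which is what the F1-score plot in the post shows.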
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1657144463525-629a173153a72d997d3f57d0.jpeg", "fullname": "Santiago Viquez", "name": "santiviquez", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 84, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/629a173153a72d997d3f57d0/1YRx7NALfSIm0fHozah8B.jpeg" } ]
[]
[ { "reaction": "👍", "users": [ "ajibawa-2023", "fblgit", "jessicagab", "Nhebo", "victor", "vishwask", "codito", "anujd9" ], "count": 8 }, { "reaction": "❤️", "users": [ "gsarti", "clem", "dlicari", "samusenps", "Kukedlc" ], "count": 5 } ]
2024-02-29T08:13:52.000Z
2024-02-29T08:13:52.488Z
[]
/posts/santiviquez/475713755264649
23
0
804343794091633
[ { "type": "text", "value": "Real-time object detection w/ 🤗 Transformers.js, running YOLOv9 locally in your browser! 🤯", "raw": "Real-time object detection w/ 🤗 Transformers.js, running YOLOv9 locally in your browser! 🤯", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Try it out yourself: ", "raw": "Try it out yourself: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Xenova/video-object-detection", "href": null, "resource": { "type": "space", "id": "Xenova/video-object-detection", "discussionNum": null }, "url": "https://huggingface.co/spaces/Xenova/video-object-detection", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "(Model used + example code: ", "raw": "(Model used + example code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/Xenova/gelan-c_all", "href": null, "resource": { "type": "model", "id": "Xenova/gelan-c_all", "discussionNum": null }, "url": "https://huggingface.co/Xenova/gelan-c_all", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ")", "raw": ")", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This demo shows why on-device ML is so important:", "raw": "This demo shows why on-device ML is so important:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Privacy - local inference means no user data is sent to the cloud", "raw": "1. Privacy - local inference means no user data is sent to the cloud", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. No server latency - empowers developers to build real-time applications", "raw": "2. 
No server latency - empowers developers to build real-time applications", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Lower costs - no need to pay for bandwidth and processing of streamed video", "raw": "3. Lower costs - no need to pay for bandwidth and processing of streamed video", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I can't wait to see what you build with it! 🔥", "raw": "I can't wait to see what you build with it! 🔥", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Real-time object detection w/ 🤗 Transformers.js, running YOLOv9 locally in your browser! 🤯 Try it out yourself: https://huggingface.co/spaces/Xenova/video-object-detection (Model used + example code: https://huggingface.co/Xenova/gelan-c_all) This demo shows why on-device ML is so important: 1. Privacy - local inference means no user data is sent to the cloud 2. No server latency - empowers developers to build real-time applications 3. Lower costs - no need to pay for bandwidth and processing of streamed video I can't wait to see what you build with it! 🔥
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b253b7ac5ecaae3d1efe0c/hwiQ0uvz3t-L5a-NtBIO6.png", "fullname": "Joshua", "name": "Xenova", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 3792, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/61b253b7ac5ecaae3d1efe0c/xpvEiQ7odTiFGOQluLaJg.mp4" } ]
[]
[ { "reaction": "❤️", "users": [ "DmitryRyumin", "macadeliccc", "giux78", "radames", "ajibawa-2023", "osanseviero", "Dlbk", "jaickerag", "victor", "mvaloatto", "clem", "samusenps", "bwang0911", "Thepickledegg", "hogunkim", "felixdrp", "Noomam", "Youngwon", "elcrei" ], "count": 19 }, { "reaction": "👍", "users": [ "jaickerag", "mrkbac", "yxxsgdmn", "JoPmt", "elcrei" ], "count": 5 } ]
2024-02-28T18:41:21.000Z
2024-03-04T13:04:38.304Z
[ { "avatarUrl": "/avatars/9d6050996fc440ad6693bc05087d66d8.svg", "fullname": "Rahul Atlury", "name": "atlury", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 4, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b253b7ac5ecaae3d1efe0c/hwiQ0uvz3t-L5a-NtBIO6.png", "fullname": "Joshua", "name": "Xenova", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 3792, "isFollowing": false } ]
/posts/Xenova/804343794091633
3302
3
591814698484680
[ { "type": "text", "value": "I have dedicated several days, working over 12 hours each day, on SUPIR (Scaling-UP Image Restoration), a cutting-edge image enhancement and upscaling model introduced in the paper Scaling Up to Excellence: Practicing Model Scaling for Photo-Realistic Image Restoration In the Wild.", "raw": "I have dedicated several days, working over 12 hours each day, on SUPIR (Scaling-UP Image Restoration), a cutting-edge image enhancement and upscaling model introduced in the paper Scaling Up to Excellence: Practicing Model Scaling for Photo-Realistic Image Restoration In the Wild.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This model is simply mind-blowing. At the bottom of this post, you will see side-by-side comparisons of SUPIR versus the extremely expensive online service, Magnific AI. Magnific is known to be the best among the community. However, SUPIR is by far superior. SUPIR also significantly outperforms Topaz AI upscale. SUPIR manages to remain faithful to the original image almost 100% while adding details and achieving super upscaling with the best realism.", "raw": "This model is simply mind-blowing. At the bottom of this post, you will see side-by-side comparisons of SUPIR versus the extremely expensive online service, Magnific AI. Magnific is known to be the best among the community. However, SUPIR is by far superior. SUPIR also significantly outperforms Topaz AI upscale. SUPIR manages to remain faithful to the original image almost 100% while adding details and achieving super upscaling with the best realism.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can read the full blog post here : ", "raw": "You can read the full blog post here : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/MonsterMMORPG/supir-sota-image-upscale-better-than-magnific-ai", "href": "https://huggingface.co/blog/MonsterMMORPG/supir-sota-image-upscale-better-than-magnific-ai", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I have dedicated several days, working over 12 hours each day, to SUPIR (Scaling-UP Image Restoration), a cutting-edge image enhancement and upscaling model introduced in the paper Scaling Up to Excellence: Practicing Model Scaling for Photo-Realistic Image Restoration In the Wild. This model is simply mind-blowing. At the bottom of this post, you will see side-by-side comparisons of SUPIR versus the extremely expensive online service Magnific AI. Magnific is known as the best in the community. However, SUPIR is by far superior. SUPIR also significantly outperforms Topaz AI upscaling. SUPIR remains almost 100% faithful to the original image while adding details and achieving super upscaling with the best realism. You can read the full blog post here: https://huggingface.co/blog/MonsterMMORPG/supir-sota-image-upscale-better-than-magnific-ai
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png", "fullname": "Furkan Gözükara", "name": "MonsterMMORPG", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 376, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/8bCg_0TAP7o-thVnRO3Lp.png" } ]
[]
[ { "reaction": "👍", "users": [ "ameerazam08", "clem", "iDrops", "samusenps", "diogofranciscop", "yxxsgdmn", "Johnnycadelover" ], "count": 7 }, { "reaction": "🤯", "users": [ "adamelliotfields" ], "count": 1 }, { "reaction": "🔥", "users": [ "Jwjjwk" ], "count": 1 } ]
2024-02-28T17:42:02.000Z
2024-02-29T16:00:16.243Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6266513d539521e602b5dc3a/qg0fmVTGNKEFL7feyvQNh.png", "fullname": "Ameer Azam", "name": "ameerazam08", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 77, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png", "fullname": "Furkan Gözükara", "name": "MonsterMMORPG", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 376, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg", "fullname": "Feynman Innovations", "name": "ajibawa-2023", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 138, "isFollowing": false } ]
/posts/MonsterMMORPG/591814698484680
379
4
650521542566757
[ { "type": "text", "value": "🔥 What's that biomedical model that got 170,763 downloads last month on HuggingFace?! Well, the paper is finally published! #BioLORD", "raw": "🔥 What's that biomedical model that got 170,763 downloads last month on HuggingFace?! Well, the paper is finally published! #BioLORD", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📰 Read our article in the Journal of the American Medical Informatics Association:", "raw": "📰 Read our article in the Journal of the American Medical Informatics Association:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://academic.oup.com/jamia/advance-article/doi/10.1093/jamia/ocae029/7614965", "href": "https://academic.oup.com/jamia/advance-article/doi/10.1093/jamia/ocae029/7614965", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📝", "raw": "📝", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`TLDR:`", "href": null, "resource": null, "url": null, "code": "TLDR:", "user": null, "label": null, "lang": null }, { "type": "text", "value": " BioLORD-2023 is a series of semantic language models for the biomedical domain, capable of representing clinical concepts and sentences in a semantic space aligned with human preferences. Our new multilingual version supports 50+ languages and is further finetuned on 7 European languages. These models were trained contrastively and through distillations, using a corpus unifying in the same latent space the concept names of biomedical concepts and their descriptions. For concepts which didn't have a description written by humans in UMLS, we use information contained in the SnomedCT knowledge graph and the capabilities of ChatGPT to generate synthetic data and improve our results.", "raw": " BioLORD-2023 is a series of semantic language models for the biomedical domain, capable of representing clinical concepts and sentences in a semantic space aligned with human preferences. Our new multilingual version supports 50+ languages and is further finetuned on 7 European languages. These models were trained contrastively and through distillations, using a corpus unifying in the same latent space the concept names of biomedical concepts and their descriptions. 
For concepts which didn't have a description written by humans in UMLS, we use information contained in the SnomedCT knowledge graph and the capabilities of ChatGPT to generate synthetic data and improve our results.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🤗 Access our models from the HuggingFace hub, including the new 2023-C and 2023-S variants:", "raw": "🤗 Access our models from the HuggingFace hub, including the new 2023-C and 2023-S variants:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/FremyCompany/BioLORD-2023", "href": null, "resource": { "type": "model", "id": "FremyCompany/BioLORD-2023", "discussionNum": null }, "url": "https://huggingface.co/FremyCompany/BioLORD-2023", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/FremyCompany/BioLORD-2023-M", "href": null, "resource": { "type": "model", "id": "FremyCompany/BioLORD-2023-M", "discussionNum": null }, "url": "https://huggingface.co/FremyCompany/BioLORD-2023-M", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/FremyCompany/BioLORD-2023-S", "href": null, "resource": { "type": "model", "id": "FremyCompany/BioLORD-2023-S", "discussionNum": null }, "url": "https://huggingface.co/FremyCompany/BioLORD-2023-S", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/FremyCompany/BioLORD-2023-C", "href": null, "resource": { "type": "model", "id": "FremyCompany/BioLORD-2023-C", "discussionNum": null }, "url": "https://huggingface.co/FremyCompany/BioLORD-2023-C", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🔥 What's that biomedical model that got 170,763 downloads last month on HuggingFace?! Well, the paper is finally published! #BioLORD 📰 Read our article in the Journal of the American Medical Informatics Association: https://academic.oup.com/jamia/advance-article/doi/10.1093/jamia/ocae029/7614965 📝`TLDR:` BioLORD-2023 is a series of semantic language models for the biomedical domain, capable of representing clinical concepts and sentences in a semantic space aligned with human preferences. Our new multilingual version supports 50+ languages and is further finetuned on 7 European languages. These models were trained contrastively and through distillations, using a corpus unifying in the same latent space the concept names of biomedical concepts and their descriptions. For concepts which didn't have a description written by humans in UMLS, we use information contained in the SnomedCT knowledge graph and the capabilities of ChatGPT to generate synthetic data and improve our results. 🤗 Access our models from the HuggingFace hub, including the new 2023-C and 2023-S variants: https://huggingface.co/FremyCompany/BioLORD-2023 https://huggingface.co/FremyCompany/BioLORD-2023-M https://huggingface.co/FremyCompany/BioLORD-2023-S https://huggingface.co/FremyCompany/BioLORD-2023-C
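The BioLORD checkpoints are published as sentence-embedding models, so a typical way to try them is through the sentence-transformers API; the snippet below is a small usage sketch (the example phrases are made up), and the model card remains the authoritative reference for recommended usage.

```python
# Sketch: encode clinical concept names with BioLORD-2023 and compare them.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("FremyCompany/BioLORD-2023")

concepts = ["myocardial infarction", "heart attack", "type 2 diabetes mellitus"]
embeddings = model.encode(concepts, normalize_embeddings=True)

# Cosine similarity between concept names: synonyms should score high.
print(util.cos_sim(embeddings[0], embeddings[1]))  # MI vs. heart attack
print(util.cos_sim(embeddings[0], embeddings[2]))  # MI vs. diabetes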
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1647276617786-5f04e8865d08220171a0ad3f.png", "fullname": "François Remy", "name": "FremyCompany", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 32, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f04e8865d08220171a0ad3f/sK22MD2CCD2lDhajQoaRV.png" } ]
[]
[ { "reaction": "👍", "users": [ "ilGawo", "cdevelder", "osanseviero", "katielink", "julien-c", "clem", "macadeliccc", "UMCU", "danielhanchen", "samusenps", "yxxsgdmn", "pdelobelle" ], "count": 12 }, { "reaction": "❤️", "users": [ "clem", "Concor", "drak-hf", "samusenps" ], "count": 4 } ]
2024-02-28T15:25:32.000Z
2024-02-29T14:59:45.605Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/618b9a79ba796dc2bf3f4412/66ustwEp1EDU_rxcXJBIO.jpeg", "fullname": "Bram van Es", "name": "UMCU", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 6, "isFollowing": false } ]
/posts/FremyCompany/650521542566757
687
1
987671089789196
[ { "type": "text", "value": "The Era of 1-bit LLMs: All Large Language Models are in 1.58 Bits", "raw": "The Era of 1-bit LLMs: All Large Language Models are in 1.58 Bits", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2402.17764", "href": null, "resource": { "type": "paper", "id": "2402.17764", "discussionNum": null }, "url": "https://huggingface.co/papers/2402.17764", "code": null, "user": null, "label": "The Era of 1-bit LLMs: All Large Language Models are in 1.58 Bits (2402.17764)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Recent research, such as BitNet, is paving the way for a new era of 1-bit Large Language Models (LLMs). In this work, we introduce a 1-bit LLM variant, namely BitNet b1.58, in which every single parameter (or weight) of the LLM is ternary {-1, 0, 1}. It matches the full-precision (i.e., FP16 or BF16) Transformer LLM with the same model size and training tokens in terms of both perplexity and end-task performance, while being significantly more cost-effective in terms of latency, memory, throughput, and energy consumption. More profoundly, the 1.58-bit LLM defines a new scaling law and recipe for training new generations of LLMs that are both high-performance and cost-effective. Furthermore, it enables a new computation paradigm and opens the door for designing specific hardware optimized for 1-bit LLMs.", "raw": "Recent research, such as BitNet, is paving the way for a new era of 1-bit Large Language Models (LLMs). In this work, we introduce a 1-bit LLM variant, namely BitNet b1.58, in which every single parameter (or weight) of the LLM is ternary {-1, 0, 1}. It matches the full-precision (i.e., FP16 or BF16) Transformer LLM with the same model size and training tokens in terms of both perplexity and end-task performance, while being significantly more cost-effective in terms of latency, memory, throughput, and energy consumption. More profoundly, the 1.58-bit LLM defines a new scaling law and recipe for training new generations of LLMs that are both high-performance and cost-effective. Furthermore, it enables a new computation paradigm and opens the door for designing specific hardware optimized for 1-bit LLMs.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
The Era of 1-bit LLMs: All Large Language Models are in 1.58 Bits https://huggingface.co/papers/2402.17764 Recent research, such as BitNet, is paving the way for a new era of 1-bit Large Language Models (LLMs). In this work, we introduce a 1-bit LLM variant, namely BitNet b1.58, in which every single parameter (or weight) of the LLM is ternary {-1, 0, 1}. It matches the full-precision (i.e., FP16 or BF16) Transformer LLM with the same model size and training tokens in terms of both perplexity and end-task performance, while being significantly more cost-effective in terms of latency, memory, throughput, and energy consumption. More profoundly, the 1.58-bit LLM defines a new scaling law and recipe for training new generations of LLMs that are both high-performance and cost-effective. Furthermore, it enables a new computation paradigm and opens the door for designing specific hardware optimized for 1-bit LLMs.
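To make the ternary idea concrete, here is a toy sketch of absmean-style weight ternarization in PyTorch: weights are scaled by their mean absolute value and rounded into {-1, 0, +1}. This is only an illustration of the quantization step, not the paper's training recipe or released code.

```python
# Toy "1.58-bit" ternarization in the spirit of BitNet b1.58.
import torch

def absmean_ternarize(w: torch.Tensor, eps: float = 1e-8):
    scale = w.abs().mean().clamp(min=eps)          # per-tensor absmean scale
    w_ternary = (w / scale).round().clamp(-1, 1)   # values in {-1, 0, +1}
    return w_ternary, scale                        # dequantize as w_ternary * scale

w = torch.randn(4, 4)
q, s = absmean_ternarize(w)
print(q)                          # ternary weight matrix
print((q * s - w).abs().mean())   # average quantization error
```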
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/ZXjQ-1EnsQ-6FWoDu7JOR.png" } ]
[]
[ { "reaction": "👍", "users": [ "Azamat1k", "bofenghuang", "osanseviero", "clem", "macadeliccc", "ssone95", "minjejeon", "SRDdev", "arcdyn", "ajibawa-2023", "Kukedlc", "yongzx", "eramax", "mvaloatto", "kgourgou", "danielhanchen", "arjunsriva", "goncharenko", "notune", "koflerdavid", "krishnapraveen", "Banshal", "Hemanth-thunder", "Aishou", "northern-64bit", "tuanlda78202", "andrewrreed", "CarlLee", "rippertnt", "joecrypto", "feveromo", "mathiasn1", "RachidAR", "SaiNikhileshReddy", "yxxsgdmn", "julien-c", "femboysLover", "victor", "wath5" ], "count": 39 }, { "reaction": "❤️", "users": [ "clem", "macadeliccc", "giux78", "ssone95", "fblgit", "adhisetiawan", "tuanlda78202", "samusenps", "mindrage", "Parth", "SaiNikhileshReddy", "julien-c", "ncard" ], "count": 13 } ]
2024-02-28T14:39:57.000Z
2024-02-28T14:39:57.255Z
[]
/posts/akhaliq/987671089789196
223
0
596860170283496
[ { "type": "text", "value": "⭐ Today we’re releasing The Stack v2 & StarCoder2: a series of 3B, 7B & 15B code generation models trained on 3.3 to 4.5 trillion tokens of code: ", "raw": "⭐ Today we’re releasing The Stack v2 & StarCoder2: a series of 3B, 7B & 15B code generation models trained on 3.3 to 4.5 trillion tokens of code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- StarCoder2-15B matches or outperforms CodeLlama 34B, and approaches DeepSeek-33B on multiple benchmarks.", "raw": "- StarCoder2-15B matches or outperforms CodeLlama 34B, and approaches DeepSeek-33B on multiple benchmarks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- StarCoder2-3B outperforms StarCoderBase-15B and similar sized models.", "raw": "- StarCoder2-3B outperforms StarCoderBase-15B and similar sized models.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- The Stack v2 a 4x larger dataset than the Stack v1, resulting in 900B unique code tokens 🚀", "raw": "- The Stack v2 a 4x larger dataset than the Stack v1, resulting in 900B unique code tokens 🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "As always, we released everything from models and datasets to curation code. Enjoy!", "raw": "As always, we released everything from models and datasets to curation code. 
Enjoy!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 StarCoder2 collection: ", "raw": "🔗 StarCoder2 collection: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/bigcode/starcoder2-65de6da6e87db3383572be1a", "href": null, "resource": { "type": "collection", "id": "bigcode/starcoder2-65de6da6e87db3383572be1a", "discussionNum": null }, "url": "https://huggingface.co/collections/bigcode/starcoder2-65de6da6e87db3383572be1a", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Paper: ", "raw": "🔗 Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://drive.google.com/file/d/17iGn3c-sYNiLyRSY-A85QOzgzGnGiVI3/view", "href": "https://drive.google.com/file/d/17iGn3c-sYNiLyRSY-A85QOzgzGnGiVI3/view", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 BlogPost: ", "raw": "🔗 BlogPost: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/starcoder2", "href": "https://huggingface.co/blog/starcoder2", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Code Leaderboard: ", "raw": "🔗 Code Leaderboard: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/bigcode/bigcode-models-leaderboard", "href": null, "resource": { "type": "space", "id": "bigcode/bigcode-models-leaderboard", "discussionNum": null }, "url": "https://huggingface.co/spaces/bigcode/bigcode-models-leaderboard", "code": null, "user": null, "label": null, "lang": null } ]
⭐ Today we’re releasing The Stack v2 & StarCoder2: a series of 3B, 7B & 15B code generation models trained on 3.3 to 4.5 trillion tokens of code: - StarCoder2-15B matches or outperforms CodeLlama 34B, and approaches DeepSeek-33B on multiple benchmarks. - StarCoder2-3B outperforms StarCoderBase-15B and similarly sized models. - The Stack v2 is a 4x larger dataset than the Stack v1, resulting in 900B unique code tokens 🚀 As always, we released everything from models and datasets to curation code. Enjoy! 🔗 StarCoder2 collection: https://huggingface.co/collections/bigcode/starcoder2-65de6da6e87db3383572be1a 🔗 Paper: https://drive.google.com/file/d/17iGn3c-sYNiLyRSY-A85QOzgzGnGiVI3/view 🔗 BlogPost: https://huggingface.co/blog/starcoder2 🔗 Code Leaderboard: https://huggingface.co/spaces/bigcode/bigcode-models-leaderboard
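A minimal way to try the release locally with 🤗 Transformers is sketched below; the 3B repo id and generation settings follow the usual conventions for this model family, but check the model cards for the recommended snippets and the required transformers version.

```python
# Sketch: greedy code completion with the smallest StarCoder2 checkpoint.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "bigcode/starcoder2-3b"  # 7B and 15B variants are also available
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.bfloat16)

inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```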
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61c141342aac764ce1654e43/81AwoT5IQ_Xdw0OVw7TKu.jpeg", "fullname": "Loubna Ben Allal", "name": "loubnabnl", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 2334, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/61c141342aac764ce1654e43/xgxe4cZjwUQL-qH9yS9Of.png" } ]
[]
[ { "reaction": "🤗", "users": [ "lvwerra", "DmitryRyumin", "thomwolf", "olivierdehaene", "smangrul", "osanseviero", "mcpotato", "alielfilali01", "SivilTaram", "victor", "yuxiang630", "BrigitteTousi", "VictorSanh", "harmdevries", "RaymondLi", "3outeille", "Azamat1k", "vladbogo", "bofenghuang", "ncoop57", "philschmid", "Avremi", "clem", "admarcosai", "macadeliccc", "mayank-mishra", "hysts", "kramp", "nouamanetazi", "danielhanchen", "goncharenko", "MoritzLaurer", "tbeck", "not-lain", "amyeroberts", "andrewrreed", "yxxsgdmn", "seyf1elislam", "omaryshchenko", "mishig" ], "count": 40 }, { "reaction": "❤️", "users": [ "lvwerra", "thomwolf", "olivierdehaene", "smangrul", "osanseviero", "mcpotato", "alielfilali01", "SivilTaram", "victor", "yuxiang630", "BrigitteTousi", "VictorSanh", "harmdevries", "RaymondLi", "3outeille", "ncoop57", "philschmid", "clem", "admarcosai", "macadeliccc", "EdoAbati", "euclaise", "ajibawa-2023", "mayank-mishra", "dalraf", "nouamanetazi", "tbeck", "not-lain", "amyeroberts", "andrewrreed", "samusenps", "seyf1elislam", "arjunguha", "mishig", "Ramikan-BR" ], "count": 35 }, { "reaction": "🤯", "users": [ "lvwerra", "thomwolf", "olivierdehaene", "smangrul", "osanseviero", "SivilTaram", "victor", "yuxiang630", "BrigitteTousi", "RaymondLi", "3outeille", "hlarcher", "OmBenz", "philschmid", "Abderrazak", "clem", "admarcosai", "mayank-mishra", "not-lain", "mishig" ], "count": 20 }, { "reaction": "🚀", "users": [ "mishig" ], "count": 1 } ]
2024-02-28T14:18:14.000Z
2024-02-28T14:21:17.956Z
[]
/posts/loubnabnl/596860170283496
1293
0
146398971160140
[ { "type": "text", "value": "🌟🎭✨ Exciting News! The Latest in Expressive Video Portrait Generation! 🌟🎭✨", "raw": "🌟🎭✨ Exciting News! The Latest in Expressive Video Portrait Generation! 🌟🎭✨", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📄 Title: EMO: Emote Portrait Alive - Generating Expressive Portrait Videos with Audio2Video Diffusion Model under Weak Conditions", "raw": "📄 Title: EMO: Emote Portrait Alive - Generating Expressive Portrait Videos with Audio2Video Diffusion Model under Weak Conditions", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👥 Authors: Linrui Tian, ", "raw": "👥 Authors: Linrui Tian, ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@lucaskingjade", "href": null, "resource": null, "url": null, "code": null, "user": "lucaskingjade", "label": null, "lang": null }, { "type": "text", "value": ", Bang Zhang, and ", "raw": ", Bang Zhang, and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Liefeng", "href": null, "resource": null, "url": null, "code": null, "user": "Liefeng", "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Paper: ", "raw": "🔗 Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2402.17485", "href": null, "resource": { "type": "paper", "id": "2402.17485", "discussionNum": null }, "url": "https://huggingface.co/papers/2402.17485", "code": null, "user": null, "label": "EMO: Emote Portrait Alive - Generating Expressive Portrait Videos with\n Audio2Video Diffusion Model under Weak Conditions (2402.17485)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Github Page: ", "raw": "🔗 Github Page: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://humanaigc.github.io/emote-portrait-alive", "href": "https://humanaigc.github.io/emote-portrait-alive", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": 
null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Repository: ", "raw": "🔗 Repository: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/HumanAIGC/EMO", "href": "https://github.com/HumanAIGC/EMO", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔍 Keywords: #EMO #EmotePortrait #Audio2VideoDiffusion #ExpressiveAnimations #VideoGeneration #DigitalArt #HumanExpression #ComputerVision #DeepLearning #AI", "raw": "🔍 Keywords: #EMO #EmotePortrait #Audio2VideoDiffusion #ExpressiveAnimations #VideoGeneration #DigitalArt #HumanExpression #ComputerVision #DeepLearning #AI", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🚀 Added to the Avatars Collection: ", "raw": "🚀 Added to the Avatars Collection: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "href": null, "resource": { "type": "collection", "id": "DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "discussionNum": null }, "url": "https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36", "code": null, "user": null, "label": null, "lang": null } ]
🌟🎭✨ Exciting News! The Latest in Expressive Video Portrait Generation! 🌟🎭✨ 📄 Title: EMO: Emote Portrait Alive - Generating Expressive Portrait Videos with Audio2Video Diffusion Model under Weak Conditions 👥 Authors: Linrui Tian, @lucaskingjade, Bang Zhang, and @Liefeng 🔗 Paper: https://huggingface.co/papers/2402.17485 🔗 Github Page: https://humanaigc.github.io/emote-portrait-alive 🔗 Repository: https://github.com/HumanAIGC/EMO 🔍 Keywords: #EMO #EmotePortrait #Audio2VideoDiffusion #ExpressiveAnimations #VideoGeneration #DigitalArt #HumanExpression #ComputerVision #DeepLearning #AI 🚀 Added to the Avatars Collection: https://huggingface.co/collections/DmitryRyumin/avatars-65df37cdf81fec13d4dbac36
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/Ml36qlNX-Fjt-ADSs9kKR.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/A89GsKkmUz-hukkQqY09c.png" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/AhyXqhOqt62tzvwCtfyAg.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/t5HVn8Cbo7A7IhUrhdI4w.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/FIWZmC2GrP6Em2uUA1eJC.mp4" } ]
[ { "avatarUrl": "/avatars/3eb8c79f9a7c4c819038ea7b04e323dd.svg", "fullname": "Bo", "name": "Liefeng", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 4 }, { "avatarUrl": "/avatars/be11bf61465df29ac997cc0fedad1cb9.svg", "fullname": "qi wang", "name": "lucaskingjade", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2 } ]
[ { "reaction": "❤️", "users": [ "s3nh", "osanseviero", "DmitryRyumin", "Azamat1k", "aloobun", "sachaarbonel", "plmsmile", "ajibawa-2023", "victor", "mvaloatto", "Hemanth-thunder", "samusenps", "CedrickChu", "thomwolf", "newxuyangcao", "oviniciusfeitosa" ], "count": 16 }, { "reaction": "🤯", "users": [ "MalikIbrar", "fffiloni", "dkyazze" ], "count": 3 }, { "reaction": "🤗", "users": [ "MalikIbrar", "thomwolf", "yxxsgdmn" ], "count": 3 } ]
2024-02-28T13:52:43.000Z
2024-03-02T15:28:13.772Z
[]
/posts/DmitryRyumin/146398971160140
396
0
276108444421787
[ { "type": "text", "value": "Super excited to share with you all our latest contribution from 2A2I.", "raw": "Super excited to share with you all our latest contribution from 2A2I.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Today we announce : ", "raw": "Today we announce : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/2A2I/Arabic-OpenHermes-2.5", "href": null, "resource": { "type": "dataset", "id": "2A2I/Arabic-OpenHermes-2.5", "discussionNum": null }, "url": "https://huggingface.co/datasets/2A2I/Arabic-OpenHermes-2.5", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Arabic-OpenHermes-2.5 Is simply the translation of the original dataset released by ", "raw": "Arabic-OpenHermes-2.5 Is simply the translation of the original dataset released by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@teknium", "href": null, "resource": null, "url": null, "code": null, "user": "teknium", "label": null, "lang": null }, { "type": "text", "value": " couple months ago ! ", "raw": " couple months ago ! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In fact it looks as a simple task ! In reality it was quite a laborious job ! ", "raw": "In fact it looks as a simple task ! In reality it was quite a laborious job ! 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "But thanks to ", "raw": "But thanks to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@maghwa", "href": null, "resource": null, "url": null, "code": null, "user": "maghwa", "label": null, "lang": null }, { "type": "text", "value": " & ", "raw": " & ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@medmac01", "href": null, "resource": null, "url": null, "code": null, "user": "medmac01", "label": null, "lang": null }, { "type": "text", "value": " this dataset managed to see the light today and help creating better / more aligned arabic LLMs in the near future.", "raw": " this dataset managed to see the light today and help creating better / more aligned arabic LLMs in the near future.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If you are interested to join us and/or help us, please leave a comment below or visit our HuggingFace Org Card for more details about How/What you can do.", "raw": "If you are interested to join us and/or help us, please leave a comment below or visit our HuggingFace Org Card for more details about How/What you can do.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "More datasets to come and more models are in the way 🔥", "raw": "More datasets to come and more models are in the way 🔥", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Super excited to share with you all our latest contribution from 2A2I. Today we announce: https://huggingface.co/datasets/2A2I/Arabic-OpenHermes-2.5 Arabic-OpenHermes-2.5 is simply the translation of the original dataset released by @teknium a couple of months ago! It looks like a simple task, but in reality it was quite a laborious job! Thanks to @maghwa & @medmac01, this dataset sees the light today and will help create better / more aligned Arabic LLMs in the near future. If you are interested in joining us and/or helping us, please leave a comment below or visit our HuggingFace Org Card for more details about How/What you can do. More datasets to come and more models are on the way 🔥
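A quick way to inspect the dataset with 🤗 Datasets is sketched below; the split name and column layout are assumptions, so consult the dataset card for the exact schema.

```python
# Sketch: load and peek at the translated dataset.
from datasets import load_dataset

ds = load_dataset("2A2I/Arabic-OpenHermes-2.5", split="train")
print(ds)     # number of rows and column names
print(ds[0])  # first translated conversation
```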
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626237d9bbcbd1c34f1bb231/EJrOjvAL-68qMCYdnvOrq.png", "fullname": "Ali El Filali", "name": "alielfilali01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 186, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/626237d9bbcbd1c34f1bb231/tnl-Lil3yRr5wHdcFjWiM.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64d5698102e58cc1fdd0b585/LK9iASnZnk6AlL3J5FfWV.png", "fullname": "Marwa El Kamil", "name": "maghwa", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 20 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/640603e2c3ab325efa94bc4a/jBLC7JH2dBAkDHYzFXZmr.jpeg", "fullname": "Mohammed Machrouh", "name": "medmac01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 30 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6317aade83d8d2fd903192d9/erOwgMXc_CZih3uMoyTAp.jpeg", "fullname": "Teknium", "name": "teknium", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 4267 } ]
[ { "reaction": "❤️", "users": [ "medmac01", "NovoCode", "osanseviero", "ajibawa-2023", "maghwa", "mvaloatto", "sayhan", "samusenps" ], "count": 8 }, { "reaction": "🤗", "users": [ "medmac01", "maghwa", "sayhan" ], "count": 3 }, { "reaction": "👍", "users": [ "alphaprime90" ], "count": 1 } ]
2024-02-28T13:36:44.000Z
2024-02-28T13:38:15.145Z
[]
/posts/alielfilali01/276108444421787
91
0
573120738895551
[ { "type": "text", "value": "🚨 New Release of 🤗PEFT!", "raw": "🚨 New Release of 🤗PEFT!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. New methods for merging LoRA weights. Refer this HF Post for more details: ", "raw": "1. New methods for merging LoRA weights. Refer this HF Post for more details: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/posts/smangrul/850816632583824", "href": "https://huggingface.co/posts/smangrul/850816632583824", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. AWQ and AQLM support for LoRA. You can now:", "raw": "2. AWQ and AQLM support for LoRA. You can now:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Train adapters on top of 2-bit quantized models with AQLM", "raw": "- Train adapters on top of 2-bit quantized models with AQLM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Train adapters on top of powerful AWQ quantized models", "raw": "- Train adapters on top of powerful AWQ quantized models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Note for inference you can't merge the LoRA weights into the base model!", "raw": "Note for inference you can't merge the LoRA weights into the base model!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. DoRA support: Enabling DoRA is as easy as adding ", "raw": "3. 
DoRA support: Enabling DoRA is as easy as adding ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`use_dora=True`", "href": null, "resource": null, "url": null, "code": "use_dora=True", "user": null, "label": null, "lang": null }, { "type": "text", "value": " to your ", "raw": " to your ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`LoraConfig`", "href": null, "resource": null, "url": null, "code": "LoraConfig", "user": null, "label": null, "lang": null }, { "type": "text", "value": ". Find out more about this method here: ", "raw": ". Find out more about this method here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2402.09353", "href": "https://arxiv.org/abs/2402.09353", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Improved documentation, particularly docs regarding PEFT LoRA+DeepSpeed and PEFT LoRA+FSDP! 📄 Check out the docs at ", "raw": "4. Improved documentation, particularly docs regarding PEFT LoRA+DeepSpeed and PEFT LoRA+FSDP! 📄 Check out the docs at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/docs/peft/index", "href": "https://huggingface.co/docs/peft/index", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ".", "raw": ".", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "5. Full Release Notes: ", "raw": "5. Full Release Notes: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/huggingface/peft/releases/tag/v0.9.0", "href": "https://github.com/huggingface/peft/releases/tag/v0.9.0", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🚨 New Release of 🤗PEFT! 1. New methods for merging LoRA weights. Refer to this HF Post for more details: https://huggingface.co/posts/smangrul/850816632583824 2. AWQ and AQLM support for LoRA. You can now: - Train adapters on top of 2-bit quantized models with AQLM - Train adapters on top of powerful AWQ quantized models Note that for inference you can't merge the LoRA weights into the base model! 3. DoRA support: Enabling DoRA is as easy as adding `use_dora=True` to your `LoraConfig`. Find out more about this method here: https://arxiv.org/abs/2402.09353 4. Improved documentation, particularly docs regarding PEFT LoRA+DeepSpeed and PEFT LoRA+FSDP! 📄 Check out the docs at https://huggingface.co/docs/peft/index. 5. Full Release Notes: https://github.com/huggingface/peft/releases/tag/v0.9.0
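To make item 3 concrete, here is a minimal sketch of enabling DoRA in PEFT v0.9.0. The base checkpoint, rank, and target modules below are illustrative assumptions, not values from the release notes.

```
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

# Placeholder base model; any causal LM with q_proj/v_proj layers works similarly.
base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")

# Switching from plain LoRA to DoRA is a single flag on LoraConfig.
config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=["q_proj", "v_proj"],  # assumed attention projections
    use_dora=True,
)

model = get_peft_model(base_model, config)
model.print_trainable_parameters()
```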
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1638132956881-5fca176d1d7a08cb34d79d5d.jpeg", "fullname": "Sourab Mangrulkar", "name": "smangrul", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 224, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "osanseviero", "Kukedlc", "clem", "Ji-Ha", "samusenps", "alielfilali01", "mudogruer", "ybelkada", "ryokeken", "Cstark", "dahwinsingularity" ], "count": 11 }, { "reaction": "👍", "users": [ "NickyNicky", "Jenish-23", "ixaxaar", "ababio", "rizwan-ai", "ybelkada", "Cstark", "ZennyKenny", "catastropiyush" ], "count": 9 } ]
2024-02-28T11:51:55.000Z
2024-03-05T07:45:30.402Z
[ { "avatarUrl": "/avatars/c02f9f8d27cb83be65dba0c7b945daa4.svg", "fullname": "Jenish-23", "name": "Jenish-23", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1638132956881-5fca176d1d7a08cb34d79d5d.jpeg", "fullname": "Sourab Mangrulkar", "name": "smangrul", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 224, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648631057413-noauth.png", "fullname": "Younes Belkada", "name": "ybelkada", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 417, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626237d9bbcbd1c34f1bb231/EJrOjvAL-68qMCYdnvOrq.png", "fullname": "Ali El Filali", "name": "alielfilali01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 186, "isFollowing": false } ]
/posts/smangrul/573120738895551
645
4
718473545021746
[ { "type": "text", "value": "🆕 There's now a visible description on Spaces previews. You can set the description of your Spaces by editing their README.md - it should make your Spaces more discoverable 🚀.", "raw": "🆕 There's now a visible description on Spaces previews. You can set the description of your Spaces by editing their README.md - it should make your Spaces more discoverable 🚀.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🆕 There's now a visible description on Spaces previews. You can set the description of your Spaces by editing their README.md - it should make your Spaces more discoverable 🚀.
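As a hedged illustration (not from the post): the same README.md metadata can be set programmatically with `huggingface_hub`. The `short_description` key and the Space id below are assumptions to verify against the Hub docs.

```
from huggingface_hub import metadata_update

# Updates the README.md front matter of a Space you own (id is a placeholder).
metadata_update(
    "your-username/your-space",
    {"short_description": "One-line summary shown on the Space preview card"},
    repo_type="space",
)
```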
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2607, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f17f0a0925b9863e28ad517/J0yut5CpC32RTCANJr3lW.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f17f0a0925b9863e28ad517/YkegKXOOm5vCrlldfVRXK.png" } ]
[]
[ { "reaction": "❤️", "users": [ "Felladrin", "osanseviero", "mvaloatto", "clefourrier", "alielfilali01", "clem", "Vokturz", "samusenps", "ojasvisingh786" ], "count": 9 }, { "reaction": "👍", "users": [ "fffiloni", "Tonic" ], "count": 2 } ]
2024-02-28T09:36:53.000Z
2024-02-28T12:41:20.677Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63893d4c184615e463aa24b8/S1flsX_26OF6ZJBVcPlaf.jpeg", "fullname": "Matt Valoatto", "name": "mvaloatto", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 56, "isFollowing": false } ]
/posts/victor/718473545021746
687
1
353840137679104
[ { "type": "text", "value": "🔍 Today's pick in Interpretability & Analysis of LMs: CausalGym: Benchmarking causal interpretability methods on linguistic tasks by ", "raw": "🔍 Today's pick in Interpretability & Analysis of LMs: CausalGym: Benchmarking causal interpretability methods on linguistic tasks by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@aryaman", "href": null, "resource": null, "url": null, "code": null, "user": "aryaman", "label": null, "lang": null }, { "type": "text", "value": " D. Jurafsky ", "raw": " D. Jurafsky ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@cgpotts", "href": null, "resource": null, "url": null, "code": null, "user": "cgpotts", "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "TL;DR: Introduce a revisited benchmark to evaluate the effectiveness and reliability of intervention methods across several linguistic phenomena.", "raw": "TL;DR: Introduce a revisited benchmark to evaluate the effectiveness and reliability of intervention methods across several linguistic phenomena.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "While several interpretability methods are currently used to discover task-relevant model components, their performance and reliability is seldom tested broadly.", "raw": "While several interpretability methods are currently used to discover task-relevant model components, their performance and reliability is seldom tested broadly.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This paper adapts the SyntaxGym benchmark, originally conceived for the study of psycholinguistic phenomena such as subject-verb agreement and garden-path sentences, to evaluate intervention-based interpretability methods. In practice, faithful interventions over model components are expected to cause a predictable change in model prediction (e.g. singular -> plural verb).", "raw": "This paper adapts the SyntaxGym benchmark, originally conceived for the study of psycholinguistic phenomena such as subject-verb agreement and garden-path sentences, to evaluate intervention-based interpretability methods. 
In practice, faithful interventions over model components are expected to cause a predictable change in model prediction (e.g. singular -> plural verb).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Various methods are benchmarked on Pythia models ranging from 14M to 6.9B params, finding Distributed Alignment Search (DAS) to consistently outperform other approaches, followed by probing. When recurring to control tasks to account for the expressivity of supervised methods, probing is found to be more reliable than DAS in larger model sizes.", "raw": "Various methods are benchmarked on Pythia models ranging from 14M to 6.9B params, finding Distributed Alignment Search (DAS) to consistently outperform other approaches, followed by probing. When recurring to control tasks to account for the expressivity of supervised methods, probing is found to be more reliable than DAS in larger model sizes.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Authors conclude with an evaluation of how features driving linguistically plausible behaviours emerge during model training. These features are observed to emerge in Pythia models after 1k training steps, and become progressively more complex over time.", "raw": "Authors conclude with an evaluation of how features driving linguistically plausible behaviours emerge during model training. 
These features are observed to emerge in Pythia models after 1k training steps, and become progressively more complex over time.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📄 Paper: ", "raw": "📄 Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2402.12560", "href": null, "resource": { "type": "paper", "id": "2402.12560", "discussionNum": null }, "url": "https://huggingface.co/papers/2402.12560", "code": null, "user": null, "label": "CausalGym: Benchmarking causal interpretability methods on linguistic\n tasks (2402.12560)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💻 Code: ", "raw": "💻 Code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/aryamanarora/causalgym", "href": "https://github.com/aryamanarora/causalgym", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔡 Dataset: ", "raw": "🔡 Dataset: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/aryaman/causalgym", "href": null, "resource": { "type": "dataset", "id": "aryaman/causalgym", "discussionNum": null }, "url": "https://huggingface.co/datasets/aryaman/causalgym", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔍 All daily picks: ", "raw": "🔍 All daily picks: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/gsarti/daily-picks-in-interpretability-and-analysis-of-lms-65ae3339949c5675d25de2f9", "href": null, "resource": { "type": "collection", "id": "gsarti/daily-picks-in-interpretability-and-analysis-of-lms-65ae3339949c5675d25de2f9", "discussionNum": null }, "url": "https://huggingface.co/collections/gsarti/daily-picks-in-interpretability-and-analysis-of-lms-65ae3339949c5675d25de2f9", "code": null, "user": null, "label": null, "lang": null } ]
🔍 Today's pick in Interpretability & Analysis of LMs: CausalGym: Benchmarking causal interpretability methods on linguistic tasks by @aryaman D. Jurafsky @cgpotts TL;DR: Introduce a revisited benchmark to evaluate the effectiveness and reliability of intervention methods across several linguistic phenomena. While several interpretability methods are currently used to discover task-relevant model components, their performance and reliability are seldom tested broadly. This paper adapts the SyntaxGym benchmark, originally conceived for the study of psycholinguistic phenomena such as subject-verb agreement and garden-path sentences, to evaluate intervention-based interpretability methods. In practice, faithful interventions over model components are expected to cause a predictable change in model prediction (e.g. singular -> plural verb). Various methods are benchmarked on Pythia models ranging from 14M to 6.9B params, finding Distributed Alignment Search (DAS) to consistently outperform other approaches, followed by probing. When resorting to control tasks to account for the expressivity of supervised methods, probing is found to be more reliable than DAS in larger model sizes. The authors conclude with an evaluation of how features driving linguistically plausible behaviours emerge during model training. These features are observed to emerge in Pythia models after 1k training steps, and become progressively more complex over time. 📄 Paper: https://huggingface.co/papers/2402.12560 💻 Code: https://github.com/aryamanarora/causalgym 🔡 Dataset: https://huggingface.co/datasets/aryaman/causalgym 🔍 All daily picks: https://huggingface.co/collections/gsarti/daily-picks-in-interpretability-and-analysis-of-lms-65ae3339949c5675d25de2f9
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1670231290373-5e7749883d77a72421292d07.jpeg", "fullname": "Gabriele Sarti", "name": "gsarti", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5e7749883d77a72421292d07/JdrfMXgKTV6FapyYKcPBX.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5e7749883d77a72421292d07/IAukJm89-kLlq-EWtpthh.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5e7749883d77a72421292d07/5ZQg_jM-PAV5aclJIe-E3.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/603803ad1b4a9bee818ab78e/CLWFX4lUD0hxJsV7uiiQE.jpeg", "fullname": "Aryaman Arora", "name": "aryaman", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 9 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1650485126923-noauth.png", "fullname": "Chris Potts", "name": "cgpotts", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5 } ]
[ { "reaction": "👍", "users": [ "clefourrier", "aryaman", "vladbogo", "samusenps" ], "count": 4 } ]
2024-02-28T09:17:33.000Z
2024-02-28T09:17:33.007Z
[]
/posts/gsarti/353840137679104
15
0
292874595271388
[ { "type": "text", "value": "ChatMusician", "raw": "ChatMusician", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Understanding and Generating Music Intrinsically with LLM", "raw": "Understanding and Generating Music Intrinsically with LLM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2402.16153", "href": null, "resource": { "type": "paper", "id": "2402.16153", "discussionNum": null }, "url": "https://huggingface.co/papers/2402.16153", "code": null, "user": null, "label": "ChatMusician: Understanding and Generating Music Intrinsically with LLM (2402.16153)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "While Large Language Models (LLMs) demonstrate impressive capabilities in text generation, we find that their ability has yet to be generalized to music, humanity's creative language. We introduce ChatMusician, an open-source LLM that integrates intrinsic musical abilities. It is based on continual pre-training and finetuning LLaMA2 on a text-compatible music representation, ABC notation, and the music is treated as a second language. ChatMusician can understand and generate music with a pure text tokenizer without any external multi-modal neural structures or tokenizers. Interestingly, endowing musical abilities does not harm language abilities, even achieving a slightly higher MMLU score. Our model is capable of composing well-structured, full-length music, conditioned on texts, chords, melodies, motifs, musical forms, etc, surpassing GPT-4 baseline. On our meticulously curated college-level music understanding benchmark, MusicTheoryBench, ChatMusician surpasses LLaMA2 and GPT-3.5 on zero-shot setting by a noticeable margin. Our work reveals that LLMs can be an excellent compressor for music, but there remains significant territory to be conquered. We release our 4B token music-language corpora MusicPile, the collected MusicTheoryBench, code, model and demo in GitHub.", "raw": "While Large Language Models (LLMs) demonstrate impressive capabilities in text generation, we find that their ability has yet to be generalized to music, humanity's creative language. We introduce ChatMusician, an open-source LLM that integrates intrinsic musical abilities. It is based on continual pre-training and finetuning LLaMA2 on a text-compatible music representation, ABC notation, and the music is treated as a second language. 
ChatMusician can understand and generate music with a pure text tokenizer without any external multi-modal neural structures or tokenizers. Interestingly, endowing musical abilities does not harm language abilities, even achieving a slightly higher MMLU score. Our model is capable of composing well-structured, full-length music, conditioned on texts, chords, melodies, motifs, musical forms, etc, surpassing GPT-4 baseline. On our meticulously curated college-level music understanding benchmark, MusicTheoryBench, ChatMusician surpasses LLaMA2 and GPT-3.5 on zero-shot setting by a noticeable margin. Our work reveals that LLMs can be an excellent compressor for music, but there remains significant territory to be conquered. We release our 4B token music-language corpora MusicPile, the collected MusicTheoryBench, code, model and demo in GitHub.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
ChatMusician Understanding and Generating Music Intrinsically with LLM https://huggingface.co/papers/2402.16153 While Large Language Models (LLMs) demonstrate impressive capabilities in text generation, we find that their ability has yet to be generalized to music, humanity's creative language. We introduce ChatMusician, an open-source LLM that integrates intrinsic musical abilities. It is based on continual pre-training and finetuning LLaMA2 on a text-compatible music representation, ABC notation, and music is treated as a second language. ChatMusician can understand and generate music with a pure text tokenizer without any external multi-modal neural structures or tokenizers. Interestingly, endowing musical abilities does not harm language abilities, even achieving a slightly higher MMLU score. Our model is capable of composing well-structured, full-length music, conditioned on texts, chords, melodies, motifs, musical forms, etc., surpassing the GPT-4 baseline. On our meticulously curated college-level music understanding benchmark, MusicTheoryBench, ChatMusician surpasses LLaMA2 and GPT-3.5 in a zero-shot setting by a noticeable margin. Our work reveals that LLMs can be an excellent compressor for music, but there remains significant territory to be conquered. We release our 4B-token music-language corpora MusicPile, the collected MusicTheoryBench, code, model and demo on GitHub.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/6sNlJlLKUKXWsOf0_i0VS.mp4" } ]
[]
[ { "reaction": "👍", "users": [ "regikono", "euclaise", "mexicanamerican", "ajibawa-2023", "mvaloatto", "danielhanchen", "korhancagla", "osanseviero", "kramp", "victor", "Azamat1k", "Ranjanj1", "Hoctar77", "rafaelpierrehf" ], "count": 14 }, { "reaction": "❤️", "users": [ "clem", "danielhanchen", "osanseviero", "EquinoxElahin", "asus4" ], "count": 5 } ]
2024-02-27T19:02:30.000Z
2024-02-27T19:02:30.953Z
[]
/posts/akhaliq/292874595271388
103
0
760143912699340
[ { "type": "text", "value": "Gemma QLoRA finetuning is now 2.4x faster and uses 58% less VRAM than FA2 through 🦥Unsloth! Had to rewrite our Cross Entropy Loss kernels to work on all vocab sizes, re-design our manual autograd engine to accept all activation functions, and more! I wrote all about our learnings in our blog post: ", "raw": "Gemma QLoRA finetuning is now 2.4x faster and uses 58% less VRAM than FA2 through 🦥Unsloth! Had to rewrite our Cross Entropy Loss kernels to work on all vocab sizes, re-design our manual autograd engine to accept all activation functions, and more! I wrote all about our learnings in our blog post: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://unsloth.ai/blog/gemma", "href": "https://unsloth.ai/blog/gemma", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ".", "raw": ".", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Also have a Colab notebook with no OOMs, and has 2x faster inference for Gemma & how to merge and save to llama.cpp GGUF & vLLM: ", "raw": "Also have a Colab notebook with no OOMs, and has 2x faster inference for Gemma & how to merge and save to llama.cpp GGUF & vLLM: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing", "href": "https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And uploaded 4bit pre-quantized versions for Gemma 2b and 7b: ", "raw": "And uploaded 4bit pre-quantized versions for Gemma 2b and 7b: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/unsloth/gemma-7b-bnb-4bit", "href": null, "resource": { "type": "model", "id": "unsloth/gemma-7b-bnb-4bit", "discussionNum": null }, "url": "https://huggingface.co/unsloth/gemma-7b-bnb-4bit", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/unsloth/gemma-2b-bnb-4bit", "href": null, "resource": { "type": "model", "id": "unsloth/gemma-2b-bnb-4bit", "discussionNum": null }, "url": "https://huggingface.co/unsloth/gemma-2b-bnb-4bit", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\nfrom unsloth import FastLanguageModel\nmodel, tokenzer = FastLanguageModel.from_pretrained(\"unsloth/gemma-7b\")\nmodel = FastLanguageModel.get_peft_model(model)\n```", "href": null, "resource": null, "url": null, "code": "from unsloth import FastLanguageModel\nmodel, tokenzer = FastLanguageModel.from_pretrained(\"unsloth/gemma-7b\")\nmodel = FastLanguageModel.get_peft_model(model)", "user": null, "label": null, "lang": null } ]
Gemma QLoRA finetuning is now 2.4x faster and uses 58% less VRAM than FA2 through 🦥Unsloth! Had to rewrite our Cross Entropy Loss kernels to work on all vocab sizes, re-design our manual autograd engine to accept all activation functions, and more! I wrote all about our learnings in our blog post: https://unsloth.ai/blog/gemma. Also have a Colab notebook with no OOMs and 2x faster inference for Gemma, plus how to merge and save to llama.cpp GGUF & vLLM: https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing And uploaded 4bit pre-quantized versions for Gemma 2b and 7b: https://huggingface.co/unsloth/gemma-7b-bnb-4bit https://huggingface.co/unsloth/gemma-2b-bnb-4bit ``` from unsloth import FastLanguageModel model, tokenizer = FastLanguageModel.from_pretrained("unsloth/gemma-7b") model = FastLanguageModel.get_peft_model(model) ```
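For context, here is a slightly fuller, hypothetical variant of the snippet above that starts from the 4-bit pre-quantized checkpoint linked in the post. The sequence length, rank, and target modules are assumed values, not Unsloth's recommendations.

```
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    "unsloth/gemma-7b-bnb-4bit",  # 4-bit pre-quantized checkpoint from the post
    max_seq_length=2048,
    load_in_4bit=True,
)

# Attach LoRA adapters; hyperparameters here are illustrative assumptions.
model = FastLanguageModel.get_peft_model(
    model,
    r=16,
    lora_alpha=16,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
)
```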
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62ecdc18b72a69615d6bd857/ixLCk0TwaCVyL_nAfrgEs.png", "fullname": "Daniel Han-Chen", "name": "danielhanchen", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 193, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62ecdc18b72a69615d6bd857/N7CXnNQmkJsMMfRDpBYIJ.jpeg" } ]
[]
[ { "reaction": "❤️", "users": [ "Sylvestre", "harpreetsahota", "cnmoro", "clem", "osanseviero", "ybelkada", "rbiswasfc", "alielfilali01", "danielus", "ambivalent02", "victor" ], "count": 11 }, { "reaction": "👍", "users": [ "osanseviero", "ybelkada", "sbarman25", "alielfilali01" ], "count": 4 } ]
2024-02-27T16:27:46.000Z
2024-03-11T17:48:17.590Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg", "fullname": "Feynman Innovations", "name": "ajibawa-2023", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 138, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62ecdc18b72a69615d6bd857/ixLCk0TwaCVyL_nAfrgEs.png", "fullname": "Daniel Han-Chen", "name": "danielhanchen", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 193, "isFollowing": false } ]
/posts/danielhanchen/760143912699340
1,380
4
528781527880535
[ { "type": "text", "value": "The open-source AI community can build impactful datasets collectively! ", "raw": "The open-source AI community can build impactful datasets collectively! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Announcing ", "raw": "Announcing ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/datasets/DIBT/10k_prompts_ranked", "href": "https://huggingface.co/datasets/DIBT/10k_prompts_ranked", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ", the first dataset release from Data Is Better Together. ", "raw": ", the first dataset release from Data Is Better Together. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Created in <2 weeks by the community. Includes:", "raw": "Created in <2 weeks by the community. Includes:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✨ 10,000+ prompt quality ratings", "raw": "✨ 10,000+ prompt quality ratings", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🧑‍💻 Human and synthetic data prompts", "raw": "🧑‍💻 Human and synthetic data prompts", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🌐 Generated by 300+ contributors", "raw": "🌐 Generated by 300+ contributors", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": 
"text", "value": "How and why collaborative datasets?", "raw": "How and why collaborative datasets?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It's no secret that high-quality open data is essential for creating better open models. The open source community shares 100s of models, datasets and demos openly weekly, but collectively building open datasets has been less explored.", "raw": "It's no secret that high-quality open data is essential for creating better open models. The open source community shares 100s of models, datasets and demos openly weekly, but collectively building open datasets has been less explored.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Datasets have a massive role in shaping what models can be created. If we want more high-quality models for all languages, domains and tasks, we need more and better open datasets for all languages, domains and tasks!", "raw": "Datasets have a massive role in shaping what models can be created. If we want more high-quality models for all languages, domains and tasks, we need more and better open datasets for all languages, domains and tasks!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "To explore how the community could build impactful datasets collectively, Argilla added support for HF authentication for Argilla instances hosted on a Hugging Face Space. Anyone with an HF login could begin contributing to a dataset in <1 minute.", "raw": "To explore how the community could build impactful datasets collectively, Argilla added support for HF authentication for Argilla instances hosted on a Hugging Face Space. 
Anyone with an HF login could begin contributing to a dataset in <1 minute.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "To test this new workflow, we launched a task to rank the quality of prompts (human and synthetically generated).", "raw": "To test this new workflow, we launched a task to rank the quality of prompts (human and synthetically generated).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In less than two weeks, we built a community of over 300 contributors for this dataset 🤗", "raw": "In less than two weeks, we built a community of over 300 contributors for this dataset 🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This dataset became a reality thanks to the dedication of all the individuals who lent their support ❤️ To see the amazing people behind this dataset, visit ", "raw": "This dataset became a reality thanks to the dedication of all the individuals who lent their support ❤️ To see the amazing people behind this dataset, visit ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/spaces/DIBT/prompt-collective-dashboard", "href": "https://huggingface.co/spaces/DIBT/prompt-collective-dashboard", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is just the start for collectively building powerful open datasets! ", "raw": "This is just the start for collectively building powerful open datasets! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
The open-source AI community can build impactful datasets collectively! Announcing https://huggingface.co/datasets/DIBT/10k_prompts_ranked, the first dataset release from Data Is Better Together. Created in <2 weeks by the community. Includes: ✨ 10,000+ prompt quality ratings 🧑‍💻 Human and synthetic data prompts 🌐 Generated by 300+ contributors How and why collaborative datasets? It's no secret that high-quality open data is essential for creating better open models. The open source community shares 100s of models, datasets and demos openly weekly, but collectively building open datasets has been less explored. Datasets have a massive role in shaping what models can be created. If we want more high-quality models for all languages, domains and tasks, we need more and better open datasets for all languages, domains and tasks! To explore how the community could build impactful datasets collectively, Argilla added support for HF authentication for Argilla instances hosted on a Hugging Face Space. Anyone with an HF login could begin contributing to a dataset in <1 minute. To test this new workflow, we launched a task to rank the quality of prompts (human and synthetically generated). In less than two weeks, we built a community of over 300 contributors for this dataset 🤗 This dataset became a reality thanks to the dedication of all the individuals who lent their support ❤️ To see the amazing people behind this dataset, visit https://huggingface.co/spaces/DIBT/prompt-collective-dashboard This is just the start for collectively building powerful open datasets!
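A minimal sketch for readers who want to inspect the released data, assuming the dataset loads with its default configuration and a `train` split:

```
from datasets import load_dataset

# Loads the community-built prompt-ranking dataset announced above.
ds = load_dataset("DIBT/10k_prompts_ranked", split="train")
print(ds)
print(ds[0])  # one prompt with its quality ratings
```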
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg", "fullname": "Daniel van Strien", "name": "davanstrien", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 410, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "osanseviero", "alvarobartt", "dvilasuero", "samusenps", "clem", "TuringsSolutions", "danielhanchen", "alielfilali01", "mmhamdy", "Tonic" ], "count": 10 }, { "reaction": "🤯", "users": [ "victor", "osanseviero", "alvarobartt", "dvilasuero", "DmitryRyumin", "Sylvestre", "clem", "danielhanchen", "alielfilali01" ], "count": 9 }, { "reaction": "👍", "users": [ "ToKrCZ", "mmhamdy", "Tonic" ], "count": 3 } ]
2024-02-27T16:14:28.000Z
2024-02-27T16:45:01.189Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60f0608166e5701b80ed3f02/BHso-wSWpR9b8b8CKvodC.jpeg", "fullname": "Alvaro Bartolome", "name": "alvarobartt", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1739, "isFollowing": false } ]
/posts/davanstrien/528781527880535
26
1
967130417344883
[ { "type": "text", "value": "🚀 Just released version 0.21.0 of the ", "raw": "🚀 Just released version 0.21.0 of the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`huggingface_hub`", "href": null, "resource": null, "url": null, "code": "huggingface_hub", "user": null, "label": null, "lang": null }, { "type": "text", "value": " Python library!", "raw": " Python library!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Exciting updates include:", "raw": "Exciting updates include:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🖇️ Dataclasses everywhere for improved developer experience!", "raw": "🖇️ Dataclasses everywhere for improved developer experience!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💾 HfFileSystem optimizations!", "raw": "💾 HfFileSystem optimizations!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🧩 ", "raw": "🧩 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`PyTorchHubMixin`", "href": null, "resource": null, "url": null, "code": "PyTorchHubMixin", "user": null, "label": null, "lang": null }, { "type": "text", "value": " now supports configs and safetensors!", "raw": " now supports configs and safetensors!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✨ ", "raw": "✨ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`audio-to-audio`", "href": null, "resource": null, "url": null, "code": "audio-to-audio", "user": null, "label": null, "lang": null }, { "type": "text", "value": " supported in the InferenceClient!", "raw": " supported in the InferenceClient!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📚 Translated docs in Simplified Chinese and French!", "raw": "📚 Translated docs in 
Simplified Chinese and French!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💔 Breaking changes: simplified API for listing models and datasets!", "raw": "💔 Breaking changes: simplified API for listing models and datasets!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out the full release notes for more details: ", "raw": "Check out the full release notes for more details: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Wauplin/huggingface_hub/discussions/4", "href": null, "resource": { "type": "space", "id": "Wauplin/huggingface_hub", "discussionNum": 4 }, "url": "https://huggingface.co/spaces/Wauplin/huggingface_hub/discussions/4", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 🤖💻", "raw": " 🤖💻", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🚀 Just released version 0.21.0 of the `huggingface_hub` Python library! Exciting updates include: 🖇️ Dataclasses everywhere for improved developer experience! 💾 HfFileSystem optimizations! 🧩 `PyTorchHubMixin` now supports configs and safetensors! ✨ `audio-to-audio` supported in the InferenceClient! 📚 Translated docs in Simplified Chinese and French! 💔 Breaking changes: simplified API for listing models and datasets! Check out the full release notes for more details: https://huggingface.co/spaces/Wauplin/huggingface_hub/discussions/4 🤖💻
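To illustrate the mixin item, here is a minimal sketch. Note that the class is exposed as `PyTorchModelHubMixin` in the library, and the safetensors comment is based on the release notes; treat the details as assumptions.

```
import torch.nn as nn
from huggingface_hub import PyTorchModelHubMixin

class TinyNet(nn.Module, PyTorchModelHubMixin):
    def __init__(self, hidden_size: int = 64):
        super().__init__()
        self.layer = nn.Linear(hidden_size, hidden_size)

    def forward(self, x):
        return self.layer(x)

model = TinyNet()
model.save_pretrained("tiny-net")            # weights stored as safetensors per the release notes
restored = TinyNet.from_pretrained("tiny-net")
```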
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659336880158-6273f303f6d63a28483fde12.png", "fullname": "Lucain Pouget", "name": "Wauplin", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 157, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "DmitryRyumin", "bmuskalla", "pcuenq", "osanseviero", "lunarflu", "clem", "severo", "pierrci", "harpreetsahota", "digiplay", "samusenps", "ajibawa-2023", "dillfrescott", "MoritzLaurer", "dalraf", "victor", "mvaloatto", "yjernite", "danielus", "Kvikontent", "tonyassi", "radames" ], "count": 22 }, { "reaction": "🤝", "users": [ "alvarobartt", "osanseviero", "lunarflu", "clem", "pierrci", "harpreetsahota", "dillfrescott", "yjernite", "radames" ], "count": 9 }, { "reaction": "👍", "users": [ "fffiloni", "kramp", "harpreetsahota", "dillfrescott", "radames" ], "count": 5 } ]
2024-02-27T12:41:11.000Z
2024-03-20T20:55:22.166Z
[ { "avatarUrl": "/avatars/973db41f23cd375c588e7828c5efe5ce.svg", "fullname": "Pierric", "name": "pierrci", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 115, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/648a824a8ca6cf9857d1349c/wAdFg_x9Km-_Jw2ccD6DV.jpeg", "fullname": "Tony Assi", "name": "tonyassi", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2288, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659336880158-6273f303f6d63a28483fde12.png", "fullname": "Lucain Pouget", "name": "Wauplin", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 157, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648966381588-6064e095abd8d3692e3e2ed6.jpeg", "fullname": "Radamés Ajna", "name": "radames", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2401, "isFollowing": false } ]
/posts/Wauplin/967130417344883
288
4
189499728827559
[ { "type": "text", "value": "I've tried DoRA (", "raw": "I've tried DoRA (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2402.09353", "href": "https://arxiv.org/abs/2402.09353", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ") with SDXL using PEFT, outputs are quite detailed 🤩🌟 ", "raw": ") with SDXL using PEFT, outputs are quite detailed 🤩🌟 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "as usual trained on lego dataset I compiled, I compared them with previously trained pivotal tuned model and the normal DreamBooth model before that 😊 ", "raw": "as usual trained on lego dataset I compiled, I compared them with previously trained pivotal tuned model and the normal DreamBooth model before that 😊 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Notebook by ", "raw": "Notebook by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@linoyts", "href": null, "resource": null, "url": null, "code": null, "user": "linoyts", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://colab.research.google.com/drive/134mt7bCMKtCYyYzETfEGKXT1J6J50ydT?usp=sharing", "href": "https://colab.research.google.com/drive/134mt7bCMKtCYyYzETfEGKXT1J6J50ydT?usp=sharing", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Integration to PEFT by ", "raw": "Integration to PEFT by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@BenjaminB", "href": null, "resource": null, "url": null, "code": null, "user": "BenjaminB", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/huggingface/peft/pull/1474", "href": "https://github.com/huggingface/peft/pull/1474", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " (more info in the PR) ", "raw": " (more info in the PR) ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I've tried DoRA (https://arxiv.org/abs/2402.09353) with SDXL using PEFT, and the outputs are quite detailed 🤩🌟 As usual, I trained on a lego dataset I compiled and compared the results with the previously trained pivotal-tuned model and the normal DreamBooth model before that 😊 Notebook by @linoyts https://colab.research.google.com/drive/134mt7bCMKtCYyYzETfEGKXT1J6J50ydT?usp=sharing Integration into PEFT by @BenjaminB https://github.com/huggingface/peft/pull/1474 (more info in the PR)
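For readers who want to try a trained adapter, a hypothetical inference sketch (not the linked notebook): the adapter repo id is a placeholder, and it assumes a diffusers build whose PEFT backend can load DoRA-format LoRA weights.

```
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# Placeholder repo id for a DoRA adapter trained with the notebook above.
pipe.load_lora_weights("your-username/lego-dora-sdxl")

image = pipe("a lego astronaut exploring mars", num_inference_steps=30).images[0]
image.save("lego_astronaut.png")
```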
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/x70Ny5ZV0iDaHXEYoRkNW.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1656685953025-62bf03d1e80cec527083cd66.jpeg", "fullname": "Benjamin Bossan", "name": "BenjaminB", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 35 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/638f308fc4444c6ca870b60a/Q11NK-8-JbiilJ-vk2LAR.png", "fullname": "Linoy Tsaban", "name": "linoyts", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 160 } ]
[ { "reaction": "👍", "users": [ "jeanflop", "vladbogo", "NeuralKartMocker", "davideuler", "osanseviero", "Falah", "harpreetsahota", "jucamohedano", "ajibawa-2023", "terpjwu1", "CarlLee", "mvaloatto", "danielhanchen", "smangrul", "adamelliotfields", "fffiloni", "rafaelpierrehf", "sgch2023", "radames", "mathiasn1", "julien-c" ], "count": 21 }, { "reaction": "🤝", "users": [ "jeanflop", "Falah", "danielhanchen", "smangrul", "rafaelpierrehf" ], "count": 5 }, { "reaction": "❤️", "users": [ "alielfilali01", "rafaelpierrehf", "radames" ], "count": 3 }, { "reaction": "🤗", "users": [ "alielfilali01", "rafaelpierrehf" ], "count": 2 } ]
2024-02-27T10:39:29.000Z
2024-02-27T10:39:29.004Z
[]
/posts/merve/189499728827559
95
0
872583609664849
[ { "type": "text", "value": "🚀🔥🌟 New Research Alert - ICLR 2024! 🌟🔥🚀", "raw": "🚀🔥🌟 New Research Alert - ICLR 2024! 🌟🔥🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📄 Title: FuseChat: Revolutionizing Chat Models Fusion 🌟🚀", "raw": "📄 Title: FuseChat: Revolutionizing Chat Models Fusion 🌟🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👥 Authors: ", "raw": "👥 Authors: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Wanfq", "href": null, "resource": null, "url": null, "code": null, "user": "Wanfq", "label": null, "lang": null }, { "type": "text", "value": ", ", "raw": ", ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@passerqxj", "href": null, "resource": null, "url": null, "code": null, "user": "passerqxj", "label": null, "lang": null }, { "type": "text", "value": " et al.", "raw": " et al.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📅 Conference: ICLR, May 7-11, 2024 | Vienna, Austria 🇦🇹", "raw": "📅 Conference: ICLR, May 7-11, 2024 | Vienna, Austria 🇦🇹", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Paper: ", "raw": "🔗 Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2402.16107", "href": null, "resource": { "type": "paper", "id": "2402.16107", "discussionNum": null }, "url": "https://huggingface.co/papers/2402.16107", "code": null, "user": null, "label": "FuseChat: Knowledge Fusion of Chat Models (2402.16107)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Repository: ", "raw": "🔗 Repository: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/fanqiwan/FuseLLM", "href": 
"https://github.com/fanqiwan/FuseLLM", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔥 Models 🤖:", "raw": "🔥 Models 🤖:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1️⃣ FuseChat-7B-VaRM: ", "raw": "1️⃣ FuseChat-7B-VaRM: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/FuseAI/FuseChat-7B-VaRM", "href": null, "resource": { "type": "model", "id": "FuseAI/FuseChat-7B-VaRM", "discussionNum": null }, "url": "https://huggingface.co/FuseAI/FuseChat-7B-VaRM", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2️⃣ FuseChat-7B-Slerp: ", "raw": "2️⃣ FuseChat-7B-Slerp: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/FuseAI/FuseChat-7B-Slerp", "href": null, "resource": { "type": "model", "id": "FuseAI/FuseChat-7B-Slerp", "discussionNum": null }, "url": "https://huggingface.co/FuseAI/FuseChat-7B-Slerp", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3️⃣ OpenChat-3.5-7B-Solar: ", "raw": "3️⃣ OpenChat-3.5-7B-Solar: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/FuseAI/OpenChat-3.5-7B-Solar", "href": null, "resource": { "type": "model", "id": "FuseAI/OpenChat-3.5-7B-Solar", "discussionNum": null }, "url": "https://huggingface.co/FuseAI/OpenChat-3.5-7B-Solar", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4️⃣ FuseChat-7B-TA: ", "raw": "4️⃣ FuseChat-7B-TA: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/FuseAI/FuseChat-7B-TA", "href": null, "resource": { "type": "model", "id": "FuseAI/FuseChat-7B-TA", "discussionNum": null }, "url": "https://huggingface.co/FuseAI/FuseChat-7B-TA", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "5️⃣ OpenChat-3.5-7B-Mixtral: ", "raw": "5️⃣ OpenChat-3.5-7B-Mixtral: ", "href": null, "resource": null, "url": null, 
"code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/FuseAI/OpenChat-3.5-7B-Mixtral", "href": null, "resource": { "type": "model", "id": "FuseAI/OpenChat-3.5-7B-Mixtral", "discussionNum": null }, "url": "https://huggingface.co/FuseAI/OpenChat-3.5-7B-Mixtral", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📚 More Papers: more cutting-edge research presented at other conferences in the ", "raw": "📚 More Papers: more cutting-edge research presented at other conferences in the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "href": null, "resource": { "type": "space", "id": "DmitryRyumin/NewEraAI-Papers", "discussionNum": null }, "url": "https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " curated by ", "raw": " curated by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@DmitryRyumin", "href": null, "resource": null, "url": null, "code": null, "user": "DmitryRyumin", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔍 Keywords: #FuseChat #ChatModels #KnowledgeFusion #ICLR2024 #AI #Innovation #FuseLLM", "raw": "🔍 Keywords: #FuseChat #ChatModels #KnowledgeFusion #ICLR2024 #AI #Innovation #FuseLLM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🚀🔥🌟 New Research Alert - ICLR 2024! 🌟🔥🚀 📄 Title: FuseChat: Revolutionizing Chat Models Fusion 🌟🚀 👥 Authors: @Wanfq, @passerqxj et al. 📅 Conference: ICLR, May 7-11, 2024 | Vienna, Austria 🇦🇹 🔗 Paper: https://huggingface.co/papers/2402.16107 🔗 Repository: https://github.com/fanqiwan/FuseLLM 🔥 Models 🤖: 1️⃣ FuseChat-7B-VaRM: https://huggingface.co/FuseAI/FuseChat-7B-VaRM 2️⃣ FuseChat-7B-Slerp: https://huggingface.co/FuseAI/FuseChat-7B-Slerp 3️⃣ OpenChat-3.5-7B-Solar: https://huggingface.co/FuseAI/OpenChat-3.5-7B-Solar 4️⃣ FuseChat-7B-TA: https://huggingface.co/FuseAI/FuseChat-7B-TA 5️⃣ OpenChat-3.5-7B-Mixtral: https://huggingface.co/FuseAI/OpenChat-3.5-7B-Mixtral 📚 More Papers: Explore more cutting-edge research presented at other conferences in the https://huggingface.co/spaces/DmitryRyumin/NewEraAI-Papers curated by @DmitryRyumin 🔍 Keywords: #FuseChat #ChatModels #KnowledgeFusion #ICLR2024 #AI #Innovation #FuseLLM
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/rbnUX7Pd1CtfuJ3xKnGWr.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/ie-jHlxw8TPA6p1lvBXVb.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6493306970d925ae80523a53/oIP-vnUlMeqc1J9OHfQVo.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/nRCxbVng_PPBqKd-Z3KVc.jpeg", "fullname": "Dmitry Ryumin", "name": "DmitryRyumin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 377 }, { "avatarUrl": "/avatars/8a387036758b2f7fc7d7529dea206669.svg", "fullname": "Xiaojun Quan", "name": "passerqxj", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62ecbffd99112e99c5f7fded/U6iXAJbpm2vaC5qksEPiH.png", "fullname": "Fanqi Wan", "name": "Wanfq", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 15 } ]
[ { "reaction": "❤️", "users": [ "osanseviero", "tofuCheng", "vtiyyal1", "Hoioi", "danielhanchen", "Concor", "sbrandeis" ], "count": 7 }, { "reaction": "👍", "users": [ "osanseviero", "Hoioi", "raidhon" ], "count": 3 } ]
2024-02-27T10:38:53.000Z
2024-03-07T22:35:06.695Z
[]
/posts/DmitryRyumin/872583609664849
42
0
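The post above lists five FuseChat checkpoints hosted on the Hub. As a minimal sketch (not an official FuseAI example), the snippet below shows how one of them could be tried locally with the standard transformers causal-LM API; the model ID comes from the post, while the assumption that the repo ships a chat template, the fp16 memory estimate, and the prompt text are illustrative choices of this sketch.

```python
# Minimal sketch: loading a FuseChat checkpoint listed in the post above.
# Assumes the repo follows the standard causal-LM layout and provides a chat template.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "FuseAI/FuseChat-7B-VaRM"  # any of the five listed repos should load the same way

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # a 7B model roughly fits on a single ~16 GB GPU in fp16
    device_map="auto",          # requires the accelerate package
)

messages = [{"role": "user", "content": "Explain knowledge fusion of chat models in two sentences."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Generate a short completion and decode only the newly generated tokens.
output_ids = model.generate(input_ids, max_new_tokens=256, do_sample=False)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```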