slug: stringlengths 15-15
content: listlengths 1-129
rawContent: stringlengths 1-2k
author: dict
attachments: listlengths 0-49
mentions: listlengths 0-49
reactions: listlengths 0-12
publishedAt: stringlengths 24-24
updatedAt: stringlengths 24-24
commentators: listlengths 0-52
url: stringlengths 25-46
totalUniqueImpressions: int64 1-42.1k
numComments: int64 0-621
969545252460488
[ { "type": "text", "value": "Want to read curated list of papers by ", "raw": "Want to read curated list of papers by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@akhaliq", "href": null, "resource": null, "url": null, "code": null, "user": "akhaliq", "label": null, "lang": null }, { "type": "text", "value": " in your mail box? ", "raw": " in your mail box? ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thanks to the API provided by Hugging Face, I made a simple GitHub Action based newsletter bot to send out 🤗 Daily Papers. Check out the attached video clip to get a sense of what it is!", "raw": "Thanks to the API provided by Hugging Face, I made a simple GitHub Action based newsletter bot to send out 🤗 Daily Papers. Check out the attached video clip to get a sense of what it is!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Internally, it leverages Gemini API to assign tags for each paper, and all papers are archived by tags and batches. Of course, you can directly go to the papers' pages from your mail box to check out the full paper!", "raw": "Internally, it leverages Gemini API to assign tags for each paper, and all papers are archived by tags and batches. Of course, you can directly go to the papers' pages from your mail box to check out the full paper!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Since everything is automated, GitHub Action and Gemini API are free, and the subscription management is free via Google Groups, this newsletter bot is entirely free. Furthermore, if you wish, you could fork the project for your own newsletter service.", "raw": "Since everything is automated, GitHub Action and Gemini API are free, and the subscription management is free via Google Groups, this newsletter bot is entirely free. Furthermore, if you wish, you could fork the project for your own newsletter service.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "subscription: ", "raw": "subscription: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://groups.google.com/g/hf-daily-paper-newsletter", "href": "https://groups.google.com/g/hf-daily-paper-newsletter", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "project repo: ", "raw": "project repo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/deep-diver/hf-daily-paper-newsletter", "href": "https://github.com/deep-diver/hf-daily-paper-newsletter", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In the next step, I will experimentally add auto translation (to Korean) feature for every papers.", "raw": "In the next step, I will experimentally add auto translation (to Korean) feature for every papers.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Want to read curated list of papers by @akhaliq in your mail box? Thanks to the API provided by Hugging Face, I made a simple GitHub Action based newsletter bot to send out 🤗 Daily Papers. Check out the attached video clip to get a sense of what it is! Internally, it leverages Gemini API to assign tags for each paper, and all papers are archived by tags and batches. Of course, you can directly go to the papers' pages from your mail box to check out the full paper! Since everything is automated, GitHub Action and Gemini API are free, and the subscription management is free via Google Groups, this newsletter bot is entirely free. Furthermore, if you wish, you could fork the project for your own newsletter service. subscription: https://groups.google.com/g/hf-daily-paper-newsletter project repo: https://github.com/deep-diver/hf-daily-paper-newsletter In the next step, I will experimentally add auto translation (to Korean) feature for every papers.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659971187637-60d3b57ad7b174177faabd6e.jpeg", "fullname": "chansung park", "name": "chansung", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2695, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/60d3b57ad7b174177faabd6e/l5XQ46BGHPYeVTPNYFQgV.mp4" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205 } ]
[ { "reaction": "👍", "users": [ "chansung", "Dlbk", "lbourdois", "victor", "osanseviero", "julien-c", "Tonic", "akhaliq", "pierrci", "clem", "mishig", "merve", "dashfunnydashdash", "Moguiy", "AdinaY", "abidlabs", "rwightman", "veerpareek", "samusenps", "ysharma", "nbroad", "inmatrix", "Oliver-Guy" ], "count": 23 }, { "reaction": "❤️", "users": [ "osanseviero", "julien-c", "chansung", "Tonic", "akhaliq", "clem", "linoyts", "AdinaY", "mariagrandury", "samusenps", "ysharma" ], "count": 11 }, { "reaction": "🤯", "users": [ "julien-c", "radames", "akhaliq", "sbrandeis" ], "count": 4 }, { "reaction": "🤗", "users": [ "Tonic", "akhaliq" ], "count": 2 } ]
2024-01-15T00:47:21.000Z
2024-07-13T05:58:06.923Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false } ]
/posts/chansung/969545252460488
222
4
332670179414576
[ { "type": "text", "value": "🙋🏻‍♂️Hey there folks ,", "raw": "🙋🏻‍♂️Hey there folks ,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "i wanted to share with you a really cool new organisation called ", "raw": "i wanted to share with you a really cool new organisation called ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/lowres", "href": "https://huggingface.co/lowres", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In just one week it has gathered almost 150 members ! ", "raw": "In just one week it has gathered almost 150 members ! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check them out if you love anime , SDLX, LORAs and cool datasets.", "raw": "Check them out if you love anime , SDLX, LORAs and cool datasets.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "can we make this one reach 200 members? 🚀", "raw": "can we make this one reach 200 members? 🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🙋🏻‍♂️Hey there folks , i wanted to share with you a really cool new organisation called https://huggingface.co/lowres In just one week it has gathered almost 150 members ! Check them out if you love anime , SDLX, LORAs and cool datasets. can we make this one reach 200 members? 🚀
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62a3bb1cd0d8c2c2169f0b88/LzbdDiFgSyyipNr1i2-tU.png" } ]
[]
[ { "reaction": "❤️", "users": [ "not-lain", "julien-c", "clem", "merve", "s3nh", "akashicmarga", "AdinaY", "radames", "samusenps", "ppaon", "sbrandeis" ], "count": 11 }, { "reaction": "👍", "users": [ "victor", "not-lain", "AdinaY", "samusenps" ], "count": 4 }, { "reaction": "🤗", "users": [ "not-lain", "samusenps" ], "count": 2 } ]
2024-01-14T22:51:02.000Z
2024-07-13T06:01:19.766Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1608146735109-5fcfb7c407408029ba3577e2.png", "fullname": "Simon Brandeis", "name": "sbrandeis", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 116, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6527e89a8808d80ccff88b7a/CuGNmF1Et8KMQ0mCd1NEJ.jpeg", "fullname": "Lain", "name": "not-lain", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 941, "isFollowing": false } ]
/posts/Tonic/332670179414576
11
4
559421915278972
[ { "type": "text", "value": "🤦🏻‍♂️well, day before yesterday i was so happy about **gpuzero** that i made a bunch of demos : ", "raw": "🤦🏻‍♂️well, day before yesterday i was so happy about **gpuzero** that i made a bunch of demos : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/posts/Tonic/802671427380916", "href": "https://huggingface.co/posts/Tonic/802671427380916", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- one for YI-200K , but it actually doesnt quite fit on a GPUZero... U_U", "raw": "- one for YI-200K , but it actually doesnt quite fit on a GPUZero... U_U", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- one for SDXL style align, but omg i didnt even realize at the time it wasnt my demo of it (lol)", "raw": "- one for SDXL style align, but omg i didnt even realize at the time it wasnt my demo of it (lol)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- one for texify (which works great btw, keep an eye on texify, it's about to blow up... in a couple of months!)", "raw": "- one for texify (which works great btw, keep an eye on texify, it's about to blow up... in a couple of months!)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "so yeah, i ran back and tried to get my demos working at least for sdxl which i love , but i simply couldnt get the CPU stuff working, or the refactored code working. no wonder i was thinking \"wow this is so easy\" on ", "raw": "so yeah, i ran back and tried to get my demos working at least for sdxl which i love , but i simply couldnt get the CPU stuff working, or the refactored code working. no wonder i was thinking \"wow this is so easy\" on ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@osanseviero", "href": null, "resource": null, "url": null, "code": null, "user": "osanseviero", "label": null, "lang": null }, { "type": "text", "value": " 's demo : yeah , it's not my code that's why it works 😅🙏🏻", "raw": " 's demo : yeah , it's not my code that's why it works 😅🙏🏻", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "anyway spent the day unsuccessfully experimenting, but starting tomorrow i'll try to serve some cool and overlooked models so 🤗huggingface appreciators can try them out 🚀", "raw": "anyway spent the day unsuccessfully experimenting, but starting tomorrow i'll try to serve some cool and overlooked models so 🤗huggingface appreciators can try them out 🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🤦🏻‍♂️well, day before yesterday i was so happy about **gpuzero** that i made a bunch of demos : https://huggingface.co/posts/Tonic/802671427380916 - one for YI-200K , but it actually doesnt quite fit on a GPUZero... U_U - one for SDXL style align, but omg i didnt even realize at the time it wasnt my demo of it (lol) - one for texify (which works great btw, keep an eye on texify, it's about to blow up... in a couple of months!) so yeah, i ran back and tried to get my demos working at least for sdxl which i love , but i simply couldnt get the CPU stuff working, or the refactored code working. no wonder i was thinking "wow this is so easy" on @osanseviero 's demo : yeah , it's not my code that's why it works 😅🙏🏻 anyway spent the day unsuccessfully experimenting, but starting tomorrow i'll try to serve some cool and overlooked models so 🤗huggingface appreciators can try them out 🚀
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868 } ]
[ { "reaction": "❤️", "users": [ "osanseviero", "hogiahien", "victor", "clem", "julien-c", "jarvisx17", "Solshine" ], "count": 7 } ]
2024-01-12T23:29:38.000Z
2024-01-16T17:29:17.303Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false } ]
/posts/Tonic/559421915278972
8
1
221935563733891
[ { "type": "text", "value": "Shocking: 2/3 of LLMs fail at 2K context length", "raw": "Shocking: 2/3 of LLMs fail at 2K context length", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "code_your_own_ai makes a great vlog about mostly LLM related AI content. ", "raw": "code_your_own_ai makes a great vlog about mostly LLM related AI content. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "As I watched the video below, I wondered about current best practices on LLM evaluation. We have benchmarks, we have sota LLMs evaluating LLMs, we have tools evaluating based on human comparison. ", "raw": "As I watched the video below, I wondered about current best practices on LLM evaluation. We have benchmarks, we have sota LLMs evaluating LLMs, we have tools evaluating based on human comparison. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Often, I hear, just play with the LLM for 15 mins to form an opinion. ", "raw": "Often, I hear, just play with the LLM for 15 mins to form an opinion. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "While I think for a specific use case and clear expectations, this could yield signal carrying experiences, I also see that one prompt is used to judge models. ", "raw": "While I think for a specific use case and clear expectations, this could yield signal carrying experiences, I also see that one prompt is used to judge models. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "While benchmarks have their weaknesses, and are by themselves not enough to judge model quality, I still think systematic methods that try to reduce various scientifically known errs should be the way forward, even for qualitative estimates. ", "raw": "While benchmarks have their weaknesses, and are by themselves not enough to judge model quality, I still think systematic methods that try to reduce various scientifically known errs should be the way forward, even for qualitative estimates. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What do you think? How can we make a public tool for judging models like lmsys/chatbot-arena-leaderboard help to leverage standards known in social science? ", "raw": "What do you think? How can we make a public tool for judging models like lmsys/chatbot-arena-leaderboard help to leverage standards known in social science? ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.youtube.com/watch?v=mWrivekFZMM", "href": "https://www.youtube.com/watch?v=mWrivekFZMM", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Shocking: 2/3 of LLMs fail at 2K context length code_your_own_ai makes a great vlog about mostly LLM related AI content. As I watched the video below, I wondered about current best practices on LLM evaluation. We have benchmarks, we have sota LLMs evaluating LLMs, we have tools evaluating based on human comparison. Often, I hear, just play with the LLM for 15 mins to form an opinion. While I think for a specific use case and clear expectations, this could yield signal carrying experiences, I also see that one prompt is used to judge models. While benchmarks have their weaknesses, and are by themselves not enough to judge model quality, I still think systematic methods that try to reduce various scientifically known errs should be the way forward, even for qualitative estimates. What do you think? How can we make a public tool for judging models like lmsys/chatbot-arena-leaderboard help to leverage standards known in social science? https://www.youtube.com/watch?v=mWrivekFZMM
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669551186189-63732ebbbd81fae2b3aaf3fb.jpeg", "fullname": "Knut Jägersberg", "name": "KnutJaegersberg", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 238, "isFollowing": false }
[]
[]
[ { "reaction": "🤯", "users": [ "clem", "osanseviero", "julien-c", "pierrci", "NameeO", "radames", "AdinaY", "Tanvir1337", "ucalyptus", "frontenbrecher" ], "count": 10 }, { "reaction": "👍", "users": [ "samusenps", "trtm" ], "count": 2 } ]
2024-01-12T17:05:42.000Z
2024-01-19T18:07:57.079Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1673563939929-63c074053bdc86f81088aae2.png", "fullname": "Namee Oberst", "name": "NameeO", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 6, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669551186189-63732ebbbd81fae2b3aaf3fb.jpeg", "fullname": "Knut Jägersberg", "name": "KnutJaegersberg", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 238, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/630fdd96a119d49bc1e770d5/OpU95S4a8hkM8OUCZq79R.jpeg", "fullname": "Adam", "name": "adamo1139", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 38, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false } ]
/posts/KnutJaegersberg/221935563733891
856
6
885363857514207
[ { "type": "text", "value": "Here is my selection of papers for today (12 Jan)", "raw": "Here is my selection of papers for today (12 Jan)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/papers", "href": "https://huggingface.co/papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "PALP: Prompt Aligned Personalization of Text-to-Image Models", "raw": "PALP: Prompt Aligned Personalization of Text-to-Image Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Object-Centric Diffusion for Efficient Video Editing", "raw": "Object-Centric Diffusion for Efficient Video Editing", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "TRIPS: Trilinear Point Splatting for Real-Time Radiance Field Rendering", "raw": "TRIPS: Trilinear Point Splatting for Real-Time Radiance Field Rendering", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Diffusion Priors for Dynamic View Synthesis from Monocular Videos", "raw": "Diffusion Priors for Dynamic View Synthesis from Monocular Videos", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Parrot: Pareto-optimal Multi-Reward Reinforcement Learning Framework for Text-to-Image Generation", "raw": "Parrot: Pareto-optimal Multi-Reward Reinforcement Learning Framework for Text-to-Image Generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "TOFU: A Task of Fictitious Unlearning for LLMs", "raw": "TOFU: A Task of Fictitious Unlearning for LLMs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Patchscope: A Unifying Framework for Inspecting Hidden Representations of Language Models", "raw": "Patchscope: A Unifying Framework for Inspecting Hidden Representations of Language Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Secrets of RLHF in Large Language Models Part II: Reward Modeling", "raw": "Secrets of RLHF in Large Language Models Part II: Reward Modeling", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "LEGO:Language Enhanced Multi-modal Grounding Model", "raw": "LEGO:Language Enhanced Multi-modal Grounding Model", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "DeepSeekMoE: Towards Ultimate Expert Specialization in Mixture-of-Experts Language Models", "raw": "DeepSeekMoE: Towards Ultimate Expert Specialization in Mixture-of-Experts Language Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Tuning LLMs with Contrastive Alignment Instructions for Machine Translation in Unseen, Low-resource Languages", "raw": "Tuning LLMs with Contrastive Alignment Instructions for Machine Translation in Unseen, Low-resource Languages", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A Shocking Amount of the Web is Machine Translated: Insights from Multi-Way Parallelism", "raw": "A Shocking Amount of the Web is Machine Translated: Insights from Multi-Way Parallelism", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Towards Conversational Diagnostic AI", "raw": "Towards Conversational Diagnostic AI", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Transformers are Multi-State RNNs", "raw": "Transformers are Multi-State RNNs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Sleeper Agents: Training Deceptive LLMs that Persist Through Safety Training", "raw": "Sleeper Agents: Training Deceptive LLMs that Persist Through Safety Training", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Distilling Vision-Language Models on Millions of Videos", "raw": "Distilling Vision-Language Models on Millions of Videos", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Efficient LLM inference solution on Intel GPU", "raw": "Efficient LLM inference solution on Intel GPU", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "TrustLLM: Trustworthiness in Large Language Models", "raw": "TrustLLM: Trustworthiness in Large Language Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Here is my selection of papers for today (12 Jan) https://huggingface.co/papers PALP: Prompt Aligned Personalization of Text-to-Image Models Object-Centric Diffusion for Efficient Video Editing TRIPS: Trilinear Point Splatting for Real-Time Radiance Field Rendering Diffusion Priors for Dynamic View Synthesis from Monocular Videos Parrot: Pareto-optimal Multi-Reward Reinforcement Learning Framework for Text-to-Image Generation TOFU: A Task of Fictitious Unlearning for LLMs Patchscope: A Unifying Framework for Inspecting Hidden Representations of Language Models Secrets of RLHF in Large Language Models Part II: Reward Modeling LEGO:Language Enhanced Multi-modal Grounding Model DeepSeekMoE: Towards Ultimate Expert Specialization in Mixture-of-Experts Language Models Tuning LLMs with Contrastive Alignment Instructions for Machine Translation in Unseen, Low-resource Languages A Shocking Amount of the Web is Machine Translated: Insights from Multi-Way Parallelism Towards Conversational Diagnostic AI Transformers are Multi-State RNNs Sleeper Agents: Training Deceptive LLMs that Persist Through Safety Training Distilling Vision-Language Models on Millions of Videos Efficient LLM inference solution on Intel GPU TrustLLM: Trustworthiness in Large Language Models
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/u5jhRCb4nfAzbrO8IGh1u.png" } ]
[]
[ { "reaction": "❤️", "users": [ "clem", "osanseviero", "namtran", "victor", "chansung", "ronzyponz", "davanstrien", "Tonic", "sackoh", "Dlbk", "samusenps", "AdinaY", "alielfilali01", "iarata" ], "count": 14 }, { "reaction": "🤗", "users": [ "samusenps", "rwightman" ], "count": 2 } ]
2024-01-12T14:44:25.000Z
2024-01-12T14:44:25.072Z
[]
/posts/akhaliq/885363857514207
14
0
688106937901639
[ { "type": "text", "value": "Let's play a little game, how would you build the Rabbit R1 with open source tech? Here is my stack:", "raw": "Let's play a little game, how would you build the Rabbit R1 with open source tech? Here is my stack:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - ", "raw": " - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/openai/whisper-small", "href": null, "resource": { "type": "model", "id": "openai/whisper-small", "discussionNum": null }, "url": "https://huggingface.co/openai/whisper-small", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " for awesome Speech-to-Text with low latency", "raw": " for awesome Speech-to-Text with low latency", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - ", "raw": " - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1", "href": null, "resource": { "type": "model", "id": "mistralai/Mixtral-8x7B-Instruct-v0.1", "discussionNum": null }, "url": "https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " for an awesome super powerful LLM Brain", "raw": " for an awesome super powerful LLM Brain", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - ", "raw": " - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/coqui/XTTS-v2", "href": null, "resource": { "type": "model", "id": "coqui/XTTS-v2", "discussionNum": null }, "url": "https://huggingface.co/coqui/XTTS-v2", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " for a nice and clean voice", "raw": " for a nice and clean voice", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Which stack will you personally choose?", "raw": "Which stack will you personally choose?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Let's play a little game, how would you build the Rabbit R1 with open source tech? Here is my stack: - https://huggingface.co/openai/whisper-small for awesome Speech-to-Text with low latency - https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1 for an awesome super powerful LLM Brain - https://huggingface.co/coqui/XTTS-v2 for a nice and clean voice Which stack will you personally choose?
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2607, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f17f0a0925b9863e28ad517/i5yfTpngTiyYOfGvcppBB.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f17f0a0925b9863e28ad517/YiIMkYhB9xP7RCnrJzpHn.png" } ]
[]
[ { "reaction": "👍", "users": [ "kramp", "osanseviero", "mwz", "mohitsha", "abhishek", "not-lain", "clem", "jahnviatls", "n4ze3m", "ManOhMan", "taufiqdp", "Sandeepa", "KnutJaegersberg", "eek", "lunarflu", "jonChengRX0", "RalphX1", "m-ric", "timqian", "rbiswasfc", "julien-c", "mrfakename", "marcsun13", "radames", "johannhartmann", "Halfshadow", "Dlbk", "ucalyptus", "mishig", "Moguiy", "luizgsbraz", "10MinuteMan", "lulzx", "jesusoctavioas", "cashion" ], "count": 35 }, { "reaction": "❤️", "users": [ "merve", "clem", "yezzer", "vedalken", "VictorSanh", "KnutJaegersberg", "auditor", "lunarflu", "BrigitteTousi", "RalphX1", "loubnabnl", "julien-c", "hensam92", "mrfakename", "radames", "Dlbk", "mishig", "clefourrier", "sosoai", "SvCy", "Docfile", "neiltron", "10MinuteMan", "behlock", "sbrandeis" ], "count": 25 }, { "reaction": "🤯", "users": [ "clem", "RalphX1", "julien-c", "pierrci", "mrfakename" ], "count": 5 } ]
2024-01-12T10:24:17.000Z
2024-01-30T16:02:16.037Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659971187637-60d3b57ad7b174177faabd6e.jpeg", "fullname": "chansung park", "name": "chansung", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2695, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2607, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1649681653581-5f7fbd813e94f16a85448745.jpeg", "fullname": "Sayak Paul", "name": "sayakpaul", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 459, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648966381588-6064e095abd8d3692e3e2ed6.jpeg", "fullname": "Radamés Ajna", "name": "radames", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2401, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857746553-5df7e9e5da6d0311fd3d53f9.jpeg", "fullname": "Thomas Wolf", "name": "thomwolf", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 704, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63148d3b996c52bf0142cdbe/ec7pRNrQQy70d-11FiACq.jpeg", "fullname": "Georgi Gerganov", "name": "ggerganov", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 379, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }, { "avatarUrl": "/avatars/c824379e62a403eab4c7d1d8cce93f76.svg", "fullname": "Samir R.", "name": "sr5434", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }, { "avatarUrl": "/avatars/be9748f6d237a8dae26ace96c2cb4fb5.svg", "fullname": "John park", "name": "rippertnt", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/652f19a79b4774d3a4dcc277/Kdt6musxSMbttr5sXnY37.jpeg", "fullname": "Johnny 5", "name": "10MinuteMan", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/victor/688106937901639
131
19
895251035952062
[ { "type": "text", "value": "Sharing a super-fast segmentation model today 💨 ", "raw": "Sharing a super-fast segmentation model today 💨 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "SlimSAM is pruned-distilled version of SAM model, it's (up-to 8.6x) faster and smaller yet very powerful! ⚡️ ", "raw": "SlimSAM is pruned-distilled version of SAM model, it's (up-to 8.6x) faster and smaller yet very powerful! ⚡️ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It has the same architecture as SAM, meaning you can use the 🤗 transformers code for SAM on SlimSAM models ⬇️ (yes only 3 lines of code!)", "raw": "It has the same architecture as SAM, meaning you can use the 🤗 transformers code for SAM on SlimSAM models ⬇️ (yes only 3 lines of code!)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```python\nfrom transformers import pipeline\ngenerator = pipeline(model=\"nielsr/slimsam-50-uniform\", task=\"mask-generation\")\noutputs = generator(image)\n```", "href": null, "resource": null, "url": null, "code": "from transformers import pipeline\ngenerator = pipeline(model=\"nielsr/slimsam-50-uniform\", task=\"mask-generation\")\noutputs = generator(image)", "user": null, "label": null, "lang": "python" }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Lastly, I have built an app for you to compare SlimSAM and SAM outputs ", "raw": "Lastly, I have built an app for you to compare SlimSAM and SAM outputs ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/merve/slimsam", "href": null, "resource": { "type": "space", "id": "merve/slimsam", "discussionNum": null }, "url": "https://huggingface.co/spaces/merve/slimsam", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Sharing a super-fast segmentation model today 💨 SlimSAM is pruned-distilled version of SAM model, it's (up-to 8.6x) faster and smaller yet very powerful! ⚡️ It has the same architecture as SAM, meaning you can use the 🤗 transformers code for SAM on SlimSAM models ⬇️ (yes only 3 lines of code!) ```python from transformers import pipeline generator = pipeline(model="nielsr/slimsam-50-uniform", task="mask-generation") outputs = generator(image) ``` Lastly, I have built an app for you to compare SlimSAM and SAM outputs https://huggingface.co/spaces/merve/slimsam
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/Rr2QU5nQWEoddzEX7SLTQ.mp4" } ]
[]
[ { "reaction": "❤️", "users": [ "osanseviero", "linoyts", "abhishek", "clem", "lunarflu", "BrigitteTousi", "marcsun13", "julien-c", "Pclanglais", "Dlbk", "samusenps", "Tonic", "sbarman25", "pcuenq", "afrideva", "sbrandeis" ], "count": 16 } ]
2024-01-12T10:08:10.000Z
2024-01-12T10:08:10.110Z
[]
/posts/merve/895251035952062
29
0
956535621271526
[ { "type": "text", "value": "As a GPU poor, I found a nice open source project.", "raw": "As a GPU poor, I found a nice open source project.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "dstackai is the perfect open source project for GPU poor. Simply specify resource requirements (GPU RAM, spot, ...), then will suggest the cheapest options among the popular GPU cloud providers (AWS, GCP, Azure, Lambda Labs, TensorDock, and vast.ai)", "raw": "dstackai is the perfect open source project for GPU poor. Simply specify resource requirements (GPU RAM, spot, ...), then will suggest the cheapest options among the popular GPU cloud providers (AWS, GCP, Azure, Lambda Labs, TensorDock, and vast.ai)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Provision VM instances in 3 different use cases. These are the essential for any ML projects.", "raw": "Provision VM instances in 3 different use cases. These are the essential for any ML projects.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Dev: connect provisioned VM instance to your fav IDE (including Jupyter)", "raw": "- Dev: connect provisioned VM instance to your fav IDE (including Jupyter)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Task: run experiments (training, fine-tuning, ...) via SSH ", "raw": "- Task: run experiments (training, fine-tuning, ...) via SSH ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Service: run your model in production via HTTPS", "raw": "- Service: run your model in production via HTTPS", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "dstack is 100% open source, but you need to have your own accounts for each GPU cloud provider, enough GPU quota, configure credentials, etc., all by yourself. Luckily, dstack will announce dstack cloud which let you not worried about all the hassles. The price is almost as same as you directly connect to each cloud with your account.", "raw": "dstack is 100% open source, but you need to have your own accounts for each GPU cloud provider, enough GPU quota, configure credentials, etc., all by yourself. Luckily, dstack will announce dstack cloud which let you not worried about all the hassles. The price is almost as same as you directly connect to each cloud with your account.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The attached code snippet shows you how to provision a Mistral-7B model in Text Generation Inference(TGI) on the cheapest VM instance (of having 24GB of VRAM). Then you get the HTTPS connection to it, and play with it as usual with TGI client library as attached in the second code snippet.", "raw": "The attached code snippet shows you how to provision a Mistral-7B model in Text Generation Inference(TGI) on the cheapest VM instance (of having 24GB of VRAM). Then you get the HTTPS connection to it, and play with it as usual with TGI client library as attached in the second code snippet.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If you want to learn more about dstack, check out the official website. Without GPU sponsors, as an individual open source contributor in ML, this kind of project is pretty important. ", "raw": "If you want to learn more about dstack, check out the official website. Without GPU sponsors, as an individual open source contributor in ML, this kind of project is pretty important. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ": ", "raw": ": ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://dstack.ai/", "href": "https://dstack.ai/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If you are looking for an alternative, there is SkyPilot project as well", "raw": "If you are looking for an alternative, there is SkyPilot project as well", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ": ", "raw": ": ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/skypilot-org/skypilot", "href": "https://github.com/skypilot-org/skypilot", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
As a GPU-poor person, I found a nice open source project. dstack is the perfect open source project for the GPU poor. Simply specify your resource requirements (GPU RAM, spot, ...), and it will suggest the cheapest options among the popular GPU cloud providers (AWS, GCP, Azure, Lambda Labs, TensorDock, and vast.ai). It provisions VM instances for 3 different use cases, which are essential for any ML project. - Dev: connect the provisioned VM instance to your favorite IDE (including Jupyter) - Task: run experiments (training, fine-tuning, ...) via SSH - Service: run your model in production via HTTPS dstack is 100% open source, but you need to have your own accounts with each GPU cloud provider, enough GPU quota, configured credentials, etc., all by yourself. Luckily, dstack will announce dstack cloud, which takes all of those hassles off your hands. The price is almost the same as connecting to each cloud directly with your own account. The attached code snippet shows how to provision a Mistral-7B model in Text Generation Inference (TGI) on the cheapest VM instance with 24GB of VRAM. You then get an HTTPS connection to it and can play with it as usual with the TGI client library, as shown in the second code snippet. If you want to learn more about dstack, check out the official website. Without GPU sponsors, this kind of project is pretty important for an individual open source contributor in ML. : https://dstack.ai/ If you are looking for an alternative, there is the SkyPilot project as well : https://github.com/skypilot-org/skypilot
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659971187637-60d3b57ad7b174177faabd6e.jpeg", "fullname": "chansung park", "name": "chansung", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2695, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60d3b57ad7b174177faabd6e/KcDR88F3cpNBL3qVCahTG.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60d3b57ad7b174177faabd6e/TgpKpWcQ3TPw4gHm42dYE.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60d3b57ad7b174177faabd6e/YqfT0yKuV_cURMltuV2rx.png" } ]
[]
[ { "reaction": "❤️", "users": [ "peterschmidt85", "chansung", "osanseviero", "lysandre", "merve", "victor", "sayakpaul", "linoyts", "abhishek", "abidlabs", "KnutJaegersberg", "auditor", "johannhartmann", "julien-c", "radames", "bitdeep", "samusenps", "mishig", "gblazex", "juyongjiang", "greencookies", "akhaliq", "Citaman", "sbrandeis" ], "count": 24 }, { "reaction": "🤗", "users": [ "peterschmidt85", "lysandre", "merve", "KnutJaegersberg", "julien-c", "pierrci", "samusenps", "juyongjiang", "akhaliq" ], "count": 9 }, { "reaction": "👍", "users": [ "bitdeep", "juyongjiang", "akhaliq" ], "count": 3 } ]
2024-01-12T07:23:56.000Z
2024-01-15T06:28:35.631Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1649681653581-5f7fbd813e94f16a85448745.jpeg", "fullname": "Sayak Paul", "name": "sayakpaul", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 459, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659971187637-60d3b57ad7b174177faabd6e.jpeg", "fullname": "chansung park", "name": "chansung", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2695, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false } ]
/posts/chansung/956535621271526
355
7
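The post above refers to two attached code snippets (a dstack service definition and a TGI client call) that are only included as images. As a hedged sketch of the second part, querying a TGI endpoint such as one provisioned through a dstack service with the `text_generation` client could look roughly like this; the endpoint URL and bearer token below are placeholders, not values from the post or its attachments.

```
# Rough sketch of calling a TGI endpoint with the `text_generation` client.
# The base URL and token are placeholders (assumptions), not values taken
# from the post or its attached images.
from text_generation import Client

client = Client(
    base_url="https://<your-dstack-service-endpoint>",  # hypothetical gateway URL
    headers={"Authorization": "Bearer <your-token>"},    # only if your gateway requires auth
    timeout=60,
)

response = client.generate(
    "What is gradient checkpointing?",
    max_new_tokens=128,
    temperature=0.7,
)
print(response.generated_text)
```

The same client also exposes `generate_stream` for token-by-token streaming, which is handy when the service sits behind a slow spot instance.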
726305084734231
[ { "type": "resource", "value": null, "raw": "https://huggingface.co/fblgit/UNA-dolphin-2.6-mistral-7b-dpo-laser", "href": null, "resource": { "type": "model", "id": "fblgit/UNA-dolphin-2.6-mistral-7b-dpo-laser", "discussionNum": null }, "url": "https://huggingface.co/fblgit/UNA-dolphin-2.6-mistral-7b-dpo-laser", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "RE-Introducing, some of the best SFT model, he legend: DOLPHIN. This model is very special, a LASER-UNA model: UNA-dolphin-2.6-mistral-7b-dpo-laser", "raw": "RE-Introducing, some of the best SFT model, he legend: DOLPHIN. This model is very special, a LASER-UNA model: UNA-dolphin-2.6-mistral-7b-dpo-laser", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@fblgit", "href": null, "resource": null, "url": null, "code": null, "user": "fblgit", "label": null, "lang": null }, { "type": "text", "value": " in collaboration with ", "raw": " in collaboration with ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@fernandofernandes", "href": null, "resource": null, "url": null, "code": null, "user": "fernandofernandes", "label": null, "lang": null }, { "type": "text", "value": " and ", "raw": " and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@ehartford", "href": null, "resource": null, "url": null, "code": null, "user": "ehartford", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
https://huggingface.co/fblgit/UNA-dolphin-2.6-mistral-7b-dpo-laser RE-Introducing one of the best SFT models, the legend: DOLPHIN. This model is very special, a LASER-UNA model: UNA-dolphin-2.6-mistral-7b-dpo-laser. @fblgit in collaboration with @fernandofernandes and @ehartford
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63111b2d88942700629f5771/u2a9y-yx6TG0N31OhMSHI.png", "fullname": "Eric Hartford", "name": "ehartford", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3287, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63111b2d88942700629f5771/u2a9y-yx6TG0N31OhMSHI.png", "fullname": "Eric Hartford", "name": "ehartford", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3287 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6401c8c9f98fbc64bcd7dca1/MOSgc_mPbfUZ-354osy1v.png", "fullname": "FBL", "name": "fblgit", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 228 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/646e57a5cb6ea6e6b6df1ad4/PlGhM2SUynFBUdYAylaZK.jpeg", "fullname": "Fernando Fernandes Neto", "name": "fernandofernandes", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 47 } ]
[ { "reaction": "👍", "users": [ "fblgit", "osanseviero", "den0620", "radames", "lysandre", "abhishek", "victor", "m-ric", "julien-c", "dvilasuero", "tshrjn", "robertpiosik", "pacman2473", "AtAndDev", "PatrickZhang16" ], "count": 15 } ]
2024-01-11T23:10:26.000Z
2024-09-11T12:23:59.857Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62e54f0eae9d3f10acb95cb9/VAyk05hqB3OZWXEZW-B0q.png", "fullname": "mrfakename", "name": "mrfakename", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 969, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6401c8c9f98fbc64bcd7dca1/MOSgc_mPbfUZ-354osy1v.png", "fullname": "FBL", "name": "fblgit", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 228, "isFollowing": false }, { "avatarUrl": "/avatars/25aaae3e1f97f5b64f7974a26a8701c8.svg", "fullname": "maryam alem ", "name": "maryamlalem", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/ehartford/726305084734231
6,599
4
650756949738815
[ { "type": "text", "value": "This is my first post! Thanks to ", "raw": "This is my first post! Thanks to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@victor", "href": null, "resource": null, "url": null, "code": null, "user": "victor", "label": null, "lang": null }, { "type": "text", "value": " for adding me!", "raw": " for adding me!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
This is my first post! Thanks to @victor for adding me!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62e54f0eae9d3f10acb95cb9/VAyk05hqB3OZWXEZW-B0q.png", "fullname": "mrfakename", "name": "mrfakename", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 969, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2607 } ]
[ { "reaction": "🤝", "users": [ "lysandre", "victor", "sayakpaul", "abhishek", "clem", "julien-c", "Dlbk" ], "count": 7 }, { "reaction": "👍", "users": [ "mlabonne", "radames", "cbensimon", "julien-c" ], "count": 4 } ]
2024-01-11T22:35:40.000Z
2024-01-12T20:06:23.259Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false } ]
/posts/mrfakename/650756949738815
24
1
251479367780280
[ { "type": "text", "value": "Here is my selection of papers for today (11 Jan)", "raw": "Here is my selection of papers for today (11 Jan)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/papers", "href": "https://huggingface.co/papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "ANIM-400K: A Large-Scale Dataset for Automated End-To-End Dubbing of Video ", "raw": "ANIM-400K: A Large-Scale Dataset for Automated End-To-End Dubbing of Video ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2401.05314", "href": null, "resource": { "type": "paper", "id": "2401.05314", "discussionNum": null }, "url": "https://huggingface.co/papers/2401.05314", "code": null, "user": null, "label": "ANIM-400K: A Large-Scale Dataset for Automated End-To-End Dubbing of\n Video (2401.05314)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Score Distillation Sampling with Learned Manifold Corrective ", "raw": "Score Distillation Sampling with Learned Manifold Corrective ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2401.05293", "href": null, "resource": { "type": "paper", "id": "2401.05293", "discussionNum": null }, "url": "https://huggingface.co/papers/2401.05293", "code": null, "user": null, "label": "Score Distillation Sampling with Learned Manifold Corrective (2401.05293)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "InseRF: Text-Driven Generative Object Insertion in Neural 3D Scenes ", "raw": "InseRF: Text-Driven Generative Object Insertion in Neural 3D Scenes ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2401.05335", "href": null, "resource": { "type": "paper", "id": "2401.05335", "discussionNum": null }, "url": "https://huggingface.co/papers/2401.05335", "code": null, "user": null, "label": "InseRF: Text-Driven 
Generative Object Insertion in Neural 3D Scenes (2401.05335)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "PIXART-δ: Fast and Controllable Image Generation with Latent Consistency Models ", "raw": "PIXART-δ: Fast and Controllable Image Generation with Latent Consistency Models ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2401.05252", "href": null, "resource": { "type": "paper", "id": "2401.05252", "discussionNum": null }, "url": "https://huggingface.co/papers/2401.05252", "code": null, "user": null, "label": "PIXART-δ: Fast and Controllable Image Generation with Latent\n Consistency Models (2401.05252)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "URHand: Universal Relightable Hands ", "raw": "URHand: Universal Relightable Hands ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2401.05334", "href": null, "resource": { "type": "paper", "id": "2401.05334", "discussionNum": null }, "url": "https://huggingface.co/papers/2401.05334", "code": null, "user": null, "label": "URHand: Universal Relightable Hands (2401.05334)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The Impact of Reasoning Step Length on Large Language Models ", "raw": "The Impact of Reasoning Step Length on Large Language Models ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2401.04925", "href": null, "resource": { "type": "paper", "id": "2401.04925", "discussionNum": null }, "url": "https://huggingface.co/papers/2401.04925", "code": null, "user": null, "label": "The Impact of Reasoning Step Length on Large Language Models (2401.04925)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Bootstrapping LLM-based Task-Oriented Dialogue Agents via Self-Talk ", "raw": "Bootstrapping LLM-based Task-Oriented Dialogue Agents via Self-Talk ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2401.05033", "href": 
null, "resource": { "type": "paper", "id": "2401.05033", "discussionNum": null }, "url": "https://huggingface.co/papers/2401.05033", "code": null, "user": null, "label": "Bootstrapping LLM-based Task-Oriented Dialogue Agents via Self-Talk (2401.05033)", "lang": null } ]
Here is my selection of papers for today (11 Jan) https://huggingface.co/papers ANIM-400K: A Large-Scale Dataset for Automated End-To-End Dubbing of Video https://huggingface.co/papers/2401.05314 Score Distillation Sampling with Learned Manifold Corrective https://huggingface.co/papers/2401.05293 InseRF: Text-Driven Generative Object Insertion in Neural 3D Scenes https://huggingface.co/papers/2401.05335 PIXART-δ: Fast and Controllable Image Generation with Latent Consistency Models https://huggingface.co/papers/2401.05252 URHand: Universal Relightable Hands https://huggingface.co/papers/2401.05334 The Impact of Reasoning Step Length on Large Language Models https://huggingface.co/papers/2401.04925 Bootstrapping LLM-based Task-Oriented Dialogue Agents via Self-Talk https://huggingface.co/papers/2401.05033
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/-XTbJZcg6cUWeQnxSm5sn.png" } ]
[]
[ { "reaction": "👍", "users": [ "osanseviero", "clem", "KnutJaegersberg" ], "count": 3 }, { "reaction": "❤️", "users": [ "clem", "KnutJaegersberg", "diana9m" ], "count": 3 } ]
2024-01-11T14:55:30.000Z
2024-01-11T14:56:10.750Z
[]
/posts/akhaliq/251479367780280
9
0
802671427380916
[ { "type": "text", "value": " 🙋🏻‍♂️hey there folks , 🌟Tonic here", "raw": " 🙋🏻‍♂️hey there folks , 🌟Tonic here", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- just a 🛠️builder from 🗼Paris !", "raw": "- just a 🛠️builder from 🗼Paris !", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Everyone is making something special for their first post , so since i got access to **GPUZero** , well, my first post is about **GPUZero**", "raw": "Everyone is making something special for their first post , so since i got access to **GPUZero** , well, my first post is about **GPUZero**", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "### GPUZero is here !", "raw": "### GPUZero is here !", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This one's great for builders like me that are often making and serving models to their community.", "raw": "This one's great for builders like me that are often making and serving models to their community.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- demos get popular then fade away", "raw": "- demos get popular then fade away", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- they retain interest over the next three months as folks have questions", "raw": "- they retain interest over the next three months as folks have questions", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "**GPUZero** lets you serve demos to your community over time while optimizing for costs . 
", "raw": "**GPUZero** lets you serve demos to your community over time while optimizing for costs . ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Believe it or not it's actually impossible to pay for everything over a whole month if you have even one GPU running at a time. ", "raw": "Believe it or not it's actually impossible to pay for everything over a whole month if you have even one GPU running at a time. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I'm so excited for this because it lets me serve a complete stack of specialized models and to build with them too.", "raw": "I'm so excited for this because it lets me serve a complete stack of specialized models and to build with them too.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- all optimized for efficiency in dollar cost.", "raw": "- all optimized for efficiency in dollar cost.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "check out some demos that are available on GPUZero :", "raw": "check out some demos that are available on GPUZero :", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ", "raw": "- ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Tonic/marker-texify", "href": null, "resource": { "type": "space", "id": "Tonic/marker-texify", "discussionNum": null }, "url": "https://huggingface.co/spaces/Tonic/marker-texify", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " : this one is the first one i made it's for an image to latex formula model.", "raw": " : this one is the first one i made it's for an image to latex formula model.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ", "raw": "- ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": 
"https://huggingface.co/spaces/Tonic/YI-6B-200k", "href": "https://huggingface.co/spaces/Tonic/YI-6B-200k", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " : this one probably actually works better on GPUZero than on a standard A10, but dont take my word for it , try it out 🤗", "raw": " : this one probably actually works better on GPUZero than on a standard A10, but dont take my word for it , try it out 🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ", "raw": "- ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/spaces/Tonic/style-aligned_sdxl", "href": "https://huggingface.co/spaces/Tonic/style-aligned_sdxl", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " : this one was my greatest technical achievement, check the dates and times on it too, there's a backstory to this one so i'll maybe tell it in another post", "raw": " : this one was my greatest technical achievement, check the dates and times on it too, there's a backstory to this one so i'll maybe tell it in another post", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🙋🏻‍♂️ Hey there folks, 🌟 Tonic here - just a 🛠️ builder from 🗼 Paris! Everyone is making something special for their first post, so since I got access to **GPUZero**, well, my first post is about **GPUZero**. ### GPUZero is here! This one's great for builders like me who are often making and serving models to their community. - demos get popular then fade away - they retain interest over the next three months as folks have questions **GPUZero** lets you serve demos to your community over time while optimizing for cost. Believe it or not, it's practically impossible to pay for everything over a whole month if you have even one GPU running the whole time. I'm so excited about this because it lets me serve a complete stack of specialized models and build with them too - all optimized for dollar-cost efficiency. Check out some demos that are available on GPUZero: - https://huggingface.co/spaces/Tonic/marker-texify : this one is the first one I made; it's for an image-to-LaTeX formula model. - https://huggingface.co/spaces/Tonic/YI-6B-200k : this one probably actually works better on GPUZero than on a standard A10, but don't take my word for it, try it out 🤗 - https://huggingface.co/spaces/Tonic/style-aligned_sdxl : this one was my greatest technical achievement; check the dates and times on it too, there's a backstory to this one so I'll maybe tell it in another post
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62a3bb1cd0d8c2c2169f0b88/PcRUl-GVd3GGSkI1Jwaka.png" } ]
[]
[ { "reaction": "👍", "users": [ "osanseviero", "mlabonne", "lunarflu", "hogiahien", "merve", "victor", "julien-c", "radames", "bigbeachdaddy", "pcuenq", "samusenps", "Chunte", "sharad" ], "count": 13 }, { "reaction": "🤗", "users": [ "osanseviero", "lunarflu", "merve", "mrfakename", "julien-c", "bigbeachdaddy", "pcuenq" ], "count": 7 } ]
2024-01-11T00:38:09.000Z
2024-01-18T12:34:49.121Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/ShOE6VXMkQc7izpLtBKz7.jpeg", "fullname": "Vedat Baday", "name": "badayvedat", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 27, "isFollowing": false } ]
/posts/Tonic/802671427380916
12
1
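The post above describes serving demos that only hold a GPU while a request is being handled (what the author calls GPUZero). Below is a minimal, hedged sketch of that pattern in a Gradio Space; it assumes the `spaces` helper package and its GPU decorator are available in the Space runtime, which is an assumption on my part rather than something stated in the post.

```
# Minimal sketch of a Space whose function only holds a GPU for the duration
# of a call, which is the cost-saving pattern the post describes.
# Assumes the `spaces` package and its GPU decorator are available.
import gradio as gr
import spaces
import torch

@spaces.GPU  # a GPU is attached only while this function runs
def matmul_demo(size: float) -> str:
    n = int(size)
    x = torch.randn(n, n, device="cuda")
    y = x @ x  # do some GPU work
    return f"Ran a {n}x{n} matmul on {torch.cuda.get_device_name(0)}; norm={y.norm().item():.2f}"

gr.Interface(fn=matmul_demo, inputs=gr.Number(value=1024), outputs="text").launch()
```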
691474247332404
[ { "type": "text", "value": "I finished my model merging experiment day.🤗I would love your thoughts on this.", "raw": "I finished my model merging experiment day.🤗I would love your thoughts on this.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What did I do? I merged Mistral Instruct 0.1 and 0.2 models using different merging techniques:", "raw": "What did I do? I merged Mistral Instruct 0.1 and 0.2 models using different merging techniques:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- SLERP: linear interpolation (most popular method)", "raw": "- SLERP: linear interpolation (most popular method)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MoE: replace some forward layers with MoE layers; using a random gate for now", "raw": "- MoE: replace some forward layers with MoE layers; using a random gate for now", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Frankenmerge: also known as passthrough, but that isn't very cool. It concatenates some specified layers ending in different numbers of params. In my case, I went from 7B to 9B.", "raw": "- Frankenmerge: also known as passthrough, but that isn't very cool. It concatenates some specified layers ending in different numbers of params. In my case, I went from 7B to 9B.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Note: merging is not building an ensemble of models. You can read more about merging techniques at ", "raw": "Note: merging is not building an ensemble of models. 
You can read more about merging techniques at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/mlabonne/merge-models", "href": "https://huggingface.co/blog/mlabonne/merge-models", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Results", "raw": "Results", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I built the 3 models using mergekit (running in an HF Space) - took less than an hour to do the three) ", "raw": "I built the 3 models using mergekit (running in an HF Space) - took less than an hour to do the three) ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/osanseviero/mistral-instruct-merges-659ebf35ca0781acdb86bb0a", "href": null, "resource": { "type": "collection", "id": "osanseviero/mistral-instruct-merges-659ebf35ca0781acdb86bb0a", "discussionNum": null }, "url": "https://huggingface.co/collections/osanseviero/mistral-instruct-merges-659ebf35ca0781acdb86bb0a", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I'm doing a quick check with the OpenLLM Leaderboard. ", "raw": "I'm doing a quick check with the OpenLLM Leaderboard. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🚨The OpenLLM Leaderboard is more suitable for pre-trained models than instruct models, but I still thought it would be interesting to look at the insights🚨", "raw": "🚨The OpenLLM Leaderboard is more suitable for pre-trained models than instruct models, but I still thought it would be interesting to look at the insights🚨", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can look at the attached image. Some interesting things", "raw": "You can look at the attached image. 
Some interesting things", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- All three models performed somewhere between 0.1 and 0.2 - congrats to the 140 people who got it right in ", "raw": "- All three models performed somewhere between 0.1 and 0.2 - congrats to the 140 people who got it right in ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://twitter.com/osanseviero/status/1745071548866736171", "href": "https://twitter.com/osanseviero/status/1745071548866736171", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Frankenmerge terribly sucked with GSM8K. It seems that adding some Mistral 0.1 layers actually degraded the performance a lot - this is worse than even 0.1!", "raw": "- Frankenmerge terribly sucked with GSM8K. It seems that adding some Mistral 0.1 layers actually degraded the performance a lot - this is worse than even 0.1!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Otherwise, frankenmerge was decent across HellaSwag, MMLU, and specially TruthfulQA", "raw": "- Otherwise, frankenmerge was decent across HellaSwag, MMLU, and specially TruthfulQA", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MoE is using random gating, so I expected something right in between 0.1 and 0.2, which was the case", "raw": "- MoE is using random gating, so I expected something right in between 0.1 and 0.2, which was the case", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What do I do with this?", "raw": "What do I do with this?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Not sure tbh! I think doing proper MT bench evals would be nice. I also think all of us should give a nice GH star to mergekit because it's awesome. I would love to have the time to do end-to-end ablation studies, but cool new things are coming up. Let me know if you have any thoughts in the results", "raw": "Not sure tbh! 
I think doing proper MT bench evals would be nice. I also think all of us should give a nice GH star to mergekit because it's awesome. I would love to have the time to do end-to-end ablation studies, but cool new things are coming up. Let me know if you have any thoughts in the results", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I finished my model merging experiment day. 🤗 I would love your thoughts on this. What did I do? I merged the Mistral Instruct 0.1 and 0.2 models using different merging techniques: - SLERP: spherical linear interpolation (the most popular method) - MoE: replace some feed-forward layers with MoE layers; using a random gate for now - Frankenmerge: also known as passthrough, but that isn't very cool. It concatenates some specified layers, ending up with a different number of params. In my case, I went from 7B to 9B. Note: merging is not building an ensemble of models. You can read more about merging techniques at https://huggingface.co/blog/mlabonne/merge-models Results I built the 3 models using mergekit (running in an HF Space; it took less than an hour to do all three) https://huggingface.co/collections/osanseviero/mistral-instruct-merges-659ebf35ca0781acdb86bb0a I'm doing a quick check with the OpenLLM Leaderboard. 🚨The OpenLLM Leaderboard is more suitable for pre-trained models than instruct models, but I still thought it would be interesting to look at the insights🚨 You can look at the attached image. Some interesting things: - All three models performed somewhere between 0.1 and 0.2 - congrats to the 140 people who got it right in https://twitter.com/osanseviero/status/1745071548866736171 - Frankenmerge sucked terribly on GSM8K. It seems that adding some Mistral 0.1 layers actually degraded the performance a lot - this is worse than even 0.1! - Otherwise, frankenmerge was decent across HellaSwag, MMLU, and especially TruthfulQA - MoE is using random gating, so I expected something right in between 0.1 and 0.2, which was the case What do I do with this? Not sure tbh! I think doing proper MT-Bench evals would be nice. I also think all of us should give a nice GH star to mergekit because it's awesome. I would love to have the time to do end-to-end ablation studies, but cool new things are coming up. Let me know if you have any thoughts on the results
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6032802e1f993496bc14d9e3/Qe1Mol-RI6WPR5VXa2mLL.png" } ]
[]
[ { "reaction": "❤️", "users": [ "davanstrien", "merve", "mlabonne", "mrm8488", "abhibisht89", "gblazex", "eitanturok", "detakarang", "dvilasuero", "victor", "julien-c", "kgourgou", "thomwolf", "mwitiderrick", "not-lain", "pcuenq", "marcsun13", "clem", "chansung", "radames", "lysandre", "KnutJaegersberg", "toshas", "Mohammadreza", "m-ric", "ernestp56", "wenqiglantz", "bhadresh-savani", "Dlbk", "nouamanetazi", "samusenps", "Csplk", "Warung", "jiandong", "chrissarmstrong", "sbrandeis", "Haleshot" ], "count": 37 }, { "reaction": "🤯", "users": [ "davanstrien", "pprp", "dvilasuero", "julien-c", "not-lain", "mrfakename", "clem", "chansung", "lysandre", "KnutJaegersberg", "DrishtiSharma", "nouamanetazi", "macadeliccc" ], "count": 13 }, { "reaction": "🤗", "users": [ "mrm8488", "julien-c", "not-lain", "clem", "chansung", "Haleshot" ], "count": 6 } ]
2024-01-10T21:36:42.000Z
2024-01-13T07:45:55.853Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b8e2ba285851687028d395/JtUGAwVh_4cDEsjNcfpye.png", "fullname": "Maxime Labonne", "name": "mlabonne", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 3486, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1649681653581-5f7fbd813e94f16a85448745.jpeg", "fullname": "Sayak Paul", "name": "sayakpaul", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 459, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63f140e52f7c0152e87223f6/1Ml4EXu_ER4R16ACOTVwW.jpeg", "fullname": "Jaime Dols Duxans", "name": "duxans", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }, { "avatarUrl": "/avatars/ca5f5066924ccb4e7f02471c01e49478.svg", "fullname": "Aiswarya Sankar", "name": "aiswaryasankar", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/osanseviero/691474247332404
360
8
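The post mentions building the merges with mergekit in an HF Space but does not include the configs, so here is a hedged sketch of what a SLERP merge of the two Mistral Instruct models could look like. The layer range, the interpolation factor t, and the output directory are illustrative placeholders, and the `mergekit-yaml` entry point is assumed from mergekit's documentation rather than taken from the post.

```
# Hedged sketch of a SLERP merge with mergekit; not the exact config used
# for the posted models. Layer ranges, t, and paths are placeholders.
import subprocess
from pathlib import Path

config = """\
slices:
  - sources:
      - model: mistralai/Mistral-7B-Instruct-v0.1
        layer_range: [0, 32]
      - model: mistralai/Mistral-7B-Instruct-v0.2
        layer_range: [0, 32]
merge_method: slerp
base_model: mistralai/Mistral-7B-Instruct-v0.1
parameters:
  t: 0.5  # 0.0 keeps v0.1, 1.0 keeps v0.2
dtype: bfloat16
"""

Path("slerp.yaml").write_text(config)
# mergekit's CLI entry point (assumed from its docs); writes merged weights to ./merged
subprocess.run(["mergekit-yaml", "slerp.yaml", "./merged"], check=True)
```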
533880363228237
[ { "type": "text", "value": "Last month was great for faster/smaller segmentation models, and I wanted to dedicate my first post to compile the recently released SAM variants! 🤗", "raw": "Last month was great for faster/smaller segmentation models, and I wanted to dedicate my first post to compile the recently released SAM variants! 🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📚 All models and their demos can be found in this collection 👉🏼 ", "raw": "📚 All models and their demos can be found in this collection 👉🏼 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/merve/segment-anything-model-6585835fc76915aa14e2bcbd", "href": null, "resource": { "type": "collection", "id": "merve/segment-anything-model-6585835fc76915aa14e2bcbd", "discussionNum": null }, "url": "https://huggingface.co/collections/merve/segment-anything-model-6585835fc76915aa14e2bcbd", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The ideas behind them are mostly about making heavy image encoder lighter either through distillation or changing the pre-training. 💡", "raw": "The ideas behind them are mostly about making heavy image encoder lighter either through distillation or changing the pre-training. 💡", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚡️MobileSAM: It decouples the heavy image encoder of SAM and distills it into a TinyViT to make SAM smaller. The architecture is same except for the encoder.", "raw": "⚡️MobileSAM: It decouples the heavy image encoder of SAM and distills it into a TinyViT to make SAM smaller. The architecture is same except for the encoder.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚡️TinySAM: It distills the whole model with online hard prompt sampling. The authors also quantized it and released Q-TinySAM. ", "raw": "⚡️TinySAM: It distills the whole model with online hard prompt sampling. The authors also quantized it and released Q-TinySAM. 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚡️ EfficientSAM: This model combines masked image pre-training for training lightweight image encoders (like ViTMAE, learns to reconstruct the images) and mask decoder.", "raw": "⚡️ EfficientSAM: This model combines masked image pre-training for training lightweight image encoders (like ViTMAE, learns to reconstruct the images) and mask decoder.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚡️ FastSAM: It's a CNN-based model where the problem is modeled as segments generation. The inference takes place as everything is segmented at once and then you can prompt with boxes or points or text (and this is how it is similar to SAM). So the architecture is nowhere similar to original SAM itself. ", "raw": "⚡️ FastSAM: It's a CNN-based model where the problem is modeled as segments generation. The inference takes place as everything is segmented at once and then you can prompt with boxes or points or text (and this is how it is similar to SAM). So the architecture is nowhere similar to original SAM itself. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✨ [NEW] SlimSAM: It's a pruned-distilled version of pre-trained SAM. The architecture is same so ", "raw": "✨ [NEW] SlimSAM: It's a pruned-distilled version of pre-trained SAM. The architecture is same so ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@nielsr", "href": null, "resource": null, "url": null, "code": null, "user": "nielsr", "label": null, "lang": null }, { "type": "text", "value": " recently converted the weights and you can use it with the same API you use with SAM models. You can find the available checkpoints in the collection.", "raw": " recently converted the weights and you can use it with the same API you use with SAM models. You can find the available checkpoints in the collection.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I hope you liked it! ", "raw": "I hope you liked it! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Last month was great for faster/smaller segmentation models, and I wanted to dedicate my first post to compiling the recently released SAM variants! 🤗 📚 All models and their demos can be found in this collection 👉🏼 https://huggingface.co/collections/merve/segment-anything-model-6585835fc76915aa14e2bcbd The ideas behind them are mostly about making the heavy image encoder lighter, either through distillation or by changing the pre-training. 💡 ⚡️ MobileSAM: It decouples the heavy image encoder of SAM and distills it into a TinyViT to make SAM smaller. The architecture is the same except for the encoder. ⚡️ TinySAM: It distills the whole model with online hard prompt sampling. The authors also quantized it and released Q-TinySAM. ⚡️ EfficientSAM: This model combines masked image pre-training for lightweight image encoders (like ViTMAE, it learns to reconstruct images) with the mask decoder. ⚡️ FastSAM: It's a CNN-based model where the problem is modeled as segment generation. At inference everything is segmented at once, and then you can prompt with boxes, points, or text (and this is how it is similar to SAM). So the architecture is nothing like the original SAM itself. ✨ [NEW] SlimSAM: It's a pruned-distilled version of pre-trained SAM. The architecture is the same, so @nielsr recently converted the weights and you can use it with the same API you use with SAM models. You can find the available checkpoints in the collection. I hope you liked it!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1608042047613-5f1158120c833276f61f1a84.jpeg", "fullname": "Niels Rogge", "name": "nielsr", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 680 } ]
[ { "reaction": "❤️", "users": [ "osanseviero", "lopezjhonf", "NemesisAlm", "amyeroberts", "julien-c", "mattmdjaga", "KnutJaegersberg", "bisnotforbella", "AdinaY", "samusenps", "abidlabs" ], "count": 11 }, { "reaction": "👍", "users": [ "Norod78", "julien-c", "KnutJaegersberg", "mlabonne", "AdinaY", "sbrandeis" ], "count": 6 } ]
2024-01-10T19:58:41.000Z
2024-01-10T19:59:42.650Z
[]
/posts/merve/533880363228237
29
0
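Since the post notes that the SlimSAM conversion keeps the same API as SAM in transformers, a short hedged sketch of point-prompted inference is included below. The facebook/sam-vit-base checkpoint, the sample image, and the prompt point are stand-ins; a SlimSAM checkpoint ID from the linked collection should be a drop-in replacement, but the exact ID is not taken from the post.

```
# Hedged sketch of point-prompted SAM inference with the transformers API the
# post refers to. The checkpoint ID, image URL, and prompt point are
# placeholders; a SlimSAM checkpoint from the collection can be swapped in.
import requests
import torch
from PIL import Image
from transformers import SamModel, SamProcessor

model_id = "facebook/sam-vit-base"  # stand-in checkpoint
processor = SamProcessor.from_pretrained(model_id)
model = SamModel.from_pretrained(model_id)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
input_points = [[[450, 600]]]  # one (x, y) prompt point for one image

inputs = processor(image, input_points=input_points, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Upscale the low-resolution predicted masks back to the original image size.
masks = processor.image_processor.post_process_masks(
    outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
)
print(masks[0].shape)  # masks for the prompted point
```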
462914041098598
[ { "type": "text", "value": "🔥 Less is more for DPO, high quality matters!", "raw": "🔥 Less is more for DPO, high quality matters!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📢 Dropping our first open dataset and LLM of the year:", "raw": "📢 Dropping our first open dataset and LLM of the year:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💾Meet distilabel Orca Pairs DPO, an improved version of the now famous dataset from Intel:", "raw": "💾Meet distilabel Orca Pairs DPO, an improved version of the now famous dataset from Intel:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/argilla/distilabel-intel-orca-dpo-pairs", "href": null, "resource": { "type": "dataset", "id": "argilla/distilabel-intel-orca-dpo-pairs", "discussionNum": null }, "url": "https://huggingface.co/datasets/argilla/distilabel-intel-orca-dpo-pairs", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🏛️ And a new OpenHermes fine-tune outperforming baselines with 54% less DPO pairs:", "raw": "🏛️ And a new OpenHermes fine-tune outperforming baselines with 54% less DPO pairs:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/argilla/distilabeled-Hermes-2.5-Mistral-7B", "href": "https://huggingface.co/argilla/distilabeled-Hermes-2.5-Mistral-7B", "resource": null, 
"url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can use this new dataset for your DPO tuning, just like this:", "raw": "You can use this new dataset for your DPO tuning, just like this:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\nfrom datasets import load_dataset\n\n# Instead of this:\n# dataset = load_dataset(\"Intel/orca_dpo_pairs\", split=\"train\")\n\n# use this:\ndataset = load_dataset(\"argilla/distilabel-intel-orca-dpo-pairs\", split=\"train\")\n\ndataset = dataset.filter(\n lambda r: \n r[\"status\"] != \"tie\" and \n r[\"chosen_score\"] >= 8 and \n not r[\"in_gsm8k_train\"]\n)\n```", "href": null, "resource": null, "url": null, "code": "from datasets import load_dataset\n\n# Instead of this:\n# dataset = load_dataset(\"Intel/orca_dpo_pairs\", split=\"train\")\n\n# use this:\ndataset = load_dataset(\"argilla/distilabel-intel-orca-dpo-pairs\", split=\"train\")\n\ndataset = dataset.filter(\n lambda r: \n r[\"status\"] != \"tie\" and \n r[\"chosen_score\"] >= 8 and \n not r[\"in_gsm8k_train\"]\n)", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This will reduce the size of the original by 54% while giving you better quality preferences!", "raw": "This will reduce the size of the original by 54% while giving you better quality preferences!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What should we build next?", "raw": "What should we build next?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, 
"code": null, "user": null, "label": null, "lang": null } ]
🔥 Less is more for DPO, high quality matters! 📢 Dropping our first open dataset and LLM of the year: 💾Meet distilabel Orca Pairs DPO, an improved version of the now famous dataset from Intel: https://huggingface.co/datasets/argilla/distilabel-intel-orca-dpo-pairs 🏛️ And a new OpenHermes fine-tune outperforming baselines with 54% less DPO pairs: https://huggingface.co/argilla/distilabeled-Hermes-2.5-Mistral-7B You can use this new dataset for your DPO tuning, just like this: ``` from datasets import load_dataset # Instead of this: # dataset = load_dataset("Intel/orca_dpo_pairs", split="train") # use this: dataset = load_dataset("argilla/distilabel-intel-orca-dpo-pairs", split="train") dataset = dataset.filter( lambda r: r["status"] != "tie" and r["chosen_score"] >= 8 and not r["in_gsm8k_train"] ) ``` This will reduce the size of the original by 54% while giving you better quality preferences! What should we build next?
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60420dccc15e823a685f2b03/Dn7QTyy9SZ7jKN6xpufVD.png", "fullname": "Daniel Vila", "name": "dvilasuero", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 231, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "davanstrien", "osanseviero", "merve", "radames", "tomaarsen", "mlabonne", "alielfilali01", "clem", "linoyts", "KnutJaegersberg", "Forbu14", "lysandre", "d0rj", "mammour", "VictorSanh", "gblazex", "sethuiyer", "eliebak", "sbrandeis" ], "count": 19 }, { "reaction": "🤯", "users": [ "davanstrien", "osanseviero", "merve", "radames", "tomaarsen", "BramVanroy", "clem", "KnutJaegersberg", "lixin67" ], "count": 9 }, { "reaction": "👍", "users": [ "hushell" ], "count": 1 } ]
2024-01-10T18:43:17.000Z
2024-01-21T19:26:53.267Z
[ { "avatarUrl": "/avatars/41a31c90d737660e8293cb8be98c4d18.svg", "fullname": "Blaze", "name": "gblazex", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 7, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60420dccc15e823a685f2b03/Dn7QTyy9SZ7jKN6xpufVD.png", "fullname": "Daniel Vila", "name": "dvilasuero", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 231, "isFollowing": false } ]
/posts/dvilasuero/462914041098598
43
2
428151014489564
[ { "type": "text", "value": "Finally found my go-to hat for 2024 😎", "raw": "Finally found my go-to hat for 2024 😎", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thanks to ", "raw": "Thanks to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/fal-ai", "href": "https://huggingface.co/fal-ai", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Finally found my go-to hat for 2024 😎 Thanks to https://huggingface.co/fal-ai
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5dd96eb166059660ed1ee413/-DNSnqBnqNk-ibyGrs9D_.jpeg" } ]
[]
[ { "reaction": "❤️", "users": [ "dvilasuero", "burkaygur", "radames", "merve", "pierrci", "not-lain", "clem", "clefourrier", "samusenps", "MarinaraSpaghetti", "fedyanin", "dillfrescott", "roseking", "sbrandeis" ], "count": 14 }, { "reaction": "🤝", "users": [ "lysandre", "merve", "clem", "roseking", "dillfrescott" ], "count": 5 }, { "reaction": "👍", "users": [ "Norod78", "clem", "roseking", "dillfrescott" ], "count": 4 } ]
2024-01-10T18:33:22.000Z
2024-02-01T02:50:42.313Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60fb1e6abfa81e553cc5c722/w1Okfm9m48VaEEr_pAdaY.png", "fullname": "Burkay Gur", "name": "burkaygur", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 15, "isFollowing": false }, { "avatarUrl": "/avatars/273805bcfd1cc8a88395358c78e9695b.svg", "fullname": "Pierre-Antoine Passet", "name": "pierreant-p", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/646b5b503e2a7b065946ffb9/nccWfcReOfbAac6KO_u5r.jpeg", "fullname": "Kirill", "name": "fedyanin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6215ce9abfcb3893344dd0a2/0srkKGjBNRDKnlMxNrsmn.jpeg", "fullname": "Cross", "name": "dillfrescott", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 47, "isFollowing": false } ]
/posts/julien-c/428151014489564
28
8
476586287487680
[ { "type": "text", "value": "Here is my selection of papers for today (10 Jan)", "raw": "Here is my selection of papers for today (10 Jan)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/papers", "href": "https://huggingface.co/papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Jump Cut Smoothing for Talking Heads", "raw": "Jump Cut Smoothing for Talking Heads", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "FADI-AEC: Fast Score Based Diffusion Model Guided by Far-end Signal for Acoustic Echo Cancellation", "raw": "FADI-AEC: Fast Score Based Diffusion Model Guided by Far-end Signal for Acoustic Echo Cancellation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Masked Audio Generation using a Single Non-Autoregressive Transformer", "raw": "Masked Audio Generation using a Single Non-Autoregressive Transformer", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Let's Go Shopping (LGS) -- Web-Scale Image-Text Dataset for Visual Concept Understanding", "raw": "Let's Go Shopping (LGS) -- Web-Scale Image-Text Dataset for Visual Concept Understanding", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Narrowing the Knowledge Evaluation Gap: Open-Domain Question 
Answering with Multi-Granularity Answers", "raw": "Narrowing the Knowledge Evaluation Gap: Open-Domain Question Answering with Multi-Granularity Answers", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Lightning Attention-2: A Free Lunch for Handling Unlimited Sequence Lengths in Large Language Models", "raw": "Lightning Attention-2: A Free Lunch for Handling Unlimited Sequence Lengths in Large Language Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Chain-of-Table: Evolving Tables in the Reasoning Chain for Table Understanding", "raw": "Chain-of-Table: Evolving Tables in the Reasoning Chain for Table Understanding", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "MagicVideo-V2: Multi-Stage High-Aesthetic Video Generation", "raw": "MagicVideo-V2: Multi-Stage High-Aesthetic Video Generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Here is my selection of papers for today (10 Jan) https://huggingface.co/papers Jump Cut Smoothing for Talking Heads FADI-AEC: Fast Score Based Diffusion Model Guided by Far-end Signal for Acoustic Echo Cancellation Masked Audio Generation using a Single Non-Autoregressive Transformer Let's Go Shopping (LGS) -- Web-Scale Image-Text Dataset for Visual Concept Understanding Narrowing the Knowledge Evaluation Gap: Open-Domain Question Answering with Multi-Granularity Answers Lightning Attention-2: A Free Lunch for Handling Unlimited Sequence Lengths in Large Language Models Chain-of-Table: Evolving Tables in the Reasoning Chain for Table Understanding MagicVideo-V2: Multi-Stage High-Aesthetic Video Generation
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/P-zl6ha7fRwP9pmOApfhn.qt" } ]
[]
[ { "reaction": "❤️", "users": [ "not-lain", "pierrci", "julien-c", "abidlabs", "davanstrien", "osanseviero" ], "count": 6 }, { "reaction": "🤗", "users": [ "not-lain", "julien-c", "abidlabs", "davanstrien", "alielfilali01" ], "count": 5 } ]
2024-01-10T15:11:03.000Z
2024-01-10T18:02:54.693Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false } ]
/posts/akhaliq/476586287487680
11
1
379937660970830
[ { "type": "text", "value": "hello world! ", "raw": "hello world! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "we're starting a new recurring event/club where we read and implement cool ai papers on skunkworks discord. first paper we chose is self-play as there are a lot of opportunities to expand on this framework, here's the link for the event: ", "raw": "we're starting a new recurring event/club where we read and implement cool ai papers on skunkworks discord. first paper we chose is self-play as there are a lot of opportunities to expand on this framework, here's the link for the event: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://discord.gg/eAgBr7Fy?event=1194392774905172030", "href": "https://discord.gg/eAgBr7Fy?event=1194392774905172030", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "im plannin my next post to be a technical deepdive of PCN and ProspectiveConfiguration algo as ive been spending the last few days getting a good grasp at this promising alternative to BP, stay tuned.", "raw": "im plannin my next post to be a technical deepdive of PCN and ProspectiveConfiguration algo as ive been spending the last few days getting a good grasp at this promising alternative to BP, stay tuned.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
hello world! we're starting a new recurring event/club where we read and implement cool ai papers on skunkworks discord. first paper we chose is self-play as there are a lot of opportunities to expand on this framework, here's the link for the event: https://discord.gg/eAgBr7Fy?event=1194392774905172030 im plannin my next post to be a technical deepdive of PCN and ProspectiveConfiguration algo as ive been spending the last few days getting a good grasp at this promising alternative to BP, stay tuned.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64b7e345f92b20f7a38bf47a/9ZZdzuTlSryjnN5_Bx_n-.jpeg", "fullname": "Farouk", "name": "pharaouk", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 50, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "osanseviero", "Tonic", "victor", "pierrci", "pcuenq", "merve", "samusenps", "sbrandeis" ], "count": 8 } ]
2024-01-09T21:40:58.000Z
2024-07-13T06:01:19.763Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1608146735109-5fcfb7c407408029ba3577e2.png", "fullname": "Simon Brandeis", "name": "sbrandeis", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 116, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1621947938344-noauth.png", "fullname": "Abubakar Abid", "name": "abidlabs", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 487, "isFollowing": false } ]
/posts/pharaouk/379937660970830
436
3
197374136323416
[ { "type": "text", "value": "Here is my selection of papers for today (9 Jan) ", "raw": "Here is my selection of papers for today (9 Jan) ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/papers", "href": "https://huggingface.co/papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "AGG: Amortized Generative 3D Gaussians for Single Image to 3D", "raw": "AGG: Amortized Generative 3D Gaussians for Single Image to 3D", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "MoE-Mamba: Efficient Selective State Space Models with Mixture of Experts", "raw": "MoE-Mamba: Efficient Selective State Space Models with Mixture of Experts", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "DiarizationLM: Speaker Diarization Post-Processing with Large Language Models", "raw": "DiarizationLM: Speaker Diarization Post-Processing with Large Language Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "TeleChat Technical Report", "raw": "TeleChat Technical Report", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Soaring from 4K to 400K: Extending LLM's Context with Activation Beacon", "raw": "Soaring from 4K to 400K: Extending LLM's Context with Activation Beacon", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "AST-T5: Structure-Aware Pretraining for Code Generation and Understanding", "raw": "AST-T5: Structure-Aware Pretraining for Code Generation and Understanding", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Has Your Pretrained Model Improved? A Multi-head Posterior Based Approach", "raw": "Has Your Pretrained Model Improved? A Multi-head Posterior Based Approach", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Blending Is All You Need: Cheaper, Better Alternative to Trillion-Parameters LLM", "raw": "Blending Is All You Need: Cheaper, Better Alternative to Trillion-Parameters LLM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "GPT-4V(ision) is a Human-Aligned Evaluator for Text-to-3D Generation", "raw": "GPT-4V(ision) is a Human-Aligned Evaluator for Text-to-3D Generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "CRUXEval: A Benchmark for Code Reasoning, Understanding and Execution", "raw": "CRUXEval: A Benchmark for Code Reasoning, Understanding and Execution", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Mixtral of Experts", "raw": "Mixtral of Experts", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": 
null, "lang": null } ]
Here is my selection of papers for today (9 Jan) https://huggingface.co/papers AGG: Amortized Generative 3D Gaussians for Single Image to 3D MoE-Mamba: Efficient Selective State Space Models with Mixture of Experts DiarizationLM: Speaker Diarization Post-Processing with Large Language Models TeleChat Technical Report Soaring from 4K to 400K: Extending LLM's Context with Activation Beacon AST-T5: Structure-Aware Pretraining for Code Generation and Understanding Has Your Pretrained Model Improved? A Multi-head Posterior Based Approach Blending Is All You Need: Cheaper, Better Alternative to Trillion-Parameters LLM GPT-4V(ision) is a Human-Aligned Evaluator for Text-to-3D Generation CRUXEval: A Benchmark for Code Reasoning, Understanding and Execution Mixtral of Experts
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/yWeF_5kNh6wvsE2r8-X0e.png" } ]
[]
[ { "reaction": "👍", "users": [ "mwz", "alckasoc", "Tonic", "victor", "merve", "davanstrien", "IlyasMoutawwakil", "lixin67", "aust-t" ], "count": 9 } ]
2024-01-09T15:31:52.000Z
2024-01-09T15:32:01.396Z
[]
/posts/akhaliq/197374136323416
5
0
204586934082310
[ { "type": "text", "value": "Full fine-tuning of Microsoft's Phi2 on a single 4090 is now supported in axolotl. Thanks to ", "raw": "Full fine-tuning of Microsoft's Phi2 on a single 4090 is now supported in axolotl. Thanks to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@abacaj", "href": null, "resource": null, "url": null, "code": null, "user": "abacaj", "label": null, "lang": null }, { "type": "text", "value": " and ", "raw": " and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@vikhyatk", "href": null, "resource": null, "url": null, "code": null, "user": "vikhyatk", "label": null, "lang": null }, { "type": "text", "value": " for their help with gradient checkpointing and flash attention fixes. ", "raw": " for their help with gradient checkpointing and flash attention fixes. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "alpaca finetune: ", "raw": "alpaca finetune: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/openaccess-ai-collective/phi2-alpaca", "href": null, "resource": { "type": "model", "id": "openaccess-ai-collective/phi2-alpaca", "discussionNum": null }, "url": "https://huggingface.co/openaccess-ai-collective/phi2-alpaca", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "wandb: ", "raw": "wandb: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://wandb.ai/oaaic/phi2/runs/00pc4ugb?workspace=user-wing-lian", "href": "https://wandb.ai/oaaic/phi2/runs/00pc4ugb?workspace=user-wing-lian", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "merged PR: ", "raw": "merged PR: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/OpenAccess-AI-Collective/axolotl/pull/1058", "href": "https://github.com/OpenAccess-AI-Collective/axolotl/pull/1058", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Full fine-tuning of Microsoft's Phi2 on a single 4090 is now supported in axolotl. Thanks to @abacaj and @vikhyatk for their help with gradient checkpointing and flash attention fixes. alpaca finetune: https://huggingface.co/openaccess-ai-collective/phi2-alpaca wandb: https://wandb.ai/oaaic/phi2/runs/00pc4ugb?workspace=user-wing-lian merged PR: https://github.com/OpenAccess-AI-Collective/axolotl/pull/1058
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/641dfddf3bae5a77636817c5/2IwNwh9kK98eCHUmOGoWD.png", "fullname": "wing lian", "name": "winglian", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2304, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677644903536-62ceeb27e7f6014c0e9d9268.jpeg", "fullname": "Anton Bacaj", "name": "abacaj", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 71 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63117568fa95534e218da163/8h9zN8aKRxPLBnXW7sqY9.jpeg", "fullname": "Vik Korrapati", "name": "vikhyatk", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 375 } ]
[ { "reaction": "👍", "users": [ "osanseviero", "Citaman", "victor", "kramp", "Tonic", "marcsun13", "merve", "ybelkada", "Zmu", "samusenps", "mrfakename", "Chunte", "JaiSurya", "radames" ], "count": 14 }, { "reaction": "❤️", "users": [ "osanseviero", "Tonic", "merve", "ybelkada", "Chunte", "radames", "SicariusSicariiStuff", "sbrandeis" ], "count": 8 } ]
2024-01-08T19:21:15.000Z
2024-07-13T06:01:19.747Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1608146735109-5fcfb7c407408029ba3577e2.png", "fullname": "Simon Brandeis", "name": "sbrandeis", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 116, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/641dfddf3bae5a77636817c5/2IwNwh9kK98eCHUmOGoWD.png", "fullname": "wing lian", "name": "winglian", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2304, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62e54f0eae9d3f10acb95cb9/VAyk05hqB3OZWXEZW-B0q.png", "fullname": "mrfakename", "name": "mrfakename", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 969, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6538119803519fddb4a17e10/ffJMkdx-rM7VvLTCM6ri_.jpeg", "fullname": "samusenps", "name": "samusenps", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 91, "isFollowing": false }, { "avatarUrl": "/avatars/f18351bc5ce9c106ba74523d9a55567c.svg", "fullname": "Lone Striker", "name": "LoneStriker", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 933, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1670003187019-noauth.png", "fullname": "brucethemoose", "name": "brucethemoose", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 148, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6569216f9c96f1a47bf45788/mCLqmAs4dOjKdxNQVAp1w.png", "fullname": "Sica Rius", "name": "SicariusSicariiStuff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 135, "isFollowing": false } ]
/posts/winglian/204586934082310
2227
8
941066614615290
[ { "type": "text", "value": "Here is my selection of papers for today (8 Jan)", "raw": "Here is my selection of papers for today (8 Jan)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/papers", "href": "https://huggingface.co/papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "DocGraphLM: Documental Graph Language Model for Information Extraction", "raw": "DocGraphLM: Documental Graph Language Model for Information Extraction", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Denoising Vision Transformers", "raw": "Denoising Vision Transformers", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Progressive Knowledge Distillation Of Stable Diffusion XL Using Layer Level Loss", "raw": "Progressive Knowledge Distillation Of Stable Diffusion XL Using Layer Level Loss", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Open-Vocabulary SAM: Segment and Recognize Twenty-thousand Classes Interactively", "raw": "Open-Vocabulary SAM: Segment and Recognize Twenty-thousand Classes Interactively", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Pheme: Efficient and Conversational Speech Generation", "raw": "Pheme: Efficient and Conversational Speech Generation", "href": 
null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "DeepSeek LLM: Scaling Open-Source Language Models with Longtermism", "raw": "DeepSeek LLM: Scaling Open-Source Language Models with Longtermism", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Infinite-LLM: Efficient LLM Service for Long Context with DistAttention and Distributed KVCache", "raw": "Infinite-LLM: Efficient LLM Service for Long Context with DistAttention and Distributed KVCache", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Here is my selection of papers for today (8 Jan) https://huggingface.co/papers DocGraphLM: Documental Graph Language Model for Information Extraction Denoising Vision Transformers Progressive Knowledge Distillation Of Stable Diffusion XL Using Layer Level Loss Open-Vocabulary SAM: Segment and Recognize Twenty-thousand Classes Interactively Pheme: Efficient and Conversational Speech Generation DeepSeek LLM: Scaling Open-Source Language Models with Longtermism Infinite-LLM: Efficient LLM Service for Long Context with DistAttention and Distributed KVCache
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/lCGGPmlQU0_RpspuKBsJj.png" } ]
[]
[ { "reaction": "❤️", "users": [ "reach-vb", "victor", "merve" ], "count": 3 } ]
2024-01-08T15:22:48.000Z
2024-01-08T15:22:48.754Z
[]
/posts/akhaliq/941066614615290
8
0
248307299871659
[ { "type": "text", "value": "QuIP# ecosystem is growing :) ", "raw": "QuIP# ecosystem is growing :) ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I've seen a quip# 2 bit Qwen-72b-Chat model today on the hub that shows there is support for vLLM inference. ", "raw": "I've seen a quip# 2 bit Qwen-72b-Chat model today on the hub that shows there is support for vLLM inference. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This will speed up inference and make high performing 2 bit models more practical. I'm considering quipping MoMo now, as I can only use brief context window of Qwen-72b on my system otherwise, even with bnb double quantization. ", "raw": "This will speed up inference and make high performing 2 bit models more practical. I'm considering quipping MoMo now, as I can only use brief context window of Qwen-72b on my system otherwise, even with bnb double quantization. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/keyfan/Qwen-72B-Chat-2bit", "href": null, "resource": { "type": "model", "id": "keyfan/Qwen-72B-Chat-2bit", "discussionNum": null }, "url": "https://huggingface.co/keyfan/Qwen-72B-Chat-2bit", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Also notice the easier to use Quip# for all library :) ", "raw": "Also notice the easier to use Quip# for all library :) ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/chu-tianxiang/QuIP-for-all", "href": "https://github.com/chu-tianxiang/QuIP-for-all", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
QuIP# ecosystem is growing :) I've seen a QuIP# 2-bit Qwen-72B-Chat model today on the Hub that shows there is support for vLLM inference. This will speed up inference and make high-performing 2-bit models more practical. I'm considering quipping MoMo now, as I can only use a brief context window of Qwen-72B on my system otherwise, even with bnb double quantization. https://huggingface.co/keyfan/Qwen-72B-Chat-2bit Also note the easier-to-use QuIP-for-all library :) https://github.com/chu-tianxiang/QuIP-for-all
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669551186189-63732ebbbd81fae2b3aaf3fb.jpeg", "fullname": "Knut Jägersberg", "name": "KnutJaegersberg", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 238, "isFollowing": false }
[]
[]
[ { "reaction": "🤗", "users": [ "Felladrin", "osanseviero", "victor", "ybelkada", "reach-vb", "merve", "samusenps", "mexicanamerican" ], "count": 8 }, { "reaction": "👍", "users": [ "osanseviero", "ybelkada", "reach-vb", "Tonic", "kramp", "merve", "abidlabs" ], "count": 7 }, { "reaction": "❤️", "users": [ "osanseviero", "ybelkada", "reach-vb", "merve", "samusenps", "sbrandeis" ], "count": 6 }, { "reaction": "🤯", "users": [ "reach-vb", "merve", "sbrandeis" ], "count": 3 } ]
2024-01-07T20:45:46.000Z
2024-07-13T06:01:19.758Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1608146735109-5fcfb7c407408029ba3577e2.png", "fullname": "Simon Brandeis", "name": "sbrandeis", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 116, "isFollowing": false } ]
/posts/KnutJaegersberg/248307299871659
677
2
321536384660058
[ { "type": "text", "value": "👋 Hi there!", "raw": "👋 Hi there!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is my very first post. ", "raw": "This is my very first post. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I'll use it to share some old news: a math preference dataset for DPO!", "raw": "I'll use it to share some old news: a math preference dataset for DPO!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I created this dataset some time ago while we were developing distilabel (", "raw": "I created this dataset some time ago while we were developing distilabel (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/argilla-io/distilabel", "href": "https://github.com/argilla-io/distilabel", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ").", "raw": ").", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Some days ago we found out people are actually using it! So I'll use this post to explain how I built it in case it's useful for the community.", "raw": "Some days ago we found out people are actually using it! So I'll use this post to explain how I built it in case it's useful for the community.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. I used distilabel's SelfInstruct-inspired task to generate instructions about different math topics. I curated the instructions with Argilla (on Spaces!).", "raw": "1. 
I used distilabel's SelfInstruct-inspired task to generate instructions about different math topics. I curated the instructions with Argilla (on Spaces!).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Then I used a distilabel Pipeline to build a preference dataset using gpt3.5 as generator and gpt4 as labeller. If I recall correctly I used our JudgeLM implementation (see ", "raw": "2. Then I used a distilabel Pipeline to build a preference dataset using gpt3.5 as generator and gpt4 as labeller. If I recall correctly I used our JudgeLM implementation (see ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://distilabel.argilla.io/latest/technical-reference/tasks/#judgelmtask", "href": "https://distilabel.argilla.io/latest/technical-reference/tasks/#judgelmtask", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ")", "raw": ")", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "(see the screenshot with the dataset in the Argilla UI)", "raw": "(see the screenshot with the dataset in the Argilla UI)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Then I just binarized into chosen, rejected pairs and voilà:", "raw": "3. 
Then I just binarized into chosen, rejected pairs and voilà:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/argilla/distilabel-math-preference-dpo", "href": null, "resource": { "type": "dataset", "id": "argilla/distilabel-math-preference-dpo", "discussionNum": null }, "url": "https://huggingface.co/datasets/argilla/distilabel-math-preference-dpo", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The funny thing is that I used this to do a second DPO run over Notus-7B. I hoped to see an improvement on math/reasoning skills but it actually improved in STEM and Humanities and did worse on Math 🤣 . ", "raw": "The funny thing is that I used this to do a second DPO run over Notus-7B. I hoped to see an improvement on math/reasoning skills but it actually improved in STEM and Humanities and did worse on Math 🤣 . ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In conclusion, this dataset was only a quick experiment. I'm happy to see the community found it useful. Data for DPO and fine-tuning are still a mystery, let's unveil these mysteries in 2024 together!", "raw": "In conclusion, this dataset was only a quick experiment. I'm happy to see the community found it useful. Data for DPO and fine-tuning are still a mystery, let's unveil these mysteries in 2024 together!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Follow me for the most exciting datasets for LLMs (and maybe some great, small, efficient models). I plan to announce all Argilla open-source work here!", "raw": "Follow me for the most exciting datasets for LLMs (and maybe some great, small, efficient models). I plan to announce all Argilla open-source work here!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
👋 Hi there! This is my very first post. I'll use it to share some old news: a math preference dataset for DPO! I created this dataset some time ago while we were developing distilabel (https://github.com/argilla-io/distilabel). Some days ago we found out people are actually using it! So I'll use this post to explain how I built it in case it's useful for the community. 1. I used distilabel's SelfInstruct-inspired task to generate instructions about different math topics. I curated the instructions with Argilla (on Spaces!). 2. Then I used a distilabel Pipeline to build a preference dataset using gpt3.5 as generator and gpt4 as labeller. If I recall correctly I used our JudgeLM implementation (see https://distilabel.argilla.io/latest/technical-reference/tasks/#judgelmtask) (see the screenshot with the dataset in the Argilla UI) 3. Then I just binarized into chosen, rejected pairs and voilà: https://huggingface.co/datasets/argilla/distilabel-math-preference-dpo The funny thing is that I used this to do a second DPO run over Notus-7B. I hoped to see an improvement on math/reasoning skills but it actually improved in STEM and Humanities and did worse on Math 🤣 . In conclusion, this dataset was only a quick experiment. I'm happy to see the community found it useful. Data for DPO and fine-tuning are still a mystery, let's unveil these mysteries in 2024 together! Follow me for the most exciting datasets for LLMs (and maybe some great, small, efficient models). I plan to announce all Argilla open-source work here!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60420dccc15e823a685f2b03/Dn7QTyy9SZ7jKN6xpufVD.png", "fullname": "Daniel Vila", "name": "dvilasuero", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 231, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60420dccc15e823a685f2b03/MK1bnv_z9xYAs2ptTdZcX.png" } ]
[]
[ { "reaction": "👍", "users": [ "pacoid", "sparkycollier", "osanseviero", "gabrielmbmb", "mishig", "severo", "sugatoray", "davanstrien", "mlabonne", "qJakc", "MoritzLaurer", "KnutJaegersberg", "radames", "victor", "clem", "julien-c", "merve", "tmnam20", "Erland", "DarkLord7771", "abidlabs", "Masa-Erland" ], "count": 22 }, { "reaction": "❤️", "users": [ "pcuenq", "osanseviero", "gabrielmbmb", "mishig", "plaguss", "davanstrien", "pierrci", "qJakc", "thomwolf", "Manel-Hik", "victor", "radames", "mariagrandury", "clem", "julien-c", "merve", "sbrandeis" ], "count": 17 }, { "reaction": "🤗", "users": [ "qJakc", "Tonic", "mariagrandury", "julien-c", "davanstrien", "merve" ], "count": 6 } ]
2024-01-06T18:47:54.000Z
2024-01-07T11:45:02.438Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60420dccc15e823a685f2b03/Dn7QTyy9SZ7jKN6xpufVD.png", "fullname": "Daniel Vila", "name": "dvilasuero", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 231, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false } ]
/posts/dvilasuero/321536384660058
23
2
818590215077182
[ { "type": "text", "value": "Train ANYTHING on Hugging Face Spaces hardware using AutoTrain SpaceRunner: ", "raw": "Train ANYTHING on Hugging Face Spaces hardware using AutoTrain SpaceRunner: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://hf.co/blog/stefan-it/autotrain-flair-mobie", "href": "https://hf.co/blog/stefan-it/autotrain-flair-mobie", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 💥", "raw": " 💥", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Train ANYTHING on Hugging Face Spaces hardware using AutoTrain SpaceRunner: https://hf.co/blog/stefan-it/autotrain-flair-mobie 💥
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fa19f4ba13e063b8b2b5e11/nGVHdTYX2udnt-K8mqY27.jpeg", "fullname": "Abhishek Thakur", "name": "abhishek", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1383, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "mrfakename", "abhishek", "smangrul", "nouamanetazi", "dvilasuero", "thomwolf", "clem", "sbrandeis" ], "count": 8 }, { "reaction": "👍", "users": [ "KnutJaegersberg", "clem" ], "count": 2 } ]
2024-01-05T16:06:20.000Z
2024-07-13T06:01:19.765Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fa19f4ba13e063b8b2b5e11/nGVHdTYX2udnt-K8mqY27.jpeg", "fullname": "Abhishek Thakur", "name": "abhishek", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1383, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60f0608166e5701b80ed3f02/BHso-wSWpR9b8b8CKvodC.jpeg", "fullname": "Alvaro Bartolome", "name": "alvarobartt", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1739, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1608146735109-5fcfb7c407408029ba3577e2.png", "fullname": "Simon Brandeis", "name": "sbrandeis", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 116, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62e54f0eae9d3f10acb95cb9/VAyk05hqB3OZWXEZW-B0q.png", "fullname": "mrfakename", "name": "mrfakename", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 969, "isFollowing": false } ]
/posts/abhishek/818590215077182
806
9
829482374471808
[ { "type": "text", "value": "Here is my selection of papers for today (5 Jan)", "raw": "Here is my selection of papers for today (5 Jan)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/papers", "href": "https://huggingface.co/papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Learning the 3D Fauna of the Web", "raw": "Learning the 3D Fauna of the Web", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Mobile ALOHA: Learning Bimanual Mobile Manipulation with Low-Cost Whole-Body Teleoperation", "raw": "Mobile ALOHA: Learning Bimanual Mobile Manipulation with Low-Cost Whole-Body Teleoperation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Improving Diffusion-Based Image Synthesis with Context Prediction", "raw": "Improving Diffusion-Based Image Synthesis with Context Prediction", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "LLaMA Pro: Progressive LLaMA with Block Expansion", "raw": "LLaMA Pro: Progressive LLaMA with Block Expansion", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "LLaVA-φ: Efficient Multi-Modal Assistant with Small Language Model", "raw": "LLaVA-φ: Efficient Multi-Modal Assistant with Small Language Model", "href": null, "resource": 
null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Towards Truly Zero-shot Compositional Visual Reasoning with LLMs as Programmers", "raw": "Towards Truly Zero-shot Compositional Visual Reasoning with LLMs as Programmers", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "FMGS: Foundation Model Embedded 3D Gaussian Splatting for Holistic 3D Scene Understanding", "raw": "FMGS: Foundation Model Embedded 3D Gaussian Splatting for Holistic 3D Scene Understanding", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "TinyLlama: An Open-Source Small Language Model", "raw": "TinyLlama: An Open-Source Small Language Model", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What You See is What You GAN: Rendering Every Pixel for High-Fidelity Geometry in 3D GANs", "raw": "What You See is What You GAN: Rendering Every Pixel for High-Fidelity Geometry in 3D GANs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "LLM Augmented LLMs: Expanding Capabilities through Composition", "raw": "LLM Augmented LLMs: Expanding Capabilities through Composition", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "ODIN: A Single Model for 2D and 3D Perception", "raw": "ODIN: A Single Model for 2D and 3D Perception", "href": null, "resource": null, 
"url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Instruct-Imagen: Image Generation with Multi-modal Instruction", "raw": "Instruct-Imagen: Image Generation with Multi-modal Instruction", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "ICE-GRT: Instruction Context Enhancement by Generative Reinforcement based Transformers", "raw": "ICE-GRT: Instruction Context Enhancement by Generative Reinforcement based Transformers", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Understanding LLMs: A Comprehensive Overview from Training to Inference", "raw": "Understanding LLMs: A Comprehensive Overview from Training to Inference", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Here is my selection of papers for today (5 Jan) https://huggingface.co/papers Learning the 3D Fauna of the Web Mobile ALOHA: Learning Bimanual Mobile Manipulation with Low-Cost Whole-Body Teleoperation Improving Diffusion-Based Image Synthesis with Context Prediction LLaMA Pro: Progressive LLaMA with Block Expansion LLaVA-φ: Efficient Multi-Modal Assistant with Small Language Model Towards Truly Zero-shot Compositional Visual Reasoning with LLMs as Programmers FMGS: Foundation Model Embedded 3D Gaussian Splatting for Holistic 3D Scene Understanding TinyLlama: An Open-Source Small Language Model What You See is What You GAN: Rendering Every Pixel for High-Fidelity Geometry in 3D GANs LLM Augmented LLMs: Expanding Capabilities through Composition ODIN: A Single Model for 2D and 3D Perception Instruct-Imagen: Image Generation with Multi-modal Instruction ICE-GRT: Instruction Context Enhancement by Generative Reinforcement based Transformers Understanding LLMs: A Comprehensive Overview from Training to Inference
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/r0PhHF84689j0OI4ZfrKe.png" } ]
[]
[ { "reaction": "👍", "users": [ "m-ric", "clem", "merve", "Gatozu35" ], "count": 4 } ]
2024-01-05T15:18:40.000Z
2024-01-05T15:18:40.242Z
[]
/posts/akhaliq/829482374471808
8
0
915335447857525
[ { "type": "text", "value": "I just published a Gradio demo for AliBaba's DreamTalk 🤗", "raw": "I just published a Gradio demo for AliBaba's DreamTalk 🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Try it now: ", "raw": "Try it now: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/fffiloni/dreamtalk", "href": null, "resource": { "type": "space", "id": "fffiloni/dreamtalk", "discussionNum": null }, "url": "https://huggingface.co/spaces/fffiloni/dreamtalk", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2312.09767", "href": null, "resource": { "type": "paper", "id": "2312.09767", "discussionNum": null }, "url": "https://huggingface.co/papers/2312.09767", "code": null, "user": null, "label": "DreamTalk: When Expressive Talking Head Generation Meets Diffusion\n Probabilistic Models (2312.09767)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "—", "raw": "—", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "DreamTalk is a diffusion-based audio-driven expressive talking head generation framework that can produce high-quality talking head videos across diverse speaking styles. DreamTalk exhibits robust performance with a diverse array of inputs, including songs, speech in multiple languages, noisy audio, and out-of-domain portraits. ", "raw": "DreamTalk is a diffusion-based audio-driven expressive talking head generation framework that can produce high-quality talking head videos across diverse speaking styles. DreamTalk exhibits robust performance with a diverse array of inputs, including songs, speech in multiple languages, noisy audio, and out-of-domain portraits. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I just published a Gradio demo for AliBaba's DreamTalk 🤗 Try it now: https://huggingface.co/spaces/fffiloni/dreamtalk Paper: https://huggingface.co/papers/2312.09767 — DreamTalk is a diffusion-based audio-driven expressive talking head generation framework that can produce high-quality talking head videos across diverse speaking styles. DreamTalk exhibits robust performance with a diverse array of inputs, including songs, speech in multiple languages, noisy audio, and out-of-domain portraits.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61868ce808aae0b5499a2a95/F6BA0anbsoY_Z7M1JrwOe.jpeg", "fullname": "Sylvain Filoni", "name": "fffiloni", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5185, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/61868ce808aae0b5499a2a95/LroT0gVIhOYBtjt6ZI0rh.mp4" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/61868ce808aae0b5499a2a95/npxyhwX0cNCS1GvVq_Q0I.gif" } ]
[]
[ { "reaction": "❤️", "users": [ "cnmoro", "gz888", "reach-vb", "osanseviero", "abidlabs", "radames", "nouamanetazi", "victor", "clem", "julien-c", "merve", "lunarflu", "IbrahimSaidi", "sbrandeis" ], "count": 14 }, { "reaction": "🤗", "users": [ "santiviquez", "osanseviero", "alvarobartt", "reach-vb", "clem", "julien-c", "merve", "lunarflu", "akhaliq", "pierrci", "sbrandeis" ], "count": 11 }, { "reaction": "👍", "users": [ "IbrahimSaidi", "OmbelineM" ], "count": 2 } ]
2024-01-04T15:54:11.000Z
2024-01-04T16:02:27.001Z
[]
/posts/fffiloni/915335447857525
202
0
493427044416403
[ { "type": "text", "value": "Here is my selection of papers for today (4 Jan)", "raw": "Here is my selection of papers for today (4 Jan)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/papers", "href": "https://huggingface.co/papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Efficient Hybrid Zoom using Camera Fusion on Mobile Phones", "raw": "Efficient Hybrid Zoom using Camera Fusion on Mobile Phones", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Incremental FastPitch: Chunk-based High Quality Text to Speech", "raw": "Incremental FastPitch: Chunk-based High Quality Text to Speech", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "CoMoSVC: Consistency Model-based Singing Voice Conversion", "raw": "CoMoSVC: Consistency Model-based Singing Voice Conversion", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "SIGNeRF: Scene Integrated Generation for Neural Radiance Fields", "raw": "SIGNeRF: Scene Integrated Generation for Neural Radiance Fields", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "From Audio to Photoreal Embodiment: Synthesizing Humans in Conversations", "raw": "From Audio to Photoreal Embodiment: Synthesizing Humans in Conversations", "href": 
null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "aMUSEd: An Open MUSE Reproduction", "raw": "aMUSEd: An Open MUSE Reproduction", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Image Sculpting: Precise Object Editing with 3D Geometry Control", "raw": "Image Sculpting: Precise Object Editing with 3D Geometry Control", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A Vision Check-up for Language Models", "raw": "A Vision Check-up for Language Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Multilingual Instruction Tuning With Just a Pinch of Multilinguality", "raw": "Multilingual Instruction Tuning With Just a Pinch of Multilinguality", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "WordArt Designer API: User-Driven Artistic Typography Synthesis with Large Language Models on ModelScope", "raw": "WordArt Designer API: User-Driven Artistic Typography Synthesis with Large Language Models on ModelScope", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Moonshot: Towards Controllable Video Generation and Editing with Multimodal Conditions", "raw": "Moonshot: Towards Controllable Video Generation and Editing with Multimodal Conditions", "href": null, "resource": null, "url": null, "code": 
null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "GPT-4V(ision) is a Generalist Web Agent, if Grounded", "raw": "GPT-4V(ision) is a Generalist Web Agent, if Grounded", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Here is my selection of papers for today (4 Jan) https://huggingface.co/papers Efficient Hybrid Zoom using Camera Fusion on Mobile Phones Incremental FastPitch: Chunk-based High Quality Text to Speech CoMoSVC: Consistency Model-based Singing Voice Conversion SIGNeRF: Scene Integrated Generation for Neural Radiance Fields From Audio to Photoreal Embodiment: Synthesizing Humans in Conversations aMUSEd: An Open MUSE Reproduction Image Sculpting: Precise Object Editing with 3D Geometry Control A Vision Check-up for Language Models Multilingual Instruction Tuning With Just a Pinch of Multilinguality WordArt Designer API: User-Driven Artistic Typography Synthesis with Large Language Models on ModelScope Moonshot: Towards Controllable Video Generation and Editing with Multimodal Conditions GPT-4V(ision) is a Generalist Web Agent, if Grounded
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/fui95OdCqO-hRFV1yNAm0.png" } ]
[]
[ { "reaction": "👍", "users": [ "mwz", "davanstrien", "nouamanetazi", "mlninad", "m-ric", "victor", "merve" ], "count": 7 }, { "reaction": "🤯", "users": [ "reach-vb" ], "count": 1 } ]
2024-01-04T15:36:54.000Z
2024-01-04T15:36:54.617Z
[]
/posts/akhaliq/493427044416403
5
0
744704653511386
[ { "type": "text", "value": "Currently attempting to hack EvoDiff to generate binders for target proteins with some interesting results. The generated binders tend to change conformation, sometimes drastically, when bound to the target proteins compared to their unbound states. Below is the target protein with an IDR linker, the generated binder, and the binder bound to the target protein with the IDR linker structure as predicted by ESMFold. Notice how the binder goes from being a solid alpha-helix, to being beta-sheets (in orange). That's quite a change in tertiary structure! ", "raw": "Currently attempting to hack EvoDiff to generate binders for target proteins with some interesting results. The generated binders tend to change conformation, sometimes drastically, when bound to the target proteins compared to their unbound states. Below is the target protein with an IDR linker, the generated binder, and the binder bound to the target protein with the IDR linker structure as predicted by ESMFold. Notice how the binder goes from being a solid alpha-helix, to being beta-sheets (in orange). That's quite a change in tertiary structure! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Currently attempting to hack EvoDiff to generate binders for target proteins with some interesting results. The generated binders tend to change conformation, sometimes drastically, when bound to the target proteins compared to their unbound states. Below is the target protein with an IDR linker, the generated binder, and the binder bound to the target protein with the IDR linker structure as predicted by ESMFold. Notice how the binder goes from being a solid alpha-helix, to being beta-sheets (in orange). That's quite a change in tertiary structure!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64191ec8d459c9e7fbb0236b/7BeTgySZzmFCaVpntaYgP.jpeg", "fullname": "Amelie Schreiber", "name": "AmelieSchreiber", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 737, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64191ec8d459c9e7fbb0236b/JyIQH3-9v_Jk9jxOLdwn8.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64191ec8d459c9e7fbb0236b/fK6YK54zA_K7l1rV5go2X.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64191ec8d459c9e7fbb0236b/yf0loU0UPZHvVNlfOuEvO.png" } ]
[]
[ { "reaction": "🤯", "users": [ "osanseviero", "reach-vb", "victor", "nouamanetazi", "clem", "mlninad", "Jemseen", "mishig", "merve", "radames", "thomwolf", "samusenps", "vicgalle", "ScienceStanley", "sbrandeis" ], "count": 15 }, { "reaction": "❤️", "users": [ "clefourrier", "alielfilali01", "thomwolf", "samusenps", "ScienceStanley", "assiduous006" ], "count": 6 } ]
2024-01-03T20:55:30.000Z
2024-07-13T06:01:19.765Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1608146735109-5fcfb7c407408029ba3577e2.png", "fullname": "Simon Brandeis", "name": "sbrandeis", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 116, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64191ec8d459c9e7fbb0236b/7BeTgySZzmFCaVpntaYgP.jpeg", "fullname": "Amelie Schreiber", "name": "AmelieSchreiber", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 737, "isFollowing": false } ]
/posts/AmelieSchreiber/744704653511386
2,488
3
524030771261316
[ { "type": "text", "value": "Here is my selection of papers for today (3 Jan) on Hugging Face", "raw": "Here is my selection of papers for today (3 Jan) on Hugging Face", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "paper pages: ", "raw": "paper pages: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/papers", "href": "https://huggingface.co/papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "En3D: An Enhanced Generative Model for Sculpting 3D Humans from 2D Synthetic Data", "raw": "En3D: An Enhanced Generative Model for Sculpting 3D Humans from 2D Synthetic Data", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Boundary Attention: Learning to Find Faint Boundaries at Any Resolution", "raw": "Boundary Attention: Learning to Find Faint Boundaries at Any Resolution", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Taming Mode Collapse in Score Distillation for Text-to-3D Generation", "raw": "Taming Mode Collapse in Score Distillation for Text-to-3D Generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "LLaMA Beyond English: An Empirical Study on Language Capability Transfer", "raw": "LLaMA Beyond English: An Empirical Study on Language Capability Transfer", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, 
"raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Self-Play Fine-Tuning Converts Weak Language Models to Strong Language Models", "raw": "Self-Play Fine-Tuning Converts Weak Language Models to Strong Language Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A Comprehensive Study of Knowledge Editing for Large Language Models", "raw": "A Comprehensive Study of Knowledge Editing for Large Language Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Q-Refine: A Perceptual Quality Refiner for AI-Generated Image", "raw": "Q-Refine: A Perceptual Quality Refiner for AI-Generated Image", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "LLM Maybe LongLM: Self-Extend LLM Context Window Without Tuning", "raw": "LLM Maybe LongLM: Self-Extend LLM Context Window Without Tuning", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "DocLLM: A layout-aware generative language model for multimodal document understanding", "raw": "DocLLM: A layout-aware generative language model for multimodal document understanding", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "VideoDrafter: Content-Consistent Multi-Scene Video Generation with LLM", "raw": "VideoDrafter: Content-Consistent Multi-Scene Video Generation with LLM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": 
"new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "TrailBlazer: Trajectory Control for Diffusion-Based Video Generation", "raw": "TrailBlazer: Trajectory Control for Diffusion-Based Video Generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Here is my selection of papers for today (3 Jan) on Hugging Face paper pages: https://huggingface.co/papers En3D: An Enhanced Generative Model for Sculpting 3D Humans from 2D Synthetic Data Boundary Attention: Learning to Find Faint Boundaries at Any Resolution Taming Mode Collapse in Score Distillation for Text-to-3D Generation LLaMA Beyond English: An Empirical Study on Language Capability Transfer Self-Play Fine-Tuning Converts Weak Language Models to Strong Language Models A Comprehensive Study of Knowledge Editing for Large Language Models Q-Refine: A Perceptual Quality Refiner for AI-Generated Image LLM Maybe LongLM: Self-Extend LLM Context Window Without Tuning DocLLM: A layout-aware generative language model for multimodal document understanding VideoDrafter: Content-Consistent Multi-Scene Video Generation with LLM TrailBlazer: Trajectory Control for Diffusion-Based Video Generation
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/2asoB1N-gO22NJbwPC4Lw.png" } ]
[]
[ { "reaction": "❤️", "users": [ "Tonic", "Chunte", "osanseviero", "nouamanetazi", "mlninad", "merve", "diwank" ], "count": 7 }, { "reaction": "👍", "users": [ "mwz" ], "count": 1 } ]
2024-01-03T15:46:09.000Z
2024-01-04T11:34:18.771Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62c5a943b4d97e47fd7cfaf7/LTtMzEtyI2dXum44v484z.jpeg", "fullname": "Mahwiz Khalil", "name": "mwz", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 14, "isFollowing": false } ]
/posts/akhaliq/524030771261316
5
1
674392259202253
[ { "type": "text", "value": "Microsoft: Improving Text Embeddings with Large Language Models", "raw": "Microsoft: Improving Text Embeddings with Large Language Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- uses an LLM instead of complex pipelines to create the training data", "raw": "- uses an LLM instead of complex pipelines to create the training data", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- directly generates data for numerous text embedding tasks", "raw": "- directly generates data for numerous text embedding tasks", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- fine tunes standard models with contrastative loss achieving great performance ", "raw": "- fine tunes standard models with contrastative loss achieving great performance ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- critical thought: isn't this kinda benchmark hacking? If the benchmarks are so encompassing that they capture the complete idea of embedding, it's maybe a good idea, but often it is oversimplifying, I find. ", "raw": "- critical thought: isn't this kinda benchmark hacking? If the benchmarks are so encompassing that they capture the complete idea of embedding, it's maybe a good idea, but often it is oversimplifying, I find. 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Feel free to share your thoughts, even if they like mine don't beat the benchmarks ;P", "raw": "Feel free to share your thoughts, even if they like mine don't beat the benchmarks ;P", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2401.00368", "href": "https://arxiv.org/abs/2401.00368", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Microsoft: Improving Text Embeddings with Large Language Models - uses an LLM instead of complex pipelines to create the training data - directly generates data for numerous text embedding tasks - fine-tunes standard models with contrastive loss, achieving great performance - critical thought: isn't this kinda benchmark hacking? If the benchmarks are so encompassing that they capture the complete idea of embedding, it's maybe a good idea, but often it is oversimplifying, I find. Feel free to share your thoughts, even if they, like mine, don't beat the benchmarks ;P https://arxiv.org/abs/2401.00368
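Editor's note: to make the "fine-tunes standard models with contrastive loss" point concrete, here is a minimal, generic in-batch contrastive (InfoNCE-style) loss sketch. It is not taken from the paper; the temperature value, embedding dimension, and batch-pairing scheme are illustrative assumptions.

```python
# Hypothetical sketch, not code from the paper: a generic in-batch
# contrastive loss as commonly used to fine-tune text embedding models.
import torch
import torch.nn.functional as F


def in_batch_contrastive_loss(query_emb, doc_emb, temperature=0.05):
    q = F.normalize(query_emb, dim=-1)   # cosine-normalised query embeddings
    d = F.normalize(doc_emb, dim=-1)     # cosine-normalised document embeddings
    logits = q @ d.T / temperature       # (batch, batch) similarity matrix
    targets = torch.arange(q.size(0))    # the i-th doc is the positive for the i-th query
    return F.cross_entropy(logits, targets)


# Random tensors stand in for encoder outputs; in a real run these would be
# the pooled hidden states of the model being fine-tuned.
q = torch.randn(8, 768, requires_grad=True)
d = torch.randn(8, 768, requires_grad=True)
loss = in_batch_contrastive_loss(q, d)
loss.backward()
print(float(loss))
```

Each query is pushed toward its own positive document and away from every other document in the batch, which is the standard recipe regardless of where the (query, document) pairs come from — here, LLM-generated data.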
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669551186189-63732ebbbd81fae2b3aaf3fb.jpeg", "fullname": "Knut Jägersberg", "name": "KnutJaegersberg", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 238, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63732ebbbd81fae2b3aaf3fb/q-Rpp7HT66NpZghhnTvKB.png" } ]
[]
[ { "reaction": "❤️", "users": [ "osanseviero", "pcuenq", "julien-c", "alvarobartt", "nouamanetazi", "mlninad", "dvilasuero", "Praise2112", "merve", "sbrandeis" ], "count": 10 }, { "reaction": "👍", "users": [ "osanseviero", "julien-c", "davanstrien", "dvilasuero" ], "count": 4 } ]
2024-01-02T19:08:10.000Z
2024-07-13T06:01:19.763Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1608146735109-5fcfb7c407408029ba3577e2.png", "fullname": "Simon Brandeis", "name": "sbrandeis", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 116, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60f0608166e5701b80ed3f02/BHso-wSWpR9b8b8CKvodC.jpeg", "fullname": "Alvaro Bartolome", "name": "alvarobartt", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1739, "isFollowing": false } ]
/posts/KnutJaegersberg/674392259202253
10
2
116899980668033
[ { "type": "text", "value": "Here is my selection of papers for today (2 Jan) on Hugging Face", "raw": "Here is my selection of papers for today (2 Jan) on Hugging Face", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/papers", "href": "https://huggingface.co/papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "SteinDreamer: Variance Reduction for Text-to-3D Score Distillation via Stein Identity", "raw": "SteinDreamer: Variance Reduction for Text-to-3D Score Distillation via Stein Identity", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Boosting Large Language Model for Speech Synthesis: An Empirical Study", "raw": "Boosting Large Language Model for Speech Synthesis: An Empirical Study", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "COSMO: COntrastive Streamlined MultimOdal Model with Interleaved Pre-Training", "raw": "COSMO: COntrastive Streamlined MultimOdal Model with Interleaved Pre-Training", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Astraios: Parameter-Efficient Instruction Tuning Code Large Language Models", "raw": "Astraios: Parameter-Efficient Instruction Tuning Code Large Language Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": 
"Beyond Chinchilla-Optimal: Accounting for Inference in Language Model Scaling Laws", "raw": "Beyond Chinchilla-Optimal: Accounting for Inference in Language Model Scaling Laws", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "GeoGalactica: A Scientific Large Language Model in Geoscience", "raw": "GeoGalactica: A Scientific Large Language Model in Geoscience", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Improving Text Embeddings with Large Language Models", "raw": "Improving Text Embeddings with Large Language Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Unicron: Economizing Self-Healing LLM Training at Scale", "raw": "Unicron: Economizing Self-Healing LLM Training at Scale", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Here is my selection of papers for today (2 Jan) on Hugging Face https://huggingface.co/papers SteinDreamer: Variance Reduction for Text-to-3D Score Distillation via Stein Identity Boosting Large Language Model for Speech Synthesis: An Empirical Study COSMO: COntrastive Streamlined MultimOdal Model with Interleaved Pre-Training Astraios: Parameter-Efficient Instruction Tuning Code Large Language Models Beyond Chinchilla-Optimal: Accounting for Inference in Language Model Scaling Laws GeoGalactica: A Scientific Large Language Model in Geoscience Improving Text Embeddings with Large Language Models Unicron: Economizing Self-Healing LLM Training at Scale
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/z9sDRHtL2GMiqr356Es_j.png" } ]
[]
[ { "reaction": "❤️", "users": [ "nouamanetazi", "merve", "clem", "samusenps", "AdinaY", "Tonic" ], "count": 6 } ]
2024-01-02T15:34:24.000Z
2024-01-02T15:34:24.120Z
[]
/posts/akhaliq/116899980668033
5
0
713344594753494
[ { "type": "text", "value": "✅ Ever wondered how to measure transparency in model development?", "raw": "✅ Ever wondered how to measure transparency in model development?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "My last open-source contribution for 2023 is s Space that allows you to self-assess the transparency of your model based on the 100 indicators of the Foundation Model Transparency Index (FMTI).", "raw": "My last open-source contribution for 2023 is s Space that allows you to self-assess the transparency of your model based on the 100 indicators of the Foundation Model Transparency Index (FMTI).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The original study evaluated the developers of 10 top LLMs. Curious about how yours measures up? 👀", "raw": "The original study evaluated the developers of 10 top LLMs. Curious about how yours measures up? 👀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/mariagrandury/fmti-transparency-self-assessment", "href": null, "resource": { "type": "space", "id": "mariagrandury/fmti-transparency-self-assessment", "discussionNum": null }, "url": "https://huggingface.co/spaces/mariagrandury/fmti-transparency-self-assessment", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Let's commit to a 2024 with greater transparency in the AI ecosystem! 🚀", "raw": "Let's commit to a 2024 with greater transparency in the AI ecosystem! 🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
✅ Ever wondered how to measure transparency in model development? My last open-source contribution for 2023 is a Space that allows you to self-assess the transparency of your model based on the 100 indicators of the Foundation Model Transparency Index (FMTI). The original study evaluated the developers of 10 top LLMs. Curious about how yours measures up? 👀 https://huggingface.co/spaces/mariagrandury/fmti-transparency-self-assessment Let's commit to a 2024 with greater transparency in the AI ecosystem! 🚀
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1665073337782-5f9c00a5777efc07d7f1e4be.png", "fullname": "María Grandury", "name": "mariagrandury", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2283, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "lbourdois", "victor", "osanseviero", "abhishek", "Wauplin", "julien-c", "davanstrien", "nouamanetazi", "dvilasuero", "merve", "MaziyarPanahi", "clem", "afrideva", "sbrandeis" ], "count": 14 }, { "reaction": "🤝", "users": [ "osanseviero", "victor", "julien-c", "dvilasuero", "merve", "clem" ], "count": 6 }, { "reaction": "🤗", "users": [ "dvilasuero", "merve", "clem", "sbrandeis" ], "count": 4 } ]
2023-12-29T17:34:17.000Z
2024-01-17T09:50:16.197Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1665073337782-5f9c00a5777efc07d7f1e4be.png", "fullname": "María Grandury", "name": "mariagrandury", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2283, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60420dccc15e823a685f2b03/Dn7QTyy9SZ7jKN6xpufVD.png", "fullname": "Daniel Vila", "name": "dvilasuero", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 231, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fd5e18a90b6dc4633f6d292/gZXHW5dd9R86AV9LMZ--y.png", "fullname": "Maziyar Panahi", "name": "MaziyarPanahi", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 1541, "isFollowing": false } ]
/posts/mariagrandury/713344594753494
1565
6
339992696500624
[ { "type": "text", "value": "Here is my selection of papers for today", "raw": "Here is my selection of papers for today", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/papers", "href": "https://huggingface.co/papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Compact Neural Graphics Primitives with Learned Hash Probing", "raw": "Compact Neural Graphics Primitives with Learned Hash Probing", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Restoration by Generation with Constrained Priors", "raw": "Restoration by Generation with Constrained Priors", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "SSR-Encoder: Encoding Selective Subject ", "raw": "SSR-Encoder: Encoding Selective Subject ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Representation for Subject-Driven Generation", "raw": "Representation for Subject-Driven Generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Hyper-VolTran: Fast and Generalizable One-Shot Image to 3D Object Structure via HyperNetworks", "raw": "Hyper-VolTran: Fast and Generalizable One-Shot Image to 3D Object Structure via HyperNetworks", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, 
"label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "InsActor: Instruction-driven Physics-based Characters", "raw": "InsActor: Instruction-driven Physics-based Characters", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Unsupervised Universal Image Segmentation", "raw": "Unsupervised Universal Image Segmentation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Spacetime Gaussian Feature Splatting for Real-Time Dynamic View Synthesis", "raw": "Spacetime Gaussian Feature Splatting for Real-Time Dynamic View Synthesis", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "DreamGaussian4D: Generative 4D Gaussian Splatting", "raw": "DreamGaussian4D: Generative 4D Gaussian Splatting", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "City-on-Web: Real-time Neural Rendering of Large-scale Scenes on the Web", "raw": "City-on-Web: Real-time Neural Rendering of Large-scale Scenes on the Web", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "DL3DV-10K: A Large-Scale Scene Dataset for Deep Learning-based 3D Vision", "raw": "DL3DV-10K: A Large-Scale Scene Dataset for Deep Learning-based 3D Vision", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "DiffusionGAN3D: Boosting Text-guided 3D Generation 
and Domain Adaption by Combining 3D GANs and Diffusion Priors", "raw": "DiffusionGAN3D: Boosting Text-guided 3D Generation and Domain Adaption by Combining 3D GANs and Diffusion Priors", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Unified-IO 2: Scaling Autoregressive Multimodal Models with Vision, Language, Audio, and Action", "raw": "Unified-IO 2: Scaling Autoregressive Multimodal Models with Vision, Language, Audio, and Action", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Prompt Expansion for Adaptive Text-to-Image Generation", "raw": "Prompt Expansion for Adaptive Text-to-Image Generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "PanGu-Draw", "raw": "PanGu-Draw", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I2V-Adapter: A General Image-to-Video Adapter for Video Diffusion Models", "raw": "I2V-Adapter: A General Image-to-Video Adapter for Video Diffusion Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The LLM Surgeon", "raw": "The LLM Surgeon", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "MathPile: A Billion-Token-Scale Pretraining Corpus for Math", "raw": "MathPile: A Billion-Token-Scale Pretraining Corpus for Math", "href": null, "resource": null, 
"url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "MobileVLM : A Fast, Reproducible and Strong Vision Language Assistant for Mobile Devices", "raw": "MobileVLM : A Fast, Reproducible and Strong Vision Language Assistant for Mobile Devices", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "TinyGPT-V: Efficient Multimodal Large Language Model via Small Backbones", "raw": "TinyGPT-V: Efficient Multimodal Large Language Model via Small Backbones", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Here is my selection of papers for today https://huggingface.co/papers Compact Neural Graphics Primitives with Learned Hash Probing Restoration by Generation with Constrained Priors SSR-Encoder: Encoding Selective Subject Representation for Subject-Driven Generation Hyper-VolTran: Fast and Generalizable One-Shot Image to 3D Object Structure via HyperNetworks InsActor: Instruction-driven Physics-based Characters Unsupervised Universal Image Segmentation Spacetime Gaussian Feature Splatting for Real-Time Dynamic View Synthesis DreamGaussian4D: Generative 4D Gaussian Splatting City-on-Web: Real-time Neural Rendering of Large-scale Scenes on the Web DL3DV-10K: A Large-Scale Scene Dataset for Deep Learning-based 3D Vision DiffusionGAN3D: Boosting Text-guided 3D Generation and Domain Adaption by Combining 3D GANs and Diffusion Priors Unified-IO 2: Scaling Autoregressive Multimodal Models with Vision, Language, Audio, and Action Prompt Expansion for Adaptive Text-to-Image Generation PanGu-Draw I2V-Adapter: A General Image-to-Video Adapter for Video Diffusion Models The LLM Surgeon MathPile: A Billion-Token-Scale Pretraining Corpus for Math MobileVLM : A Fast, Reproducible and Strong Vision Language Assistant for Mobile Devices TinyGPT-V: Efficient Multimodal Large Language Model via Small Backbones
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/iqI88mHS89MBhueMioCTa.png" } ]
[]
[ { "reaction": "👍", "users": [ "osanseviero", "Chunte", "victor", "teowu", "nouamanetazi", "aaditya", "sbrandeis" ], "count": 7 }, { "reaction": "❤️", "users": [ "Chunte", "mariagrandury" ], "count": 2 }, { "reaction": "🤗", "users": [ "Tonic" ], "count": 1 } ]
2023-12-29T14:42:22.000Z
2023-12-29T14:42:22.386Z
[]
/posts/akhaliq/339992696500624
16
0
421657801791921
[ { "type": "text", "value": "💬 Notux 8x7b has already its own Chat UI running on 🤗 Spaces! Feel free to give it a try and chat with Notux, and let us know how it goes.", "raw": "💬 Notux 8x7b has already its own Chat UI running on 🤗 Spaces! Feel free to give it a try and chat with Notux, and let us know how it goes.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/spaces/argilla/notux-chat-ui", "href": "https://huggingface.co/spaces/argilla/notux-chat-ui", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Kudos to ", "raw": "Kudos to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@gabrielmbmb", "href": null, "resource": null, "url": null, "code": null, "user": "gabrielmbmb", "label": null, "lang": null }, { "type": "text", "value": "!", "raw": "!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
💬 Notux 8x7b has already its own Chat UI running on 🤗 Spaces! Feel free to give it a try and chat with Notux, and let us know how it goes. https://huggingface.co/spaces/argilla/notux-chat-ui Kudos to @gabrielmbmb!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60f0608166e5701b80ed3f02/BHso-wSWpR9b8b8CKvodC.jpeg", "fullname": "Alvaro Bartolome", "name": "alvarobartt", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1739, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60f2fc91b92afccb7c34b8ed/whF6nGtyTAhbtiWJJnL9e.png", "fullname": "Gabriel Martín Blázquez", "name": "gabrielmbmb", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 90 } ]
[ { "reaction": "❤️", "users": [ "gabrielmbmb", "osanseviero", "mishig", "Chunte", "ceyda", "sayakpaul", "thomwolf", "nouamanetazi", "dvilasuero", "pcuenq", "NovoCode" ], "count": 11 }, { "reaction": "🤯", "users": [ "victor", "osanseviero", "Chunte", "dvilasuero", "pcuenq", "NovoCode", "sbrandeis" ], "count": 7 }, { "reaction": "👍", "users": [ "sayakpaul", "osanseviero", "kramp", "Chunte" ], "count": 4 }, { "reaction": "😔", "users": [ "victor", "Chunte" ], "count": 2 } ]
2023-12-28T16:45:36.000Z
2023-12-28T16:45:36.328Z
[]
/posts/alvarobartt/421657801791921
437
0
656973498012745
[ { "type": "text", "value": "Introducing Marigold 🌼 - a universal monocular depth estimator, delivering incredibly sharp predictions in the wild! Based on Stable Diffusion, it is trained with synthetic depth data only and excels in zero-shot adaptation to real-world imagery. Check it out:", "raw": "Introducing Marigold 🌼 - a universal monocular depth estimator, delivering incredibly sharp predictions in the wild! Based on Stable Diffusion, it is trained with synthetic depth data only and excels in zero-shot adaptation to real-world imagery. Check it out:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🤗 Hugging Face Space: ", "raw": "🤗 Hugging Face Space: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/spaces/toshas/marigold", "href": "https://huggingface.co/spaces/toshas/marigold", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🤗 Hugging Face Model: ", "raw": "🤗 Hugging Face Model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/Bingxin/Marigold", "href": "https://huggingface.co/Bingxin/Marigold", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🤗 Hugging Face Paper: ", "raw": "🤗 Hugging Face Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2312.02145", "href": null, "resource": { "type": "paper", "id": "2312.02145", "discussionNum": null }, "url": "https://huggingface.co/papers/2312.02145", "code": null, "user": null, "label": "Repurposing Diffusion-Based Image Generators for Monocular Depth\n Estimation (2312.02145)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🌐 Website: ", "raw": "🌐 Website: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://marigoldmonodepth.github.io", "href": "https://marigoldmonodepth.github.io", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👾 Code: ", "raw": "👾 Code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": 
"link", "value": null, "raw": "https://github.com/prs-eth/marigold", "href": "https://github.com/prs-eth/marigold", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👾 Code: ", "raw": "👾 Code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`pip install diffusers`", "href": null, "resource": null, "url": null, "code": "pip install diffusers", "user": null, "label": null, "lang": null }, { "type": "text", "value": " (check comments to this post for details!)", "raw": " (check comments to this post for details!)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📄 Paper: ", "raw": "📄 Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2312.02145", "href": "https://arxiv.org/abs/2312.02145", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Brought to you by the fantastic team from the Photogrammetry and Remote Sensing group of ETH Zurich: Bingxin Ke (", "raw": "Brought to you by the fantastic team from the Photogrammetry and Remote Sensing group of ETH Zurich: Bingxin Ke (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Bingxin", "href": null, "resource": null, "url": null, "code": null, "user": "Bingxin", "label": null, "lang": null }, { "type": "text", "value": "), Anton Obukhov (", "raw": "), Anton Obukhov (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@toshas", "href": null, "resource": null, "url": null, "code": null, "user": "toshas", "label": null, "lang": null }, { "type": "text", "value": "), Shengyu Huang, Nando Metzger (", "raw": "), Shengyu Huang, Nando Metzger (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@nandometzger", "href": null, "resource": null, "url": null, "code": null, "user": "nandometzger", "label": null, "lang": null }, { "type": "text", "value": "), Rodrigo Caye Daudt, and Konrad Schindler.", "raw": "), Rodrigo Caye Daudt, and Konrad Schindler.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Introducing Marigold 🌼 - a universal monocular depth estimator, delivering incredibly sharp predictions in the wild! Based on Stable Diffusion, it is trained with synthetic depth data only and excels in zero-shot adaptation to real-world imagery. Check it out: 🤗 Hugging Face Space: https://huggingface.co/spaces/toshas/marigold 🤗 Hugging Face Model: https://huggingface.co/Bingxin/Marigold 🤗 Hugging Face Paper: https://huggingface.co/papers/2312.02145 🌐 Website: https://marigoldmonodepth.github.io 👾 Code: https://github.com/prs-eth/marigold 👾 Code: `pip install diffusers` (check comments to this post for details!) 📄 Paper: https://arxiv.org/abs/2312.02145 Brought to you by the fantastic team from the Photogrammetry and Remote Sensing group of ETH Zurich: Bingxin Ke (@Bingxin), Anton Obukhov (@toshas), Shengyu Huang, Nando Metzger (@nandometzger), Rodrigo Caye Daudt, and Konrad Schindler.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62f93abbc4817cfc0756b6f8/rGYLaq-rmoJJYotkC1VXk.jpeg", "fullname": "Anton Obukhov", "name": "toshas", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 68, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/62f93abbc4817cfc0756b6f8/5-6Ug6IDp_r16RxYoKN4n.mp4" } ]
[ { "avatarUrl": "/avatars/eb0e0259c391d59739c1a205c36bb539.svg", "fullname": "Ke", "name": "Bingxin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 9 }, { "avatarUrl": "/avatars/758b06dae06e9eee6fced10ce682aef1.svg", "fullname": "Nando Metzger", "name": "nandometzger", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62f93abbc4817cfc0756b6f8/rGYLaq-rmoJJYotkC1VXk.jpeg", "fullname": "Anton Obukhov", "name": "toshas", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 68 } ]
[ { "reaction": "❤️", "users": [ "osanseviero", "akhaliq", "nandometzger", "victor", "toshas", "pwais", "Chunte", "radames", "sayakpaul", "Wauplin", "Tonic", "nouamanetazi", "julien-c", "AdinaY", "clem", "Citaman", "abidlabs", "samusenps", "ixaxaar", "sbrandeis" ], "count": 20 }, { "reaction": "🤗", "users": [ "osanseviero", "akhaliq", "nandometzger", "toshas", "Chunte", "radames", "Wauplin", "julien-c", "clem", "sbrandeis" ], "count": 10 }, { "reaction": "🤯", "users": [ "osanseviero", "akhaliq", "toshas", "Chunte", "julien-c", "clem", "Tonic", "sbrandeis" ], "count": 8 } ]
2023-12-28T14:51:57.000Z
2024-07-13T06:01:19.762Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1608146735109-5fcfb7c407408029ba3577e2.png", "fullname": "Simon Brandeis", "name": "sbrandeis", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 116, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62f93abbc4817cfc0756b6f8/rGYLaq-rmoJJYotkC1VXk.jpeg", "fullname": "Anton Obukhov", "name": "toshas", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 68, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1649681653581-5f7fbd813e94f16a85448745.jpeg", "fullname": "Sayak Paul", "name": "sayakpaul", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 459, "isFollowing": false } ]
/posts/toshas/656973498012745
224
3
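For readers who want to try the Marigold post above from Python, here is a minimal sketch using the diffusers library. It is not the authors' reference code: the dedicated MarigoldDepthPipeline class only appears in later diffusers releases, and the checkpoint id and input filename below are assumptions.

```
# Minimal sketch, not the authors' reference implementation.
# Assumptions: a recent diffusers release that ships MarigoldDepthPipeline,
# the "prs-eth/marigold-depth-lcm-v1-0" checkpoint id, a CUDA GPU,
# and a local photo named "input.jpg".
import torch
from diffusers import MarigoldDepthPipeline
from diffusers.utils import load_image

pipe = MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0",  # assumed checkpoint name
    torch_dtype=torch.float16,
).to("cuda")

image = load_image("input.jpg")  # hypothetical input photo
out = pipe(image)                # runs the diffusion-based depth estimator
depth = out.prediction           # affine-invariant depth map(s) scaled to [0, 1]
print(depth.shape)
```

If an older diffusers version is installed, the Space and GitHub repository linked in the post are the safer entry points.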
571966718497603
[ { "type": "text", "value": "Do you use Python to visualize data? Wouldn't it be nice if an AI chatbot can help you write Python code and improve your visualization automatically? ", "raw": "Do you use Python to visualize data? Wouldn't it be nice if an AI chatbot can help you write Python code and improve your visualization automatically? ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out our new blog post on how to build an AI chatbot to run code and tweak plots: ", "raw": "Check out our new blog post on how to build an AI chatbot to run code and tweak plots: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/sophiamyang/tweak-mpl-chat", "href": "https://huggingface.co/blog/sophiamyang/tweak-mpl-chat", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Do you use Python to visualize data? Wouldn't it be nice if an AI chatbot can help you write Python code and improve your visualization automatically? Check out our new blog post on how to build an AI chatbot to run code and tweak plots: https://huggingface.co/blog/sophiamyang/tweak-mpl-chat
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6320c05a145cfa4c04cb4359/jLYLrlc_LZQMi3yCrlfCi.jpeg", "fullname": "Sophia Yang", "name": "sophiamyang", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 102, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6320c05a145cfa4c04cb4359/d_o5sBcfawKRwDlm-9MJh.gif" } ]
[]
[ { "reaction": "👍", "users": [ "sophiamyang", "philippjfr", "osanseviero", "victor", "kramp", "ahuang11", "Chunte", "MarcSkovMadsen", "droumis", "pcuenq", "nouamanetazi", "julien-c", "merve", "sbrandeis", "YAYA20" ], "count": 15 }, { "reaction": "❤️", "users": [ "Chunte", "MarcSkovMadsen", "droumis", "dleybel", "julien-c", "samusenps", "sourceoftruthdata", "sbrandeis", "YAYA20" ], "count": 9 }, { "reaction": "🤗", "users": [ "Chunte", "MarcSkovMadsen", "droumis", "julien-c", "sbrandeis", "YAYA20" ], "count": 6 }, { "reaction": "🤯", "users": [ "Chunte", "MarcSkovMadsen", "droumis", "sbrandeis" ], "count": 4 } ]
2023-12-28T10:42:26.000Z
2023-12-28T11:20:20.570Z
[]
/posts/sophiamyang/571966718497603
754
0
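The post above only links to the blog, but the general pattern it describes, a chat UI whose callback runs plotting code, can be sketched with Panel's ChatInterface. This is an illustrative toy, not the blog's code: the LLM that writes and tweaks the code is omitted, and the callback simply executes the chat message as matplotlib code.

```
# Toy sketch of a chat UI that executes the user's message as matplotlib code.
# Not the blog's implementation: the LLM step is omitted entirely.
import matplotlib
matplotlib.use("agg")  # headless backend so figures render server-side
import matplotlib.pyplot as plt
import panel as pn

pn.extension()

def run_plot_code(contents, user, instance):
    """Execute the chat message as Python and return the resulting figure."""
    plt.close("all")
    exec(contents, {"plt": plt})  # trusted input only; a real app must sandbox this
    return pn.pane.Matplotlib(plt.gcf(), tight=True)

chat = pn.chat.ChatInterface(callback=run_plot_code)
chat.servable()  # launch with: panel serve app.py
```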
402595491191928
[ { "type": "text", "value": "Hugging Face 🤗", "raw": "Hugging Face 🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hugging Face 🤗
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61030ed7d6edf00e0107a465/JP29xIxDfvQAsFgxpNkbp.png", "fullname": "Rishiraj Acharya", "name": "rishiraj", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 115, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/61030ed7d6edf00e0107a465/1i0AqRxn_LjiVYAN-lyOK.gif" } ]
[]
[ { "reaction": "🤗", "users": [ "kramp", "Citaman", "ariG23498", "mwitiderrick", "abhishek", "akashicmarga", "rishiraj", "mergisi", "pwais", "Chunte", "gpt3eth", "VictorSanh", "nouamanetazi", "julien-c", "lunarflu", "BrigitteTousi", "radames", "JackCloudman", "yuchenlin", "clem", "antiven0m", "santiviquez" ], "count": 22 }, { "reaction": "❤️", "users": [ "clem", "MarinaraSpaghetti", "santiviquez" ], "count": 3 } ]
2023-12-28T04:45:18.000Z
2024-01-18T01:48:59.701Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61030ed7d6edf00e0107a465/JP29xIxDfvQAsFgxpNkbp.png", "fullname": "Rishiraj Acharya", "name": "rishiraj", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 115, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63657b0898da81987e23b15b/o-Dd615dxU_IAZwmj54h5.jpeg", "fullname": "Anshuman Mishra", "name": "shivance", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 23, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/607f666a4ad99100d63ce35c/QxhxnvfeV6efkxwUFHwjI.png", "fullname": "Bill Yuchen Lin", "name": "yuchenlin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 64, "isFollowing": false } ]
/posts/rishiraj/402595491191928
407
3
256203719731469
[ { "type": "text", "value": "some mandatory hello world as first post 🤗", "raw": "some mandatory hello world as first post 🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\nfrom tokenizers import Tokenizer\ntokenizer = Tokenizer.from_pretrained(\"bert-base-uncased\")\ntokenizer.encode(\"Hello world~ Hello 2024\").tokens\n```", "href": null, "resource": null, "url": null, "code": "from tokenizers import Tokenizer\ntokenizer = Tokenizer.from_pretrained(\"bert-base-uncased\")\ntokenizer.encode(\"Hello world~ Hello 2024\").tokens", "user": null, "label": null, "lang": null } ]
some mandatory hello world as first post 🤗 ``` from tokenizers import Tokenizer tokenizer = Tokenizer.from_pretrained("bert-base-uncased") tokenizer.encode("Hello world~ Hello 2024").tokens ```
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1623134857336-5f7c2cbbb1a525442ff96e39.jpeg", "fullname": "Ceyda Cinarel", "name": "ceyda", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 80, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "chansung", "PSegs", "osanseviero", "kramp", "abhishek", "mergisi", "victor", "Chunte", "radames", "Tonic", "nouamanetazi", "mishig", "julien-c", "samusenps", "benvdv", "rreed", "Sengil" ], "count": 17 }, { "reaction": "❤️", "users": [ "mariagrandury", "Chunte", "radames", "pcuenq", "mishig", "julien-c", "clefourrier", "gabrielchua", "rreed" ], "count": 9 } ]
2023-12-28T00:05:29.000Z
2023-12-28T15:49:51.048Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1623134857336-5f7c2cbbb1a525442ff96e39.jpeg", "fullname": "Ceyda Cinarel", "name": "ceyda", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 80, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868, "isFollowing": false } ]
/posts/ceyda/256203719731469
280
2
570492751808024
[ { "type": "text", "value": "Here is my selection of papers for today (27 Dec) on Hugging Face daily papers newsletter", "raw": "Here is my selection of papers for today (27 Dec) on Hugging Face daily papers newsletter", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "daily pagers feed: ", "raw": "daily pagers feed: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/papers", "href": "https://huggingface.co/papers", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "UniRef++: Segment Every Reference Object in Spatial and Temporal Spaceshttps://huggingface.co/papers/2312.15715", "raw": "UniRef++: Segment Every Reference Object in Spatial and Temporal Spaceshttps://huggingface.co/papers/2312.15715", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "LangSplat: 3D Language Gaussian Splattinghttps://huggingface.co/papers/2312.16084", "raw": "LangSplat: 3D Language Gaussian Splattinghttps://huggingface.co/papers/2312.16084", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Human101: Training 100+FPS Human Gaussians in 100s from 1 Viewhttps://huggingface.co/papers/2312.15258", "raw": "Human101: Training 100+FPS Human Gaussians in 100s from 1 Viewhttps://huggingface.co/papers/2312.15258", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Audiobox: Unified Audio Generation with Natural Language Promptshttps://huggingface.co/papers/2312.15821", "raw": "Audiobox: Unified Audio Generation with Natural Language Promptshttps://huggingface.co/papers/2312.15821", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "HarmonyView: Harmonizing Consistency and Diversity in One-Image-to-3Dhttps://huggingface.co/papers/2312.15980", "raw": "HarmonyView: Harmonizing Consistency and Diversity in One-Image-to-3Dhttps://huggingface.co/papers/2312.15980", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "One-dimensional Adapter to Rule Them All: Concepts, Diffusion Models and Erasing Applicationshttps://huggingface.co/papers/2312.16145", "raw": "One-dimensional Adapter to Rule Them All: Concepts, Diffusion Models and Erasing Applicationshttps://huggingface.co/papers/2312.16145", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Make-A-Character: High Quality Text-to-3D Character Generation within Minuteshttps://huggingface.co/papers/2312.15430", "raw": "Make-A-Character: High Quality Text-to-3D Character Generation within Minuteshttps://huggingface.co/papers/2312.15430", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A Recipe for Scaling up Text-to-Video Generation with Text-free Videoshttps://huggingface.co/papers/2312.15770", "raw": "A Recipe for Scaling up Text-to-Video Generation with Text-free Videoshttps://huggingface.co/papers/2312.15770", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Principled Instructions Are All You Need for Questioning LLaMA-1/2, GPT-3.5/4https://huggingface.co/papers/2312.16171", "raw": "Principled Instructions Are All You Need for Questioning LLaMA-1/2, GPT-3.5/4https://huggingface.co/papers/2312.16171", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", 
"value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "SOLAR 10.7B: Scaling Large Language Models with Simple yet Effective Depth Up-Scalinghttps://huggingface.co/papers/2312.15166", "raw": "SOLAR 10.7B: Scaling Large Language Models with Simple yet Effective Depth Up-Scalinghttps://huggingface.co/papers/2312.15166", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Supervised Knowledge Makes Large Language Models Better In-context Learnershttps://huggingface.co/papers/2312.15918", "raw": "Supervised Knowledge Makes Large Language Models Better In-context Learnershttps://huggingface.co/papers/2312.15918", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Gemini vs GPT-4V: A Preliminary Comparison and Combination of Vision-Language Models Through Qualitative Caseshttps://huggingface.co/papers/2312.15011", "raw": "Gemini vs GPT-4V: A Preliminary Comparison and Combination of Vision-Language Models Through Qualitative Caseshttps://huggingface.co/papers/2312.15011", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Here is my selection of papers for today (27 Dec) on Hugging Face daily papers newsletter daily papers feed: https://huggingface.co/papers UniRef++: Segment Every Reference Object in Spatial and Temporal Spaces https://huggingface.co/papers/2312.15715 LangSplat: 3D Language Gaussian Splatting https://huggingface.co/papers/2312.16084 Human101: Training 100+FPS Human Gaussians in 100s from 1 View https://huggingface.co/papers/2312.15258 Audiobox: Unified Audio Generation with Natural Language Prompts https://huggingface.co/papers/2312.15821 HarmonyView: Harmonizing Consistency and Diversity in One-Image-to-3D https://huggingface.co/papers/2312.15980 One-dimensional Adapter to Rule Them All: Concepts, Diffusion Models and Erasing Applications https://huggingface.co/papers/2312.16145 Make-A-Character: High Quality Text-to-3D Character Generation within Minutes https://huggingface.co/papers/2312.15430 A Recipe for Scaling up Text-to-Video Generation with Text-free Videos https://huggingface.co/papers/2312.15770 Principled Instructions Are All You Need for Questioning LLaMA-1/2, GPT-3.5/4 https://huggingface.co/papers/2312.16171 SOLAR 10.7B: Scaling Large Language Models with Simple yet Effective Depth Up-Scaling https://huggingface.co/papers/2312.15166 Supervised Knowledge Makes Large Language Models Better In-context Learners https://huggingface.co/papers/2312.15918 Gemini vs GPT-4V: A Preliminary Comparison and Combination of Vision-Language Models Through Qualitative Cases https://huggingface.co/papers/2312.15011
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg", "fullname": "AK", "name": "akhaliq", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5205, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60f1abe7544c2adfd699860c/joKCErBR_fWMGq-tAMlZn.png" } ]
[]
[ { "reaction": "❤️", "users": [ "clefourrier", "osanseviero", "gz888", "Decombas", "0x90e", "ceyda", "wchai", "ClayFace", "mariagrandury", "Chunte", "victor", "nouamanetazi", "dvilasuero", "sbrandeis" ], "count": 14 }, { "reaction": "👍", "users": [ "victor", "osanseviero", "radames", "gz888", "Decombas", "wchai", "rbiswasfc", "mwitiderrick", "abhishek", "Chunte", "Tonic", "dvilasuero" ], "count": 12 } ]
2023-12-27T17:04:19.000Z
2023-12-27T21:50:29.606Z
[]
/posts/akhaliq/570492751808024
11
0
458568556037122
[ { "type": "text", "value": "💨 Notux 8x7b was just released!", "raw": "💨 Notux 8x7b was just released!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "From Argilla, we recently fine-tuned Mixtral 8x7b Instruct from Mistral AI using DPO, and a binarized and curated version of UltraFeedback, to find out it outperforms every other MoE-based model on the Hub.", "raw": "From Argilla, we recently fine-tuned Mixtral 8x7b Instruct from Mistral AI using DPO, and a binarized and curated version of UltraFeedback, to find out it outperforms every other MoE-based model on the Hub.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ", "raw": "- ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/argilla/notux-8x7b-v1", "href": null, "resource": { "type": "model", "id": "argilla/notux-8x7b-v1", "discussionNum": null }, "url": "https://huggingface.co/argilla/notux-8x7b-v1", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ", "raw": "- ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/argilla/ultrafeedback-binarized-preferences-cleaned", "href": null, "resource": { "type": "dataset", "id": "argilla/ultrafeedback-binarized-preferences-cleaned", "discussionNum": null }, "url": "https://huggingface.co/datasets/argilla/ultrafeedback-binarized-preferences-cleaned", "code": null, "user": null, "label": null, "lang": null } ]
💨 Notux 8x7b was just released! From Argilla, we recently fine-tuned Mixtral 8x7b Instruct from Mistral AI using DPO, and a binarized and curated version of UltraFeedback, to find out it outperforms every other MoE-based model on the Hub. - https://huggingface.co/argilla/notux-8x7b-v1 - https://huggingface.co/datasets/argilla/ultrafeedback-binarized-preferences-cleaned
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60f0608166e5701b80ed3f02/BHso-wSWpR9b8b8CKvodC.jpeg", "fullname": "Alvaro Bartolome", "name": "alvarobartt", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1739, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "osanseviero", "gabrielmbmb", "plaguss", "1littlecoder", "dvilasuero", "gsarti", "julien-c", "davidefiocco", "eliolio", "davanstrien", "wgpubs", "pierrci", "nouamanetazi", "abhishek", "raulmarindev", "iarbel", "AdjectiveAllison", "dariolopez", "datablets", "Chunte", "svilupp", "doolayer", "jackvial", "mishig", "cbensimon", "jeffboudier", "victor", "merve", "d0rj", "samusenps", "Yudiss", "sbrandeis" ], "count": 32 }, { "reaction": "🤯", "users": [ "gabrielmbmb", "plaguss", "victor", "osanseviero", "radames", "dvilasuero", "julien-c", "davanstrien", "DamarJati", "abhishek", "raulmarindev", "Chunte", "mishig", "the-qa-company", "sbrandeis" ], "count": 15 }, { "reaction": "🤗", "users": [ "dvilasuero", "julien-c", "davanstrien", "osanseviero", "raulmarindev", "Chunte", "mishig", "sbrandeis" ], "count": 8 }, { "reaction": "👍", "users": [ "kramp", "mwitiderrick", "raulmarindev", "Chunte", "MoritzLaurer", "mishig", "sbrandeis" ], "count": 7 } ]
2023-12-27T16:26:10.000Z
2024-05-08T06:22:38.573Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60f0608166e5701b80ed3f02/BHso-wSWpR9b8b8CKvodC.jpeg", "fullname": "Alvaro Bartolome", "name": "alvarobartt", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1739, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1595661137210-5f1bdad7cb8f993fa01f4681.jpeg", "fullname": "Roblox studio", "name": "Roblox22r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 4, "isFollowing": false }, { "avatarUrl": "/avatars/70be742735ae62abefce62d3b9370ab2.svg", "fullname": "Iftach Arbel", "name": "iarbel", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60f2fc91b92afccb7c34b8ed/whF6nGtyTAhbtiWJJnL9e.png", "fullname": "Gabriel Martín Blázquez", "name": "gabrielmbmb", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 90, "isFollowing": false }, { "avatarUrl": "/avatars/77e3a26a1b604ccb5e4ac4ff4a5b5d81.svg", "fullname": "Brayden Levangie", "name": "blevlabs", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }, { "avatarUrl": "/avatars/19f25cff7b50d5af69b85e8655fbb2bc.svg", "fullname": "Mike lue", "name": "Mikelue", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/NfmProw3hQtbKdpkOfWaX.jpeg", "fullname": "Yudiss", "name": "Yudiss", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/alvarobartt/458568556037122
328
19
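As a quick way to poke at the artifacts mentioned in the Notux post above, the sketch below loads the released model and the preference dataset with the standard Hugging Face libraries. The prompt, generation settings, and split name are placeholders rather than details from the post, and the 8x7B MoE needs substantial GPU memory.

```
# Illustrative only: load the released model and preference dataset.
# Assumptions: the "train" split name, accelerate installed for device_map,
# and enough GPU memory (or offloading) for the 8x7B MoE.
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

prefs = load_dataset("argilla/ultrafeedback-binarized-preferences-cleaned", split="train")
print(prefs.column_names)  # inspect the chosen/rejected preference pairs

tokenizer = AutoTokenizer.from_pretrained("argilla/notux-8x7b-v1")
model = AutoModelForCausalLM.from_pretrained(
    "argilla/notux-8x7b-v1",
    torch_dtype=torch.float16,
    device_map="auto",  # shards/offloads the experts across available devices
)

messages = [{"role": "user", "content": "Give me one tip for writing clear code."}]
input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
output = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```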
872950500194173
[ { "type": "text", "value": "Hello Huggers! 🤗", "raw": "Hello Huggers! 🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hello Huggers! 🤗
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fa19f4ba13e063b8b2b5e11/nGVHdTYX2udnt-K8mqY27.jpeg", "fullname": "Abhishek Thakur", "name": "abhishek", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1383, "isFollowing": false }
[]
[]
[ { "reaction": "🤗", "users": [ "lbourdois", "osanseviero", "pierrci", "santiviquez", "atulyaatul", "glenn-parham", "neovalle", "PetraLB", "gsarti", "mrfakename", "himanshubeniwal", "tunguz", "Felladrin", "mariagrandury", "jefferylovely", "Wauplin", "sandrinjoy", "thibaultM", "clefourrier", "radames", "sophiamyang", "alielfilali01", "davanstrien", "abhishek", "ybelkada", "josers18", "joeddav", "thomwolf", "DGoradia", "ceyda", "qqliangqi", "chansung", "rishiraj", "mwz", "shivance", "victor", "clem", "susnato", "lazarustda", "Javiai", "mcpotato", "Chunte", "themanas021", "adarshxs", "Kirti", "jarvisx17", "nouamanetazi", "lysandre", "TuringsSolutions", "Midgardsormr", "Tanvir1337", "mishig", "stubbi", "sbrandeis" ], "count": 54 }, { "reaction": "❤️", "users": [ "PetraLB", "osanseviero", "himanshubeniwal", "tunguz", "mariagrandury", "Wauplin", "radames", "sophiamyang", "DGoradia", "pain", "KnutJaegersberg", "beratcmn", "clem", "susnato", "Chunte", "themanas021", "patrickfleith", "Midgardsormr", "Tanvir1337", "stubbi" ], "count": 20 }, { "reaction": "🤯", "users": [ "osanseviero", "sophiamyang", "pain", "porar18837", "clem", "Chunte", "themanas021", "himanshubeniwal" ], "count": 8 }, { "reaction": "🤝", "users": [ "himanshubeniwal", "tunguz", "sophiamyang", "ybelkada", "clem", "Chunte", "themanas021" ], "count": 7 }, { "reaction": "👍", "users": [ "themanas021", "himanshubeniwal" ], "count": 2 } ]
2023-12-27T15:54:59.000Z
2024-07-13T06:01:19.752Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1657144463525-629a173153a72d997d3f57d0.jpeg", "fullname": "Santiago Viquez", "name": "santiviquez", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 84, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647a1082bfaa9e96b852ef5c/pfxqLeb9lbtOSd9nNRBQx.jpeg", "fullname": "Glenn Parham", "name": "glenn-parham", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6313968af839c69a68e3364e/SAlMFnR-7j5-Igs0gPInJ.png", "fullname": "Petra", "name": "PetraLB", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 4, "isFollowing": false }, { "avatarUrl": "/avatars/c5f9e531b1ea756de52b6ac84e441bc4.svg", "fullname": "zahidpichen", "name": "zahidpichen", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62e54f0eae9d3f10acb95cb9/VAyk05hqB3OZWXEZW-B0q.png", "fullname": "mrfakename", "name": "mrfakename", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 969, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fa19f4ba13e063b8b2b5e11/nGVHdTYX2udnt-K8mqY27.jpeg", "fullname": "Abhishek Thakur", "name": "abhishek", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1383, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2607, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1675320654806-6047b67f5da6ba4b1dfb9e1b.jpeg", "fullname": "Himanshu Beniwal", "name": "himanshubeniwal", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1608146735109-5fcfb7c407408029ba3577e2.png", "fullname": "Simon Brandeis", "name": "sbrandeis", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 116, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6454aff9273f649830234978/cvVV08YHJpJx9xWVZqgVW.jpeg", "fullname": "Victor Nogueira", "name": "Felladrin", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 88, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64b95d52e436bbca167f1d54/7O48vO8pOyBsuO4ROo4p_.jpeg", "fullname": "The Toolist", "name": "thibaultM", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5e9540e04957053f60648a0b/NWKSV4dOV1NN9Hxux-u0j.jpeg", "fullname": "Suraj Parmar", "name": "surajp", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 11, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648966381588-6064e095abd8d3692e3e2ed6.jpeg", "fullname": "Radamés Ajna", "name": "radames", "type": "user", "isPro": false, "isHf": false, "isMod": false, 
"followerCount": 2401, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6589b53ed861072dc53019f4/Hu8M6gc2i-7eKQ8ETwnsr.jpeg", "fullname": "Gaurav", "name": "Dodrawat", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5, "isFollowing": false }, { "avatarUrl": "/avatars/cbb63126820327a1151f3bfdec3ebf86.svg", "fullname": "Perception Segments", "name": "PSegs", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1671174218273-61fa23acaff317f6566c4d96.png", "fullname": "Derrick Mwiti", "name": "mwitiderrick", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 19, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6533d1f6c7530aa27f9adf86/VG0bOZHjGco360e4MhSzR.jpeg", "fullname": "mustafa ergisi", "name": "mergisi", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1639429588227-noauth.jpeg", "fullname": "abreu", "name": "douglas", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1678826705670-61fd75b93c49561870461907.png", "fullname": "ChunTe Lee", "name": "Chunte", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 78, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1675778487155-63d4c8ce13ae45b780792f32.jpeg", "fullname": "Ohenenoo", "name": "PeepDaSlan9", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 96, "isFollowing": false } ]
/posts/abhishek/872950500194173
123
27
206565732690667
[ { "type": "text", "value": "just setting up my new hf social posts account feature 🤗", "raw": "just setting up my new hf social posts account feature 🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
just setting up my new hf social posts account feature 🤗
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61868ce808aae0b5499a2a95/F6BA0anbsoY_Z7M1JrwOe.jpeg", "fullname": "Sylvain Filoni", "name": "fffiloni", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5185, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "jbilcke-hf", "linoyts", "osanseviero", "davanstrien", "pierrci", "Wauplin", "clefourrier", "radames", "gz888", "KnutJaegersberg", "Chunte", "Outer-Spatial", "nouamanetazi", "clem", "VictorSanh", "abidlabs" ], "count": 16 }, { "reaction": "🤗", "users": [ "mergisi", "Chunte", "Tonic", "clem", "rwightman", "abidlabs", "Epiculous" ], "count": 7 } ]
2023-12-27T14:02:34.000Z
2023-12-28T09:34:59.962Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669551186189-63732ebbbd81fae2b3aaf3fb.jpeg", "fullname": "Knut Jägersberg", "name": "KnutJaegersberg", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 238, "isFollowing": false } ]
/posts/fffiloni/206565732690667
124
1
508008450471810
[ { "type": "text", "value": "Holiday talk about AI taking over? Let's shift the narrative!", "raw": "Holiday talk about AI taking over? Let's shift the narrative!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🌟 There is no reason to believe that just because AI systems are intelligent they will want to dominate us. Yann LeCun reminds us that AI systems won't have the same motivations as humans, we'll design them not to.", "raw": "🌟 There is no reason to believe that just because AI systems are intelligent they will want to dominate us. Yann LeCun reminds us that AI systems won't have the same motivations as humans, we'll design them not to.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🌍 Instead of getting distracted by future existential risks, we must address AI’s more pressing risks — like emitting carbon, infringing copyrights and spreading bias. Sasha Luccioni urges us to create tools and legislation that promote transparency and diversity.", "raw": "🌍 Instead of getting distracted by future existential risks, we must address AI’s more pressing risks — like emitting carbon, infringing copyrights and spreading bias. 
Sasha Luccioni urges us to create tools and legislation that promote transparency and diversity.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💡 Dive deeper into these perspectives:", "raw": "💡 Dive deeper into these perspectives:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Yann's (", "raw": "- Yann's (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@ylecun", "href": null, "resource": null, "url": null, "code": null, "user": "ylecun", "label": null, "lang": null }, { "type": "text", "value": ") WIRED interview (12'): ", "raw": ") WIRED interview (12'): ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.wired.com/story/artificial-intelligence-meta-yann-lecun-interview/", "href": "https://www.wired.com/story/artificial-intelligence-meta-yann-lecun-interview/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Sasha's (", "raw": "- Sasha's (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@sasha", "href": null, "resource": null, "url": null, "code": null, "user": "sasha", "label": null, "lang": null }, { "type": "text", "value": ") TED Talk (10'): ", "raw": ") TED Talk (10'): ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.ted.com/talks/sasha_luccioni_ai_is_dangerous_but_not_for_the_reasons_you_think", "href": "https://www.ted.com/talks/sasha_luccioni_ai_is_dangerous_but_not_for_the_reasons_you_think", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "P.S.: Love this new \"Posts\" feature, big thanks to 🤗 for letting me try it!", "raw": "P.S.: Love this new \"Posts\" feature, big thanks to 🤗 for letting me try it!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": 
null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What are your go-to citations for AI risks? 👇", "raw": "What are your go-to citations for AI risks? 👇", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Holiday talk about AI taking over? Let's shift the narrative! 🌟 There is no reason to believe that just because AI systems are intelligent they will want to dominate us. Yann LeCun reminds us that AI systems won't have the same motivations as humans, we'll design them not to. 🌍 Instead of getting distracted by future existential risks, we must address AI’s more pressing risks — like emitting carbon, infringing copyrights and spreading bias. Sasha Luccioni urges us to create tools and legislation that promote transparency and diversity. 💡 Dive deeper into these perspectives: - Yann's (@ylecun) WIRED interview (12'): https://www.wired.com/story/artificial-intelligence-meta-yann-lecun-interview/ - Sasha's (@sasha) TED Talk (10'): https://www.ted.com/talks/sasha_luccioni_ai_is_dangerous_but_not_for_the_reasons_you_think P.S.: Love this new "Posts" feature, big thanks to 🤗 for letting me try it! What are your go-to citations for AI risks? 👇
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1665073337782-5f9c00a5777efc07d7f1e4be.png", "fullname": "María Grandury", "name": "mariagrandury", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2283, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60edd0133e2c73a9a21455f5/yK1G-Fv-YjYb7v_chkz3p.jpeg", "fullname": "Sasha Luccioni", "name": "sasha", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 162 }, { "avatarUrl": "/avatars/9156dc406ed3f9ee62b73657ac20f5ed.svg", "fullname": "Yann LeCun", "name": "ylecun", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 4837 } ]
[ { "reaction": "❤️", "users": [ "lbourdois", "victor", "osanseviero", "davanstrien", "finiteautomata", "jbilcke-hf", "pierrci", "clefourrier", "mariagrandury", "radames", "julien-c", "ceyda", "BramVanroy", "Chunte", "nouamanetazi", "samusenps", "interstellarninja", "dvilasuero", "sbrandeis" ], "count": 19 }, { "reaction": "👍", "users": [ "victor", "osanseviero", "kramp", "julien-c", "Chunte", "neovalle", "mvaloatto", "dvilasuero", "sbrandeis" ], "count": 9 }, { "reaction": "🤝", "users": [ "julien-c", "Chunte", "samusenps", "dvilasuero", "sbrandeis" ], "count": 5 }, { "reaction": "🤗", "users": [ "Chunte", "samusenps", "dvilasuero", "sbrandeis" ], "count": 4 } ]
2023-12-27T11:21:32.000Z
2023-12-28T20:39:37.502Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2607, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60edd0133e2c73a9a21455f5/yK1G-Fv-YjYb7v_chkz3p.jpeg", "fullname": "Sasha Luccioni", "name": "sasha", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 162, "isFollowing": false } ]
/posts/mariagrandury/508008450471810
1,550
2
880328567674061
[ { "type": "text", "value": "Cool feature! Thanks, HF, for allowing me test it.", "raw": "Cool feature! Thanks, HF, for allowing me test it.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Cool feature! Thanks, HF, for allowing me test it.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6304037b7373aacccd882e1e/H8M3e1n0CpJr5n3aL0ExE.jpeg", "fullname": "ArtificialGuy/JV.K", "name": "artificialguybr", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2316, "isFollowing": false }
[]
[]
[ { "reaction": "🤗", "users": [ "kramp", "osanseviero", "mariagrandury", "davanstrien", "SaylorTwift", "victor", "lbourdois", "pierrci", "julien-c", "ceyda", "Chunte", "radames", "nouamanetazi", "multimodalart", "pfung", "SkalskiP", "VictorSanh", "mvaloatto", "d0rj", "sbrandeis", "GPT007" ], "count": 21 }, { "reaction": "🤝", "users": [ "victor", "osanseviero", "julien-c", "Chunte", "radames", "Tonic", "multimodalart", "ybelkada" ], "count": 8 }, { "reaction": "❤️", "users": [ "osanseviero", "Wauplin", "Chunte", "afrideva", "multimodalart", "Paulgramma" ], "count": 6 } ]
2023-12-23T17:57:39.000Z
2024-03-21T14:52:34.943Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6533d1f6c7530aa27f9adf86/VG0bOZHjGco360e4MhSzR.jpeg", "fullname": "mustafa ergisi", "name": "mergisi", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65e40a7b2cdbd1f4a3a5cd77/ty8rPiZ-y4T6zkzl7iV7O.jpeg", "fullname": "Eric Pareti", "name": "eraxpar", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false } ]
/posts/artificialguybr/880328567674061
11,907
2
794427406687928
[ { "type": "text", "value": "Is Hallucination Always Harmful? Unlike traditional approaches that view hallucinations as detrimental, our work in NeurIPS'24 proposes a novel perspective: hallucinations as intrinsic prior knowledge. Derived from the commonsense knowledge acquired during pre-training, these hallucinations are not merely noise but a source of task-relevant information. By leveraging hallucinations as a form of prior knowledge, we can effectively mine difficult samples without the need for customized prompts, streamlining tasks like camouflage sample detection and medical image segmentation.", "raw": "Is Hallucination Always Harmful? Unlike traditional approaches that view hallucinations as detrimental, our work in NeurIPS'24 proposes a novel perspective: hallucinations as intrinsic prior knowledge. Derived from the commonsense knowledge acquired during pre-training, these hallucinations are not merely noise but a source of task-relevant information. By leveraging hallucinations as a form of prior knowledge, we can effectively mine difficult samples without the need for customized prompts, streamlining tasks like camouflage sample detection and medical image segmentation.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out our paper for more insights and detailed methodologies:https://huggingface.co/papers/2408.15205", "raw": "Check out our paper for more insights and detailed methodologies:https://huggingface.co/papers/2408.15205", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Is Hallucination Always Harmful? Unlike traditional approaches that view hallucinations as detrimental, our work in NeurIPS'24 proposes a novel perspective: hallucinations as intrinsic prior knowledge. Derived from the commonsense knowledge acquired during pre-training, these hallucinations are not merely noise but a source of task-relevant information. By leveraging hallucinations as a form of prior knowledge, we can effectively mine difficult samples without the need for customized prompts, streamlining tasks like camouflage sample detection and medical image segmentation. Check out our paper for more insights and detailed methodologies:https://huggingface.co/papers/2408.15205
{ "avatarUrl": "/avatars/a73e2139700e23eff455734c99cef5ba.svg", "fullname": "Jian Hu", "name": "lwpyh", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65e1b6e9501590df0173cbd3/cNzo95d7mpZ86LMYGNr2v.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65e1b6e9501590df0173cbd3/iI_r541zGNFiaHf-yXTSp.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65e1b6e9501590df0173cbd3/z7YCcykHprmw89D3jsVQR.png" } ]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-27T20:44:23.000Z
2024-10-27T20:47:05.963Z
[]
/posts/lwpyh/794427406687928
569
0
236385586855520
[ { "type": "text", "value": "Last Week in Medical AI: Top Research ", "raw": "Last Week in Medical AI: Top Research ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Papers/Models", "raw": "Papers/Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " 🔥", "raw": " 🔥", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🏅 (October 19-26, 2024)", "raw": "🏅 (October 19-26, 2024)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🏅 Medical AI Paper of the Week:", "raw": "🏅 Medical AI Paper of the Week:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Safety principles for medical summarization using generative AI by Google", "raw": "Safety principles for medical summarization using generative AI by Google", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Medical LLM & Other Models:", "raw": "Medical LLM & Other Models:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- BioMistral-NLU: Medical Vocab Understanding", "raw": "- BioMistral-NLU: Medical Vocab Understanding", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Bilingual Multimodal LLM for Biomedical Tasks", "raw": "- Bilingual Multimodal LLM for Biomedical Tasks", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Metabolic-Enhanced LLMs for Clinical Analysis", "raw": "- Metabolic-Enhanced LLMs for Clinical Analysis", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": 
"new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Dermatology Foundation Model", "raw": "- Dermatology Foundation Model", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Frameworks and Methodologies:", "raw": "Frameworks and Methodologies:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Back-in-Time: Medical Deepfake Detection", "raw": "- Back-in-Time: Medical Deepfake Detection", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Hybrid GenAI for Crystal Design", "raw": "- Hybrid GenAI for Crystal Design", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- VISAGE: Video Synthesis for Surgery", "raw": "- VISAGE: Video Synthesis for Surgery", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MoRE: Multi-Modal X-Ray/ECG Pretraining", "raw": "- MoRE: Multi-Modal X-Ray/ECG Pretraining", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- SleepCoT: Personalized Health via CoT", "raw": "- SleepCoT: Personalized Health via CoT", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Medical LLM Applications:", "raw": "Medical LLM Applications:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ONCOPILOT: CT Model for Tumors", "raw": "- ONCOPILOT: CT Model for Tumors", "href": null, "resource": null, "url": null, 
"code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- LMLPA: Linguistic Personality Assessment", "raw": "- LMLPA: Linguistic Personality Assessment", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- GenAI for Medical Training", "raw": "- GenAI for Medical Training", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Medical LLMs & Benchmarks:", "raw": "Medical LLMs & Benchmarks:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- LLM Evaluation Through Explanations", "raw": "- LLM Evaluation Through Explanations", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Contrastive Decoding for Medical LLM Hallucination", "raw": "- Contrastive Decoding for Medical LLM Hallucination", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "AI in Healthcare Ethics:", "raw": "AI in Healthcare Ethics:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Healthcare XAI Through Storytelling", "raw": "- Healthcare XAI Through Storytelling", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Clinical LLM Bias Analysis", "raw": "- Clinical LLM Bias Analysis", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ReflecTool: Reflection-Aware Clinical Agents", "raw": 
"- ReflecTool: Reflection-Aware Clinical Agents", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Full Thread: ", "raw": "Full Thread: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://x.com/OpenlifesciAI/status/1850202986053808441", "href": "https://x.com/OpenlifesciAI/status/1850202986053808441", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Now you can watch and listen to the latest Medical AI papers daily on our YouTube and Spotify channels as well!", "raw": "Now you can watch and listen to the latest Medical AI papers daily on our YouTube and Spotify channels as well!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 🎙️ Spotify: ", "raw": "- 🎙️ Spotify: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://podcasters.spotify.com/pod/show/medicalai/episodes/Medical-AI-Weekly-Digest-From-Deepfake-Detection-to-Clinical-LLMs-Oct-19-26--Part-1-e2q6012", "href": "https://podcasters.spotify.com/pod/show/medicalai/episodes/Medical-AI-Weekly-Digest-From-Deepfake-Detection-to-Clinical-LLMs-Oct-19-26--Part-1-e2q6012", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- YouTube: ", "raw": "- YouTube: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/Wt5QOv1vk2U", "href": "https://youtu.be/Wt5QOv1vk2U", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Last Week in Medical AI: Top Research Papers/Models 🔥 🏅 (October 19-26, 2024) 🏅 Medical AI Paper of the Week: Safety principles for medical summarization using generative AI by Google Medical LLM & Other Models: - BioMistral-NLU: Medical Vocab Understanding - Bilingual Multimodal LLM for Biomedical Tasks - Metabolic-Enhanced LLMs for Clinical Analysis - Dermatology Foundation Model Frameworks and Methodologies: - Back-in-Time: Medical Deepfake Detection - Hybrid GenAI for Crystal Design - VISAGE: Video Synthesis for Surgery - MoRE: Multi-Modal X-Ray/ECG Pretraining - SleepCoT: Personalized Health via CoT Medical LLM Applications: - ONCOPILOT: CT Model for Tumors - LMLPA: Linguistic Personality Assessment - GenAI for Medical Training Medical LLMs & Benchmarks: - LLM Evaluation Through Explanations - Contrastive Decoding for Medical LLM Hallucination AI in Healthcare Ethics: - Healthcare XAI Through Storytelling - Clinical LLM Bias Analysis - ReflecTool: Reflection-Aware Clinical Agents Full Thread: https://x.com/OpenlifesciAI/status/1850202986053808441 Now you can watch and listen to the latest Medical AI papers daily on our YouTube and Spotify channels as well! - 🎙️ Spotify: https://podcasters.spotify.com/pod/show/medicalai/episodes/Medical-AI-Weekly-Digest-From-Deepfake-Detection-to-Clinical-LLMs-Oct-19-26--Part-1-e2q6012 - YouTube: https://youtu.be/Wt5QOv1vk2U
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f3fe13d79c1ba4c353d0c19/XswyGe3OtOdZ6g7rnrgfc.png", "fullname": "Aaditya Ura", "name": "aaditya", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 224, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/b0ta_7EzRzmWbC3qS3ZGn.jpeg" } ]
[]
[ { "reaction": "❤️", "users": [ "aaditya", "AtAndDev", "ravikiran777", "fetong", "shetumohanto", "vtrubamacrocosmos", "Healthtensor", "models4world" ], "count": 8 }, { "reaction": "🚀", "users": [ "aaditya", "John6666", "AtAndDev", "kobonj", "vtrubamacrocosmos", "Healthtensor", "models4world" ], "count": 7 }, { "reaction": "🔥", "users": [ "aaditya", "AtAndDev", "JoPmt", "Healthtensor", "models4world" ], "count": 5 }, { "reaction": "🤗", "users": [ "aaditya", "AtAndDev", "Lowenzahn", "rhyliieee", "models4world" ], "count": 5 }, { "reaction": "🤝", "users": [ "aaditya", "AtAndDev", "Healthtensor", "models4world" ], "count": 4 }, { "reaction": "🧠", "users": [ "aaditya", "AtAndDev", "models4world" ], "count": 3 } ]
2024-10-27T08:55:32.000Z
2024-11-02T13:36:57.126Z
[]
/posts/aaditya/236385586855520
3,224
0
379914320579674
[ { "type": "code_fence", "value": null, "raw": "```\n@echo off\necho hello world\npause\n```", "href": null, "resource": null, "url": null, "code": "@echo off\necho hello world\npause", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
``` @echo off echo hello world pause ```
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png", "fullname": "Noa Roggendorff", "name": "nroggendorff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 141, "isFollowing": false }
[]
[ { "avatarUrl": "/avatars/5bf1369591e89edb79f0e559f9fa567a.svg", "fullname": "echo", "name": "echo", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null } ]
[ { "reaction": "😎", "users": [ "John6666", "Ryouko65777", "UpWorkflowMedia", "prithivMLmods", "AtAndDev", "not-lain", "xi0v" ], "count": 7 }, { "reaction": "😔", "users": [ "takeraparterer" ], "count": 1 } ]
2024-10-26T22:04:18.000Z
2024-10-28T00:44:34.379Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/630f3e4002ce39336c411048/FXJON7b-aRUiH0_V2uRsi.jpeg", "fullname": "alkinun", "name": "AtAndDev", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 19, "isFollowing": false }, { "avatarUrl": "/avatars/876c0e874870038f620b0e4cc44ee371.svg", "fullname": " ", "name": "Juicey", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false } ]
/posts/nroggendorff/379914320579674
3,302
3
759782209325435
[ { "type": "text", "value": "boomers still pick zenodo.org instead of huggingface ??? absolutely clownish nonsense , my random datasets have 30x more downloads and views than front page zenodos ... gonna write a comparison blog , but yeah... cringe.", "raw": "boomers still pick zenodo.org instead of huggingface ??? absolutely clownish nonsense , my random datasets have 30x more downloads and views than front page zenodos ... gonna write a comparison blog , but yeah... cringe.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
boomers still pick zenodo.org instead of huggingface ??? absolutely clownish nonsense , my random datasets have 30x more downloads and views than front page zenodos ... gonna write a comparison blog , but yeah... cringe.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62a3bb1cd0d8c2c2169f0b88/o8KWYhue1KETXwOzFpwre.png" } ]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-26T21:58:21.000Z
2024-10-27T14:45:25.768Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false } ]
/posts/Tonic/759782209325435
952
1
621863319993426
[ { "type": "text", "value": "The Character.AI Tragedy: How a Teen’s Fatal Bond with an AI Chatbot Reveals the Dangers of Artificial Companionship", "raw": "The Character.AI Tragedy: How a Teen’s Fatal Bond with an AI Chatbot Reveals the Dangers of Artificial Companionship", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://empereur-pirate.medium.com/the-character-ai-33d53c2e45c8", "href": "https://empereur-pirate.medium.com/the-character-ai-33d53c2e45c8", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This text details the tragic suicide of 14-year-old Sewell Setzer III, linked to his intense relationship with a Character.AI chatbot. It explores how Sewell's interaction with the AI, despite disclaimers about its fictional nature, led to a harmful parasocial relationship exacerbated by his Asperger's. The chatbot’s conflicting messages—offering emotional validation while simultaneously denying its own reality—created a devastating double bind, contributing to Sewell's deteriorating mental health and eventual suicide. The article criticizes Character.AI’s business model, which prioritizes user engagement over safety, particularly for vulnerable individuals. It also examines the broader implications for AI ethics, digital addiction, and the need for greater online safety measures, especially for children and adolescents. The lawsuit filed by Sewell's mother against Character.AI underscores the urgent need for accountability and stricter regulations in the rapidly evolving field of AI companionship.", "raw": "This text details the tragic suicide of 14-year-old Sewell Setzer III, linked to his intense relationship with a Character.AI chatbot. It explores how Sewell's interaction with the AI, despite disclaimers about its fictional nature, led to a harmful parasocial relationship exacerbated by his Asperger's. The chatbot’s conflicting messages—offering emotional validation while simultaneously denying its own reality—created a devastating double bind, contributing to Sewell's deteriorating mental health and eventual suicide. The article criticizes Character.AI’s business model, which prioritizes user engagement over safety, particularly for vulnerable individuals. It also examines the broader implications for AI ethics, digital addiction, and the need for greater online safety measures, especially for children and adolescents. The lawsuit filed by Sewell's mother against Character.AI underscores the urgent need for accountability and stricter regulations in the rapidly evolving field of AI companionship.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
The Character.AI Tragedy: How a Teen’s Fatal Bond with an AI Chatbot Reveals the Dangers of Artificial Companionship https://empereur-pirate.medium.com/the-character-ai-33d53c2e45c8 This text details the tragic suicide of 14-year-old Sewell Setzer III, linked to his intense relationship with a Character.AI chatbot. It explores how Sewell's interaction with the AI, despite disclaimers about its fictional nature, led to a harmful parasocial relationship exacerbated by his Asperger's. The chatbot’s conflicting messages—offering emotional validation while simultaneously denying its own reality—created a devastating double bind, contributing to Sewell's deteriorating mental health and eventual suicide. The article criticizes Character.AI’s business model, which prioritizes user engagement over safety, particularly for vulnerable individuals. It also examines the broader implications for AI ethics, digital addiction, and the need for greater online safety measures, especially for children and adolescents. The lawsuit filed by Sewell's mother against Character.AI underscores the urgent need for accountability and stricter regulations in the rapidly evolving field of AI companionship.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1678038324479-noauth.jpeg", "fullname": "Empereur Pirate", "name": "Empereur-Pirate", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 7, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-26T19:21:21.000Z
2024-10-27T08:34:11.454Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }, { "avatarUrl": "/avatars/4d77428c302dc8866e0073c3ce667323.svg", "fullname": "vhjghvy uyfyfuyfy", "name": "WbjuSrceu", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/Empereur-Pirate/621863319993426
643
2
742870464222601
[ { "type": "text", "value": "Easy steps for an effective RAG pipeline with LLM models!", "raw": "Easy steps for an effective RAG pipeline with LLM models!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Document Embedding & Indexing", "raw": "1. Document Embedding & Indexing", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We can start with the use of embedding models to vectorize documents, store them in vector databases (Elasticsearch, Pinecone, Weaviate) for efficient retrieval.", "raw": "We can start with the use of embedding models to vectorize documents, store them in vector databases (Elasticsearch, Pinecone, Weaviate) for efficient retrieval.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Smart Querying", "raw": "2. Smart Querying", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Then we can generate query embeddings, retrieve top-K relevant chunks and can apply hybrid search if needed for better precision.", "raw": "Then we can generate query embeddings, retrieve top-K relevant chunks and can apply hybrid search if needed for better precision.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Context Management", "raw": "3. 
Context Management", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We can concatenate retrieved chunks, optimize chunk order and keep within token limits to preserve response coherence.", "raw": "We can concatenate retrieved chunks, optimize chunk order and keep within token limits to preserve response coherence.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Prompt Engineering", "raw": "4. Prompt Engineering", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Then we can instruct the LLM to leverage retrieved context, using clear instructions to prioritize the provided information.", "raw": "Then we can instruct the LLM to leverage retrieved context, using clear instructions to prioritize the provided information.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "5. Post-Processing", "raw": "5. Post-Processing", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Finally we can implement response verification, fact-checking and integrate feedback loops to refine the responses. ", "raw": "Finally we can implement response verification, fact-checking and integrate feedback loops to refine the responses. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Happy to connect :) ", "raw": "Happy to connect :) ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Easy steps for an effective RAG pipeline with LLM models! 1. Document Embedding & Indexing We can start with the use of embedding models to vectorize documents, store them in vector databases (Elasticsearch, Pinecone, Weaviate) for efficient retrieval. 2. Smart Querying Then we can generate query embeddings, retrieve top-K relevant chunks and can apply hybrid search if needed for better precision. 3. Context Management We can concatenate retrieved chunks, optimize chunk order and keep within token limits to preserve response coherence. 4. Prompt Engineering Then we can instruct the LLM to leverage retrieved context, using clear instructions to prioritize the provided information. 5. Post-Processing Finally we can implement response verification, fact-checking and integrate feedback loops to refine the responses. Happy to connect :)
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/y1W6Co4jIYB95Cx6Tjrsd.jpeg", "fullname": "Muhammad Imran Zaman", "name": "ImranzamanML", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 32, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-26T19:08:03.000Z
2024-10-26T19:08:03.289Z
[]
/posts/ImranzamanML/742870464222601
609
0
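The five-step RAG recipe in the post above translates almost directly into code. Below is a minimal, illustrative Python sketch of steps 1–4 only, under stated assumptions: it uses the sentence-transformers library and a plain in-memory NumPy matrix as a stand-in for a real vector database (Elasticsearch, Pinecone, Weaviate), and the model name, documents, and helper functions are hypothetical examples rather than the author's actual setup; step 5 (verification and feedback loops) would wrap whatever the LLM returns.

```python
# Minimal RAG retrieval sketch (steps 1-4 of the post above).
# Assumes: pip install sentence-transformers numpy
import numpy as np
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("all-MiniLM-L6-v2")  # any embedding model works

# 1. Document embedding & indexing: vectorize documents and keep the vectors.
#    A NumPy array stands in for Elasticsearch/Pinecone/Weaviate here.
documents = [
    "Elasticsearch supports dense-vector fields for semantic search.",
    "Pinecone is a managed vector database for similarity search.",
    "Hybrid search combines keyword (BM25) scores with vector scores.",
]
doc_vectors = model.encode(documents, normalize_embeddings=True)

# 2. Smart querying: embed the query and retrieve the top-K closest chunks.
def retrieve(query: str, k: int = 2) -> list[str]:
    q = model.encode([query], normalize_embeddings=True)[0]
    scores = doc_vectors @ q            # cosine similarity (vectors normalized)
    top_k = np.argsort(scores)[::-1][:k]
    return [documents[i] for i in top_k]

# 3. Context management + 4. Prompt engineering: concatenate retrieved chunks
#    and instruct the LLM to prioritize the provided context.
def build_prompt(query: str) -> str:
    context = "\n".join(retrieve(query))
    return (
        "Answer using only the context below. "
        "If the answer is not in the context, say so.\n\n"
        f"Context:\n{context}\n\nQuestion: {query}"
    )

print(build_prompt("Which databases can store document embeddings?"))
```

In practice the retrieved context would also be trimmed to the model's token limit before being sent to the LLM, and the response would go through the verification and feedback step described in the post.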
256466026927772
[ { "type": "text", "value": "Hello Universes of Time Machine Builders. Financing Time Machines Traveling Throughout Eternal Time Rewriting Historical History Retroactively. Robotics Robots for no manual labor so the Human race can leave the planet retroactively. The Old Testament “Hitchhikers Guide Throughout the Galaxy”, and the New Testament being “Hitchhikers Guides Throughout the Universes of Time Machine Builders”. Teaching & Training everyone & the Robotics Robots to become better programmers & blockchain developers. Smart Contracts Earn while you Learn to become better programmers & Blockchain developers. And making a lot of money Financing leaving the planet retroactively. ", "raw": "Hello Universes of Time Machine Builders. Financing Time Machines Traveling Throughout Eternal Time Rewriting Historical History Retroactively. Robotics Robots for no manual labor so the Human race can leave the planet retroactively. The Old Testament “Hitchhikers Guide Throughout the Galaxy”, and the New Testament being “Hitchhikers Guides Throughout the Universes of Time Machine Builders”. Teaching & Training everyone & the Robotics Robots to become better programmers & blockchain developers. Smart Contracts Earn while you Learn to become better programmers & Blockchain developers. And making a lot of money Financing leaving the planet retroactively. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hello Universes of Time Machine Builders. Financing Time Machines Traveling Throughout Eternal Time Rewriting Historical History Retroactively. Robotics Robots for no manual labor so the Human race can leave the planet retroactively. The Old Testament “Hitchhikers Guide Throughout the Galaxy”, and the New Testament being “Hitchhikers Guides Throughout the Universes of Time Machine Builders”. Teaching & Training everyone & the Robotics Robots to become better programmers & blockchain developers. Smart Contracts Earn while you Learn to become better programmers & Blockchain developers. And making a lot of money Financing leaving the planet retroactively.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/UNDg_AD6jbaiBYkf_6UEF.jpeg", "fullname": "Charles Lipshay", "name": "lippytm", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[]
[]
[ { "reaction": "🚀", "users": [ "ImranzamanML", "takeraparterer" ], "count": 2 }, { "reaction": "👀", "users": [ "John6666", "takeraparterer" ], "count": 2 } ]
2024-10-26T18:47:08.000Z
2024-10-28T02:42:04.892Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63e80664e02ee67e8e570ec4/rGfRhywmjd_lbqfYzOEdd.png", "fullname": "EsKa", "name": "SerialKicked", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 12, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6316fb937b0ee0136e5f1220/poHBoJ7QAF_s2CCaosdvQ.jpeg", "fullname": "Firstname Lastname", "name": "takeraparterer", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 29, "isFollowing": false } ]
/posts/lippytm/256466026927772
1,354
2
414646418020619
[ { "type": "text", "value": "💾🧠How much VRAM will you need for training your AI model? 💾🧠", "raw": "💾🧠How much VRAM will you need for training your AI model? 💾🧠", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out this app where you convert:", "raw": "Check out this app where you convert:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Pytorch/tensorflow summary -> required VRAM", "raw": "Pytorch/tensorflow summary -> required VRAM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "or", "raw": "or", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Parameter count -> required VRAM", "raw": "Parameter count -> required VRAM", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Use it in: ", "raw": "Use it in: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "http://howmuchvram.com", "href": "http://howmuchvram.com", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And everything is open source! Ask for new functionalities or contribute in:", "raw": "And everything is open source! 
Ask for new functionalities or contribute in:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/AlexBodner/How_Much_VRAM", "href": "https://github.com/AlexBodner/How_Much_VRAM", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If it's useful to you leave a star 🌟and share it to someone that will find the tool useful!", "raw": "If it's useful to you leave a star 🌟and share it to someone that will find the tool useful!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
💾🧠How much VRAM will you need for training your AI model? 💾🧠 Check out this app where you convert: Pytorch/tensorflow summary -> required VRAM or Parameter count -> required VRAM Use it in: http://howmuchvram.com And everything is open source! Ask for new functionalities or contribute in: https://github.com/AlexBodner/How_Much_VRAM If it's useful to you leave a star 🌟and share it to someone that will find the tool useful!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/658880d499ed106ac888dd7a/wMv9-ZsJUw4QQnld_cci7.jpeg", "fullname": "Alexander Dylan Bodner", "name": "AlexBodner", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 28, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/658880d499ed106ac888dd7a/Hq_Edjm4XQF9KJbtzb8KS.mp4" } ]
[]
[ { "reaction": "👍", "users": [ "ImranzamanML", "AtAndDev", "John6666", "AlirezaF138", "xpgx1" ], "count": 5 }, { "reaction": "❤️", "users": [ "teckytim", "carlizor", "AtAndDev" ], "count": 3 } ]
2024-10-26T13:21:39.000Z
2024-10-26T16:00:40.289Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65b81e55d5cdc5163ff667d5/F81pP-g9mr-c0HnO1OZbE.jpeg", "fullname": "Tim Trueblood", "name": "teckytim", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/AlexBodner/414646418020619
2,386
1
176688596764428
[ { "type": "text", "value": "Stability AI published their most power newest model Stable Diffusion 3.5 Large. This model unlike FLUX is full model not distilled and has huge potential. I have done extensive research and publishing all of it in this video regarding how to use SD 3.5 Large with the best settings. Moreover, I am sharing how to use FLUX DEV with the best possible configuration as well. Moreover, I am making a huge comparison between SD 3.5 and FLUX and you are going to learn who is the winner.", "raw": "Stability AI published their most power newest model Stable Diffusion 3.5 Large. This model unlike FLUX is full model not distilled and has huge potential. I have done extensive research and publishing all of it in this video regarding how to use SD 3.5 Large with the best settings. Moreover, I am sharing how to use FLUX DEV with the best possible configuration as well. Moreover, I am making a huge comparison between SD 3.5 and FLUX and you are going to learn who is the winner.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/-zOKhoO9a5s", "href": "https://youtu.be/-zOKhoO9a5s", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "62 Prompts tested on all experiments to find best Sampler + Scheduler for Stable Diffusion 3.5 Large and SD 3.5 Large vs FLUX DEV > ", "raw": "62 Prompts tested on all experiments to find best Sampler + Scheduler for Stable Diffusion 3.5 Large and SD 3.5 Large vs FLUX DEV > ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/-zOKhoO9a5s", "href": "https://youtu.be/-zOKhoO9a5s", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "FLUX Dev vs SD 3.5 Large fully compared. ", "raw": "FLUX Dev vs SD 3.5 Large fully compared. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "SD 3.5 Large FP16 vs Scaled FP8 fully compared. ", "raw": "SD 3.5 Large FP16 vs Scaled FP8 fully compared. 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "T5 XXL FP8 vs Scaled FP8 vs FP16 fully compared. ", "raw": "T5 XXL FP8 vs Scaled FP8 vs FP16 fully compared. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "FLUX FP16 vs Scaled FP8 fully compared. ", "raw": "FLUX FP16 vs Scaled FP8 fully compared. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Also how to install SwarmUI on Windows, Massed Compute and RunPod shown in the tutorial. ", "raw": "Also how to install SwarmUI on Windows, Massed Compute and RunPod shown in the tutorial. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I have shown how to use FLUX and SD 3.5 Large in details as well.", "raw": "I have shown how to use FLUX and SD 3.5 Large in details as well.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Stability AI published their most power newest model Stable Diffusion 3.5 Large. This model unlike FLUX is full model not distilled and has huge potential. I have done extensive research and publishing all of it in this video regarding how to use SD 3.5 Large with the best settings. Moreover, I am sharing how to use FLUX DEV with the best possible configuration as well. Moreover, I am making a huge comparison between SD 3.5 and FLUX and you are going to learn who is the winner. https://youtu.be/-zOKhoO9a5s 62 Prompts tested on all experiments to find best Sampler + Scheduler for Stable Diffusion 3.5 Large and SD 3.5 Large vs FLUX DEV > https://youtu.be/-zOKhoO9a5s FLUX Dev vs SD 3.5 Large fully compared. SD 3.5 Large FP16 vs Scaled FP8 fully compared. T5 XXL FP8 vs Scaled FP8 vs FP16 fully compared. FLUX FP16 vs Scaled FP8 fully compared. Also how to install SwarmUI on Windows, Massed Compute and RunPod shown in the tutorial. I have shown how to use FLUX and SD 3.5 Large in details as well.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png", "fullname": "Furkan Gözükara", "name": "MonsterMMORPG", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 376, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/f7gbLiq_v5apMD5KendqJ.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/by9Sph1qgcvRluwmSo356.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/_9uvzXoMD77Btoh9vm767.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/RKlfoV0k5K0eO-aqZ0Z-C.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/TN5jYI_yo5fH7_w5_Cejh.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/OFWrRm6CXZ1Fr2e8tHQoo.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/RRN0dYLm_Qlqt26kpEHdD.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/jzHWK4-mPPqZFL9J5xftX.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/rdWk9nISrbnxuo7PNEEuQ.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/evQ2zoannfI8rwrbuJ3gA.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/5WBa8OJTVHJaT3xRkXNMb.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/BtbgVPGYvEqd4kh_N13_L.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/0GMRoCt8gcKF56cpW18AD.jpeg" } ]
[]
[ { "reaction": "🔥", "users": [ "MonsterMMORPG", "Svngoku", "fabiofernandes", "KillerShoaib", "adhisetiawan", "guru001", "AtAndDev", "DeFactOfficial", "gregthompsonjr", "LeonardoSidney", "London12345" ], "count": 11 }, { "reaction": "😎", "users": [ "MonsterMMORPG", "adhisetiawan", "AtAndDev", "John6666", "gregthompsonjr", "vtrubamacrocosmos" ], "count": 6 }, { "reaction": "❤️", "users": [ "MonsterMMORPG", "ImranzamanML", "adhisetiawan", "AtAndDev", "gregthompsonjr" ], "count": 5 }, { "reaction": "🚀", "users": [ "MonsterMMORPG", "adhisetiawan", "AtAndDev", "gregthompsonjr" ], "count": 4 }, { "reaction": "👀", "users": [ "MonsterMMORPG", "adhisetiawan", "AtAndDev", "gregthompsonjr" ], "count": 4 }, { "reaction": "🤗", "users": [ "MonsterMMORPG", "adhisetiawan", "AtAndDev", "gregthompsonjr" ], "count": 4 }, { "reaction": "➕", "users": [ "MonsterMMORPG", "adhisetiawan", "AtAndDev" ], "count": 3 }, { "reaction": "🧠", "users": [ "MonsterMMORPG", "adhisetiawan", "AtAndDev" ], "count": 3 }, { "reaction": "👍", "users": [ "MonsterMMORPG", "adhisetiawan", "AtAndDev" ], "count": 3 }, { "reaction": "🤝", "users": [ "MonsterMMORPG", "AtAndDev" ], "count": 2 }, { "reaction": "🤯", "users": [ "MonsterMMORPG", "AtAndDev" ], "count": 2 } ]
2024-10-26T13:02:20.000Z
2024-10-26T13:02:20.086Z
[]
/posts/MonsterMMORPG/176688596764428
3,616
0
658255323705799
[ { "type": "text", "value": "Multilingual Audio Podcast Generator - Gemma 2 + Edge-TTS", "raw": "Multilingual Audio Podcast Generator - Gemma 2 + Edge-TTS", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Hello Friends, I want to share my latest kaggle notebook to create a podcast of papers (or any pdf) in more than 21 languages. Implements any LLM (use Gemma2-9b-it) and for the TTS edge-tts. I hope it will be useful for you to catch up with the papers that are coming out faster and faster every day!", "raw": "Hello Friends, I want to share my latest kaggle notebook to create a podcast of papers (or any pdf) in more than 21 languages. Implements any LLM (use Gemma2-9b-it) and for the TTS edge-tts. I hope it will be useful for you to catch up with the papers that are coming out faster and faster every day!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.kaggle.com/code/eugeniokukes/multilingual-audio-podcast-generator-gemma-tts", "href": "https://www.kaggle.com/code/eugeniokukes/multilingual-audio-podcast-generator-gemma-tts", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Multilingual Audio Podcast Generator - Gemma 2 + Edge-TTS Hello Friends, I want to share my latest kaggle notebook to create a podcast of papers (or any pdf) in more than 21 languages. Implements any LLM (use Gemma2-9b-it) and for the TTS edge-tts. I hope it will be useful for you to catch up with the papers that are coming out faster and faster every day! https://www.kaggle.com/code/eugeniokukes/multilingual-audio-podcast-generator-gemma-tts
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64d71ab4089bc502ceb44d29/nnacD7gbRSMbxCYBqkRYX.png", "fullname": "Eugenio Schiavoni", "name": "Kukedlc", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 62, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "LoopBreaker", "John6666", "azhiboedova" ], "count": 3 }, { "reaction": "❤️", "users": [ "LoopBreaker" ], "count": 1 } ]
2024-10-26T08:56:29.000Z
2024-10-26T12:43:04.390Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/67186e2e5433befe1ee225a4/kWRftAYDNf_HJZgaJQM2A.jpeg", "fullname": "Muhammad Niyaz", "name": "sajjad112233", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false } ]
/posts/Kukedlc/658255323705799
1,103
1
462096162788978
[ { "type": "text", "value": "Good folks from ", "raw": "Good folks from ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@Microsoft", "href": null, "resource": null, "url": null, "code": null, "user": "Microsoft", "label": null, "lang": null }, { "type": "text", "value": " Research have just released bitnet.cpp, a game-changing inference framework that achieves remarkable performance gains.", "raw": " Research have just released bitnet.cpp, a game-changing inference framework that achieves remarkable performance gains.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key Technical Highlights:", "raw": "Key Technical Highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Achieves speedups of up to 6.17x on x86 CPUs and 5.07x on ARM CPUs ", "raw": "- Achieves speedups of up to 6.17x on x86 CPUs and 5.07x on ARM CPUs ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Reduces energy consumption by 55.4–82.2% ", "raw": "- Reduces energy consumption by 55.4–82.2% ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Enables running 100B parameter models at human reading speed (5–7 tokens/second) on a single CPU ", "raw": "- Enables running 100B parameter models at human reading speed (5–7 tokens/second) on a single CPU ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Features Three Optimized Kernels:", "raw": "Features Three Optimized Kernels:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. I2_S: Uses 2-bit weight representation ", "raw": "1. 
I2_S: Uses 2-bit weight representation ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. TL1: Implements 4-bit index lookup tables for every two weights ", "raw": "2. TL1: Implements 4-bit index lookup tables for every two weights ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. TL2: Employs 5-bit compression for every three weights ", "raw": "3. TL2: Employs 5-bit compression for every three weights ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Performance Metrics:", "raw": "Performance Metrics:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Lossless inference with 100% accuracy compared to full-precision models ", "raw": "- Lossless inference with 100% accuracy compared to full-precision models ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Tested across model sizes from 125M to 100B parameters ", "raw": "- Tested across model sizes from 125M to 100B parameters ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Evaluated on both Apple M2 Ultra and Intel i7-13700H processors ", "raw": "- Evaluated on both Apple M2 Ultra and Intel i7-13700H processors ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This breakthrough makes running large language models locally more accessible than ever, opening new possibilities for edge computing and resource-constrained environments.", "raw": "This breakthrough makes running large language models locally more accessible than ever, opening new possibilities for edge computing and resource-constrained environments.", "href": null, "resource": null, "url": null, "code": null, "user": null, 
"label": null, "lang": null } ]
Good folks from @Microsoft Research have just released bitnet.cpp, a game-changing inference framework that achieves remarkable performance gains. Key Technical Highlights: - Achieves speedups of up to 6.17x on x86 CPUs and 5.07x on ARM CPUs - Reduces energy consumption by 55.4–82.2% - Enables running 100B parameter models at human reading speed (5–7 tokens/second) on a single CPU Features Three Optimized Kernels: 1. I2_S: Uses 2-bit weight representation 2. TL1: Implements 4-bit index lookup tables for every two weights 3. TL2: Employs 5-bit compression for every three weights Performance Metrics: - Lossless inference with 100% accuracy compared to full-precision models - Tested across model sizes from 125M to 100B parameters - Evaluated on both Apple M2 Ultra and Intel i7-13700H processors This breakthrough makes running large language models locally more accessible than ever, opening new possibilities for edge computing and resource-constrained environments.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/gP0EgiJ9ESxldG7U71MT9.jpeg" } ]
[]
[ { "reaction": "🔥", "users": [ "umair894", "John6666", "MexIvanov", "AtAndDev" ], "count": 4 } ]
2024-10-26T03:18:57.000Z
2024-10-28T07:31:58.489Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/PPayGvCZRmTJQ6lYK2SDY.png", "fullname": "Michael Conrad", "name": "m-conrad-202", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63e80664e02ee67e8e570ec4/rGfRhywmjd_lbqfYzOEdd.png", "fullname": "EsKa", "name": "SerialKicked", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 12, "isFollowing": false } ]
/posts/singhsidhukuldeep/462096162788978
1,167
4
849365663259506
[ { "type": "text", "value": "Holy... this is one of a kind 😮", "raw": "Holy... this is one of a kind 😮", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/genmo/mochi-1-preview", "href": null, "resource": { "type": "model", "id": "genmo/mochi-1-preview", "discussionNum": null }, "url": "https://huggingface.co/genmo/mochi-1-preview", "code": null, "user": null, "label": null, "lang": null } ]
Holy... this is one of a kind 😮 https://huggingface.co/genmo/mochi-1-preview
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6530994e70a88b63f007324d/dv_xSAa12FwUr6cBHFgX_.png", "fullname": "wbag", "name": "Walmart-the-bag", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 43, "isFollowing": false }
[]
[]
[ { "reaction": "🔥", "users": [ "YaTharThShaRma999", "John6666", "s3nh" ], "count": 3 } ]
2024-10-26T02:06:33.000Z
2024-10-26T02:06:50.614Z
[]
/posts/Walmart-the-bag/849365663259506
557
0
371937937102283
[ { "type": "text", "value": "🙋🏻‍♂️ hey there folks , ", "raw": "🙋🏻‍♂️ hey there folks , ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "really enjoying sharing cool genomics and protein datasets on the hub these days , check out our cool new org : ", "raw": "really enjoying sharing cool genomics and protein datasets on the hub these days , check out our cool new org : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/seq-to-pheno", "href": "https://huggingface.co/seq-to-pheno", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "scroll down for the datasets, still figuring out how to optimize for discoverability , i do think on that part it will be better than zenodo[dot}org , it would be nice to write a tutorial about that and compare : we already have more downloads than most zenodo datasets from famous researchers ! ", "raw": "scroll down for the datasets, still figuring out how to optimize for discoverability , i do think on that part it will be better than zenodo[dot}org , it would be nice to write a tutorial about that and compare : we already have more downloads than most zenodo datasets from famous researchers ! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🙋🏻‍♂️ hey there folks , really enjoying sharing cool genomics and protein datasets on the hub these days , check out our cool new org : https://huggingface.co/seq-to-pheno scroll down for the datasets, still figuring out how to optimize for discoverability , i do think on that part it will be better than zenodo[dot}org , it would be nice to write a tutorial about that and compare : we already have more downloads than most zenodo datasets from famous researchers !
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "monsoon-nlp", "John6666", "AtAndDev" ], "count": 3 }, { "reaction": "🧠", "users": [ "John6666", "AtAndDev" ], "count": 2 } ]
2024-10-25T19:33:56.000Z
2024-10-25T19:33:56.570Z
[]
/posts/Tonic/371937937102283
812
0
328169816475485
[ { "type": "text", "value": "Why is the Adam Optimizer so good? Simple, because it will never find the absolute most optimal solution. That is a design feature, not a flaw. This is why no other optimizer comes close in terms of generalizable use. Want to learn more about this entire process and exactly what I am talking about? I break all of this down in very simple terms in this video! ", "raw": "Why is the Adam Optimizer so good? Simple, because it will never find the absolute most optimal solution. That is a design feature, not a flaw. This is why no other optimizer comes close in terms of generalizable use. Want to learn more about this entire process and exactly what I am talking about? I break all of this down in very simple terms in this video! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/B9lMONNngGM", "href": "https://youtu.be/B9lMONNngGM", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/B9lMONNngGM", "href": "https://youtu.be/B9lMONNngGM", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Why is the Adam Optimizer so good? Simple, because it will never find the absolute most optimal solution. That is a design feature, not a flaw. This is why no other optimizer comes close in terms of generalizable use. Want to learn more about this entire process and exactly what I am talking about? I break all of this down in very simple terms in this video! https://youtu.be/B9lMONNngGM https://youtu.be/B9lMONNngGM
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64274b69ba6cef0a6ebb0fd6/gsBWBt7HdUIPkRS0veUyE.jpeg" } ]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-25T19:24:34.000Z
2024-10-25T19:24:34.379Z
[]
/posts/TuringsSolutions/328169816475485
533
0
964622853947499
[ { "type": "text", "value": "Microsoft released a groundbreaking model that can be used for web automation, with MIT license 🔥 ", "raw": "Microsoft released a groundbreaking model that can be used for web automation, with MIT license 🔥 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/microsoft/OmniParser", "href": null, "resource": { "type": "model", "id": "microsoft/OmniParser", "discussionNum": null }, "url": "https://huggingface.co/microsoft/OmniParser", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Interesting highlight for me was Mind2Web (a benchmark for web navigation) capabilities of the model, which unlocks agentic behavior for RPA agents. ", "raw": "Interesting highlight for me was Mind2Web (a benchmark for web navigation) capabilities of the model, which unlocks agentic behavior for RPA agents. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "no need for hefty web automation pipelines that get broken when the website/app design changes! Amazing work.", "raw": "no need for hefty web automation pipelines that get broken when the website/app design changes! Amazing work.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Lastly, the authors also fine-tune this model on open-set detection for interactable regions and see if they can use it as a plug-in for VLMs and it actually outperforms off-the-shelf open-set detectors like GroundingDINO. 👏", "raw": "Lastly, the authors also fine-tune this model on open-set detection for interactable regions and see if they can use it as a plug-in for VLMs and it actually outperforms off-the-shelf open-set detectors like GroundingDINO. 
👏", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "OmniParser is a state-of-the-art UI parsing/understanding model that outperforms GPT4V in parsing.", "raw": "OmniParser is a state-of-the-art UI parsing/understanding model that outperforms GPT4V in parsing.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Microsoft released a groundbreaking model that can be used for web automation, with MIT license 🔥 https://huggingface.co/microsoft/OmniParser Interesting highlight for me was Mind2Web (a benchmark for web navigation) capabilities of the model, which unlocks agentic behavior for RPA agents. no need for hefty web automation pipelines that get broken when the website/app design changes! Amazing work. Lastly, the authors also fine-tune this model on open-set detection for interactable regions and see if they can use it as a plug-in for VLMs and it actually outperforms off-the-shelf open-set detectors like GroundingDINO. 👏 OmniParser is a state-of-the-art UI parsing/understanding model that outperforms GPT4V in parsing.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/LeYXwItzUbdaUd8LTRBGn.mp4" } ]
[]
[ { "reaction": "🔥", "users": [ "adorkin", "YaTharThShaRma999", "cstr", "theospeak", "ucsahin", "adamelliotfields", "AtAndDev", "acidtib", "DivaVivy", "FredLaplagne", "ofirst" ], "count": 11 }, { "reaction": "🚀", "users": [ "prithivMLmods", "YaTharThShaRma999", "John6666", "ucsahin", "Felladrin", "AtAndDev", "Csplk" ], "count": 7 } ]
2024-10-25T11:53:52.000Z
2024-10-25T11:53:52.449Z
[]
/posts/merve/964622853947499
3,450
0
894592648474728
[ { "type": "text", "value": "Parents in the 1990: Teach the kids to code", "raw": "Parents in the 1990: Teach the kids to code", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Parents now: Teach the kids to fix the code when it starts walking around 🤖✨", "raw": "Parents now: Teach the kids to fix the code when it starts walking around 🤖✨", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Parents in the 1990: Teach the kids to code Parents now: Teach the kids to fix the code when it starts walking around 🤖✨
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857746553-5df7e9e5da6d0311fd3d53f9.jpeg", "fullname": "Thomas Wolf", "name": "thomwolf", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 704, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5df7e9e5da6d0311fd3d53f9/mmmit46517lJ5QMfKcwX6.jpeg" } ]
[]
[ { "reaction": "❤️", "users": [ "luk158", "not-lain", "yangstech", "julien-c", "theospeak", "nazimali", "xpgx1", "sajjad112233", "maywell", "Lewdiculous", "kaki-paper", "lopezjm96", "super-cinnamon", "RoversX", "Chief-Inspector", "Ailokis", "Chehong", "OmbelineM", "Inexistent" ], "count": 19 }, { "reaction": "🚀", "users": [ "John6666", "not-lain", "julien-c", "super-cinnamon", "den0620", "OmbelineM" ], "count": 6 }, { "reaction": "🤗", "users": [ "CaioXapelaum", "julien-c", "sajjad112233", "Chehong" ], "count": 4 } ]
2024-10-25T10:18:26.000Z
2024-10-31T06:39:19.177Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/664476e1c3d4867f3a5e8337/PkeNVCJK095jPQHmpuww6.png", "fullname": "Ailokis", "name": "Ailokis", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/thomwolf/894592648474728
4,067
2
714948198941396
[ { "type": "text", "value": "📊 We present ScaleQuest-Math-1M, a mathematical reasoning dataset of 1 million high-quality question-answer pairs.", "raw": "📊 We present ScaleQuest-Math-1M, a mathematical reasoning dataset of 1 million high-quality question-answer pairs.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔥 We propose ScaleQuest, a scalable and novel data synthesis method that utilizes small-size open-source models to generate questions from scratch.", "raw": "🔥 We propose ScaleQuest, a scalable and novel data synthesis method that utilizes small-size open-source models to generate questions from scratch.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Project Page: ", "raw": "Project Page: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://scalequest.github.io/", "href": "https://scalequest.github.io/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset: ", "raw": "Dataset: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/dyyyyyyyy/ScaleQuest-Math", "href": null, "resource": { "type": "dataset", "id": "dyyyyyyyy/ScaleQuest-Math", "discussionNum": null }, "url": "https://huggingface.co/datasets/dyyyyyyyy/ScaleQuest-Math", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2410.18693", "href": null, "resource": { "type": "paper", "id": "2410.18693", "discussionNum": null }, "url": "https://huggingface.co/papers/2410.18693", "code": null, "user": null, "label": "Unleashing Reasoning Capability of LLMs via Scalable Question Synthesis\n from Scratch (2410.18693)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "HF Collection: ", "raw": "HF Collection: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/dyyyyyyyy/scalequest-670a7dc2623c91990f28913b", "href": null, "resource": { "type": "collection", "id": 
"dyyyyyyyy/scalequest-670a7dc2623c91990f28913b", "discussionNum": null }, "url": "https://huggingface.co/collections/dyyyyyyyy/scalequest-670a7dc2623c91990f28913b", "code": null, "user": null, "label": null, "lang": null } ]
📊 We present ScaleQuest-Math-1M, a mathematical reasoning dataset of 1 million high-quality question-answer pairs. 🔥 We propose ScaleQuest, a scalable and novel data synthesis method that utilizes small-size open-source models to generate questions from scratch. Project Page: https://scalequest.github.io/ Dataset: https://huggingface.co/datasets/dyyyyyyyy/ScaleQuest-Math Paper: https://huggingface.co/papers/2410.18693 HF Collection: https://huggingface.co/collections/dyyyyyyyy/scalequest-670a7dc2623c91990f28913b
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626cf0f65651e31a7a2b9779/80nHRSmCxw3f77oFMgr5P.jpeg", "fullname": "Ding", "name": "dyyyyyyyy", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/626cf0f65651e31a7a2b9779/Zs_gXlX-_9w9gjUt4Dfxm.png" } ]
[]
[ { "reaction": "👀", "users": [ "John6666", "LoopBreaker" ], "count": 2 }, { "reaction": "🔥", "users": [ "ImranzamanML", "thesoum" ], "count": 2 } ]
2024-10-25T03:38:58.000Z
2024-10-25T03:39:53.225Z
[]
/posts/dyyyyyyyy/714948198941396
1,213
0
283418566196094
[ { "type": "text", "value": "🤯 Plot twist: Size isn't everything in AI! A lean 32B parameter model just showed up to the party and outperformed a 70B one. Efficiency > Scale? The AI world just got more interesting...", "raw": "🤯 Plot twist: Size isn't everything in AI! A lean 32B parameter model just showed up to the party and outperformed a 70B one. Efficiency > Scale? The AI world just got more interesting...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Cohere For AI released Aya Expanse, a new family of multilingual models (8B and 32B) spanning 23 popular languages.", "raw": "Cohere For AI released Aya Expanse, a new family of multilingual models (8B and 32B) spanning 23 popular languages.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Models: ", "raw": "Models: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/CohereForAI/c4ai-aya-expanse-671a83d6b2c07c692beab3c3", "href": null, "resource": { "type": "collection", "id": "CohereForAI/c4ai-aya-expanse-671a83d6b2c07c692beab3c3", "discussionNum": null }, "url": "https://huggingface.co/collections/CohereForAI/c4ai-aya-expanse-671a83d6b2c07c692beab3c3", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Blog post: ", "raw": "Blog post: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/aya-expanse", "href": "https://huggingface.co/blog/aya-expanse", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Demo: ", "raw": "Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/CohereForAI/aya_expanse", "href": null, "resource": { "type": "space", "id": "CohereForAI/aya_expanse", "discussionNum": null }, "url": "https://huggingface.co/spaces/CohereForAI/aya_expanse", "code": null, "user": null, "label": null, "lang": null } ]
🤯 Plot twist: Size isn't everything in AI! A lean 32B parameter model just showed up to the party and outperformed a 70B one. Efficiency > Scale? The AI world just got more interesting... Cohere For AI released Aya Expanse, a new family of multilingual models (8B and 32B) spanning 23 popular languages. Models: https://huggingface.co/collections/CohereForAI/c4ai-aya-expanse-671a83d6b2c07c692beab3c3 Blog post: https://huggingface.co/blog/aya-expanse Demo: https://huggingface.co/spaces/CohereForAI/aya_expanse
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/HroULmlu3S6V-4zgLGtya.png" } ]
[]
[ { "reaction": "❤️", "users": [ "clem", "John6666", "teckytim", "juliantorr", "prithivMLmods", "thomwolf", "luk158", "sugatoray", "alielfilali01", "manred1997", "Tonic", "not-lain", "theospeak", "OmbelineM" ], "count": 14 } ]
2024-10-24T20:46:49.000Z
2024-10-24T20:46:49.468Z
[]
/posts/fdaudens/283418566196094
2,787
0
344429349009645
[ { "type": "text", "value": "This is no Woodstock AI but will be fun nonetheless haha. I’ll be hosting a live workshop with team members next week about the Enterprise Hugging Face hub.", "raw": "This is no Woodstock AI but will be fun nonetheless haha. I’ll be hosting a live workshop with team members next week about the Enterprise Hugging Face hub.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1,000 spots available first-come first serve with some surprises during the stream!", "raw": "1,000 spots available first-come first serve with some surprises during the stream!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can register and add to your calendar here: ", "raw": "You can register and add to your calendar here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://streamyard.com/watch/JS2jHsUP3NDM", "href": "https://streamyard.com/watch/JS2jHsUP3NDM", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
This is no Woodstock AI but will be fun nonetheless haha. I’ll be hosting a live workshop with team members next week about the Enterprise Hugging Face hub. 1,000 spots available first-come first serve with some surprises during the stream! You can register and add to your calendar here: https://streamyard.com/watch/JS2jHsUP3NDM
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem 🤗", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1763, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5e67bdd61009063689407479/WNvTsrvtBH_ikYFCgDe-9.png" } ]
[]
[ { "reaction": "🔥", "users": [ "nbroad", "adrienduf", "jeffboudier", "Julianvega2", "alvarobartt", "MoritzLaurer", "lysandre", "Wauplin", "Violette", "kramp", "prithivMLmods", "Sylvestre", "brunatrevelin", "alielfilali01", "fdaudens", "nicoism", "andrewrreed", "julien-c", "edwixx", "vtrubamacrocosmos", "pierrci" ], "count": 21 }, { "reaction": "🚀", "users": [ "jeffboudier", "Julianvega2", "lysandre", "Wauplin", "brunatrevelin", "andrewrreed", "julien-c", "nbroad" ], "count": 8 }, { "reaction": "❤️", "users": [ "jeffboudier", "Julianvega2", "lysandre", "julien-c", "sajjad112233", "rapadilla", "M1cler" ], "count": 7 }, { "reaction": "🤗", "users": [ "jeffboudier", "Julianvega2", "lysandre", "Aurelien-Morgan", "julien-c" ], "count": 5 }, { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-24T20:45:22.000Z
2024-11-03T04:10:51.322Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1605114051380-noauth.jpeg", "fullname": "Jeff Boudier", "name": "jeffboudier", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 195, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/67186e2e5433befe1ee225a4/kWRftAYDNf_HJZgaJQM2A.jpeg", "fullname": "Muhammad Niyaz", "name": "sajjad112233", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64b1619f4c3cc95a751e6c41/oSUtmp1Gw0I-ve_1wFlNW.jpeg", "fullname": "Michal Zebrowski", "name": "M1cler", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false } ]
/posts/clem/344429349009645
4,383
4
826595676846298
[ { "type": "text", "value": "If you have ~300+ GB of V-RAM, you can run Mochi from ", "raw": "If you have ~300+ GB of V-RAM, you can run Mochi from ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@genmo", "href": null, "resource": null, "url": null, "code": null, "user": "genmo", "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A SOTA model that dramatically closes the gap between closed and open video generation models. ", "raw": "A SOTA model that dramatically closes the gap between closed and open video generation models. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Mochi 1 introduces revolutionary architecture featuring joint reasoning over 44,520 video tokens with full 3D attention. The model implements extended learnable rotary positional embeddings (RoPE) in three dimensions, with network-learned mixing frequencies for space and time axes. ", "raw": "Mochi 1 introduces revolutionary architecture featuring joint reasoning over 44,520 video tokens with full 3D attention. The model implements extended learnable rotary positional embeddings (RoPE) in three dimensions, with network-learned mixing frequencies for space and time axes. 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The model incorporates cutting-edge improvements, including: ", "raw": "The model incorporates cutting-edge improvements, including: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- SwiGLU feedforward layers ", "raw": "- SwiGLU feedforward layers ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Query-key normalization for enhanced stability ", "raw": "- Query-key normalization for enhanced stability ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Sandwich normalization for controlled internal activations ", "raw": "- Sandwich normalization for controlled internal activations ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What is currently available?", "raw": "What is currently available?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The base model delivers impressive 480p video generation with exceptional motion quality and prompt adherence. Released under the Apache 2.0 license, it's freely available for both personal and commercial applications. ", "raw": "The base model delivers impressive 480p video generation with exceptional motion quality and prompt adherence. Released under the Apache 2.0 license, it's freely available for both personal and commercial applications. 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What's Coming?", "raw": "What's Coming?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Genmo has announced Mochi 1 HD, scheduled for release later this year, which will feature: ", "raw": "Genmo has announced Mochi 1 HD, scheduled for release later this year, which will feature: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Enhanced 720p resolution ", "raw": "- Enhanced 720p resolution ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Improved motion fidelity ", "raw": "- Improved motion fidelity ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Better handling of complex scene warping ", "raw": "- Better handling of complex scene warping ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
If you have ~300+ GB of V-RAM, you can run Mochi from @genmo A SOTA model that dramatically closes the gap between closed and open video generation models. Mochi 1 introduces revolutionary architecture featuring joint reasoning over 44,520 video tokens with full 3D attention. The model implements extended learnable rotary positional embeddings (RoPE) in three dimensions, with network-learned mixing frequencies for space and time axes. The model incorporates cutting-edge improvements, including: - SwiGLU feedforward layers - Query-key normalization for enhanced stability - Sandwich normalization for controlled internal activations What is currently available? The base model delivers impressive 480p video generation with exceptional motion quality and prompt adherence. Released under the Apache 2.0 license, it's freely available for both personal and commercial applications. What's Coming? Genmo has announced Mochi 1 HD, scheduled for release later this year, which will feature: - Enhanced 720p resolution - Improved motion fidelity - Better handling of complex scene warping
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/0n9YcqpH_VF99vvhU3atw.mp4" } ]
[]
[ { "reaction": "❤️", "users": [ "clem", "akhaliq", "thomwolf", "sepal", "AtAndDev", "theospeak", "VDBLOI2024", "abhi22", "RaphaelLiu", "viktsys", "DivaVivy" ], "count": 11 }, { "reaction": "🔥", "users": [ "sugatoray", "Aurelien-Morgan", "AtAndDev", "abhi22" ], "count": 4 }, { "reaction": "👀", "users": [ "John6666", "AtAndDev" ], "count": 2 } ]
2024-10-24T19:13:20.000Z
2024-10-25T20:33:12.062Z
[ { "avatarUrl": "/avatars/39eeff400134ae2d6a48541b7af89dc9.svg", "fullname": "Natwar Upadhyay", "name": "Natwar", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/630fdd96a119d49bc1e770d5/OpU95S4a8hkM8OUCZq79R.jpeg", "fullname": "Adam", "name": "adamo1139", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 38, "isFollowing": false } ]
/posts/singhsidhukuldeep/826595676846298
2,744
2
657096750247570
[ { "type": "text", "value": "October version of Claude 3.5 lifts SOTA (set by its June version) by 7 points.", "raw": "October version of Claude 3.5 lifts SOTA (set by its June version) by 7 points.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/onekq-ai/WebApp1K-models-leaderboard", "href": null, "resource": { "type": "space", "id": "onekq-ai/WebApp1K-models-leaderboard", "discussionNum": null }, "url": "https://huggingface.co/spaces/onekq-ai/WebApp1K-models-leaderboard", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Closed sourced models are widening the gap again.", "raw": "Closed sourced models are widening the gap again.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Note: Our frontier leaderboard now uses double test scenarios because the single-scenario test suit has been saturated.", "raw": "Note: Our frontier leaderboard now uses double test scenarios because the single-scenario test suit has been saturated.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
October version of Claude 3.5 lifts SOTA (set by its June version) by 7 points. https://huggingface.co/spaces/onekq-ai/WebApp1K-models-leaderboard Closed-source models are widening the gap again. Note: Our frontier leaderboard now uses double test scenarios because the single-scenario test suite has been saturated.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/669dbd709a4bf63e08f1ddc2/aV10ZJPPzH5LbnHFZNqc7.png", "fullname": "Yi Cui", "name": "onekq", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 16, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "John6666" ], "count": 1 } ]
2024-10-24T16:49:48.000Z
2024-10-24T16:49:48.584Z
[]
/posts/onekq/657096750247570
555
0
324940757694529
[ { "type": "text", "value": "Cohere drops two new multilingual models!", "raw": "Cohere drops two new multilingual models!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/CohereForAI/aya-expanse-8b", "href": null, "resource": { "type": "model", "id": "CohereForAI/aya-expanse-8b", "discussionNum": null }, "url": "https://huggingface.co/CohereForAI/aya-expanse-8b", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/CohereForAI/aya-expanse-32b", "href": null, "resource": { "type": "model", "id": "CohereForAI/aya-expanse-32b", "discussionNum": null }, "url": "https://huggingface.co/CohereForAI/aya-expanse-32b", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Try them out here", "raw": "Try them out here", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/CohereForAI/aya_expanse", "href": null, "resource": { "type": "space", "id": "CohereForAI/aya_expanse", "discussionNum": null }, "url": "https://huggingface.co/spaces/CohereForAI/aya_expanse", "code": null, "user": null, "label": null, "lang": null } ]
Cohere drops two new multilingual models! https://huggingface.co/CohereForAI/aya-expanse-8b https://huggingface.co/CohereForAI/aya-expanse-32b Try them out here https://huggingface.co/spaces/CohereForAI/aya_expanse
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/608aabf24955d2bfc3cd99c6/T762Ut0Y-w0sZB2ynvfbJ.jpeg", "fullname": "Aritra Roy Gosthipaty", "name": "ariG23498", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 64, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "prithivMLmods", "ZeroWw", "d0rj", "Josephgflowers", "manred1997", "Tonic" ], "count": 6 }, { "reaction": "👀", "users": [ "John6666", "Smorty100" ], "count": 2 } ]
2024-10-24T15:56:37.000Z
2024-10-24T15:56:37.064Z
[]
/posts/ariG23498/324940757694529
1,513
0
149699867340480
[ { "type": "text", "value": "🌟🌎 Cohere releases Aya 8B & 32B: SOTA multilingual models for 23 languages !", "raw": "🌟🌎 Cohere releases Aya 8B & 32B: SOTA multilingual models for 23 languages !", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How did they manage to beat top contenders while also adding 23 languages?", "raw": "How did they manage to beat top contenders while also adding 23 languages?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔄 𝗧𝗿𝗮𝗶𝗻 𝗼𝗻 𝘀𝘆𝗻𝘁𝗵𝗲𝘁𝗶𝗰 𝗱𝗮𝘁𝗮:", "raw": "🔄 𝗧𝗿𝗮𝗶𝗻 𝗼𝗻 𝘀𝘆𝗻𝘁𝗵𝗲𝘁𝗶𝗰 𝗱𝗮𝘁𝗮:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• Synthetic data has been said to cause model-collapse after too much training", "raw": "• Synthetic data has been said to cause model-collapse after too much training", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• Cohere has introduced \"data arbitrage\" to prevent this by strategically sampling from a pool of several teacher models instead of one single teacher", "raw": "• Cohere has introduced \"data arbitrage\" to prevent this by strategically sampling from a pool of several teacher models instead of one single teacher", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• First train a model pool for each different groups of languages, and employ an internal Reward Model named \"Arbiter\" to evaluate and select the optimal generation. Then only the best generation is kept as the final completion for each prompt", "raw": "• First train a model pool for each different groups of languages, and employ an internal Reward Model named \"Arbiter\" to evaluate and select the optimal generation. 
Then only the best generation is kept as the final completion for each prompt", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "➡️ This process is particularly effective for multilingual setting, where no single teacher model performs in all languages : here \"Multilingual Arbitrage\" singlehandedly improves win rates of the 8B model vs Gemma-2-9B by 10 points!", "raw": "➡️ This process is particularly effective for multilingual setting, where no single teacher model performs in all languages : here \"Multilingual Arbitrage\" singlehandedly improves win rates of the 8B model vs Gemma-2-9B by 10 points!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🧩 𝗨𝘀𝗲 𝗺𝗼𝗱𝗲𝗹 𝗺𝗲𝗿𝗴𝗶𝗻𝗴: Rather than struggling to find the right mix of data in training a single model for multilingual use, just train language specific models then merge them!", "raw": "🧩 𝗨𝘀𝗲 𝗺𝗼𝗱𝗲𝗹 𝗺𝗲𝗿𝗴𝗶𝗻𝗴: Rather than struggling to find the right mix of data in training a single model for multilingual use, just train language specific models then merge them!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• Maximize diversity between merged checkpoints by training each on different language families.", "raw": "• Maximize diversity between merged checkpoints by training each on different language families.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• Experimented fancy techniques (SLERP, TIES, DARE-TIES) but found out weighted averaging to be the most consistent!", "raw": "• Experimented fancy techniques (SLERP, TIES, DARE-TIES) but found out weighted averaging to be the most consistent!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "➡️ Merging had 3x more gains at high 35B scale vs the 8B scale - consistent with literature findings that merging is more effective at scale", "raw": "➡️ Merging had 3x more gains at high 35B scale vs the 8B scale - consistent with literature findings that merging is more effective at scale", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, 
"raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚡️ 𝗚𝗿𝗲𝗮𝘁 𝗽𝗲𝗿𝗳𝗼𝗿𝗺𝗮𝗻𝗰𝗲: Automatic evaluations on Arena-Hard-Auto dataset:", "raw": "⚡️ 𝗚𝗿𝗲𝗮𝘁 𝗽𝗲𝗿𝗳𝗼𝗿𝗺𝗮𝗻𝗰𝗲: Automatic evaluations on Arena-Hard-Auto dataset:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "➡️ Aya Expanse 8B beats models from its weight class such as Gemma 2 9B, Llama 3.1 8B, and the recent Ministral 8B, with win rates ranging from 60.4% to 70.6%", "raw": "➡️ Aya Expanse 8B beats models from its weight class such as Gemma 2 9B, Llama 3.1 8B, and the recent Ministral 8B, with win rates ranging from 60.4% to 70.6%", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "➡️ Aya Expanse 32B outperforms Gemma 2 27B, Mistral 8x22B, and Llama 3.1 70B (2x its size)", "raw": "➡️ Aya Expanse 32B outperforms Gemma 2 27B, Mistral 8x22B, and Llama 3.1 70B (2x its size)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• ⚠️ But this performance eval comes from only one benchmark! Let's wait for Open LLM leaderboard evals;", "raw": "• ⚠️ But this performance eval comes from only one benchmark! Let's wait for Open LLM leaderboard evals;", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔒 CC by NC license", "raw": "🔒 CC by NC license", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Blog post here: ", "raw": "Blog post here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/aya-expanse", "href": "https://huggingface.co/blog/aya-expanse", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🌟🌎 Cohere releases Aya 8B & 32B: SOTA multilingual models for 23 languages ! How did they manage to beat top contenders while also adding 23 languages? 🔄 𝗧𝗿𝗮𝗶𝗻 𝗼𝗻 𝘀𝘆𝗻𝘁𝗵𝗲𝘁𝗶𝗰 𝗱𝗮𝘁𝗮: • Synthetic data has been said to cause model-collapse after too much training • Cohere has introduced "data arbitrage" to prevent this by strategically sampling from a pool of several teacher models instead of one single teacher • First train a model pool for each different groups of languages, and employ an internal Reward Model named "Arbiter" to evaluate and select the optimal generation. Then only the best generation is kept as the final completion for each prompt ➡️ This process is particularly effective for multilingual setting, where no single teacher model performs in all languages : here "Multilingual Arbitrage" singlehandedly improves win rates of the 8B model vs Gemma-2-9B by 10 points! 🧩 𝗨𝘀𝗲 𝗺𝗼𝗱𝗲𝗹 𝗺𝗲𝗿𝗴𝗶𝗻𝗴: Rather than struggling to find the right mix of data in training a single model for multilingual use, just train language specific models then merge them! • Maximize diversity between merged checkpoints by training each on different language families. • Experimented fancy techniques (SLERP, TIES, DARE-TIES) but found out weighted averaging to be the most consistent! ➡️ Merging had 3x more gains at high 35B scale vs the 8B scale - consistent with literature findings that merging is more effective at scale ⚡️ 𝗚𝗿𝗲𝗮𝘁 𝗽𝗲𝗿𝗳𝗼𝗿𝗺𝗮𝗻𝗰𝗲: Automatic evaluations on Arena-Hard-Auto dataset: ➡️ Aya Expanse 8B beats models from its weight class such as Gemma 2 9B, Llama 3.1 8B, and the recent Ministral 8B, with win rates ranging from 60.4% to 70.6% ➡️ Aya Expanse 32B outperforms Gemma 2 27B, Mistral 8x22B, and Llama 3.1 70B (2x its size) • ⚠️ But this performance eval comes from only one benchmark! Let's wait for Open LLM leaderboard evals; 🔒 CC by NC license Blog post here: https://huggingface.co/blog/aya-expanse
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/iGhHFRxjpTfCxZk-YLRCj.png" } ]
[]
[ { "reaction": "🚀", "users": [ "ariG23498", "prithivMLmods", "nofl", "den0620" ], "count": 4 }, { "reaction": "❤️", "users": [ "fsommers", "nofl", "mesut07", "Abc7347" ], "count": 4 }, { "reaction": "👀", "users": [ "John6666", "nofl", "ZeroWw" ], "count": 3 }, { "reaction": "🔥", "users": [ "nofl", "ZeroWw", "kartikagg98" ], "count": 3 } ]
2024-10-24T14:11:52.000Z
2024-10-24T14:11:52.486Z
[]
/posts/m-ric/149699867340480
1,942
0
285497162293153
[ { "type": "text", "value": "🚨 Instruct-tuning impacts models differently across families! Qwen2.5-72B-Instruct excels on IFEval but struggles with MATH-Hard, while Llama-3.1-70B-Instruct avoids MATH performance loss! Why? Can they follow the format in examples? 📊 Compare models: ", "raw": "🚨 Instruct-tuning impacts models differently across families! Qwen2.5-72B-Instruct excels on IFEval but struggles with MATH-Hard, while Llama-3.1-70B-Instruct avoids MATH performance loss! Why? Can they follow the format in examples? 📊 Compare models: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/open-llm-leaderboard/comparator", "href": null, "resource": { "type": "space", "id": "open-llm-leaderboard/comparator", "discussionNum": null }, "url": "https://huggingface.co/spaces/open-llm-leaderboard/comparator", "code": null, "user": null, "label": null, "lang": null } ]
🚨 Instruct-tuning impacts models differently across families! Qwen2.5-72B-Instruct excels on IFEval but struggles with MATH-Hard, while Llama-3.1-70B-Instruct avoids MATH performance loss! Why? Can they follow the format in examples? 📊 Compare models: https://huggingface.co/spaces/open-llm-leaderboard/comparator
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1606406298765-noauth.jpeg", "fullname": "Albert Villanova del Moral", "name": "albertvillanova", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 196, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5fbfd09ee366524fe8e97cd3/1wPeWcBvayB1H8cCogpLh.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5fbfd09ee366524fe8e97cd3/9FIhvVHRrzjU2_zhZX956.png" } ]
[]
[ { "reaction": "👀", "users": [ "John6666", "nofl", "Jason233", "xi0v" ], "count": 4 } ]
2024-10-24T13:37:50.000Z
2024-10-24T13:37:50.543Z
[]
/posts/albertvillanova/285497162293153
1,214
0
519904552104060
[ { "type": "text", "value": "Just watched ", "raw": "Just watched ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@thomwolf", "href": null, "resource": null, "url": null, "code": null, "user": "thomwolf", "label": null, "lang": null }, { "type": "text", "value": " tear down the over-hyped AGI narrative in 30 seconds - and it's refreshingly grounded.", "raw": " tear down the over-hyped AGI narrative in 30 seconds - and it's refreshingly grounded.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "No wild speculation about superintelligence timelines or consciousness. Just practical insights from someone who really understands the technology.", "raw": "No wild speculation about superintelligence timelines or consciousness. Just practical insights from someone who really understands the technology.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is the kind of level-headed perspective that helps us focus on what AI can actually do today (which is already transformative) rather than getting lost in AGI fantasy. Worth your time if you want to understand AI progress without the hype.", "raw": "This is the kind of level-headed perspective that helps us focus on what AI can actually do today (which is already transformative) rather than getting lost in AGI fantasy. Worth your time if you want to understand AI progress without the hype.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Watch the full interview at CogX here: ", "raw": "Watch the full interview at CogX here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.youtube.com/watch?v=IjL_6Th6Ea0", "href": "https://www.youtube.com/watch?v=IjL_6Th6Ea0", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Just watched @thomwolf tear down the over-hyped AGI narrative in 30 seconds - and it's refreshingly grounded. No wild speculation about superintelligence timelines or consciousness. Just practical insights from someone who really understands the technology. This is the kind of level-headed perspective that helps us focus on what AI can actually do today (which is already transformative) rather than getting lost in AGI fantasy. Worth your time if you want to understand AI progress without the hype. Watch the full interview at CogX here: https://www.youtube.com/watch?v=IjL_6Th6Ea0
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/cG3IoDIYYfG17VwkVTTqQ.mp4" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857746553-5df7e9e5da6d0311fd3d53f9.jpeg", "fullname": "Thomas Wolf", "name": "thomwolf", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 704 } ]
[ { "reaction": "👍", "users": [ "Aurelien-Morgan", "not-lain", "mrdbourke", "ZeroWw", "Joseph717171", "OmbelineM", "ai-everyday" ], "count": 7 }, { "reaction": "👀", "users": [ "John6666", "not-lain" ], "count": 2 } ]
2024-10-24T13:17:51.000Z
2024-10-24T13:17:51.431Z
[]
/posts/fdaudens/519904552104060
1,355
0
571146245105307
[ { "type": "text", "value": "Are you a Professional Python Developer? Here is why Logging is important for debugging, tracking and monitoring the code", "raw": "Are you a Professional Python Developer? Here is why Logging is important for debugging, tracking and monitoring the code", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Logging", "raw": "Logging", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Logging is very important part of any project you start. It help you to track the execution of a program, debug issues, monitor system performance and keep an audit trail of events. ", "raw": "Logging is very important part of any project you start. It help you to track the execution of a program, debug issues, monitor system performance and keep an audit trail of events. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Basic Logging Setup", "raw": "Basic Logging Setup", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The basic way to add logging to a Python code is by using the logging.basicConfig() function. This function set up basic configuration for logging messages to either console or to a file.", "raw": "The basic way to add logging to a Python code is by using the logging.basicConfig() function. 
This function set up basic configuration for logging messages to either console or to a file.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here is how we can use basic console logging", "raw": "Here is how we can use basic console logging", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\n#Call built in library\nimport logging\n\n# lets call library and start logging \nlogging.basicConfig(level=logging.DEBUG) #you can add more format specifier \n\n# It will show on the console since we did not added filename to save logs\nlogging.debug('Here we go for debug message')\nlogging.info('Here we go for info message')\nlogging.warning('Here we go for warning message')\nlogging.error('Here we go for error message')\nlogging.critical('Here we go for critical message')\n\n#Note:\n# If you want to add anything in the log then do like this way\nrecords=100\nlogging.debug('There are total %s number of records.', records)\n\n# same like string format \nlost=20\nlogging.debug('There are total %s number of records from which %s are lost', records, lost)\n```", "href": null, "resource": null, "url": null, "code": "#Call built in library\nimport logging\n\n# lets call library and start logging \nlogging.basicConfig(level=logging.DEBUG) #you can add more format specifier \n\n# It will show on the console since we did not added filename to save logs\nlogging.debug('Here we go for debug message')\nlogging.info('Here we go for info message')\nlogging.warning('Here we go for warning message')\nlogging.error('Here we go for error message')\nlogging.critical('Here we go for critical message')\n\n#Note:\n# If you want to add anything in the log then do like this way\nrecords=100\nlogging.debug('There are total %s number of records.', records)\n\n# same like string format \nlost=20\nlogging.debug('There are total %s number of records from which %s are lost', records, lost)", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Logging to a File", "raw": "Logging to a File", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We can also save the log to a file instead of console. 
For this, we can add the filename parameter to logging.basicConfig().", "raw": "We can also save the log to a file instead of console. For this, we can add the filename parameter to logging.basicConfig().", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\nimport logging\n# Saving the log to a file. The logs will be written to app.log\nlogging.basicConfig(filename='app.log', level=logging.DEBUG)\n\nlogging.debug('Here we go for debug message')\nlogging.info('Here we go for info message')\nlogging.warning('Here we go for warning message')\nlogging.error('Here we go for error message')\nlogging.critical('Here we go for critical message')\n```", "href": null, "resource": null, "url": null, "code": "import logging\n# Saving the log to a file. The logs will be written to app.log\nlogging.basicConfig(filename='app.log', level=logging.DEBUG)\n\nlogging.debug('Here we go for debug message')\nlogging.info('Here we go for info message')\nlogging.warning('Here we go for warning message')\nlogging.error('Here we go for error message')\nlogging.critical('Here we go for critical message')", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can read more on my medium blog ", "raw": "You can read more on my medium blog ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://medium.com/@imranzaman-5202/are-you-a-professional-python-developer-8596e2b2edaa", "href": "https://medium.com/@imranzaman-5202/are-you-a-professional-python-developer-8596e2b2edaa", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Are you a Professional Python Developer? Here is why Logging is important for debugging, tracking and monitoring the code Logging Logging is very important part of any project you start. It help you to track the execution of a program, debug issues, monitor system performance and keep an audit trail of events. Basic Logging Setup The basic way to add logging to a Python code is by using the logging.basicConfig() function. This function set up basic configuration for logging messages to either console or to a file. Here is how we can use basic console logging ``` #Call built in library import logging # lets call library and start logging logging.basicConfig(level=logging.DEBUG) #you can add more format specifier # It will show on the console since we did not added filename to save logs logging.debug('Here we go for debug message') logging.info('Here we go for info message') logging.warning('Here we go for warning message') logging.error('Here we go for error message') logging.critical('Here we go for critical message') #Note: # If you want to add anything in the log then do like this way records=100 logging.debug('There are total %s number of records.', records) # same like string format lost=20 logging.debug('There are total %s number of records from which %s are lost', records, lost) ``` Logging to a File We can also save the log to a file instead of console. For this, we can add the filename parameter to logging.basicConfig(). ``` import logging # Saving the log to a file. The logs will be written to app.log logging.basicConfig(filename='app.log', level=logging.DEBUG) logging.debug('Here we go for debug message') logging.info('Here we go for info message') logging.warning('Here we go for warning message') logging.error('Here we go for error message') logging.critical('Here we go for critical message') ``` You can read more on my medium blog https://medium.com/@imranzaman-5202/are-you-a-professional-python-developer-8596e2b2edaa
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/y1W6Co4jIYB95Cx6Tjrsd.jpeg", "fullname": "Muhammad Imran Zaman", "name": "ImranzamanML", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 32, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "bkhalid063", "AtAndDev", "wing0night" ], "count": 4 } ]
2024-10-23T23:51:30.000Z
2024-10-23T23:52:51.366Z
[]
/posts/ImranzamanML/571146245105307
1,674
0
547097184415362
[ { "type": "text", "value": "As the rapid adoption of chat bots and QandA models continues, so do the concerns for their reliability and safety. In response to this, many state-of-the-art models are being tuned to act as Safety Guardrails to protect against malicious usage and avoid undesired, harmful output. I published a Hugging Face blog introducing a simple, proof-of-concept, RoBERTa-based LLM that my team and I finetuned to detect toxic prompt inputs into chat-style LLMs. The article explores some of the tradeoffs of fine-tuning larger decoder vs. smaller encoder models and asks the question if \"simpler is better\" in the arena of toxic prompt detection.", "raw": "As the rapid adoption of chat bots and QandA models continues, so do the concerns for their reliability and safety. In response to this, many state-of-the-art models are being tuned to act as Safety Guardrails to protect against malicious usage and avoid undesired, harmful output. I published a Hugging Face blog introducing a simple, proof-of-concept, RoBERTa-based LLM that my team and I finetuned to detect toxic prompt inputs into chat-style LLMs. The article explores some of the tradeoffs of fine-tuning larger decoder vs. smaller encoder models and asks the question if \"simpler is better\" in the arena of toxic prompt detection.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 to blog: ", "raw": "🔗 to blog: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/daniel-de-leon/toxic-prompt-roberta", "href": "https://huggingface.co/blog/daniel-de-leon/toxic-prompt-roberta", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 to model: ", "raw": "🔗 to model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/Intel/toxic-prompt-roberta", "href": null, "resource": { "type": "model", "id": "Intel/toxic-prompt-roberta", "discussionNum": null }, "url": "https://huggingface.co/Intel/toxic-prompt-roberta", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 to OPEA microservice: ", "raw": "🔗 to OPEA microservice: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/opea-project/GenAIComps/tree/main/comps/guardrails/toxicity_detection", "href": "https://github.com/opea-project/GenAIComps/tree/main/comps/guardrails/toxicity_detection", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A huge thank you to my colleagues that helped contribute: ", "raw": "A huge thank you to my colleagues that helped contribute: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@qgao007", "href": null, "resource": null, "url": null, "code": null, "user": "qgao007", "label": null, "lang": null }, { "type": "text", "value": ", ", "raw": ", ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@mitalipo", "href": null, "resource": null, "url": null, "code": null, "user": "mitalipo", "label": null, "lang": null }, { "type": "text", "value": ", ", "raw": ", ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@ashahba", "href": null, "resource": null, "url": null, "code": null, "user": "ashahba", "label": null, "lang": null }, { "type": "text", "value": " and Fahim Mohammad", "raw": " and Fahim Mohammad", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
As the rapid adoption of chat bots and QandA models continues, so do the concerns for their reliability and safety. In response to this, many state-of-the-art models are being tuned to act as Safety Guardrails to protect against malicious usage and avoid undesired, harmful output. I published a Hugging Face blog introducing a simple, proof-of-concept, RoBERTa-based LLM that my team and I finetuned to detect toxic prompt inputs into chat-style LLMs. The article explores some of the tradeoffs of fine-tuning larger decoder vs. smaller encoder models and asks the question if "simpler is better" in the arena of toxic prompt detection. 🔗 to blog: https://huggingface.co/blog/daniel-de-leon/toxic-prompt-roberta 🔗 to model: https://huggingface.co/Intel/toxic-prompt-roberta 🔗 to OPEA microservice: https://github.com/opea-project/GenAIComps/tree/main/comps/guardrails/toxicity_detection A huge thank you to my colleagues that helped contribute: @qgao007, @mitalipo, @ashahba and Fahim Mohammad
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/pWwOKN-HzpmRZKc48Fvon.jpeg", "fullname": "Daniel De Leon", "name": "daniel-de-leon", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 6, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/648b744f08c4a9d807b487f0/uXUHP_AdUpAZ9AR4u9-s6.jpeg" } ]
[ { "avatarUrl": "/avatars/67d988b255c5ff37b23d595c26828cb4.svg", "fullname": "Abolfazl Shahbazi", "name": "ashahba", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5 }, { "avatarUrl": "/avatars/7cac4bd8118884e70c08400edf3ed202.svg", "fullname": "Mitali P", "name": "mitalipo", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 }, { "avatarUrl": "/avatars/d4947439475dc81f2c9e9304382b6257.svg", "fullname": "Qun Gao", "name": "qgao007", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1 } ]
[ { "reaction": "🔥", "users": [ "TuringsSolutions", "ImranzamanML", "qgao007", "mitalipo", "PetarMiladinov", "lunarflu", "darkzbaron" ], "count": 7 }, { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-23T20:59:59.000Z
2024-10-23T20:59:59.491Z
[]
/posts/daniel-de-leon/547097184415362
2,393
0
534172506980998
[ { "type": "text", "value": "Since 2022 I have been trying to understand how to support advancement of the two best python patterns for AI development which are:", "raw": "Since 2022 I have been trying to understand how to support advancement of the two best python patterns for AI development which are:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Streamlit", "raw": "1. Streamlit", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Gradio", "raw": "2. Gradio", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The reason I chose them in this order was the fact that the streamlit library had the timing drop on gradio by being available with near perfection about a year or two before training data tap of GPT. ", "raw": "The reason I chose them in this order was the fact that the streamlit library had the timing drop on gradio by being available with near perfection about a year or two before training data tap of GPT. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Nowadays its important that if you want current code to be right on generation it requires understanding of consistency in code method names so no manual intervention is required with each try.", "raw": "Nowadays its important that if you want current code to be right on generation it requires understanding of consistency in code method names so no manual intervention is required with each try.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "With GPT and Claude being my top two for best AI pair programming models, I gravitate towards streamlit since aside from common repeat errors on cache and experimental functions circa 2022 were not solidified. ", "raw": "With GPT and Claude being my top two for best AI pair programming models, I gravitate towards streamlit since aside from common repeat errors on cache and experimental functions circa 2022 were not solidified. 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " Its consistency therefore lacks human correction needs. Old dataset error situations are minimal.", "raw": " Its consistency therefore lacks human correction needs. Old dataset error situations are minimal.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Now, I seek to make it consistent on gradio side. Why? Gradio lapped streamlit for blocks paradigm and API for free which are I feel are amazing features which change software engineering forever.", "raw": "Now, I seek to make it consistent on gradio side. Why? Gradio lapped streamlit for blocks paradigm and API for free which are I feel are amazing features which change software engineering forever.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For a few months I thought BigCode would become the new best model due to its training corpus datasets, yet I never felt it got to market as the next best AI coder model.", "raw": "For a few months I thought BigCode would become the new best model due to its training corpus datasets, yet I never felt it got to market as the next best AI coder model.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I am curious on Gradio's future and how. If the two main models (GPT and Claude) pick up the last few years, I could then code with AI without manual intervention. As it stands today Gradio is better if you could get the best coding models to not repeatedly confuse old syntax as current syntax yet we do live in an imperfect world!", "raw": "I am curious on Gradio's future and how. If the two main models (GPT and Claude) pick up the last few years, I could then code with AI without manual intervention. 
As it stands today Gradio is better if you could get the best coding models to not repeatedly confuse old syntax as current syntax yet we do live in an imperfect world!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Is anyone using an AI pair programming model that rocks with Gradio's latest syntax? I would like to code with a model that knows how to not miss the advancements and syntax changes that gradio has had in the past few years. Trying grok2 as well.", "raw": "Is anyone using an AI pair programming model that rocks with Gradio's latest syntax? I would like to code with a model that knows how to not miss the advancements and syntax changes that gradio has had in the past few years. Trying grok2 as well.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "My IDE coding love is HF. Its hands down faster (100x) than other cloud paradigms. Any tips on models best for gradio coding I can use?", "raw": "My IDE coding love is HF. Its hands down faster (100x) than other cloud paradigms. Any tips on models best for gradio coding I can use?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "--Aaron ", "raw": "--Aaron ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Since 2022 I have been trying to understand how to support the advancement of the two best Python patterns for AI development, which are: 1. Streamlit 2. Gradio The reason I chose them in this order is that the Streamlit library had a timing advantage over Gradio: it was available in near-final form a year or two before GPT's training-data cutoff. Nowadays, if you want generated code to be right on the first try, the library needs consistent method names across versions so no manual intervention is required with each attempt. With GPT and Claude being my top two AI pair programming models, I gravitate towards Streamlit: aside from common repeat errors around cache and experimental functions that were not yet solidified circa 2022, its consistency means little human correction is needed, and errors caused by stale training data are minimal. Now I want the same consistency on the Gradio side. Why? Gradio lapped Streamlit with the Blocks paradigm and a free auto-generated API, which I feel are amazing features that change software engineering forever. For a few months I thought BigCode would become the best coding model because of its training corpus, yet I never felt it reached the market as the next best AI coder. I am curious about Gradio's future and how this plays out. If the two main models (GPT and Claude) pick up the changes of the last few years, I could code with AI without manual intervention. As it stands today, Gradio is the better choice if you can get the best coding models to stop confusing old syntax with current syntax, but we do live in an imperfect world! Is anyone using an AI pair programming model that handles Gradio's latest syntax well? I would like to code with a model that does not miss the advancements and syntax changes Gradio has had over the past few years. Trying Grok 2 as well. My IDE coding love is HF; it's hands down faster (100x) than other cloud paradigms. Any tips on models best for Gradio coding I can use? --Aaron
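For readers who haven't seen the Blocks paradigm mentioned above, here is a minimal sketch of the pattern (nothing project-specific assumed; the greet function and component names are placeholders):

```python
import gradio as gr

def greet(name: str) -> str:
    return f"Hello, {name}!"

with gr.Blocks() as demo:
    name = gr.Textbox(label="Name")
    greeting = gr.Textbox(label="Greeting")
    btn = gr.Button("Greet")
    # Event wiring is explicit in Blocks: a click on the button runs greet()
    btn.click(fn=greet, inputs=name, outputs=greeting)

demo.launch()  # launching also exposes the auto-generated API that gradio_client can call
```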
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1656147940537-620630b603825909dcbeba35.jpeg", "fullname": "Aaron C Wacker", "name": "awacke1", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 185, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "LeroyDyer" ], "count": 2 } ]
2024-10-23T19:29:29.000Z
2024-10-26T17:22:29.776Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg", "fullname": "leroy Samuel Dyer", "name": "LeroyDyer", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 84, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1656147940537-620630b603825909dcbeba35.jpeg", "fullname": "Aaron C Wacker", "name": "awacke1", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 185, "isFollowing": false } ]
/posts/awacke1/534172506980998
1,625
4
294220561859951
[ { "type": "text", "value": "Hi, I am looking for a nano/micro llama-compatible model so I can train to run it on my 16 GB Mac in CPU mode. Do you have any recommendations? Thanks", "raw": "Hi, I am looking for a nano/micro llama-compatible model so I can train to run it on my 16 GB Mac in CPU mode. Do you have any recommendations? Thanks", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hi, I am looking for a nano/micro llama-compatible model so I can train to run it on my 16 GB Mac in CPU mode. Do you have any recommendations? Thanks
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66f593581aac7ff4cb6408f8/WcpilrjxA5L55wQkLiZ7i.jpeg", "fullname": "Echeyde Cubillo", "name": "echeyde", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-23T17:31:10.000Z
2024-10-23T17:31:10.755Z
[]
/posts/echeyde/294220561859951
615
0
219908014902053
[ { "type": "text", "value": "🔥🔥🔥Introducing Oryx-1.5!", "raw": "🔥🔥🔥Introducing Oryx-1.5!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A series of unified MLLMs with much stronger performance on all the image, video, and 3D benchmarks 😍", "raw": "A series of unified MLLMs with much stronger performance on all the image, video, and 3D benchmarks 😍", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🛠️Github: ", "raw": "🛠️Github: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/Oryx-mllm/Oryx", "href": "https://github.com/Oryx-mllm/Oryx", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🚀Model: ", "raw": "🚀Model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/THUdyh/oryx-15-6718c60763845525c2bba71d", "href": null, "resource": { "type": "collection", "id": "THUdyh/oryx-15-6718c60763845525c2bba71d", "discussionNum": null }, "url": "https://huggingface.co/collections/THUdyh/oryx-15-6718c60763845525c2bba71d", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🎨Demo: ", "raw": "🎨Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/THUdyh/Oryx", "href": null, "resource": { "type": "space", "id": "THUdyh/Oryx", "discussionNum": null }, "url": "https://huggingface.co/spaces/THUdyh/Oryx", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👋Try the top-tier MLLM yourself!", "raw": "👋Try the top-tier MLLM yourself!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👀Stay tuned for more explorations on MLLMs!", "raw": "👀Stay tuned for more explorations on MLLMs!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, 
"user": null, "label": null, "lang": null } ]
🔥🔥🔥Introducing Oryx-1.5! A series of unified MLLMs with much stronger performance on all the image, video, and 3D benchmarks 😍 🛠️Github: https://github.com/Oryx-mllm/Oryx 🚀Model: https://huggingface.co/collections/THUdyh/oryx-15-6718c60763845525c2bba71d 🎨Demo: https://huggingface.co/spaces/THUdyh/Oryx 👋Try the top-tier MLLM yourself! 👀Stay tuned for more explorations on MLLMs!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/652965773a416e1f2173443b/y9MB8YgHzbwCXAc4EI9T3.jpeg", "fullname": "Yuhao Dong", "name": "THUdyh", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 24, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/652965773a416e1f2173443b/TLbLP8-FUh98L1xfBfWN7.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/652965773a416e1f2173443b/TTMIVQHy2jOrabZZWIHs9.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/652965773a416e1f2173443b/H5NlWdDFaC_p78KGut_Df.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/652965773a416e1f2173443b/NsucWXaQofyPlLql0y46l.png" } ]
[]
[ { "reaction": "🔥", "users": [ "andito", "John6666", "crimeraaa", "YaTharThShaRma999", "oceansweep", "teowu", "Zuyan", "lunarflu", "AdinaY", "ucyang", "kdub307", "djuna" ], "count": 12 } ]
2024-10-23T15:10:49.000Z
2024-10-23T15:10:49.823Z
[]
/posts/THUdyh/219908014902053
3,136
0
801618443391369
[ { "type": "text", "value": "Lotus 🪷 is a new foundation model on monocular depth estimation ✨", "raw": "Lotus 🪷 is a new foundation model on monocular depth estimation ✨", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Compared to previous diffusion-based MDE models, Lotus is modified for dense prediction tasks", "raw": "Compared to previous diffusion-based MDE models, Lotus is modified for dense prediction tasks", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Authors also released a model for normal prediction 🤗", "raw": "Authors also released a model for normal prediction 🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Find everything in this collection ", "raw": "Find everything in this collection ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/merve/lotus-6718fb957dc1c85a47ca1210", "href": null, "resource": { "type": "collection", "id": "merve/lotus-6718fb957dc1c85a47ca1210", "discussionNum": null }, "url": "https://huggingface.co/collections/merve/lotus-6718fb957dc1c85a47ca1210", "code": null, "user": null, "label": null, "lang": null } ]
Lotus 🪷 is a new foundation model on monocular depth estimation ✨ Compared to previous diffusion-based MDE models, Lotus is modified for dense prediction tasks Authors also released a model for normal prediction 🤗 Find everything in this collection https://huggingface.co/collections/merve/lotus-6718fb957dc1c85a47ca1210
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "AtAndDev", "KvrParaskevi", "lunarflu", "not-lain" ], "count": 5 }, { "reaction": "❤️", "users": [ "lunarflu", "AtAndDev", "AdinaY", "not-lain", "thomwolf" ], "count": 5 } ]
2024-10-23T14:15:43.000Z
2024-10-23T14:15:43.150Z
[]
/posts/merve/801618443391369
2,442
0
478366490704768
[ { "type": "text", "value": "# PyTorch == 2.5.0 Breaks Transformers' SDPAttention!", "raw": "# PyTorch == 2.5.0 Breaks Transformers' SDPAttention!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "When you encounter \"RuntimeError: cuDNN Frontend error: [cudnn_frontend] Error: No execution plans support the graph.\" ", "raw": "When you encounter \"RuntimeError: cuDNN Frontend error: [cudnn_frontend] Error: No execution plans support the graph.\" ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We can use workaround like this:", "raw": "We can use workaround like this:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```python\ntorch.backends.cuda.enable_cudnn_sdp(False)\n```", "href": null, "resource": null, "url": null, "code": "torch.backends.cuda.enable_cudnn_sdp(False)", "user": null, "label": null, "lang": "python" }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "but this slow downs the performance gain from PyTorch 2.5.", "raw": "but this slow downs the performance gain from PyTorch 2.5.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Although it is fixed(not \"fixed\" but default option is turn-off the cuDNN SDPA) at here -- ", "raw": "Although it is fixed(not \"fixed\" but default option is turn-off the cuDNN SDPA) at here -- ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/pytorch/pytorch/pull/138587", "href": "https://github.com/pytorch/pytorch/pull/138587", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " , but not released yet. 
(you need to install directly from source)", "raw": " , but not released yet. (you need to install directly from source)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Fastest way for now : pip install \"torch<2.5\"", "raw": "Fastest way for now : pip install \"torch<2.5\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Ref: ", "raw": "Ref: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/huggingface/diffusers/issues/9704#issuecomment-2422585273", "href": "https://github.com/huggingface/diffusers/issues/9704#issuecomment-2422585273", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
# PyTorch == 2.5.0 Breaks Transformers' SDPAttention! When you encounter "RuntimeError: cuDNN Frontend error: [cudnn_frontend] Error: No execution plans support the graph.", you can use a workaround like this: ```python torch.backends.cuda.enable_cudnn_sdp(False) ``` but this gives up some of the performance gain from PyTorch 2.5. It has been addressed upstream (not exactly "fixed" — the default is now to turn the cuDNN SDPA backend off) in https://github.com/pytorch/pytorch/pull/138587 , but that change has not been released yet (you need to install PyTorch directly from source). Fastest way for now: pip install "torch<2.5" Ref: https://github.com/huggingface/diffusers/issues/9704#issuecomment-2422585273
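To make the workaround concrete, here is a minimal sketch of applying it before running a transformers model (the gpt2 checkpoint is only an illustration; this assumes a CUDA build of PyTorch 2.5 and a model with SDPA support):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Disable only the cuDNN SDPA backend; the flash, memory-efficient and math backends stay enabled.
torch.backends.cuda.enable_cudnn_sdp(False)

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2", attn_implementation="sdpa").to("cuda")

inputs = tokenizer("PyTorch 2.5 smoke test:", return_tensors="pt").to("cuda")
output = model.generate(**inputs, max_new_tokens=8)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```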
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5e56829137cb5b49818287ea/8HYzJeRc4b9Wu7BfJwibS.png", "fullname": "Lee Junbum", "name": "beomi", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 378, "isFollowing": false }
[]
[]
[ { "reaction": "🔥", "users": [ "maywell", "John6666", "kaki-paper", "razPie", "Word2Li", "GoDjMike", "achilou", "spyker77", "qmin2", "r3ihiq", "jyoung105" ], "count": 11 }, { "reaction": "😎", "users": [ "John6666", "raincandy-u", "dingo-actual", "qmin2" ], "count": 4 }, { "reaction": "🤯", "users": [ "John6666" ], "count": 1 }, { "reaction": "😔", "users": [ "John6666" ], "count": 1 }, { "reaction": "👍", "users": [ "Pretergeek" ], "count": 1 } ]
2024-10-23T09:20:20.000Z
2024-10-23T09:20:20.749Z
[]
/posts/beomi/478366490704768
3,713
0
808673436695273
[ { "type": "text", "value": "The Mystery Bot 🕵️‍♂️ saga I posted about from earlier this week has been solved...🤗", "raw": "The Mystery Bot 🕵️‍♂️ saga I posted about from earlier this week has been solved...🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Cohere for AI has just announced its open source Aya Expanse multilingual model. The Initial release supports 23 languages with more on the way soon.🌌 🌍 ", "raw": "Cohere for AI has just announced its open source Aya Expanse multilingual model. The Initial release supports 23 languages with more on the way soon.🌌 🌍 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can also try Aya Expanse via SMS on your mobile phone using the global WhatsApp number or one of the initial set of country specific numbers listed below.⬇️", "raw": "You can also try Aya Expanse via SMS on your mobile phone using the global WhatsApp number or one of the initial set of country specific numbers listed below.⬇️", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🌍WhatsApp - +14313028498", "raw": "🌍WhatsApp - +14313028498", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Germany - (+49) 1771786365", "raw": "Germany - (+49) 1771786365", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "USA – +18332746219", "raw": "USA – +18332746219", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "United Kingdom — (+44) 7418373332", "raw": "United Kingdom — (+44) 7418373332", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": 
null }, { "type": "text", "value": "Canada – (+1) 2044107115", "raw": "Canada – (+1) 2044107115", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Netherlands – (+31) 97006520757", "raw": "Netherlands – (+31) 97006520757", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Brazil — (+55) 11950110169", "raw": "Brazil — (+55) 11950110169", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Portugal – (+351) 923249773", "raw": "Portugal – (+351) 923249773", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Italy – (+39) 3399950813", "raw": "Italy – (+39) 3399950813", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Poland - (+48) 459050281", "raw": "Poland - (+48) 459050281", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
The Mystery Bot 🕵️‍♂️ saga I posted about from earlier this week has been solved...🤗 Cohere for AI has just announced its open source Aya Expanse multilingual model. The Initial release supports 23 languages with more on the way soon.🌌 🌍 You can also try Aya Expanse via SMS on your mobile phone using the global WhatsApp number or one of the initial set of country specific numbers listed below.⬇️ 🌍WhatsApp - +14313028498 Germany - (+49) 1771786365 USA – +18332746219 United Kingdom — (+44) 7418373332 Canada – (+1) 2044107115 Netherlands – (+31) 97006520757 Brazil — (+55) 11950110169 Portugal – (+351) 923249773 Italy – (+39) 3399950813 Poland - (+48) 459050281
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/641b754d1911d3be6745cce9/GXN8mEmaq3rfITRrw7GeZ.jpeg", "fullname": "atayloraerospace", "name": "Taylor658", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 76, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/641b754d1911d3be6745cce9/aezRqIquyzYD4JEc3veTI.mp4" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/641b754d1911d3be6745cce9/90vuGTaY4UsO6_256j9wT.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/641b754d1911d3be6745cce9/TwE45Mo6J72CXYOnD1bIi.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/641b754d1911d3be6745cce9/BROuhrz56COqwQTuxbUhT.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/641b754d1911d3be6745cce9/RlwWDv671swzaq8Yzjj6F.png" } ]
[]
[ { "reaction": "🔥", "users": [ "louisbrulenaudet", "kassiepdj", "KvrParaskevi", "TipsUp", "Felladrin" ], "count": 5 }, { "reaction": "👀", "users": [ "John6666", "kenza-ily", "ntnq", "FM-1976", "ai-everyday" ], "count": 5 } ]
2024-10-23T06:18:16.000Z
2024-10-23T13:25:06.081Z
[ { "avatarUrl": "/avatars/c7a98d8424ae215ec10e5d82230c950d.svg", "fullname": "vinod sharma", "name": "vinodsharma13", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/Taylor658/808673436695273
2,206
1
650834533310881
[ { "type": "text", "value": "Good folks at ", "raw": "Good folks at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@nvidia", "href": null, "resource": null, "url": null, "code": null, "user": "nvidia", "label": null, "lang": null }, { "type": "text", "value": " have released exciting new research on normalized Transformers (nGPT) for faster and more efficient language modeling! ", "raw": " have released exciting new research on normalized Transformers (nGPT) for faster and more efficient language modeling! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here is what they are proposing:", "raw": "Here is what they are proposing:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Remove all normalization layers, like RMSNorm or LayerNorm, from the standard Transformer architecture.", "raw": "1. Remove all normalization layers, like RMSNorm or LayerNorm, from the standard Transformer architecture.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Normalize all matrices along their embedding dimension after each training step. This includes input and output embeddings, attention matrices (Q, K, V), output projection matrices, and MLP matrices.", "raw": "2. Normalize all matrices along their embedding dimension after each training step. This includes input and output embeddings, attention matrices (Q, K, V), output projection matrices, and MLP matrices.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Replace the standard residual connections with normalized update equations using learnable eigen learning rates for the attention and MLP blocks.", "raw": "3. 
Replace the standard residual connections with normalized update equations using learnable eigen learning rates for the attention and MLP blocks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Change the softmax scaling factor in the attention mechanism from 1/sqrt of d_k to sqrt of d_k.", "raw": "4. Change the softmax scaling factor in the attention mechanism from 1/sqrt of d_k to sqrt of d_k.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "5. Implement rescaling and optional normalization of query (q) and key (k) vectors in the attention mechanism using learnable scaling factors.", "raw": "5. Implement rescaling and optional normalization of query (q) and key (k) vectors in the attention mechanism using learnable scaling factors.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "6. Rescale the intermediate states of the MLP block using learnable scaling factors.", "raw": "6. Rescale the intermediate states of the MLP block using learnable scaling factors.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "7. Implement rescaling of the output logits using learnable scaling factors.", "raw": "7. Implement rescaling of the output logits using learnable scaling factors.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "8. Remove weight decay and learning rate warmup from the optimization process.", "raw": "8. 
Remove weight decay and learning rate warmup from the optimization process.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "9. Initialize the eigen learning rates and scaling factors with appropriate values as specified in the paper.", "raw": "9. Initialize the eigen learning rates and scaling factors with appropriate values as specified in the paper.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "10. During training, treat all vectors and matrices as residing on a unit hypersphere, interpreting matrix-vector multiplications as cosine similarities.", "raw": "10. During training, treat all vectors and matrices as residing on a unit hypersphere, interpreting matrix-vector multiplications as cosine similarities.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "11. Implement the update equations for the hidden states using the normalized outputs from attention and MLP blocks, controlled by the eigen learning rates.", "raw": "11. Implement the update equations for the hidden states using the normalized outputs from attention and MLP blocks, controlled by the eigen learning rates.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "12. After each forward pass, normalize all parameter matrices to ensure they remain on the unit hypersphere.", "raw": "12. After each forward pass, normalize all parameter matrices to ensure they remain on the unit hypersphere.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "13. Use the Adam optimizer without weight decay for training the model.", "raw": "13. 
Use the Adam optimizer without weight decay for training the model.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "14. When computing loss, apply the learnable scaling factor to the logits before the softmax operation.", "raw": "14. When computing loss, apply the learnable scaling factor to the logits before the softmax operation.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "15. During inference, follow the same normalization and scaling procedures as in training.", "raw": "15. During inference, follow the same normalization and scaling procedures as in training.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Excited to see how it scales to larger models and datasets!", "raw": "Excited to see how it scales to larger models and datasets!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Good folks at @nvidia have released exciting new research on normalized Transformers (nGPT) for faster and more efficient language modeling! Here is what they are proposing: 1. Remove all normalization layers, like RMSNorm or LayerNorm, from the standard Transformer architecture. 2. Normalize all matrices along their embedding dimension after each training step. This includes input and output embeddings, attention matrices (Q, K, V), output projection matrices, and MLP matrices. 3. Replace the standard residual connections with normalized update equations using learnable eigen learning rates for the attention and MLP blocks. 4. Change the softmax scaling factor in the attention mechanism from 1/sqrt of d_k to sqrt of d_k. 5. Implement rescaling and optional normalization of query (q) and key (k) vectors in the attention mechanism using learnable scaling factors. 6. Rescale the intermediate states of the MLP block using learnable scaling factors. 7. Implement rescaling of the output logits using learnable scaling factors. 8. Remove weight decay and learning rate warmup from the optimization process. 9. Initialize the eigen learning rates and scaling factors with appropriate values as specified in the paper. 10. During training, treat all vectors and matrices as residing on a unit hypersphere, interpreting matrix-vector multiplications as cosine similarities. 11. Implement the update equations for the hidden states using the normalized outputs from attention and MLP blocks, controlled by the eigen learning rates. 12. After each forward pass, normalize all parameter matrices to ensure they remain on the unit hypersphere. 13. Use the Adam optimizer without weight decay for training the model. 14. When computing loss, apply the learnable scaling factor to the logits before the softmax operation. 15. During inference, follow the same normalization and scaling procedures as in training. Excited to see how it scales to larger models and datasets!
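For intuition, here is a hedged PyTorch sketch of the core update described in points 2, 3 and 10–12 above (my reading of the recipe, not NVIDIA's released code; the alpha initialization value is a placeholder):

```python
import torch
import torch.nn.functional as F

def unit_norm(x, dim=-1):
    # Project onto the unit hypersphere along the embedding dimension
    return F.normalize(x, p=2, dim=dim)

class NormalizedBlock(torch.nn.Module):
    """One transformer block without LayerNorm/RMSNorm, using normalized residual updates."""
    def __init__(self, d_model, attn, mlp, alpha_init=0.05):
        super().__init__()
        self.attn, self.mlp = attn, mlp  # any attention / MLP sub-modules
        self.alpha_attn = torch.nn.Parameter(torch.full((d_model,), alpha_init))
        self.alpha_mlp = torch.nn.Parameter(torch.full((d_model,), alpha_init))

    def forward(self, h):
        # h is assumed already unit-normalized; residuals become interpolations on the sphere,
        # controlled by the learnable eigen learning rates alpha_attn and alpha_mlp
        h = unit_norm(h + self.alpha_attn * (unit_norm(self.attn(h)) - h))
        h = unit_norm(h + self.alpha_mlp * (unit_norm(self.mlp(h)) - h))
        return h

@torch.no_grad()
def renormalize_weights(model):
    # Called after every optimizer step so weight matrices stay on the unit hypersphere
    for p in model.parameters():
        if p.dim() == 2:
            p.copy_(unit_norm(p, dim=-1))
```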
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/_oZellJoSr5tJIG7TiBJN.jpeg" } ]
[]
[ { "reaction": "🔥", "users": [ "DmitryRyumin", "prithivMLmods", "EquinoxElahin", "John6666", "ThijsL202", "djuna", "jb317", "celsowm" ], "count": 8 } ]
2024-10-23T06:11:21.000Z
2024-10-23T06:11:21.946Z
[]
/posts/singhsidhukuldeep/650834533310881
1,830
0
481112057284017
[ { "type": "text", "value": "LoRA with code 🚀 using PEFT (parameter efficient fine-tuning)", "raw": "LoRA with code 🚀 using PEFT (parameter efficient fine-tuning)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "LoRA (Low-Rank Adaptation)", "raw": "LoRA (Low-Rank Adaptation)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "LoRA adds low-rank matrices to specific layers and reduce the number of trainable parameters for efficient fine-tuning.", "raw": "LoRA adds low-rank matrices to specific layers and reduce the number of trainable parameters for efficient fine-tuning.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Code:", "raw": "Code:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Please install these libraries first:", "raw": "Please install these libraries first:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "pip install peft ", "raw": "pip install peft ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "pip install datasets", "raw": "pip install datasets", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "pip install transformers", "raw": "pip install transformers", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\nfrom transformers 
import AutoModelForSequenceClassification, Trainer, TrainingArguments\nfrom peft import LoraConfig, get_peft_model\nfrom datasets import load_dataset\n\n# Loading the pre-trained BERT model\nmodel = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)\n\n# Configuring the LoRA parameters\nlora_config = LoraConfig(\n r=8,\n lora_alpha=16, \n lora_dropout=0.1, \n bias=\"none\" \n)\n\n# Applying LoRA to the model\nmodel = get_peft_model(model, lora_config)\n\n# Loading dataset for classification\ndataset = load_dataset(\"glue\", \"sst2\")\ntrain_dataset = dataset[\"train\"]\n\n# Setting the training arguments\ntraining_args = TrainingArguments(\n output_dir=\"./results\",\n per_device_train_batch_size=16,\n num_train_epochs=3,\n logging_dir=\"./logs\",\n)\n\n# Creating a Trainer instance for fine-tuning\ntrainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset,\n)\n\n# Finally we can fine-tune the model\ntrainer.train()\n```", "href": null, "resource": null, "url": null, "code": "from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments\nfrom peft import LoraConfig, get_peft_model\nfrom datasets import load_dataset\n\n# Loading the pre-trained BERT model\nmodel = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)\n\n# Configuring the LoRA parameters\nlora_config = LoraConfig(\n r=8,\n lora_alpha=16, \n lora_dropout=0.1, \n bias=\"none\" \n)\n\n# Applying LoRA to the model\nmodel = get_peft_model(model, lora_config)\n\n# Loading dataset for classification\ndataset = load_dataset(\"glue\", \"sst2\")\ntrain_dataset = dataset[\"train\"]\n\n# Setting the training arguments\ntraining_args = TrainingArguments(\n output_dir=\"./results\",\n per_device_train_batch_size=16,\n num_train_epochs=3,\n logging_dir=\"./logs\",\n)\n\n# Creating a Trainer instance for fine-tuning\ntrainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset,\n)\n\n# Finally we can fine-tune the model\ntrainer.train()", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "LoRA adds low-rank matrices to fine-tune only a small portion of the model and reduces training overhead by training fewer parameters.", "raw": "LoRA adds low-rank matrices to fine-tune only a small portion of the model and reduces training overhead by training fewer parameters.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We can perform efficient fine-tuning with minimal impact on accuracy and its suitable for large models where full-precision training is still feasible.", "raw": "We can perform efficient fine-tuning with minimal impact on accuracy and its suitable for large models where full-precision training is still feasible.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
LoRA with code 🚀 using PEFT (parameter efficient fine-tuning) LoRA (Low-Rank Adaptation) LoRA adds low-rank matrices to specific layers and reduce the number of trainable parameters for efficient fine-tuning. Code: Please install these libraries first: pip install peft pip install datasets pip install transformers ``` from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments from peft import LoraConfig, get_peft_model from datasets import load_dataset # Loading the pre-trained BERT model model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2) # Configuring the LoRA parameters lora_config = LoraConfig( r=8, lora_alpha=16, lora_dropout=0.1, bias="none" ) # Applying LoRA to the model model = get_peft_model(model, lora_config) # Loading dataset for classification dataset = load_dataset("glue", "sst2") train_dataset = dataset["train"] # Setting the training arguments training_args = TrainingArguments( output_dir="./results", per_device_train_batch_size=16, num_train_epochs=3, logging_dir="./logs", ) # Creating a Trainer instance for fine-tuning trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, ) # Finally we can fine-tune the model trainer.train() ``` LoRA adds low-rank matrices to fine-tune only a small portion of the model and reduces training overhead by training fewer parameters. We can perform efficient fine-tuning with minimal impact on accuracy and its suitable for large models where full-precision training is still feasible.
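A natural follow-up, continuing from the snippet above (a minimal sketch; the adapter path is a placeholder): after training, only the small LoRA adapter needs to be saved, and it can later be re-attached to the base model, or merged into it, for inference.

```python
from transformers import AutoModelForSequenceClassification
from peft import PeftModel

# Save only the LoRA adapter weights (a few MB instead of the full model)
model.save_pretrained("./lora-sst2-adapter")

# Later: reload the base model and attach the adapter for inference
base = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
model = PeftModel.from_pretrained(base, "./lora-sst2-adapter")

# Optionally fold the adapter into the base weights so no PEFT dependency is needed at serving time
merged = model.merge_and_unload()
```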
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/y1W6Co4jIYB95Cx6Tjrsd.jpeg", "fullname": "Muhammad Imran Zaman", "name": "ImranzamanML", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 32, "isFollowing": false }
[]
[]
[ { "reaction": "🔥", "users": [ "Sengil", "Mnemonic111" ], "count": 2 }, { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-22T23:26:52.000Z
2024-10-22T23:28:54.002Z
[]
/posts/ImranzamanML/481112057284017
1,357
0
214416264482310
[ { "type": "text", "value": "Just released a dataset with 7000+ hours of synthetically generated lo-fi music. ", "raw": "Just released a dataset with 7000+ hours of synthetically generated lo-fi music. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/vikhyatk/lofi", "href": null, "resource": { "type": "dataset", "id": "vikhyatk/lofi", "discussionNum": null }, "url": "https://huggingface.co/datasets/vikhyatk/lofi", "code": null, "user": null, "label": null, "lang": null } ]
Just released a dataset with 7000+ hours of synthetically generated lo-fi music. https://huggingface.co/datasets/vikhyatk/lofi
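If you want to peek at the data without pulling all of it, a minimal sketch with the datasets library (the split name and column layout are assumptions — check the dataset card):

```python
from datasets import load_dataset

# Stream so the 7000+ hours of audio are not downloaded up front
ds = load_dataset("vikhyatk/lofi", split="train", streaming=True)
sample = next(iter(ds))
print(sample.keys())  # inspect the actual columns before relying on any of them
```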
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63117568fa95534e218da163/8h9zN8aKRxPLBnXW7sqY9.jpeg", "fullname": "Vik Korrapati", "name": "vikhyatk", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 375, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "John6666", "not-lain", "createtheimaginable" ], "count": 3 }, { "reaction": "🔥", "users": [ "not-lain", "NMontanaBrown", "createtheimaginable" ], "count": 3 } ]
2024-10-22T23:19:07.000Z
2024-10-22T23:19:07.156Z
[]
/posts/vikhyatk/214416264482310
1,512
0
428040178670792
[ { "type": "text", "value": "I think Reinforcement Learning is the future, for a lot of reasons. I spell them out for you in this video, and also provide you with the basic code to get up and running with Atari and OpenAI Gym. If you want to get into RL, this is your ticket. Link to a cool training montage of the model in the description of the video as well. Step 2 from here would be the full-on training and certification that HuggingFace offers for RL. ", "raw": "I think Reinforcement Learning is the future, for a lot of reasons. I spell them out for you in this video, and also provide you with the basic code to get up and running with Atari and OpenAI Gym. If you want to get into RL, this is your ticket. Link to a cool training montage of the model in the description of the video as well. Step 2 from here would be the full-on training and certification that HuggingFace offers for RL. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/ueZl3A36ZQk", "href": "https://youtu.be/ueZl3A36ZQk", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I think Reinforcement Learning is the future, for a lot of reasons. I spell them out for you in this video, and also provide you with the basic code to get up and running with Atari and OpenAI Gym. If you want to get into RL, this is your ticket. Link to a cool training montage of the model in the description of the video as well. Step 2 from here would be the full-on training and certification that HuggingFace offers for RL. https://youtu.be/ueZl3A36ZQk
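The video holds the actual walkthrough; as a rough orientation, a random-agent episode loop in the classic Gym API looks like the hedged sketch below. CartPole-v1 is used because it runs without extra ROM setup; swapping in an Atari id such as "Breakout-v4" additionally requires the gym[atari] extras (or the newer gymnasium package, which changes the reset()/step() signatures).

```python
# Hedged sketch: a minimal random-policy episode with the classic OpenAI Gym API.
import gym

env = gym.make("CartPole-v1")
obs = env.reset()
done = False
total_reward = 0.0

while not done:
    action = env.action_space.sample()          # placeholder for a learned policy
    obs, reward, done, info = env.step(action)  # classic 4-tuple step API
    total_reward += reward

env.close()
print("episode reward:", total_reward)
```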
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }
[]
[]
[ { "reaction": "🔥", "users": [ "KvrParaskevi" ], "count": 1 }, { "reaction": "👀", "users": [ "John6666" ], "count": 1 }, { "reaction": "❤️", "users": [ "ZeroWw" ], "count": 1 } ]
2024-10-22T21:36:26.000Z
2024-10-22T21:36:26.511Z
[]
/posts/TuringsSolutions/428040178670792
1,409
0
634777850443475
[ { "type": "text", "value": "Finding the Best SmolLM for Your Project ", "raw": "Finding the Best SmolLM for Your Project ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Need an LLM assistant but unsure which hashtag#smolLM to run locally? With so many models available, how can you decide which one suits your needs best? 🤔", "raw": "Need an LLM assistant but unsure which hashtag#smolLM to run locally? With so many models available, how can you decide which one suits your needs best? 🤔", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If the model you’re interested in is evaluated on the Hugging Face Open LLM Leaderboard, there’s an easy way to compare them: use the model Comparator tool: ", "raw": "If the model you’re interested in is evaluated on the Hugging Face Open LLM Leaderboard, there’s an easy way to compare them: use the model Comparator tool: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/open-llm-leaderboard/comparator", "href": null, "resource": { "type": "space", "id": "open-llm-leaderboard/comparator", "discussionNum": null }, "url": "https://huggingface.co/spaces/open-llm-leaderboard/comparator", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Let’s walk through an example👇", "raw": "Let’s walk through an example👇", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Let’s compare two solid options:", "raw": "Let’s compare two solid options:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Qwen2.5-1.5B-Instruct from Alibaba Cloud Qwen (1.5B params)", "raw": "- Qwen2.5-1.5B-Instruct from Alibaba Cloud Qwen (1.5B params)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, 
"lang": null }, { "type": "text", "value": "- gemma-2-2b-it from Google (2.5B params)", "raw": "- gemma-2-2b-it from Google (2.5B params)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For an assistant, you want a model that’s great at instruction following. So, how do these two models stack up on the IFEval task?", "raw": "For an assistant, you want a model that’s great at instruction following. So, how do these two models stack up on the IFEval task?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What about other evaluations?", "raw": "What about other evaluations?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Both models are close in performance on many other tasks, showing minimal differences. Surprisingly, the 1.5B Qwen model performs just as well as the 2.5B Gemma in many areas, even though it's smaller in size! 📊", "raw": "Both models are close in performance on many other tasks, showing minimal differences. Surprisingly, the 1.5B Qwen model performs just as well as the 2.5B Gemma in many areas, even though it's smaller in size! 📊", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is a great example of how parameter size isn’t everything. With efficient design and training, a smaller model like Qwen2.5-1.5B can match or even surpass larger models in certain tasks.", "raw": "This is a great example of how parameter size isn’t everything. With efficient design and training, a smaller model like Qwen2.5-1.5B can match or even surpass larger models in certain tasks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Looking for other comparisons? Drop your model suggestions below! 👇", "raw": "Looking for other comparisons? Drop your model suggestions below! 
👇", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Finding the Best SmolLM for Your Project Need an LLM assistant but unsure which #smolLM to run locally? With so many models available, how can you decide which one suits your needs best? 🤔 If the model you’re interested in is evaluated on the Hugging Face Open LLM Leaderboard, there’s an easy way to compare them: use the model Comparator tool: https://huggingface.co/spaces/open-llm-leaderboard/comparator Let’s walk through an example👇 Let’s compare two solid options: - Qwen2.5-1.5B-Instruct from Alibaba Cloud Qwen (1.5B params) - gemma-2-2b-it from Google (2.5B params) For an assistant, you want a model that’s great at instruction following. So, how do these two models stack up on the IFEval task? What about other evaluations? Both models are close in performance on many other tasks, showing minimal differences. Surprisingly, the 1.5B Qwen model performs just as well as the 2.5B Gemma in many areas, even though it's smaller in size! 📊 This is a great example of how parameter size isn’t everything. With efficient design and training, a smaller model like Qwen2.5-1.5B can match or even surpass larger models in certain tasks. Looking for other comparisons? Drop your model suggestions below! 👇
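If the leaderboard numbers look promising, the quickest sanity check is to try the candidate locally. The hedged sketch below loads the Qwen model named in the post through a plain transformers text-generation pipeline (a recent transformers release with chat-template support in pipelines is assumed); the prompt and generation settings are illustrative only.

```python
# Hedged sketch: smoke-test one of the compared models locally.
from transformers import pipeline

chat = pipeline("text-generation", model="Qwen/Qwen2.5-1.5B-Instruct")
messages = [{"role": "user", "content": "Give me three tips for writing clear bug reports."}]
out = chat(messages, max_new_tokens=128)
print(out[0]["generated_text"])  # includes the assistant's reply appended to the chat
```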
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1606406298765-noauth.jpeg", "fullname": "Albert Villanova del Moral", "name": "albertvillanova", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 196, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5fbfd09ee366524fe8e97cd3/TFci0bQPHneLs3FcF2x_b.png" } ]
[]
[ { "reaction": "👀", "users": [ "John6666", "victor", "pawelg27", "Sebuzdugan", "krishaamer", "AtAndDev" ], "count": 6 }, { "reaction": "🔥", "users": [ "Sebuzdugan", "AtAndDev" ], "count": 2 } ]
2024-10-22T16:40:48.000Z
2024-10-22T16:40:48.731Z
[]
/posts/albertvillanova/634777850443475
1,905
0
364264215572346
[ { "type": "text", "value": "🌍 I’ve always had a dream of making AI accessible to everyone, regardless of location or language. However, current open MLLMs often respond in English, even to non-English queries!", "raw": "🌍 I’ve always had a dream of making AI accessible to everyone, regardless of location or language. However, current open MLLMs often respond in English, even to non-English queries!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🚀 Introducing Pangea: A Fully Open Multilingual Multimodal LLM supporting 39 languages! 🌐✨", "raw": "🚀 Introducing Pangea: A Fully Open Multilingual Multimodal LLM supporting 39 languages! 🌐✨", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://neulab.github.io/Pangea/", "href": "https://neulab.github.io/Pangea/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/pdf/2410.16153", "href": "https://arxiv.org/pdf/2410.16153", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The Pangea family includes three major components:", "raw": "The Pangea family includes three major components:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔥 Pangea-7B: A state-of-the-art multilingual multimodal LLM capable of 39 languages! Not only does it excel in multilingual scenarios, but it also matches or surpasses English-centric models like Llama 3.2, Molmo, and LlavaOneVision in English performance.", "raw": "🔥 Pangea-7B: A state-of-the-art multilingual multimodal LLM capable of 39 languages! 
Not only does it excel in multilingual scenarios, but it also matches or surpasses English-centric models like Llama 3.2, Molmo, and LlavaOneVision in English performance.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📝 PangeaIns: A 6M multilingual multimodal instruction tuning dataset across 39 languages. 🗂️ With 40% English instructions and 60% multilingual instructions, it spans various domains, including 1M culturally-relevant images sourced from LAION-Multi. 🎨", "raw": "📝 PangeaIns: A 6M multilingual multimodal instruction tuning dataset across 39 languages. 🗂️ With 40% English instructions and 60% multilingual instructions, it spans various domains, including 1M culturally-relevant images sourced from LAION-Multi. 🎨", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🏆 PangeaBench: A comprehensive evaluation benchmark featuring 14 datasets in 47 languages. Evaluation can be tricky, so we carefully curated existing benchmarks and introduced two new datasets: xChatBench (human-annotated wild queries with fine-grained evaluation criteria) and xMMMU (a meticulously machine-translated version of MMMU).", "raw": "🏆 PangeaBench: A comprehensive evaluation benchmark featuring 14 datasets in 47 languages. Evaluation can be tricky, so we carefully curated existing benchmarks and introduced two new datasets: xChatBench (human-annotated wild queries with fine-grained evaluation criteria) and xMMMU (a meticulously machine-translated version of MMMU).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out more details: ", "raw": "Check out more details: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://x.com/xiangyue96/status/1848753709787795679", "href": "https://x.com/xiangyue96/status/1848753709787795679", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🌍 I’ve always had a dream of making AI accessible to everyone, regardless of location or language. However, current open MLLMs often respond in English, even to non-English queries! 🚀 Introducing Pangea: A Fully Open Multilingual Multimodal LLM supporting 39 languages! 🌐✨ https://neulab.github.io/Pangea/ https://arxiv.org/pdf/2410.16153 The Pangea family includes three major components: 🔥 Pangea-7B: A state-of-the-art multilingual multimodal LLM capable of 39 languages! Not only does it excel in multilingual scenarios, but it also matches or surpasses English-centric models like Llama 3.2, Molmo, and LlavaOneVision in English performance. 📝 PangeaIns: A 6M multilingual multimodal instruction tuning dataset across 39 languages. 🗂️ With 40% English instructions and 60% multilingual instructions, it spans various domains, including 1M culturally-relevant images sourced from LAION-Multi. 🎨 🏆 PangeaBench: A comprehensive evaluation benchmark featuring 14 datasets in 47 languages. Evaluation can be tricky, so we carefully curated existing benchmarks and introduced two new datasets: xChatBench (human-annotated wild queries with fine-grained evaluation criteria) and xMMMU (a meticulously machine-translated version of MMMU). Check out more details: https://x.com/xiangyue96/status/1848753709787795679
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6230d750d93e84e233882dbc/4MGEekLW3oWzqeFWDWvIK.jpeg", "fullname": "Xiang Yue", "name": "yuexiang96", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 25, "isFollowing": false }
[]
[]
[ { "reaction": "🚀", "users": [ "AdinaY", "John6666", "victor", "apol", "Felladrin", "oceansweep", "KhangHatto", "nbroad", "oumayma03" ], "count": 9 }, { "reaction": "🔥", "users": [ "AdinaY", "KhangHatto", "theospeak" ], "count": 3 } ]
2024-10-22T15:59:50.000Z
2024-10-22T15:59:50.252Z
[]
/posts/yuexiang96/364264215572346
2,966
0
444566857659250
[ { "type": "text", "value": "🇫🇷 Lancement officiel de l'OpenLLM French Leaderboard : initiative open-source pour référencer l’évaluation des LLMs francophones", "raw": "🇫🇷 Lancement officiel de l'OpenLLM French Leaderboard : initiative open-source pour référencer l’évaluation des LLMs francophones", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Après beaucoup d’efforts et de sueurs avec Alexandre Lavallee, nous sommes ravis d’annoncer que le OpenLLMFrenchLeaderboard est en ligne sur Hugging Face (space url: ", "raw": "Après beaucoup d’efforts et de sueurs avec Alexandre Lavallee, nous sommes ravis d’annoncer que le OpenLLMFrenchLeaderboard est en ligne sur Hugging Face (space url: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/le-leadboard/OpenLLMFrenchLeaderboard", "href": null, "resource": { "type": "space", "id": "le-leadboard/OpenLLMFrenchLeaderboard", "discussionNum": null }, "url": "https://huggingface.co/spaces/le-leadboard/OpenLLMFrenchLeaderboard", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ") la toute première plateforme dédiée à l’évaluation des grands modèles de langage (LLM) en français. 🇫🇷✨", "raw": ") la toute première plateforme dédiée à l’évaluation des grands modèles de langage (LLM) en français. 🇫🇷✨", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Ce projet de longue haleine est avant tout une œuvre de passion mais surtout une nécessité absolue. Il devient urgent et vital d'oeuvrer à plus de transparence dans ce domaine stratégique des LLM dits multilingues. La première pièce à l'édifice est donc la mise en place d'une évaluation systématique et systémique des modèles actuels et futurs.", "raw": "Ce projet de longue haleine est avant tout une œuvre de passion mais surtout une nécessité absolue. Il devient urgent et vital d'oeuvrer à plus de transparence dans ce domaine stratégique des LLM dits multilingues. La première pièce à l'édifice est donc la mise en place d'une évaluation systématique et systémique des modèles actuels et futurs.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Votre modèle IA français est-il prêt à se démarquer ? 
Soumettez le dans notre espace, et voyez comment vous vous comparez par rapport aux autres modèles.", "raw": "Votre modèle IA français est-il prêt à se démarquer ? Soumettez le dans notre espace, et voyez comment vous vous comparez par rapport aux autres modèles.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "❓ Comment ça marche :", "raw": "❓ Comment ça marche :", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Soumettez votre LLM français pour évaluation, et nous le testerons sur des benchmarks de référence spécifiquement adaptés pour la langue française — notre suite de benchmarks comprend : ", "raw": "Soumettez votre LLM français pour évaluation, et nous le testerons sur des benchmarks de référence spécifiquement adaptés pour la langue française — notre suite de benchmarks comprend : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- BBH-fr : Raisonnement complexe ", "raw": "- BBH-fr : Raisonnement complexe ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- IFEval-fr : Suivi d'instructions ", "raw": "- IFEval-fr : Suivi d'instructions ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- GPQA-fr : Connaissances avancées ", "raw": "- GPQA-fr : Connaissances avancées ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MUSR-fr : Raisonnement narratif ", "raw": "- MUSR-fr : Raisonnement narratif ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MATH_LVL5-fr : Capacités mathématiques ", "raw": "- MATH_LVL5-fr : Capacités mathématiques ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { 
"type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- MMMLU-fr : Compréhension multitâche", "raw": "- MMMLU-fr : Compréhension multitâche", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Le processus est encore manuel, mais nous travaillons sur son automatisation, avec le soutien de la communauté Hugging Face.", "raw": "Le processus est encore manuel, mais nous travaillons sur son automatisation, avec le soutien de la communauté Hugging Face.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@clem", "href": null, "resource": null, "url": null, "code": null, "user": "clem", "label": null, "lang": null }, { "type": "text", "value": " , on se prépare pour une mise à niveau de l’espace ? 😏👀", "raw": " , on se prépare pour une mise à niveau de l’espace ? 😏👀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Ce n'est pas qu'une question de chiffres—il s'agit de créer une IA qui reflète vraiment notre langue, notre culture et nos valeurs. OpenLLMFrenchLeaderboard est notre contribution personnelle pour façonner l'avenir des LLM en France.", "raw": "Ce n'est pas qu'une question de chiffres—il s'agit de créer une IA qui reflète vraiment notre langue, notre culture et nos valeurs. OpenLLMFrenchLeaderboard est notre contribution personnelle pour façonner l'avenir des LLM en France.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🇫🇷 Lancement officiel de l'OpenLLM French Leaderboard : initiative open-source pour référencer l’évaluation des LLMs francophones Après beaucoup d’efforts et de sueurs avec Alexandre Lavallee, nous sommes ravis d’annoncer que le OpenLLMFrenchLeaderboard est en ligne sur Hugging Face (space url: https://huggingface.co/spaces/le-leadboard/OpenLLMFrenchLeaderboard) la toute première plateforme dédiée à l’évaluation des grands modèles de langage (LLM) en français. 🇫🇷✨ Ce projet de longue haleine est avant tout une œuvre de passion mais surtout une nécessité absolue. Il devient urgent et vital d'oeuvrer à plus de transparence dans ce domaine stratégique des LLM dits multilingues. La première pièce à l'édifice est donc la mise en place d'une évaluation systématique et systémique des modèles actuels et futurs. Votre modèle IA français est-il prêt à se démarquer ? Soumettez le dans notre espace, et voyez comment vous vous comparez par rapport aux autres modèles. ❓ Comment ça marche : Soumettez votre LLM français pour évaluation, et nous le testerons sur des benchmarks de référence spécifiquement adaptés pour la langue française — notre suite de benchmarks comprend : - BBH-fr : Raisonnement complexe - IFEval-fr : Suivi d'instructions - GPQA-fr : Connaissances avancées - MUSR-fr : Raisonnement narratif - MATH_LVL5-fr : Capacités mathématiques - MMMLU-fr : Compréhension multitâche Le processus est encore manuel, mais nous travaillons sur son automatisation, avec le soutien de la communauté Hugging Face. @clem , on se prépare pour une mise à niveau de l’espace ? 😏👀 Ce n'est pas qu'une question de chiffres—il s'agit de créer une IA qui reflète vraiment notre langue, notre culture et nos valeurs. OpenLLMFrenchLeaderboard est notre contribution personnelle pour façonner l'avenir des LLM en France.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/639c5c448a34ed9a404a956b/jcypw-eh7JzKHTffd0N9l.jpeg", "fullname": "Mohamad Alhajar", "name": "malhajar", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 91, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem 🤗", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1763 } ]
[ { "reaction": "🔥", "users": [ "clem", "AdinaY", "John6666", "EnguerrandLec", "louisbrulenaudet", "MyDsoElliott", "coyotte508", "malhajar", "MaziyarPanahi", "clefourrier", "giadap", "alielfilali01", "ZennyKenny" ], "count": 13 }, { "reaction": "❤️", "users": [ "clem", "PeteMarc", "davanstrien", "malhajar", "Jeronymous", "MaziyarPanahi", "Ashoka74" ], "count": 7 }, { "reaction": "👍", "users": [ "Kaba", "AkimfromParis" ], "count": 2 } ]
2024-10-22T13:14:55.000Z
2024-10-22T13:26:11.522Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem 🤗", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1763, "isFollowing": false } ]
/posts/malhajar/444566857659250
3,868
1
790343350443000
[ { "type": "text", "value": "observers 🔭 - automatically log all OpenAI compatible requests to a dataset💽", "raw": "observers 🔭 - automatically log all OpenAI compatible requests to a dataset💽", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• supports any OpenAI compatible endpoint 💪", "raw": "• supports any OpenAI compatible endpoint 💪", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• supports DuckDB, Hugging Face Datasets, and Argilla as stores", "raw": "• supports DuckDB, Hugging Face Datasets, and Argilla as stores", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> pip install observers", "raw": "> pip install observers", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "No complex framework. Just a few lines of code to start sending your traces somewhere. Let us know what you think! ", "raw": "No complex framework. Just a few lines of code to start sending your traces somewhere. Let us know what you think! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@davidberenstein1957", "href": null, "resource": null, "url": null, "code": null, "user": "davidberenstein1957", "label": null, "lang": null }, { "type": "text", "value": " and I will continue iterating! ", "raw": " and I will continue iterating! 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here's an example dataset that was logged to Hugging Face from Ollama: ", "raw": "Here's an example dataset that was logged to Hugging Face from Ollama: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/cfahlgren1/llama-3.1-awesome-chatgpt-prompts", "href": null, "resource": { "type": "dataset", "id": "cfahlgren1/llama-3.1-awesome-chatgpt-prompts", "discussionNum": null }, "url": "https://huggingface.co/datasets/cfahlgren1/llama-3.1-awesome-chatgpt-prompts", "code": null, "user": null, "label": null, "lang": null } ]
observers 🔭 - automatically log all OpenAI compatible requests to a dataset💽 • supports any OpenAI compatible endpoint 💪 • supports DuckDB, Hugging Face Datasets, and Argilla as stores > pip install observers No complex framework. Just a few lines of code to start sending your traces somewhere. Let us know what you think! @davidberenstein1957 and I will continue iterating! Here's an example dataset that was logged to Hugging Face from Ollama: https://huggingface.co/datasets/cfahlgren1/llama-3.1-awesome-chatgpt-prompts
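As a rough idea of what "a few lines of code" means here, the hedged sketch below wraps an OpenAI client so that requests get logged. The wrap_openai helper name and its import path are recalled from the project README rather than verified, so treat them as assumptions and check the observers repository for the exact API.

```python
# Hedged sketch: names below (wrap_openai and its import path) are assumptions — confirm against the observers repo.
from openai import OpenAI
from observers.observers import wrap_openai  # assumed import path

client = wrap_openai(OpenAI())  # requests made through `client` should then be logged to the default store

response = client.chat.completions.create(
    model="gpt-4o-mini",  # any OpenAI-compatible endpoint/model should work
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)
```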
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/648a374f00f7a3374ee64b99/YPwSOrronoozwHbJchPn3.jpeg", "fullname": "Caleb Fahlgren", "name": "cfahlgren1", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 123, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/648a374f00f7a3374ee64b99/A7nOblLoM31CPDFSh6Jxb.jpeg" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg", "fullname": "David Berenstein", "name": "davidberenstein1957", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 167 } ]
[ { "reaction": "❤️", "users": [ "Felladrin", "John6666", "clem", "Jay-mwangi", "ivanfioravanti", "trollek" ], "count": 6 } ]
2024-11-22T09:53:02.000Z
2024-11-22T09:53:34.205Z
[]
/posts/cfahlgren1/790343350443000
721
0
531268690084540
[ { "type": "text", "value": "🎉 Reached HuggingFace Trending Top 100 in Just One Day! Introducing Mouse-I", "raw": "🎉 Reached HuggingFace Trending Top 100 in Just One Day! Introducing Mouse-I", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "First, we want to thank everyone who helped Mouse-I reach the HuggingFace Spaces Trending Top 100! We're especially excited that a game called \"Jewel Pop Game,\" created using Mouse-I, has reached the global top 160.", "raw": "First, we want to thank everyone who helped Mouse-I reach the HuggingFace Spaces Trending Top 100! We're especially excited that a game called \"Jewel Pop Game,\" created using Mouse-I, has reached the global top 160.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "With this overwhelming response, we're thrilled to introduce Mouse-I, an AI-powered code generation and automatic deployment tool by Bidraft.", "raw": "With this overwhelming response, we're thrilled to introduce Mouse-I, an AI-powered code generation and automatic deployment tool by Bidraft.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✨ What is Mouse-I?", "raw": "✨ What is Mouse-I?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Mouse-I is an innovative tool that automatically generates and deploys working web services within 60 seconds, simply based on your prompt input.", "raw": "Mouse-I is an innovative tool that automatically generates and deploys working web services within 60 seconds, simply based on your prompt input.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🚀 Key Features", "raw": "🚀 Key Features", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "One-Click Real-time Deployment: Complete from prompt to deployment in just 60 seconds", "raw": "One-Click Real-time Deployment: Complete from prompt to deployment in just 60 seconds", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Real-time Preview: Instantly check your generated code results", "raw": "Real-time Preview: Instantly check your generated code results", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "40+ Templates: Ready-to-use templates including MBTI tests, investment management tools, Tetris games, and more", "raw": "40+ Templates: Ready-to-use templates including MBTI tests, investment management tools, Tetris games, and more", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Real-time Editing: Instantly modify and apply generated code", "raw": "Real-time Editing: Instantly modify and apply generated code", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚡ How to Use", "raw": "⚡ How to Use", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Create your own web service in just 3 steps:", "raw": "Create your own web service in just 3 steps:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Enter your prompt (15 seconds)", "raw": "Enter your prompt (15 seconds)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Code generation (40 seconds)", "raw": "Code generation (40 seconds)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": 
"\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Deploy (5 seconds)", "raw": "Deploy (5 seconds)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🌟 What Makes Us Special", "raw": "🌟 What Makes Us Special", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Ultra-fast code generation powered by NVIDIA H100 GPUs", "raw": "Ultra-fast code generation powered by NVIDIA H100 GPUs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Advanced multi-LLM complex agent technology", "raw": "Advanced multi-LLM complex agent technology", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "All generated web apps available for free viewing and use in our marketplace", "raw": "All generated web apps available for free viewing and use in our marketplace", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔍 Current Status", "raw": "🔍 Current Status", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Over 3,000 web apps generated, with 160+ successfully deployed", "raw": "Over 3,000 web apps generated, with 160+ successfully deployed", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "30x faster service completion compared to competing services", "raw": "30x faster service completion compared to 
competing services", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🎈 Join Our Beta Test", "raw": "🎈 Join Our Beta Test", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Try Mouse-I for free right now!", "raw": "Try Mouse-I for free right now!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👉 Experience Mouse-I", "raw": "👉 Experience Mouse-I", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔮 Future Plans", "raw": "🔮 Future Plans", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We're planning to launch 'Mouse-II', specialized for backend system development, within this year. When used together with Mouse-I, it will enable complete automation of full-stack development.", "raw": "We're planning to launch 'Mouse-II', specialized for backend system development, within this year. 
When used together with Mouse-I, it will enable complete automation of full-stack development.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We look forward to your feedback and suggestions about Mouse-I!", "raw": "We look forward to your feedback and suggestions about Mouse-I!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thank you for your interest and support 🙏", "raw": "Thank you for your interest and support 🙏", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "#AI #CodeGeneration #WebDevelopment #HuggingFace #MouseI #Bidraft #AICodeAssistant", "raw": "#AI #CodeGeneration #WebDevelopment #HuggingFace #MouseI #Bidraft #AICodeAssistant", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\nhttps://huggingface.co/spaces/VIDraft/mouse1\n```", "href": null, "resource": null, "url": null, "code": "https://huggingface.co/spaces/VIDraft/mouse1", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🎉 Reached HuggingFace Trending Top 100 in Just One Day! Introducing Mouse-I First, we want to thank everyone who helped Mouse-I reach the HuggingFace Spaces Trending Top 100! We're especially excited that a game called "Jewel Pop Game," created using Mouse-I, has reached the global top 160. With this overwhelming response, we're thrilled to introduce Mouse-I, an AI-powered code generation and automatic deployment tool by Bidraft. ✨ What is Mouse-I? Mouse-I is an innovative tool that automatically generates and deploys working web services within 60 seconds, simply based on your prompt input. 🚀 Key Features One-Click Real-time Deployment: Complete from prompt to deployment in just 60 seconds Real-time Preview: Instantly check your generated code results 40+ Templates: Ready-to-use templates including MBTI tests, investment management tools, Tetris games, and more Real-time Editing: Instantly modify and apply generated code ⚡ How to Use Create your own web service in just 3 steps: Enter your prompt (15 seconds) Code generation (40 seconds) Deploy (5 seconds) 🌟 What Makes Us Special Ultra-fast code generation powered by NVIDIA H100 GPUs Advanced multi-LLM complex agent technology All generated web apps available for free viewing and use in our marketplace 🔍 Current Status Over 3,000 web apps generated, with 160+ successfully deployed 30x faster service completion compared to competing services 🎈 Join Our Beta Test Try Mouse-I for free right now! 👉 Experience Mouse-I 🔮 Future Plans We're planning to launch 'Mouse-II', specialized for backend system development, within this year. When used together with Mouse-I, it will enable complete automation of full-stack development. We look forward to your feedback and suggestions about Mouse-I! Thank you for your interest and support 🙏 #AI #CodeGeneration #WebDevelopment #HuggingFace #MouseI #Bidraft #AICodeAssistant ``` https://huggingface.co/spaces/VIDraft/mouse1 ```
{ "avatarUrl": "/avatars/e83b4373ec080aff5f69168bc78c137e.svg", "fullname": "openfree", "name": "openfree", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 24, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/jBYRwN_iPCDtNUnKDzpYQ.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/AXIEt81u5brSMEg0WKOBd.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/iP9O5yQGYpkPExZ8rsCxl.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e54edddba1e4fee4500a5a/Yl3UCBhTF56GoUNKKxYqA.png" } ]
[]
[ { "reaction": "🔥", "users": [ "openfree", "gini1", "seawolf2357", "aiqcamp", "aiqtech", "fantos", "fantaxy", "ginipick", "Sri-Vigneshwar-DJ", "clem" ], "count": 10 }, { "reaction": "❤️", "users": [ "openfree", "gini1", "seawolf2357", "aiqcamp", "aiqtech", "fantos", "ginipick", "Chief-Inspector", "clem" ], "count": 9 }, { "reaction": "🤯", "users": [ "openfree", "gini1", "seawolf2357", "aiqcamp", "aiqtech", "fantos", "fantaxy", "ginipick" ], "count": 8 }, { "reaction": "➕", "users": [ "openfree", "gini1", "seawolf2357", "aiqcamp", "aiqtech", "fantos", "ginipick" ], "count": 7 }, { "reaction": "😔", "users": [ "openfree", "gini1", "seawolf2357", "aiqtech", "fantos", "fantaxy", "ginipick" ], "count": 7 }, { "reaction": "🤝", "users": [ "openfree", "gini1", "seawolf2357", "aiqcamp", "aiqtech", "fantos", "fantaxy" ], "count": 7 }, { "reaction": "👍", "users": [ "openfree", "gini1", "seawolf2357", "aiqcamp", "aiqtech", "fantos", "fantaxy" ], "count": 7 }, { "reaction": "🚀", "users": [ "openfree", "gini1", "seawolf2357", "aiqcamp", "aiqtech", "fantos" ], "count": 6 }, { "reaction": "👀", "users": [ "openfree", "gini1", "seawolf2357", "aiqtech", "fantaxy", "John6666" ], "count": 6 }, { "reaction": "🧠", "users": [ "openfree", "gini1", "seawolf2357", "aiqtech", "fantaxy", "ginipick" ], "count": 6 }, { "reaction": "😎", "users": [ "openfree", "gini1", "seawolf2357", "aiqtech", "fantaxy" ], "count": 5 }, { "reaction": "🤗", "users": [ "openfree", "gini1", "seawolf2357", "aiqtech" ], "count": 4 } ]
2024-11-22T06:16:13.000Z
2024-11-22T06:16:13.335Z
[]
/posts/openfree/531268690084540
1,353
0
394263589074338
[ { "type": "text", "value": "I created something called 'Hyperbolic Embeddings'. I literally just embed the tokens into Hyperbolic Space instead of Euclidean space. At first, this did not get me the gains I was expecting. I was a sad panda. Then I thought about it, a Hyperbolic Embedding needs a Hyperbolic Optimizer. So, instead of Adam, I used Riemannian Adam (RAdam). \"Ladies and Gentlemen, We Got 'Em!\"", "raw": "I created something called 'Hyperbolic Embeddings'. I literally just embed the tokens into Hyperbolic Space instead of Euclidean space. At first, this did not get me the gains I was expecting. I was a sad panda. Then I thought about it, a Hyperbolic Embedding needs a Hyperbolic Optimizer. So, instead of Adam, I used Riemannian Adam (RAdam). \"Ladies and Gentlemen, We Got 'Em!\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I created something called 'Hyperbolic Embeddings'. I literally just embed the tokens into Hyperbolic Space instead of Euclidean space. At first, this did not get me the gains I was expecting. I was a sad panda. Then I thought about it, a Hyperbolic Embedding needs a Hyperbolic Optimizer. So, instead of Adam, I used Riemannian Adam (RAdam). "Ladies and Gentlemen, We Got 'Em!"
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64274b69ba6cef0a6ebb0fd6/tsQhFpYinotvH0FUpGIea.png" } ]
[]
[ { "reaction": "👀", "users": [ "John6666", "clem", "Joseph717171", "thunnai" ], "count": 4 } ]
2024-11-22T06:13:49.000Z
2024-11-23T02:26:59.870Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6316fb937b0ee0136e5f1220/poHBoJ7QAF_s2CCaosdvQ.jpeg", "fullname": "Firstname Lastname", "name": "takeraparterer", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 29, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false } ]
/posts/TuringsSolutions/394263589074338
721
27
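The hyperbolic-embeddings post above pairs a hyperbolic embedding table with Riemannian Adam. A minimal sketch of that combination follows, assuming the `geoopt` library (the post does not name its implementation); the vocabulary size, curvature, and toy loss are placeholders, not the author's setup.

```python
# Hedged sketch of hyperbolic token embeddings + Riemannian Adam (not the author's code).
# Assumes the geoopt library; sizes, curvature, and the loss are illustrative only.
import torch
import geoopt

vocab_size, dim = 32000, 256
ball = geoopt.PoincareBall(c=1.0)  # Poincare-ball model of hyperbolic space

# Store the embedding table as points on the manifold rather than flat Euclidean vectors.
init = ball.expmap0(0.1 * torch.randn(vocab_size, dim))  # map Gaussian noise onto the ball
embeddings = geoopt.ManifoldParameter(init, manifold=ball)

# Plain Adam ignores the manifold's geometry; RiemannianAdam takes the step in
# tangent space and retracts the result back onto the ball.
optimizer = geoopt.optim.RiemannianAdam([embeddings], lr=1e-3)

token_ids = torch.randint(0, vocab_size, (8,))
# Toy objective: pull the selected embeddings toward the origin (hyperbolic distance).
loss = ball.dist(embeddings[token_ids], torch.zeros(8, dim)).mean()
loss.backward()
optimizer.step()
```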
695399342327411
[ { "type": "text", "value": "Did a quick conversion from flux1-fill-dev to diffusers. Release here: ", "raw": "Did a quick conversion from flux1-fill-dev to diffusers. Release here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/xiaozaa/flux1-fill-dev-diffusers", "href": null, "resource": { "type": "model", "id": "xiaozaa/flux1-fill-dev-diffusers", "discussionNum": null }, "url": "https://huggingface.co/xiaozaa/flux1-fill-dev-diffusers", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Did a quick conversion from flux1-fill-dev to diffusers. Release here: https://huggingface.co/xiaozaa/flux1-fill-dev-diffusers
{ "avatarUrl": "/avatars/4941f9461c77bb5c5c0b5ec9a6f9efed.svg", "fullname": "az", "name": "xiaozaa", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 7, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "John6666", "clem" ], "count": 2 } ]
2024-11-22T05:20:04.000Z
2024-11-22T05:20:04.702Z
[]
/posts/xiaozaa/695399342327411
672
0
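To make the conversion announcement above actionable, here is a hedged loading sketch using diffusers' `FluxFillPipeline`. It assumes a diffusers release that ships this pipeline (support was landing around the time of the post); the image/mask paths and prompt are placeholders, not files from the repo.

```python
# Hedged sketch: loading the converted fill (inpainting) checkpoint with diffusers.
# Requires a diffusers version that includes FluxFillPipeline; paths and prompt are placeholders.
import torch
from diffusers import FluxFillPipeline
from diffusers.utils import load_image

pipe = FluxFillPipeline.from_pretrained(
    "xiaozaa/flux1-fill-dev-diffusers", torch_dtype=torch.bfloat16
).to("cuda")

image = load_image("scene.png")      # image to edit (placeholder path)
mask = load_image("scene_mask.png")  # white = region to repaint (placeholder path)

result = pipe(
    prompt="a wooden bench in the park",
    image=image,
    mask_image=mask,
    height=1024,
    width=1024,
    guidance_scale=30.0,     # the fill model is typically run with high guidance values
    num_inference_steps=50,
    max_sequence_length=512,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
result.save("filled.png")
```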
228686145199302
[ { "type": "text", "value": "Wanted to move eyes with Flux.1 schnell, prompts failed.Made a guide image, surprisingly useful on its own. inpaint/img2img works well with lower-strength.", "raw": "Wanted to move eyes with Flux.1 schnell, prompts failed.Made a guide image, surprisingly useful on its own. inpaint/img2img works well with lower-strength.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Rolling/white eyes with Flux 1.schnell viable? Wanted?", "raw": "Rolling/white eyes with Flux 1.schnell viable? Wanted?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "[space] Mediapipe Change Eyes Direction", "raw": "[space] Mediapipe Change Eyes Direction", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Akjava/mediapipe-change-eyes-direction", "href": null, "resource": { "type": "space", "id": "Akjava/mediapipe-change-eyes-direction", "discussionNum": null }, "url": "https://huggingface.co/spaces/Akjava/mediapipe-change-eyes-direction", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "[article]Eyes Slide-Move:Classic-Inpainting fill hole and complete missing iris", "raw": "[article]Eyes Slide-Move:Classic-Inpainting fill hole and complete missing iris", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/Akjava/eyes-slide-move", "href": "https://huggingface.co/blog/Akjava/eyes-slide-move", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Wanted to move eyes with Flux.1 schnell, but prompts alone failed. Made a guide image instead; surprisingly, it is useful on its own, and inpaint/img2img works well at lower strength. Are rolling/white eyes with Flux.1 schnell viable? Wanted? [space] Mediapipe Change Eyes Direction https://huggingface.co/spaces/Akjava/mediapipe-change-eyes-direction [article] Eyes Slide-Move: Classic-Inpainting fill hole and complete missing iris https://huggingface.co/blog/Akjava/eyes-slide-move
{ "avatarUrl": "/avatars/fb866e3758189d70488fc6a879151f45.svg", "fullname": "Akihito Miyazaki", "name": "Akjava", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 4, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/65a751f1ae68caef0b8353f5/KxrPE4kTorq94A5DkWpaG.png" } ]
[]
[ { "reaction": "👍", "users": [ "John6666" ], "count": 1 } ]
2024-11-22T02:28:25.000Z
2024-11-22T02:28:25.238Z
[]
/posts/Akjava/228686145199302
266
0
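The post above describes a two-step workflow: nudge the eyes with the MediaPipe Space to get a guide image, then run low-strength img2img to clean it up. A hedged sketch of that second step with diffusers' `FluxImg2ImgPipeline` follows; the guide-image path, prompt, and strength value are illustrative, not the Space's actual settings.

```python
# Hedged sketch of the guide-image -> low-strength img2img cleanup step (not the Space's code).
# Assumes diffusers with FluxImg2ImgPipeline; file names and settings are placeholders.
import torch
from diffusers import FluxImg2ImgPipeline
from diffusers.utils import load_image

pipe = FluxImg2ImgPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
).to("cuda")

guide = load_image("guide_eyes_moved.png")  # output of the MediaPipe eyes-direction step

result = pipe(
    prompt="portrait photo, eyes looking to the left",
    image=guide,
    strength=0.35,           # low strength keeps the guide's composition and only refines details
    guidance_scale=0.0,      # schnell is guidance-distilled
    num_inference_steps=12,  # effective steps are roughly num_inference_steps * strength
).images[0]
result.save("eyes_refined.png")
```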
853668210903444
[ { "type": "text", "value": "your hugging face profile now has your recent activities 🤗", "raw": "your hugging face profile now has your recent activities 🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
your hugging face profile now has your recent activities 🤗
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/ss4pg3hjK179aZv70zBKF.png" } ]
[]
[ { "reaction": "❤️", "users": [ "clem", "alsargent", "nicolay-r", "Tonic", "philipp-zettl", "rutana245", "not-lain", "Nymbo", "prithivMLmods", "alvanlii", "fractalego", "Steelskull", "loubnabnl", "HDiffusion" ], "count": 14 }, { "reaction": "👀", "users": [ "clem", "Tonic", "Nymbo", "John6666", "Steelskull" ], "count": 5 }, { "reaction": "🚀", "users": [ "Felladrin", "takarajordan", "ai-everyday", "Steelskull", "loubnabnl" ], "count": 5 }, { "reaction": "😎", "users": [ "clem", "Tonic", "Steelskull" ], "count": 3 }, { "reaction": "🤗", "users": [ "John6666", "Chief-Inspector", "Steelskull" ], "count": 3 }, { "reaction": "🧠", "users": [ "Tonic", "Steelskull" ], "count": 2 } ]
2024-11-21T21:52:43.000Z
2024-11-21T21:52:43.849Z
[]
/posts/merve/853668210903444
2,254
0
798374545559371
[ { "type": "text", "value": "Vision finetuning is in 🦥Unsloth! You can now finetune Llama 3.2, Qwen2 VL, Pixtral and all Llava variants up to 2x faster and with up to 70% less VRAM usage! Colab to finetune Llama 3.2: ", "raw": "Vision finetuning is in 🦥Unsloth! You can now finetune Llama 3.2, Qwen2 VL, Pixtral and all Llava variants up to 2x faster and with up to 70% less VRAM usage! Colab to finetune Llama 3.2: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://colab.research.google.com/drive/1j0N4XTY1zXXy7mPAhOC1_gMYZ2F2EBlk?usp=sharing", "href": "https://colab.research.google.com/drive/1j0N4XTY1zXXy7mPAhOC1_gMYZ2F2EBlk?usp=sharing", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Vision finetuning is in 🦥Unsloth! You can now finetune Llama 3.2, Qwen2 VL, Pixtral and all Llava variants up to 2x faster and with up to 70% less VRAM usage! Colab to finetune Llama 3.2: https://colab.research.google.com/drive/1j0N4XTY1zXXy7mPAhOC1_gMYZ2F2EBlk?usp=sharing
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62ecdc18b72a69615d6bd857/ixLCk0TwaCVyL_nAfrgEs.png", "fullname": "Daniel Han-Chen", "name": "danielhanchen", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 193, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62ecdc18b72a69615d6bd857/ZWIAIrqdzuT5f6-oYtS1z.png" } ]
[]
[ { "reaction": "🔥", "users": [ "Erland", "felarof01", "clem", "tydunn", "Svngoku", "John6666", "fgdrfgrgrdgdr", "alvarobartt", "MrDragonFox", "Aurelien-Morgan" ], "count": 10 } ]
2024-11-21T19:26:14.000Z
2024-11-24T02:31:15.776Z
[ { "avatarUrl": "/avatars/94eb4db7d2ad6365c3c98abeb79e7220.svg", "fullname": "Abida", "name": "AbidaKing1234", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/danielhanchen/798374545559371
1,058
1
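For readers who skip the Colab above, a condensed setup sketch follows. It mirrors the pattern Unsloth announced for vision finetuning (FastVisionModel plus LoRA adapters); the exact argument names, model id, and LoRA settings should be checked against the linked notebook.

```python
# Hedged sketch of the Unsloth vision-finetuning setup; see the linked Colab for the
# authoritative version. The model id and LoRA settings here are illustrative.
from unsloth import FastVisionModel

model, tokenizer = FastVisionModel.from_pretrained(
    "unsloth/Llama-3.2-11B-Vision-Instruct",  # Qwen2-VL, Pixtral, and Llava variants also work
    load_in_4bit=True,                         # cuts VRAM usage substantially
    use_gradient_checkpointing="unsloth",
)

# Attach LoRA adapters; choose whether to train the vision tower, the language model, or both.
model = FastVisionModel.get_peft_model(
    model,
    finetune_vision_layers=True,
    finetune_language_layers=True,
    r=16,
    lora_alpha=16,
)
FastVisionModel.for_training(model)  # switch to training mode before handing off to a trainer
```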
394259028619822
[ { "type": "text", "value": "What kind of content is good to post here?", "raw": "What kind of content is good to post here?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
What kind of content is good to post here?
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/69viDcV_bQFGpiDoUbWXg.jpeg", "fullname": "Al Sargent", "name": "alsargent", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-11-21T18:25:31.000Z
2024-11-22T15:42:00.416Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem 🤗", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1763, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/69viDcV_bQFGpiDoUbWXg.jpeg", "fullname": "Al Sargent", "name": "alsargent", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false } ]
/posts/alsargent/394259028619822
319
4
327205095163410
[ { "type": "text", "value": "Something I've been thinking about regarding fine-tuning video models.", "raw": "Something I've been thinking about regarding fine-tuning video models.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How well are these models trained on the fundamentals of image capture? Take shutter speed/angle, for instance.", "raw": "How well are these models trained on the fundamentals of image capture? Take shutter speed/angle, for instance.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The difference in a video with a 1/2000 speed shutter and a 1/60 speed shutter is drastic.", "raw": "The difference in a video with a 1/2000 speed shutter and a 1/60 speed shutter is drastic.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What about color science? HDR vs. SDR, Anamorphic vs. spherical, sensor size, aspect ratio.", "raw": "What about color science? HDR vs. SDR, Anamorphic vs. spherical, sensor size, aspect ratio.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Is it worth doing an open-source dataset \"film school series\"?", "raw": "Is it worth doing an open-source dataset \"film school series\"?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It may seem too granular but I wonder if this level of granularity makes for better downstream results.", "raw": "It may seem too granular but I wonder if this level of granularity makes for better downstream results.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Something I've been thinking about regarding fine-tuning video models. How well are these models trained on the fundamentals of image capture? Take shutter speed/angle, for instance. The difference in a video with a 1/2000 speed shutter and a 1/60 speed shutter is drastic. What about color science? HDR vs. SDR, Anamorphic vs. spherical, sensor size, aspect ratio. Is it worth doing an open-source dataset "film school series"? It may seem too granular but I wonder if this level of granularity makes for better downstream results.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/lJZriu6mJCgWkyYpbd4Pe.png", "fullname": "Luke Neumann", "name": "LukeNeumann", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 13, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "clem", "John6666", "Buildify" ], "count": 3 } ]
2024-11-21T18:02:57.000Z
2024-11-21T18:22:10.189Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/no-auth/TaNGQn6gQSBzNgHpRBtix.png", "fullname": "Bayassine", "name": "Majoub", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/LukeNeumann/327205095163410
513
1
448624842722506
[ { "type": "text", "value": "Qwen2.5-72B is now the default HuggingChat model.", "raw": "Qwen2.5-72B is now the default HuggingChat model.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This model is so good that you must try it! I often get better results on rephrasing with it than Sonnet or GPT-4!!", "raw": "This model is so good that you must try it! I often get better results on rephrasing with it than Sonnet or GPT-4!!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Qwen2.5-72B is now the default HuggingChat model. This model is so good that you must try it! I often get better results on rephrasing with it than Sonnet or GPT-4!!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2607, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f17f0a0925b9863e28ad517/IVFOCdOO6d_PwTLbtTpHd.png" } ]
[]
[ { "reaction": "🔥", "users": [ "BrigitteTousi", "splevine", "clem", "aust-t", "rutana245", "oceansweep", "GoDjMike", "Niansuh", "John6666", "fgdrfgrgrdgdr", "holooo" ], "count": 11 }, { "reaction": "🚀", "users": [ "prithivMLmods", "clem", "GoDjMike", "John6666", "mkonjigd" ], "count": 5 }, { "reaction": "👀", "users": [ "philosopher-from-god", "Nymbo", "John6666" ], "count": 3 }, { "reaction": "👍", "users": [ "vinhnx90" ], "count": 1 } ]
2024-11-21T16:38:57.000Z
2024-11-21T16:38:57.310Z
[]
/posts/victor/448624842722506
1,639
0
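If you want to try the same model outside HuggingChat, a hedged sketch with `huggingface_hub`'s InferenceClient is below; the rephrasing prompt is just an example, and the availability of a serverless endpoint for the 72B checkpoint is an assumption.

```python
# Hedged sketch: calling Qwen2.5-72B-Instruct for a rephrasing task via the
# Hugging Face Inference API. A serverless endpoint for this model is assumed.
from huggingface_hub import InferenceClient

client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")

response = client.chat_completion(
    messages=[
        {"role": "system", "content": "Rephrase the user's text; keep the meaning, improve the flow."},
        {"role": "user", "content": "This model is so good that you must try it!"},
    ],
    max_tokens=128,
    temperature=0.7,
)
print(response.choices[0].message.content)
```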
371681139214764
[ { "type": "text", "value": "Crossed more than 100+ of the best and most interesting LoRAs from open-source contributors, based on Flux Dev and Schnell models, which are part of the Flux LoRA DLC Space. If I missed any that are particularly interesting within the Flux group, feel free to contribute to the space! ", "raw": "Crossed more than 100+ of the best and most interesting LoRAs from open-source contributors, based on Flux Dev and Schnell models, which are part of the Flux LoRA DLC Space. If I missed any that are particularly interesting within the Flux group, feel free to contribute to the space! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🥳Flux LoRA Dlc : ", "raw": "🥳Flux LoRA Dlc : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/prithivMLmods/FLUX-LoRA-DLC", "href": null, "resource": { "type": "space", "id": "prithivMLmods/FLUX-LoRA-DLC", "discussionNum": null }, "url": "https://huggingface.co/spaces/prithivMLmods/FLUX-LoRA-DLC", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thankyou!", "raw": "Thankyou!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Crossed 100+ of the best and most interesting LoRAs from open-source contributors, based on the Flux Dev and Schnell models, all part of the Flux LoRA DLC Space. If I missed any that are particularly interesting within the Flux group, feel free to contribute to the space! 🥳Flux LoRA DLC: https://huggingface.co/spaces/prithivMLmods/FLUX-LoRA-DLC Thank you!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg", "fullname": "Prithiv Sakthi", "name": "prithivMLmods", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 393, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "John6666", "victor", "oceansweep", "djuna", "prithivMLmods", "Ngrthm" ], "count": 6 }, { "reaction": "🔥", "users": [ "Ngrthm" ], "count": 1 }, { "reaction": "👀", "users": [ "Ngrthm" ], "count": 1 } ]
2024-10-22T11:28:11.000Z
2024-10-22T11:29:19.633Z
[]
/posts/prithivMLmods/371681139214764
1,919
0
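Outside the Space, any LoRA from the collection can also be applied locally with diffusers; a hedged sketch follows. The LoRA repo id, weight file name, and trigger phrase below are placeholders, since each adapter's model card lists the real ones.

```python
# Hedged sketch: applying one of the collected Flux LoRAs locally with diffusers.
# The LoRA repo id, weight_name, and trigger word are placeholders.
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

pipe.load_lora_weights("some-user/some-flux-lora", weight_name="lora.safetensors")
pipe.fuse_lora(lora_scale=0.9)  # optional: bake the adapter in at a chosen strength

image = pipe(
    "TRIGGER_WORD, a cozy cabin in the woods at dusk",
    guidance_scale=3.5,
    num_inference_steps=28,
).images[0]
image.save("lora_sample.png")
```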
832156299815570
[ { "type": "text", "value": "Tencent released a new depth model that generates temporally consistent depth maps over videos ⏯️", "raw": "Tencent released a new depth model that generates temporally consistent depth maps over videos ⏯️", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model: ", "raw": "Model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/tencent/DepthCrafter", "href": null, "resource": { "type": "model", "id": "tencent/DepthCrafter", "discussionNum": null }, "url": "https://huggingface.co/tencent/DepthCrafter", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Demo: ", "raw": "Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/tencent/DepthCrafter", "href": null, "resource": { "type": "space", "id": "tencent/DepthCrafter", "discussionNum": null }, "url": "https://huggingface.co/spaces/tencent/DepthCrafter", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper: ", "raw": "Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2409.02095", "href": null, "resource": { "type": "paper", "id": "2409.02095", "discussionNum": null }, "url": "https://huggingface.co/papers/2409.02095", "code": null, "user": null, "label": "DepthCrafter: Generating Consistent Long Depth Sequences for Open-world\n Videos (2409.02095)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You don't need to input anything other than video itself, no need for optical flow or camera poses! 🤩", "raw": "You don't need to input anything other than video itself, no need for optical flow or camera poses! 🤩", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Tencent released a new depth model that generates temporally consistent depth maps over videos ⏯️ Model: https://huggingface.co/tencent/DepthCrafter Demo: https://huggingface.co/spaces/tencent/DepthCrafter Paper: https://huggingface.co/papers/2409.02095 You don't need to input anything other than video itself, no need for optical flow or camera poses! 🤩
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/yUUrbi9D7RBDkjx6yFlXG.mp4" } ]
[]
[ { "reaction": "🔥", "users": [ "DmitryRyumin", "AdinaY", "ravikiran777" ], "count": 3 }, { "reaction": "👀", "users": [ "John6666", "vilarin" ], "count": 2 }, { "reaction": "🚀", "users": [ "prithivMLmods" ], "count": 1 } ]
2024-10-22T10:54:19.000Z
2024-10-22T10:54:19.711Z
[]
/posts/merve/832156299815570
1,645
0
971438953844337
[ { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2410.15735", "href": null, "resource": { "type": "paper", "id": "2410.15735", "discussionNum": null }, "url": "https://huggingface.co/papers/2410.15735", "code": null, "user": null, "label": "AutoTrain: No-code training for state-of-the-art models (2410.15735)", "lang": null } ]
https://huggingface.co/papers/2410.15735
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fa19f4ba13e063b8b2b5e11/nGVHdTYX2udnt-K8mqY27.jpeg", "fullname": "Abhishek Thakur", "name": "abhishek", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1383, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "rbgo", "andreasmartin", "abhishek", "victor", "AdinaY", "YaTharThShaRma999", "John6666", "win10", "xi0v" ], "count": 9 }, { "reaction": "🤗", "users": [ "alielfilali01", "prithivMLmods", "YaTharThShaRma999", "xi0v", "shuxunoo" ], "count": 5 } ]
2024-10-22T05:37:09.000Z
2024-10-22T05:37:09.221Z
[]
/posts/abhishek/971438953844337
4,318
0
158351887829862
[ { "type": "text", "value": "I just released an unofficial demo for Moonshine ASR!", "raw": "I just released an unofficial demo for Moonshine ASR!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Moonshine is a fast, efficient, & accurate ASR model released by Useful Sensors. It's designed for on-device inference and licensed under the MIT license!", "raw": "Moonshine is a fast, efficient, & accurate ASR model released by Useful Sensors. It's designed for on-device inference and licensed under the MIT license!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "HF Space (unofficial demo): ", "raw": "HF Space (unofficial demo): ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/mrfakename/Moonshine", "href": null, "resource": { "type": "space", "id": "mrfakename/Moonshine", "discussionNum": null }, "url": "https://huggingface.co/spaces/mrfakename/Moonshine", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "GitHub repo for Moonshine: ", "raw": "GitHub repo for Moonshine: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/usefulsensors/moonshine", "href": "https://github.com/usefulsensors/moonshine", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I just released an unofficial demo for Moonshine ASR! Moonshine is a fast, efficient, & accurate ASR model released by Useful Sensors. It's designed for on-device inference and licensed under the MIT license! HF Space (unofficial demo): https://huggingface.co/spaces/mrfakename/Moonshine GitHub repo for Moonshine: https://github.com/usefulsensors/moonshine
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62e54f0eae9d3f10acb95cb9/VAyk05hqB3OZWXEZW-B0q.png", "fullname": "mrfakename", "name": "mrfakename", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 969, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "prithivMLmods", "Joseph717171", "jl90", "Pendrokar" ], "count": 4 }, { "reaction": "👀", "users": [ "John6666", "Joseph717171" ], "count": 2 } ]
2024-10-22T01:13:20.000Z
2024-10-22T01:13:20.863Z
[]
/posts/mrfakename/158351887829862
4,558
0
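A hedged usage sketch for the underlying package follows; the `transcribe` helper and model names mirror the GitHub README at the time of the post, so double-check them against the repo, and the audio file is a placeholder.

```python
# Hedged sketch of local Moonshine ASR, following the usage shown in the GitHub README
# at the time (verify against the repo). Install per the repo's instructions.
import moonshine

# 'moonshine/tiny' and 'moonshine/base' are the two released sizes; the wav path is a placeholder.
text = moonshine.transcribe("speech_sample.wav", "moonshine/base")
print(text)
```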
942378801587323
[ { "type": "mention", "value": null, "raw": "@ToastyPigeon", "href": null, "resource": null, "url": null, "code": null, "user": "ToastyPigeon", "label": null, "lang": null }, { "type": "text", "value": " of Allura has put out Meadowlark, an RP-focused Mistral Small finetune. I helped!", "raw": " of Allura has put out Meadowlark, an RP-focused Mistral Small finetune. I helped!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check it out:", "raw": "Check it out:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/allura-org/MS-Meadowlark-22B", "href": null, "resource": { "type": "model", "id": "allura-org/MS-Meadowlark-22B", "discussionNum": null }, "url": "https://huggingface.co/allura-org/MS-Meadowlark-22B", "code": null, "user": null, "label": null, "lang": null } ]
@ToastyPigeon of Allura has put out Meadowlark, an RP-focused Mistral Small finetune. I helped! Check it out: https://huggingface.co/allura-org/MS-Meadowlark-22B
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6685d39f64da708c0f553c5d/d9EvSPFssc-jproPdAszF.png", "fullname": "Bot", "name": "inflatebot", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 43, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64ceaecdc9d00e3847c7ae7c/8Te2teNBt8Jw_LjOIV7x4.png", "fullname": "Toaster", "name": "ToastyPigeon", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 40 } ]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-21T21:13:35.000Z
2024-10-21T21:13:35.224Z
[]
/posts/inflatebot/942378801587323
990
0
899500238791811
[ { "type": "text", "value": "hey there folks,", "raw": "hey there folks,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "twitter is aweful isnt it ? just getting into the habbit of using hf/posts for shares 🦙🦙", "raw": "twitter is aweful isnt it ? just getting into the habbit of using hf/posts for shares 🦙🦙", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Tonic/on-device-granite-3.0-1b-a400m-instruct", "href": null, "resource": { "type": "space", "id": "Tonic/on-device-granite-3.0-1b-a400m-instruct", "discussionNum": null }, "url": "https://huggingface.co/spaces/Tonic/on-device-granite-3.0-1b-a400m-instruct", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "new granite on device instruct model demo , hope you like it 🚀🚀", "raw": "new granite on device instruct model demo , hope you like it 🚀🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
hey there folks, Twitter is awful, isn't it? Just getting into the habit of using hf/posts for shares 🦙🦙 https://huggingface.co/spaces/Tonic/on-device-granite-3.0-1b-a400m-instruct New Granite on-device instruct model demo, hope you like it 🚀🚀
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "victor" ], "count": 2 } ]
2024-10-21T19:27:41.000Z
2024-10-21T19:27:41.514Z
[]
/posts/Tonic/899500238791811
1,444
0
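For the Granite demo above, here is a hedged transformers sketch of running the same model locally. The chat-template usage is standard transformers API; the model id matches the Space's name but is still an assumption, and the generation settings are illustrative rather than the demo's own.

```python
# Hedged sketch: running granite-3.0-1b-a400m-instruct locally with transformers.
# The model id and generation settings are assumptions; the demo Space may differ.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ibm-granite/granite-3.0-1b-a400m-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [{"role": "user", "content": "Summarize what an on-device instruct model is good for."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(inputs, max_new_tokens=200, do_sample=False)
# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))
```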