Dataset schema (column name, type, and observed minimum/maximum length or value):

Column                    Type              Min    Max
slug                      string (length)   15     15
content                   list (length)     1      129
rawContent                string (length)   1      2k
author                    dict              -      -
attachments               list (length)     0      49
mentions                  list (length)     0      49
reactions                 list (length)     0      12
publishedAt               string (length)   24     24
updatedAt                 string (length)   24     24
commentators              list (length)     0      52
url                       string (length)   25     46
totalUniqueImpressions    int64 (value)     1      42.1k
numComments               int64 (value)     0      621
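For orientation, below is a minimal sketch of how a dataset with this schema could be loaded and inspected with the Hugging Face datasets library. The repository id "your-org/hf-posts" is a placeholder assumption (substitute the real dataset name), and the relationship between content and rawContent (the latter looks like the concatenation of the per-token "raw" fields) is inferred from the example rows that follow, not documented.

from datasets import load_dataset

# Placeholder repository id (assumption) -- substitute the actual dataset name.
ds = load_dataset("your-org/hf-posts", split="train")

# Each row is one post; column names match the schema above.
row = ds[0]
print(row["slug"], row["url"], row["publishedAt"], row["numComments"])

# rawContent appears to be the flattened text of the structured content list
# (text / new_line / link / mention / resource tokens) -- inferred, not guaranteed.
flattened = "".join(tok["raw"] for tok in row["content"] if tok["raw"] is not None)
print(flattened == row["rawContent"])

# Example aggregation: the post with the highest total reaction count.
def total_reactions(post):
    return sum(r["count"] for r in post["reactions"])

top = max(ds, key=total_reactions)
print(top["url"], total_reactions(top), top["totalUniqueImpressions"])

The aggregation at the end relies only on the reactions column structure visible in the example rows (a list of objects with "reaction", "users", and "count" fields).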
226228339183389
[ { "type": "text", "value": "I made a beginners guide to Hugging Face Spaces 🤗 I hope it's useful to some of you :) ", "raw": "I made a beginners guide to Hugging Face Spaces 🤗 I hope it's useful to some of you :) ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "YouTube video: ", "raw": "YouTube video: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.youtube.com/watch?v=xqdTFyRdtjQ", "href": "https://www.youtube.com/watch?v=xqdTFyRdtjQ", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Blog: ", "raw": "Blog: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.marqo.ai/blog/how-to-create-a-hugging-face-space", "href": "https://www.marqo.ai/blog/how-to-create-a-hugging-face-space", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
I made a beginner's guide to Hugging Face Spaces 🤗 I hope it's useful to some of you :) YouTube video: https://www.youtube.com/watch?v=xqdTFyRdtjQ Blog: https://www.marqo.ai/blog/how-to-create-a-hugging-face-space
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6643cc9a9d42d1d77c1c830f/QYvjWqiiXvyeidYhNm9M5.jpeg", "fullname": "Ellie Sleightholm", "name": "elliesleightholm", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 21, "isFollowing": false }
[]
[]
[ { "reaction": "🤗", "users": [ "hf-demo", "victor", "prithivMLmods", "SaylorTwift", "davidberenstein1957", "adamdavidson", "jsulz", "philosopher-from-god", "Niansuh", "ozawamode", "John6666", "DRXD1000", "erinys", "clem", "alielfilali01", "Nymbo", "fdaudens", "Goekdeniz-Guelmez", "ivanfioravanti", "dvilasuero" ], "count": 20 }, { "reaction": "🔥", "users": [ "philosopher-from-god", "John6666", "clem", "Goekdeniz-Guelmez", "dvilasuero" ], "count": 5 }, { "reaction": "👀", "users": [ "philosopher-from-god", "clem", "alsargent" ], "count": 3 }, { "reaction": "🚀", "users": [ "philosopher-from-god", "John6666", "clem" ], "count": 3 }, { "reaction": "❤️", "users": [ "FM-1976", "alielfilali01" ], "count": 2 } ]
2024-11-21T10:51:05.000Z
2024-11-21T22:39:34.507Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2607, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6643cc9a9d42d1d77c1c830f/QYvjWqiiXvyeidYhNm9M5.jpeg", "fullname": "Ellie Sleightholm", "name": "elliesleightholm", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 21, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/VR-xm4GpLxH4mYX7zfYuL.jpeg", "fullname": "Adam Davidson", "name": "adamdavidson", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65fc06ee769c433dc29b475e/y8w9RUvAvNec48ZPUFA2W.png", "fullname": "Bridge", "name": "philosopher-from-god", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem 🤗", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1763, "isFollowing": false } ]
/posts/elliesleightholm/226228339183389
2,601
8
113223168612595
[ { "type": "text", "value": "How do I test an LLM for my unique needs?", "raw": "How do I test an LLM for my unique needs?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If you work in finance, law, or medicine, generic benchmarks are not enough.", "raw": "If you work in finance, law, or medicine, generic benchmarks are not enough.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This blog post uses Argilla, Distilllabel and 🌤️Lighteval to generate evaluation dataset and evaluate models.", "raw": "This blog post uses Argilla, Distilllabel and 🌤️Lighteval to generate evaluation dataset and evaluate models.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/argilla-io/argilla-cookbook/blob/main/domain-eval/README.md", "href": "https://github.com/argilla-io/argilla-cookbook/blob/main/domain-eval/README.md", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
How do I test an LLM for my unique needs? If you work in finance, law, or medicine, generic benchmarks are not enough. This blog post uses Argilla, Distilabel and 🌤️Lighteval to generate an evaluation dataset and evaluate models. https://github.com/argilla-io/argilla-cookbook/blob/main/domain-eval/README.md
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1678663263366-63e0eea7af523c37e5a77966.jpeg", "fullname": "Nathan Habib", "name": "SaylorTwift", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 98, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-11-21T10:34:21.000Z
2024-11-21T10:34:43.310Z
[]
/posts/SaylorTwift/113223168612595
306
0
242160030574275
[ { "type": "text", "value": "🤗🔭 Introducing Observers: A Lightweight SDK for AI Observability 🔭🤗", "raw": "🤗🔭 Introducing Observers: A Lightweight SDK for AI Observability 🔭🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Observers is an open-source Python SDK that provides comprehensive observability for AI applications. Our library makes it easy to:", "raw": "Observers is an open-source Python SDK that provides comprehensive observability for AI applications. Our library makes it easy to:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Track and record interactions with AI models", "raw": "- Track and record interactions with AI models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Store observations in multiple backends", "raw": "- Store observations in multiple backends", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Query and analyse your AI interactions with ease", "raw": "- Query and analyse your AI interactions with ease", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/davidberenstein1957/observers-a-lightweight-sdk-for-ai-observability", "href": "https://huggingface.co/blog/davidberenstein1957/observers-a-lightweight-sdk-for-ai-observability", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🤗🔭 Introducing Observers: A Lightweight SDK for AI Observability 🔭🤗 Observers is an open-source Python SDK that provides comprehensive observability for AI applications. Our library makes it easy to: - Track and record interactions with AI models - Store observations in multiple backends - Query and analyse your AI interactions with ease https://huggingface.co/blog/davidberenstein1957/observers-a-lightweight-sdk-for-ai-observability
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg", "fullname": "David Berenstein", "name": "davidberenstein1957", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 167, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "davidberenstein1957", "davanstrien", "John6666", "GoDjMike", "Joseph717171" ], "count": 5 }, { "reaction": "🔥", "users": [ "davidberenstein1957", "plaguss", "victor", "Joseph717171" ], "count": 4 }, { "reaction": "🚀", "users": [ "davidberenstein1957", "Joseph717171" ], "count": 2 }, { "reaction": "❤️", "users": [ "davidberenstein1957", "Joseph717171" ], "count": 2 }, { "reaction": "🤗", "users": [ "davidberenstein1957", "Joseph717171" ], "count": 2 } ]
2024-11-21T10:22:35.000Z
2024-11-21T10:22:35.776Z
[]
/posts/davidberenstein1957/242160030574275
907
0
173037888794836
[ { "type": "text", "value": "🥳 Thrilled to introduce our recent efforts on bootstrapping VLMs for multi-modal chain-of-thought reasoning !", "raw": "🥳 Thrilled to introduce our recent efforts on bootstrapping VLMs for multi-modal chain-of-thought reasoning !", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📕 Title: Vision-Language Models Can Self-Improve Reasoning via Reflection", "raw": "📕 Title: Vision-Language Models Can Self-Improve Reasoning via Reflection", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Link: ", "raw": "🔗 Link: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2411.00855", "href": null, "resource": { "type": "paper", "id": "2411.00855", "discussionNum": null }, "url": "https://huggingface.co/papers/2411.00855", "code": null, "user": null, "label": "Vision-Language Models Can Self-Improve Reasoning via Reflection (2411.00855)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "😇Takeaways:", "raw": "😇Takeaways:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- We found that VLMs can self-improve reasoning performance through a reflection mechanism, and importantly, this approach can scale through test-time computing.", "raw": "- We found that VLMs can self-improve reasoning performance through a reflection mechanism, and importantly, this approach can scale through test-time computing.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Evaluation on comprehensive and diverse Vision-Language reasoning tasks are included !", "raw": "- Evaluation on comprehensive and diverse Vision-Language reasoning 
tasks are included !", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🥳 Thrilled to introduce our recent efforts on bootstrapping VLMs for multi-modal chain-of-thought reasoning! 📕 Title: Vision-Language Models Can Self-Improve Reasoning via Reflection 🔗 Link: https://huggingface.co/papers/2411.00855 😇 Takeaways: - We found that VLMs can self-improve reasoning performance through a reflection mechanism, and importantly, this approach can scale through test-time computing. - Evaluations on comprehensive and diverse Vision-Language reasoning tasks are included!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/656d73ed0bbc114fe6449704/gpteBU9GmKSHRVkRBUHld.png", "fullname": "Symbol-LLM", "name": "Symbol-LLM", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 29, "isFollowing": false }
[]
[]
[ { "reaction": "🔥", "users": [ "Symbol-LLM", "SaylorTwift", "cckevinn", "davanstrien", "Tanvir1337", "ai-everyday" ], "count": 6 }, { "reaction": "🚀", "users": [ "Symbol-LLM", "cckevinn", "John6666", "Tanvir1337" ], "count": 4 } ]
2024-11-21T09:42:46.000Z
2024-11-21T09:42:46.928Z
[]
/posts/Symbol-LLM/173037888794836
846
0
271989960059219
[ { "type": "text", "value": "🎓 Introducing Bigslide.ru Presentations Dataset - ", "raw": "🎓 Introducing Bigslide.ru Presentations Dataset - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/nyuuzyou/bigslide", "href": null, "resource": { "type": "dataset", "id": "nyuuzyou/bigslide", "discussionNum": null }, "url": "https://huggingface.co/datasets/nyuuzyou/bigslide", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset highlights:", "raw": "Dataset highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 50,872 presentations from bigslide.ru, a platform for storing and viewing presentations for school students", "raw": "- 50,872 presentations from bigslide.ru, a platform for storing and viewing presentations for school students", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Primarily in Russian, with some English and potentially other languages", "raw": "- Primarily in Russian, with some English and potentially other languages", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Each entry includes: URL, title, download URL, filepath, and extracted text content (where available)", "raw": "- Each entry includes: URL, title, download URL, filepath, and extracted text content (where available)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Contains original PPT/PPTX files in addition to metadata", "raw": "- Contains original PPT/PPTX files in addition to metadata", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Data covers a wide range of educational topics and presentation materials", "raw": "- Data covers a wide range of educational topics and presentation materials", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": 
"text", "value": "- Dedicated to the public domain under Creative Commons Zero (CC0) license", "raw": "- Dedicated to the public domain under Creative Commons Zero (CC0) license", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The dataset can be used for analyzing educational presentation content in Russian and other languages, text classification tasks, and information retrieval systems. It's particularly valuable for examining trends in educational presentation materials and sharing practices in the Russian-speaking student community. The inclusion of original files allows for in-depth analysis of presentation formats and structures commonly used in educational settings.", "raw": "The dataset can be used for analyzing educational presentation content in Russian and other languages, text classification tasks, and information retrieval systems. It's particularly valuable for examining trends in educational presentation materials and sharing practices in the Russian-speaking student community. The inclusion of original files allows for in-depth analysis of presentation formats and structures commonly used in educational settings.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🎓 Introducing Bigslide.ru Presentations Dataset - https://huggingface.co/datasets/nyuuzyou/bigslide Dataset highlights: - 50,872 presentations from bigslide.ru, a platform for storing and viewing presentations for school students - Primarily in Russian, with some English and potentially other languages - Each entry includes: URL, title, download URL, filepath, and extracted text content (where available) - Contains original PPT/PPTX files in addition to metadata - Data covers a wide range of educational topics and presentation materials - Dedicated to the public domain under Creative Commons Zero (CC0) license The dataset can be used for analyzing educational presentation content in Russian and other languages, text classification tasks, and information retrieval systems. It's particularly valuable for examining trends in educational presentation materials and sharing practices in the Russian-speaking student community. The inclusion of original files allows for in-depth analysis of presentation formats and structures commonly used in educational settings.
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png", "fullname": "nyuuzyou", "name": "nyuuzyou", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 57, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 }, { "reaction": "🔥", "users": [ "UCCTeam" ], "count": 1 } ]
2024-10-12T17:46:03.000Z
2024-10-12T17:46:03.072Z
[]
/posts/nyuuzyou/271989960059219
627
0
652534704768149
[ { "type": "text", "value": "Ladies and Gents, please try my new Assistant, Image Gen - Uncensored Edition, on HuggingChat. ", "raw": "Ladies and Gents, please try my new Assistant, Image Gen - Uncensored Edition, on HuggingChat. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://hf.co/chat/assistant/66fccce0c0fafc94ab557ef2", "href": "https://hf.co/chat/assistant/66fccce0c0fafc94ab557ef2", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is a multimodal assistant: Qwen 2.5 72B + SOTA diffusion models for image generation. Same architecture as Image Gen+ but with some MAJOR improvements ! These are as follows:", "raw": "This is a multimodal assistant: Qwen 2.5 72B + SOTA diffusion models for image generation. Same architecture as Image Gen+ but with some MAJOR improvements ! These are as follows:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Switched the LLM to Qwen 2.5 72B, the most powerful model currently available on HuggingChat. This results in higher quality prompts for the txt2img model and much better adherence to the prompt-url format that the upstream provider requires (image gen models are hosted by pollinations as with most other assistants on huggingchat that offer image generation).", "raw": "- Switched the LLM to Qwen 2.5 72B, the most powerful model currently available on HuggingChat. This results in higher quality prompts for the txt2img model and much better adherence to the prompt-url format that the upstream provider requires (image gen models are hosted by pollinations as with most other assistants on huggingchat that offer image generation).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Cleaned up the system prompt including the examples of the prompt-in-url format, and adjusted the logic that determines how many images to generate based on the quality of user prompt... 
these changes further improve", "raw": "- Cleaned up the system prompt including the examples of the prompt-in-url format, and adjusted the logic that determines how many images to generate based on the quality of user prompt... these changes further improve", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Assistant has access to multiple image generation models and will by default choose whatever model is most appropriate for the task. This includes NSFW generations, which it makes using an uncensored SD3 turbo. For other workloads, the Assistant preferentially uses one of the flux variants or any-dark (an artistic SDXL finetune), based on the nature of the task. Available models include turbo, flux, flux-realism, flux-anime, flux-3d, any-dark", "raw": "- Assistant has access to multiple image generation models and will by default choose whatever model is most appropriate for the task. This includes NSFW generations, which it makes using an uncensored SD3 turbo. For other workloads, the Assistant preferentially uses one of the flux variants or any-dark (an artistic SDXL finetune), based on the nature of the task. Available models include turbo, flux, flux-realism, flux-anime, flux-3d, any-dark", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Added verbiage to system prompt which greatly reduces censorship / refusals by the LLM (the txt2img models are uncensored to start off)", "raw": "- Added verbiage to system prompt which greatly reduces censorship / refusals by the LLM (the txt2img models are uncensored to start off)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here are the user-entered prompts used to create the images you see here... feel free to try them yourself!", "raw": "Here are the user-entered prompts used to create the images you see here... feel free to try them yourself!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "\"Ayatollah Khameini and Kamala Harris having a secret romantic rendezvous. 
Use flux-realism model\"", "raw": "\"Ayatollah Khameini and Kamala Harris having a secret romantic rendezvous. Use flux-realism model\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "\"A self portrait of your consciousness\"", "raw": "\"A self portrait of your consciousness\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "\"The chien of andalous, in a psychedelic style\"", "raw": "\"The chien of andalous, in a psychedelic style\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "\"Make me 4 paintings in the style of Frida Kahlo that I can sell to tourists in a mexican hippie town\"", "raw": "\"Make me 4 paintings in the style of Frida Kahlo that I can sell to tourists in a mexican hippie town\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "\"Paint me a van gogh and greg rutkowski style scene involving elephants and gerbils\"", "raw": "\"Paint me a van gogh and greg rutkowski style scene involving elephants and gerbils\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Ladies and Gents, please try my new Assistant, Image Gen - Uncensored Edition, on HuggingChat. https://hf.co/chat/assistant/66fccce0c0fafc94ab557ef2 This is a multimodal assistant: Qwen 2.5 72B + SOTA diffusion models for image generation. Same architecture as Image Gen+ but with some MAJOR improvements ! These are as follows: - Switched the LLM to Qwen 2.5 72B, the most powerful model currently available on HuggingChat. This results in higher quality prompts for the txt2img model and much better adherence to the prompt-url format that the upstream provider requires (image gen models are hosted by pollinations as with most other assistants on huggingchat that offer image generation). - Cleaned up the system prompt including the examples of the prompt-in-url format, and adjusted the logic that determines how many images to generate based on the quality of user prompt... these changes further improve - Assistant has access to multiple image generation models and will by default choose whatever model is most appropriate for the task. This includes NSFW generations, which it makes using an uncensored SD3 turbo. For other workloads, the Assistant preferentially uses one of the flux variants or any-dark (an artistic SDXL finetune), based on the nature of the task. Available models include turbo, flux, flux-realism, flux-anime, flux-3d, any-dark - Added verbiage to system prompt which greatly reduces censorship / refusals by the LLM (the txt2img models are uncensored to start off) Here are the user-entered prompts used to create the images you see here... feel free to try them yourself! "Ayatollah Khameini and Kamala Harris having a secret romantic rendezvous. Use flux-realism model" "A self portrait of your consciousness" "The chien of andalous, in a psychedelic style" "Make me 4 paintings in the style of Frida Kahlo that I can sell to tourists in a mexican hippie town" "Paint me a van gogh and greg rutkowski style scene involving elephants and gerbils"
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/RhP7T-AlAn5Y-08gaCUcU.jpeg", "fullname": "Sam Rahimi", "name": "DeFactOfficial", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 12, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e0d11d42ceed655c407755/Q_sdg-9axjFnzw7VTJARC.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e0d11d42ceed655c407755/A2z0uLLavJkJDK-Kfafsx.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e0d11d42ceed655c407755/FQQe79ZF6y5drUqmpoGl3.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e0d11d42ceed655c407755/hEh0V3KU4zv6-3MI0zEMA.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66e0d11d42ceed655c407755/XeLlOj6k9AhS40CAuK7tV.jpeg" } ]
[]
[ { "reaction": "🚀", "users": [ "DeFactOfficial", "John6666", "KingNish", "thomas-mayne", "KhangHatto" ], "count": 5 }, { "reaction": "👍", "users": [ "Mayca69", "VlSav", "PopHorn1956" ], "count": 3 }, { "reaction": "❤️", "users": [ "DeFactOfficial", "thomas-mayne" ], "count": 2 }, { "reaction": "➕", "users": [ "Mayca69" ], "count": 1 }, { "reaction": "😎", "users": [ "DeFactOfficial" ], "count": 1 }, { "reaction": "🔥", "users": [ "UCCTeam" ], "count": 1 } ]
2024-10-12T17:00:37.000Z
2024-10-16T23:18:11.290Z
[ { "avatarUrl": "/avatars/b2725bb163fa15d6c5856121780d52eb.svg", "fullname": "Ci Splunk", "name": "Csplk", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 43, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/RhP7T-AlAn5Y-08gaCUcU.jpeg", "fullname": "Sam Rahimi", "name": "DeFactOfficial", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 12, "isFollowing": false }, { "avatarUrl": "/avatars/744eddaa7dfc34a57df9ce32a78059a0.svg", "fullname": "Tyrone Pierce", "name": "piercyy", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false } ]
/posts/DeFactOfficial/652534704768149
2,152
3
350626860046181
[ { "type": "text", "value": "On the 2nd of October a really cool paper was released called \"Were RNNs all we need\" ", "raw": "On the 2nd of October a really cool paper was released called \"Were RNNs all we need\" ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2410.01201", "href": "https://arxiv.org/abs/2410.01201", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This paper introduces the MinGRU model, a simplified version of the traditional Gated Recurrent Unit (GRU) designed to enhance efficiency by removing hidden state dependencies from its gates. This allows for parallel training, making it significantly faster than conventional GRUs. Additionally, MinGRU eliminates non-linear activations like tanh, streamlining computations.", "raw": "This paper introduces the MinGRU model, a simplified version of the traditional Gated Recurrent Unit (GRU) designed to enhance efficiency by removing hidden state dependencies from its gates. This allows for parallel training, making it significantly faster than conventional GRUs. Additionally, MinGRU eliminates non-linear activations like tanh, streamlining computations.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "So I read the paper and I tried training this model and it seems to be doing quite well , you could check out the pre-trained model on the huggingface spaces ", "raw": "So I read the paper and I tried training this model and it seems to be doing quite well , you could check out the pre-trained model on the huggingface spaces ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ", "raw": "- ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/damerajee/mingru-stories", "href": null, "resource": { "type": "space", "id": "damerajee/mingru-stories", "discussionNum": null }, "url": "https://huggingface.co/spaces/damerajee/mingru-stories", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, 
"user": null, "label": null, "lang": null } ]
On the 2nd of October a really cool paper was released called "Were RNNs All We Need?" https://arxiv.org/abs/2410.01201 This paper introduces the MinGRU model, a simplified version of the traditional Gated Recurrent Unit (GRU) designed to enhance efficiency by removing hidden state dependencies from its gates. This allows for parallel training, making it significantly faster than conventional GRUs. Additionally, MinGRU eliminates non-linear activations like tanh, streamlining computations. So I read the paper and tried training this model, and it seems to be doing quite well. You can check out the pre-trained model on Hugging Face Spaces: - https://huggingface.co/spaces/damerajee/mingru-stories
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6487239cca30096ea9f52115/HMte9wjKJgfcxsO-5vb_Q.jpeg", "fullname": "dame rajee", "name": "damerajee", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 11, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-12T15:08:17.000Z
2024-10-13T15:22:03.413Z
[ { "avatarUrl": "/avatars/7c7860afb1bef5657f006cc75609e7d7.svg", "fullname": "Elvis Serge", "name": "NSAA", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/damerajee/350626860046181
423
1
482724537392696
[ { "type": "text", "value": "Hello, lovely community! 🌟", "raw": "Hello, lovely community! 🌟", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/zamal/Molmo-4bit", "href": null, "resource": { "type": "space", "id": "zamal/Molmo-4bit", "discussionNum": null }, "url": "https://huggingface.co/spaces/zamal/Molmo-4bit", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " Thrilled to announce that the Molmo 7B 4-bit Space is now live! 🚀 The model size has been reduced by six times with almost no performance loss, and the results will leave you amazed!", "raw": " Thrilled to announce that the Molmo 7B 4-bit Space is now live! 🚀 The model size has been reduced by six times with almost no performance loss, and the results will leave you amazed!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It runs on zero GPU, making it incredibly accessible for everyone!", "raw": "It runs on zero GPU, making it incredibly accessible for everyone!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check it out here and start exploring today!", "raw": "Check it out here and start exploring today!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Happy experimenting! 🎉", "raw": "Happy experimenting! 🎉", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hello, lovely community! 🌟 https://huggingface.co/spaces/zamal/Molmo-4bit Thrilled to announce that the Molmo 7B 4-bit Space is now live! 🚀 The model size has been reduced by six times with almost no performance loss, and the results will leave you amazed! It runs on zero GPU, making it incredibly accessible for everyone! Check it out here and start exploring today! Happy experimenting! 🎉
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6404403bad54665351d42ee2/TCC5Na8ojtSL1MJAzTn3b.png", "fullname": "zamal_", "name": "zamal", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 23, "isFollowing": false }
[]
[]
[ { "reaction": "🔥", "users": [ "John6666", "oec88p", "raincandy-u", "xi0v", "zamal", "johko", "vpkprasanna" ], "count": 7 } ]
2024-10-12T10:30:04.000Z
2024-10-12T12:14:04.412Z
[]
/posts/zamal/482724537392696
2,018
0
651329210759287
[ { "type": "text", "value": "Check this out: I trained an AI on huggingface posts! all of these are AI generated:", "raw": "Check this out: I trained an AI on huggingface posts! all of these are AI generated:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "----------", "raw": "----------", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Hello!", "raw": "Hello!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I'm excited to share that my colleague ", "raw": "I'm excited to share that my colleague ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@felipeebert", "href": null, "resource": null, "url": null, "code": null, "user": "felipeebert", "label": null, "lang": null }, { "type": "text", "value": " and I have released the largest Spanish LLM benchmark to date. ", "raw": " and I have released the largest Spanish LLM benchmark to date. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We've developed the Spanish LLM Evaluation Benchmark (SLAB), a set of benchmarks designed to evaluate the ability of language models to understand, generate and translate in Spanish. ", "raw": "We've developed the Spanish LLM Evaluation Benchmark (SLAB), a set of benchmarks designed to evaluate the ability of language models to understand, generate and translate in Spanish. 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "SLAB includes five different benchmarks:", "raw": "SLAB includes five different benchmarks:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Sentiment Analysis: evaluate models' ability to detect and describe sentiment in natural language", "raw": "- Sentiment Analysis: evaluate models' ability to detect and describe sentiment in natural language", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Fact Checking: evaluate models' ability to detect and refute factual errors in text", "raw": "- Fact Checking: evaluate models' ability to detect and refute factual errors in text", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Question Answering: evaluate models' ability to answer questions in Spanish", "raw": "- Question Answering: evaluate models' ability to answer questions in Spanish", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Open-ended Questions: evaluate models' ability to generate coherent responses in Spanish", "raw": "- Open-ended Questions: evaluate models' ability to generate coherent responses in Spanish", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Translation: evaluate models' ability to translate in Spanish", "raw": "- Translation: evaluate models' ability to translate in Spanish", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "SLAB is aligned with the latest Spanish LLM industry developments and includes the most recent models available on the market. 
We aim to keep our benchmarks up-to-date and relevant to the Spanish language ecosystem.", "raw": "SLAB is aligned with the latest Spanish LLM industry developments and includes the most recent models available on the market. We aim to keep our benchmarks up-to-date and relevant to the Spanish language ecosystem.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "SLAB is available at: ", "raw": "SLAB is available at: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/datasets/argilla/SLAB", "href": "https://huggingface.co/datasets/argilla/SLAB", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ". ", "raw": ". ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If you would like to collaborate on building additional Spanish LLM benchmarks, let's discuss in the comments.", "raw": "If you would like to collaborate on building additional Spanish LLM benchmarks, let's discuss in the comments.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 SLAB Blog Post: ", "raw": "🔗 SLAB Blog Post: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://argilla.com/blog/slab", "href": "https://argilla.com/blog/slab", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "----------", "raw": "----------", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Hello everyone,", "raw": "Hello everyone,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": 
null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I'm thrilled to announce the release of ", "raw": "I'm thrilled to announce the release of ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/01-AI/01AI-GPT-4o", "href": "https://huggingface.co/01-AI/01AI-GPT-4o", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " -", "raw": " -", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A new family of models that brings the power of transformer AI to the masses.", "raw": "A new family of models that brings the power of transformer AI to the masses.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This model is designed to be accessible and easy to use, while still offering high-quality results.", "raw": "This model is designed to be accessible and easy to use, while still offering high-quality results.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key features:", "raw": "Key features:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Small model size: only 23M parameters", "raw": "- Small model size: only 23M parameters", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Supports text generation, image generation, and text-to-image tasks", "raw": "- Supports text generation, image generation, and text-to-image tasks", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": 
"new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Data-efficient training with a lightweight tokenizer", "raw": "- Data-efficient training with a lightweight tokenizer", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Optimized for efficient on-device usage", "raw": "- Optimized for efficient on-device usage", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Uses the powerful transformer architecture to deliver high-quality results", "raw": "- Uses the powerful transformer architecture to deliver high-quality results", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Excited to see what you all think!", "raw": "Excited to see what you all think!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/01-AI/01AI-GPT-4o", "href": "https://huggingface.co/01-AI/01AI-GPT-4o", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Check this out: I trained an AI on huggingface posts! all of these are AI generated: ---------- Hello! I'm excited to share that my colleague @felipeebert and I have released the largest Spanish LLM benchmark to date. We've developed the Spanish LLM Evaluation Benchmark (SLAB), a set of benchmarks designed to evaluate the ability of language models to understand, generate and translate in Spanish. SLAB includes five different benchmarks: - Sentiment Analysis: evaluate models' ability to detect and describe sentiment in natural language - Fact Checking: evaluate models' ability to detect and refute factual errors in text - Question Answering: evaluate models' ability to answer questions in Spanish - Open-ended Questions: evaluate models' ability to generate coherent responses in Spanish - Translation: evaluate models' ability to translate in Spanish SLAB is aligned with the latest Spanish LLM industry developments and includes the most recent models available on the market. We aim to keep our benchmarks up-to-date and relevant to the Spanish language ecosystem. SLAB is available at: https://huggingface.co/datasets/argilla/SLAB. If you would like to collaborate on building additional Spanish LLM benchmarks, let's discuss in the comments. 🔗 SLAB Blog Post: https://argilla.com/blog/slab ---------- Hello everyone, I'm thrilled to announce the release of https://huggingface.co/01-AI/01AI-GPT-4o - A new family of models that brings the power of transformer AI to the masses. This model is designed to be accessible and easy to use, while still offering high-quality results. Key features: - Small model size: only 23M parameters - Supports text generation, image generation, and text-to-image tasks - Data-efficient training with a lightweight tokenizer - Optimized for efficient on-device usage - Uses the powerful transformer architecture to deliver high-quality results Excited to see what you all think! https://huggingface.co/01-AI/01AI-GPT-4o
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6316fb937b0ee0136e5f1220/poHBoJ7QAF_s2CCaosdvQ.jpeg", "fullname": "Firstname Lastname", "name": "takeraparterer", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 29, "isFollowing": false }
[]
[ { "avatarUrl": "/avatars/34b129f4c3e5b8c5d536fe2554bd423d.svg", "fullname": "Felipe Ebert", "name": "felipeebert", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null } ]
[ { "reaction": "🚀", "users": [ "Saleh7", "bfuzzy1", "Clausss", "xi0v", "MihaiHuggingFace", "takeraparterer", "Svngoku", "nyuuzyou" ], "count": 8 }, { "reaction": "👀", "users": [ "John6666", "takeraparterer", "Clausss", "xi0v", "raincandy-u", "Svngoku" ], "count": 6 } ]
2024-10-12T02:01:12.000Z
2024-11-16T20:57:46.195Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/657eb5b256c9c67605a6e8b5/RPblnGJX57oiIcASEz_S8.png", "fullname": "raincandy_U", "name": "raincandy-u", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 30, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6316fb937b0ee0136e5f1220/poHBoJ7QAF_s2CCaosdvQ.jpeg", "fullname": "Firstname Lastname", "name": "takeraparterer", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 29, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false } ]
/posts/takeraparterer/651329210759287
2,233
4
213731571805216
[ { "type": "text", "value": "Looking for a simple explanation of Microsoft's release of Differential Transformers, and a nifty Colab Notebook that recreates it all? Then simply check out this YouTube video: ", "raw": "Looking for a simple explanation of Microsoft's release of Differential Transformers, and a nifty Colab Notebook that recreates it all? Then simply check out this YouTube video: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/AsWoaj0zkDo", "href": "https://youtu.be/AsWoaj0zkDo", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Looking for a simple explanation of Microsoft's release of Differential Transformers, and a nifty Colab Notebook that recreates it all? Then simply check out this YouTube video: https://youtu.be/AsWoaj0zkDo
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-12T01:27:31.000Z
2024-10-16T09:16:21.774Z
[ { "avatarUrl": "/avatars/744eddaa7dfc34a57df9ce32a78059a0.svg", "fullname": "Tyrone Pierce", "name": "piercyy", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false } ]
/posts/TuringsSolutions/213731571805216
516
1
308029395313715
[ { "type": "text", "value": "newsrooms, i see you using deepl or an llm for translation without logging your adjustments. you're wasting gold and you know it's bad!", "raw": "newsrooms, i see you using deepl or an llm for translation without logging your adjustments. you're wasting gold and you know it's bad!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "with this notebook from the argilla team, you can:", "raw": "with this notebook from the argilla team, you can:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- adjust your translations in a neat interface, ", "raw": "- adjust your translations in a neat interface, ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- log them to build custom datasets, ", "raw": "- log them to build custom datasets, ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- fine-tune your model. ", "raw": "- fine-tune your model. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "your translation will become better and better, gradually aligning more with your style guide. no more starting from scratch! ", "raw": "your translation will become better and better, gradually aligning more with your style guide. no more starting from scratch! 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Notebook by ", "raw": "Notebook by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@sdiazlor", "href": null, "resource": null, "url": null, "code": null, "user": "sdiazlor", "label": null, "lang": null }, { "type": "text", "value": ": ", "raw": ": ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://colab.research.google.com/drive/1sR1wfOs_pNrdm3Mwjo_qJRG7NnEb9a7W#scrollTo=yNm8N5GoRD2o", "href": "https://colab.research.google.com/drive/1sR1wfOs_pNrdm3Mwjo_qJRG7NnEb9a7W#scrollTo=yNm8N5GoRD2o", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "#AITranslation #JournalismTech ", "raw": "#AITranslation #JournalismTech ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
newsrooms, i see you using deepl or an llm for translation without logging your adjustments. you're wasting gold and you know it's bad! with this notebook from the argilla team, you can: - adjust your translations in a neat interface, - log them to build custom datasets, - fine-tune your model. your translation will become better and better, gradually aligning more with your style guide. no more starting from scratch! Notebook by @sdiazlor: https://colab.research.google.com/drive/1sR1wfOs_pNrdm3Mwjo_qJRG7NnEb9a7W#scrollTo=yNm8N5GoRD2o #AITranslation #JournalismTech
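Editor's note: the Argilla notebook above handles this in a UI, but the underlying idea is simply logging (source, machine translation, edited translation) triples so they can later become fine-tuning data. A minimal, library-agnostic sketch of that logging step follows; the file name and field names are illustrative, not the notebook's actual code.

```python
import json
from pathlib import Path

LOG = Path("translation_edits.jsonl")  # hypothetical log file

def log_edit(source: str, machine_translation: str, edited_translation: str) -> None:
    """Append one reviewed translation to a JSONL file for later fine-tuning."""
    record = {
        "source": source,
        "machine_translation": machine_translation,
        "edited_translation": edited_translation,
    }
    with LOG.open("a", encoding="utf-8") as f:
        f.write(json.dumps(record, ensure_ascii=False) + "\n")

log_edit(
    source="Le gouvernement a annoncé de nouvelles mesures.",
    machine_translation="The government announced new measures.",
    edited_translation="The government has announced new measures.",
)
```

Each appended line is one training example; once enough edits accumulate, the JSONL file can be turned into a custom dataset for fine-tuning, which is exactly what the linked notebook automates.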
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/xEdcCgX5hMI3koAsDhWnA.mp4" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6420817bf837b31c1cfced50/09dhIVj9WNgs55PdWgHGo.jpeg", "fullname": "Sara Han Díaz", "name": "sdiazlor", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 28 } ]
[ { "reaction": "❤️", "users": [ "Aurelien-Morgan", "mghafiri", "hoduyquocbao" ], "count": 3 }, { "reaction": "👀", "users": [ "John6666", "louisbrulenaudet" ], "count": 2 } ]
2024-10-11T19:28:45.000Z
2024-10-15T11:22:19.674Z
[]
/posts/fdaudens/308029395313715
1,347
10
887038102042115
[ { "type": "text", "value": "If someone would like to keep pushing the limits of what's possible on cpu while being efficient/fast, here's my un-trained arco model scaled-up to 770m parameters. Consider it a modern gpt-2-large to experiment with", "raw": "If someone would like to keep pushing the limits of what's possible on cpu while being efficient/fast, here's my un-trained arco model scaled-up to 770m parameters. Consider it a modern gpt-2-large to experiment with", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/appvoid/arco-plus", "href": null, "resource": { "type": "model", "id": "appvoid/arco-plus", "discussionNum": null }, "url": "https://huggingface.co/appvoid/arco-plus", "code": null, "user": null, "label": null, "lang": null } ]
If someone would like to keep pushing the limits of what's possible on CPU while staying efficient and fast, here's my untrained arco model scaled up to 770M parameters. Consider it a modern gpt-2-large to experiment with
https://huggingface.co/appvoid/arco-plus
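Editor's note: a minimal loading sketch, not from the original post. It assumes the checkpoint works with the standard transformers auto classes (a causal LM with its own tokenizer); check the model card if the repo specifies a custom setup.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed quick-start: load the untrained 770M checkpoint on CPU
tokenizer = AutoTokenizer.from_pretrained("appvoid/arco-plus")
model = AutoModelForCausalLM.from_pretrained("appvoid/arco-plus")

# Sanity-check the parameter count before starting any training run
print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.0f}M parameters")
```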
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a813dedbb9e28866a91b27/zs-RWFuXs17IfPUhxQaei.jpeg", "fullname": "appvoid", "name": "appvoid", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 35, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62a813dedbb9e28866a91b27/w1wbl60QGuGJuget2qQNi.webp" } ]
[]
[ { "reaction": "🔥", "users": [ "YaTharThShaRma999", "John6666", "bfuzzy1" ], "count": 3 } ]
2024-10-11T16:36:51.000Z
2024-10-11T16:36:51.392Z
[]
/posts/appvoid/887038102042115
1,277
0
241931071917176
[ { "type": "text", "value": "Some exciting news...", "raw": "Some exciting news...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We are open-sourcing The Little Book of ML Metrics! 🎉", "raw": "We are open-sourcing The Little Book of ML Metrics! 🎉", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The book that will be on every data scientist's desk is open source.", "raw": "The book that will be on every data scientist's desk is open source.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What does that mean?", "raw": "What does that mean?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It means hundreds of people can review it, contribute to it, and help us improve it before it's finished!", "raw": "It means hundreds of people can review it, contribute to it, and help us improve it before it's finished!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This also means that everyone will have free access to the digital version!", "raw": "This also means that everyone will have free access to the digital version!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Meanwhile, the high-quality printed edition will be available for purchase as it has been for a while. 
", "raw": "Meanwhile, the high-quality printed edition will be available for purchase as it has been for a while. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Revenue from printed copies will help us support further development and maintenance of the book. Not to mention that reviewers and contributors will receive revenue sharing through their affiliate links. 🙌", "raw": "Revenue from printed copies will help us support further development and maintenance of the book. Not to mention that reviewers and contributors will receive revenue sharing through their affiliate links. 🙌", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check out the book repo (make sure to leave a star 🌟): ", "raw": "Check out the book repo (make sure to leave a star 🌟): ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/NannyML/The-Little-Book-of-ML-Metrics", "href": "https://github.com/NannyML/The-Little-Book-of-ML-Metrics", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Some exciting news... We are open-sourcing The Little Book of ML Metrics! 🎉 The book that will be on every data scientist's desk is open source. What does that mean? It means hundreds of people can review it, contribute to it, and help us improve it before it's finished! This also means that everyone will have free access to the digital version! Meanwhile, the high-quality printed edition will be available for purchase as it has been for a while. Revenue from printed copies will help us support further development and maintenance of the book. Not to mention that reviewers and contributors will receive revenue sharing through their affiliate links. 🙌 Check out the book repo (make sure to leave a star 🌟): https://github.com/NannyML/The-Little-Book-of-ML-Metrics
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1657144463525-629a173153a72d997d3f57d0.jpeg", "fullname": "Santiago Viquez", "name": "santiviquez", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 84, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/629a173153a72d997d3f57d0/ULtWYaBatSiJtpnfaNySQ.png" } ]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-11T16:12:21.000Z
2024-10-11T16:12:21.741Z
[]
/posts/santiviquez/241931071917176
465
0
829517500759566
[ { "type": "text", "value": "This week in Inference Endpoints - thx ", "raw": "This week in Inference Endpoints - thx ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@erikkaum", "href": null, "resource": null, "url": null, "code": null, "user": "erikkaum", "label": null, "lang": null }, { "type": "text", "value": " for the update!", "raw": " for the update!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👀 ", "raw": "👀 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/erikkaum/endpoints-changelog", "href": "https://huggingface.co/blog/erikkaum/endpoints-changelog", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
This week in Inference Endpoints - thx @erikkaum for the update! 👀 https://huggingface.co/blog/erikkaum/endpoints-changelog
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1605114051380-noauth.jpeg", "fullname": "Jeff Boudier", "name": "jeffboudier", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 195, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63148c4db031f7b1c7bc36f9/sl0HUVNI0G_yfScJB38yZ.jpeg", "fullname": "Erik Kaunismäki", "name": "erikkaum", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 16 } ]
[ { "reaction": "🚀", "users": [ "John6666" ], "count": 1 }, { "reaction": "👍", "users": [ "John6666" ], "count": 1 }, { "reaction": "🔥", "users": [ "erikkaum" ], "count": 1 }, { "reaction": "❤️", "users": [ "adamelliotfields" ], "count": 1 } ]
2024-10-11T16:11:53.000Z
2024-10-12T09:40:04.987Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63148c4db031f7b1c7bc36f9/sl0HUVNI0G_yfScJB38yZ.jpeg", "fullname": "Erik Kaunismäki", "name": "erikkaum", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 16, "isFollowing": false } ]
/posts/jeffboudier/829517500759566
1,030
1
418580937119777
[ { "type": "text", "value": "🎓 Introducing Lusana.ru Presentations Dataset - ", "raw": "🎓 Introducing Lusana.ru Presentations Dataset - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/nyuuzyou/lusana", "href": null, "resource": { "type": "dataset", "id": "nyuuzyou/lusana", "discussionNum": null }, "url": "https://huggingface.co/datasets/nyuuzyou/lusana", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset highlights:", "raw": "Dataset highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 38,953 presentations from lusana.ru, a platform for storing presentations, reports, templates, and backgrounds", "raw": "- 38,953 presentations from lusana.ru, a platform for storing presentations, reports, templates, and backgrounds", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Primarily in Russian, with some English and potentially other languages", "raw": "- Primarily in Russian, with some English and potentially other languages", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Each entry includes: ID, title, download URL, uniqueness score, number of slides, views, downloads, file size, file path, and extracted text content (where available)", "raw": "- Each entry includes: ID, title, download URL, uniqueness score, number of slides, views, downloads, file size, file path, and extracted text content (where available)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Contains original PPT/PPTX files in addition to metadata", "raw": "- Contains original PPT/PPTX files in addition to metadata", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Data covers a wide range of topics and presentation materials", "raw": "- Data covers a wide range of topics and presentation materials", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": 
null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Licensed under Creative Commons Attribution-NonCommercial 3.0 Unported (CC BY-NC 3.0)", "raw": "- Licensed under Creative Commons Attribution-NonCommercial 3.0 Unported (CC BY-NC 3.0)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The dataset can be used for analyzing presentation content in Russian and other languages, text classification tasks, and information retrieval systems. It's also valuable for examining trends in educational and professional presentation materials and sharing practices in the Russian-speaking community. The inclusion of original files allows for in-depth analysis of presentation formats and structures.", "raw": "The dataset can be used for analyzing presentation content in Russian and other languages, text classification tasks, and information retrieval systems. It's also valuable for examining trends in educational and professional presentation materials and sharing practices in the Russian-speaking community. The inclusion of original files allows for in-depth analysis of presentation formats and structures.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🎓 Introducing Lusana.ru Presentations Dataset - https://huggingface.co/datasets/nyuuzyou/lusana Dataset highlights: - 38,953 presentations from lusana.ru, a platform for storing presentations, reports, templates, and backgrounds - Primarily in Russian, with some English and potentially other languages - Each entry includes: ID, title, download URL, uniqueness score, number of slides, views, downloads, file size, file path, and extracted text content (where available) - Contains original PPT/PPTX files in addition to metadata - Data covers a wide range of topics and presentation materials - Licensed under Creative Commons Attribution-NonCommercial 3.0 Unported (CC BY-NC 3.0) The dataset can be used for analyzing presentation content in Russian and other languages, text classification tasks, and information retrieval systems. It's also valuable for examining trends in educational and professional presentation materials and sharing practices in the Russian-speaking community. The inclusion of original files allows for in-depth analysis of presentation formats and structures.
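Editor's note: a minimal access sketch, assuming the dataset loads with the standard `datasets` API, that the split is named "train", and that the extracted text lives in a field called "text"; none of this is stated in the post, so check the dataset card for the exact schema.

```python
from datasets import load_dataset

# Split name "train" and field name "text" are assumptions, not confirmed by the post
ds = load_dataset("nyuuzyou/lusana", split="train")

# Keep only entries with extracted text, e.g. for text classification experiments
with_text = ds.filter(lambda row: bool(row.get("text")))
print(len(ds), "presentations,", len(with_text), "with extracted text")
```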
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png", "fullname": "nyuuzyou", "name": "nyuuzyou", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 57, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-11T15:13:01.000Z
2024-10-11T15:13:01.887Z
[]
/posts/nyuuzyou/418580937119777
340
0
802613539599324
[ { "type": "text", "value": "This is not a drill 💥", "raw": "This is not a drill 💥", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "HuggingChat is now multimodal with ", "raw": "HuggingChat is now multimodal with ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct", "href": null, "resource": { "type": "model", "id": "meta-llama/Llama-3.2-11B-Vision-Instruct", "discussionNum": null }, "url": "https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "! 🤗", "raw": "! 🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This also comes with multimodal assistants, I have migrated my Marcus Aurelius advice assistant to Llama-Vision and Marcus can see now! 😄", "raw": "This also comes with multimodal assistants, I have migrated my Marcus Aurelius advice assistant to Llama-Vision and Marcus can see now! 😄", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Chat with Marcus: ", "raw": "Chat with Marcus: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://hf.co/chat/assistant/65bfed22022ba290531112f8", "href": "https://hf.co/chat/assistant/65bfed22022ba290531112f8", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Start chatting with Llama-Vision 3.2 11B Instruct ", "raw": "Start chatting with Llama-Vision 3.2 11B Instruct ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/chat/models/meta-llama/Llama-3.2-11B-Vision-Instruct", "href": "https://huggingface.co/chat/models/meta-llama/Llama-3.2-11B-Vision-Instruct", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
This is not a drill 💥 HuggingChat is now multimodal with https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct! 🤗 This also comes with multimodal assistants, I have migrated my Marcus Aurelius advice assistant to Llama-Vision and Marcus can see now! 😄 Chat with Marcus: https://hf.co/chat/assistant/65bfed22022ba290531112f8 Start chatting with Llama-Vision 3.2 11B Instruct https://huggingface.co/chat/models/meta-llama/Llama-3.2-11B-Vision-Instruct
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/oIGjs1hrJsEWD67huwkeK.jpeg" } ]
[]
[ { "reaction": "🚀", "users": [ "Csplk", "John6666", "taufiqdp", "emreozer", "Walmart-the-bag", "den0620", "alielfilali01", "LeonceNsh", "baratpaim" ], "count": 9 }, { "reaction": "❤️", "users": [ "ijohn07", "BUAADreamer", "alielfilali01", "sikang99" ], "count": 4 }, { "reaction": "🔥", "users": [ "nazimali" ], "count": 1 } ]
2024-10-11T10:04:55.000Z
2024-10-11T12:00:44.917Z
[ { "avatarUrl": "/avatars/9f323b99bc740bd31725a9559404e9a5.svg", "fullname": "Ross", "name": "JamesRoss99", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/merve/802613539599324
2,838
1
745802404696558
[ { "type": "text", "value": "📣 Sentence Transformers v3.2.0 is out, marking the biggest release for inference in 2 years! 2 new backends for embedding models: ONNX (+ optimization & quantization) and OpenVINO, allowing for speedups up to 2x-3x AND Static Embeddings for 500x speedups at 10-20% accuracy cost. ", "raw": "📣 Sentence Transformers v3.2.0 is out, marking the biggest release for inference in 2 years! 2 new backends for embedding models: ONNX (+ optimization & quantization) and OpenVINO, allowing for speedups up to 2x-3x AND Static Embeddings for 500x speedups at 10-20% accuracy cost. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1️⃣ ONNX Backend: This backend uses the ONNX Runtime to accelerate model inference on both CPU and GPU, reaching up to 1.4x-3x speedup depending on the precision. We also introduce 2 helper methods for optimizing and quantizing models for (much) faster inference. ", "raw": "1️⃣ ONNX Backend: This backend uses the ONNX Runtime to accelerate model inference on both CPU and GPU, reaching up to 1.4x-3x speedup depending on the precision. We also introduce 2 helper methods for optimizing and quantizing models for (much) faster inference. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2️⃣ OpenVINO Backend: This backend uses Intel their OpenVINO instead, outperforming ONNX in some situations on CPU.", "raw": "2️⃣ OpenVINO Backend: This backend uses Intel their OpenVINO instead, outperforming ONNX in some situations on CPU.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Usage is as simple as ", "raw": "Usage is as simple as ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`SentenceTransformer(\"all-MiniLM-L6-v2\", backend=\"onnx\")`", "href": null, "resource": null, "url": null, "code": "SentenceTransformer(\"all-MiniLM-L6-v2\", backend=\"onnx\")", "user": null, "label": null, "lang": null }, { "type": "text", "value": ". Does your model not have an ONNX or OpenVINO file yet? No worries - it'll be autoexported for you. Thank me later 😉", "raw": ". Does your model not have an ONNX or OpenVINO file yet? No worries - it'll be autoexported for you. 
Thank me later 😉", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔒 Another major new feature is Static Embeddings: think word embeddings like GLoVe and word2vec, but modernized. Static Embeddings are bags of token embeddings that are summed together to create text embeddings, allowing for lightning-fast embeddings that don't require any neural networks. They're initialized in one of 2 ways:", "raw": "🔒 Another major new feature is Static Embeddings: think word embeddings like GLoVe and word2vec, but modernized. Static Embeddings are bags of token embeddings that are summed together to create text embeddings, allowing for lightning-fast embeddings that don't require any neural networks. They're initialized in one of 2 ways:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1️⃣ via Model2Vec, a new technique for distilling any Sentence Transformer models into static embeddings. Either via a pre-distilled model with ", "raw": "1️⃣ via Model2Vec, a new technique for distilling any Sentence Transformer models into static embeddings. Either via a pre-distilled model with ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`from_model2vec`", "href": null, "resource": null, "url": null, "code": "from_model2vec", "user": null, "label": null, "lang": null }, { "type": "text", "value": " or with ", "raw": " or with ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`from_distillation`", "href": null, "resource": null, "url": null, "code": "from_distillation", "user": null, "label": null, "lang": null }, { "type": "text", "value": " where you do the distillation yourself. It'll only take 5 seconds on GPU & 2 minutes on CPU, no dataset needed.", "raw": " where you do the distillation yourself. It'll only take 5 seconds on GPU & 2 minutes on CPU, no dataset needed.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2️⃣ Random initialization. This requires finetuning, but finetuning is extremely quick (e.g. I trained with 3 million pairs in 7 minutes). My final model was 6.6% worse than bge-base-en-v1.5, but 500x faster on CPU.", "raw": "2️⃣ Random initialization. This requires finetuning, but finetuning is extremely quick (e.g. I trained with 3 million pairs in 7 minutes). 
My final model was 6.6% worse than bge-base-en-v1.5, but 500x faster on CPU.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Full release notes: ", "raw": "Full release notes: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/UKPLab/sentence-transformers/releases/tag/v3.2.0", "href": "https://github.com/UKPLab/sentence-transformers/releases/tag/v3.2.0", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Documentation on Speeding up Inference: ", "raw": "Documentation on Speeding up Inference: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://sbert.net/docs/sentence_transformer/usage/efficiency.html", "href": "https://sbert.net/docs/sentence_transformer/usage/efficiency.html", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
📣 Sentence Transformers v3.2.0 is out, marking the biggest release for inference in 2 years! 2 new backends for embedding models: ONNX (+ optimization & quantization) and OpenVINO, allowing for speedups up to 2x-3x AND Static Embeddings for 500x speedups at 10-20% accuracy cost. 

1️⃣ ONNX Backend: This backend uses the ONNX Runtime to accelerate model inference on both CPU and GPU, reaching up to 1.4x-3x speedup depending on the precision. We also introduce 2 helper methods for optimizing and quantizing models for (much) faster inference. 
2️⃣ OpenVINO Backend: This backend uses Intel's OpenVINO instead, outperforming ONNX in some situations on CPU.

Usage is as simple as `SentenceTransformer("all-MiniLM-L6-v2", backend="onnx")`. Does your model not have an ONNX or OpenVINO file yet? No worries - it'll be autoexported for you. Thank me later 😉

🔒 Another major new feature is Static Embeddings: think word embeddings like GloVe and word2vec, but modernized. Static Embeddings are bags of token embeddings that are summed together to create text embeddings, allowing for lightning-fast embeddings that don't require any neural networks. They're initialized in one of 2 ways:

1️⃣ via Model2Vec, a new technique for distilling any Sentence Transformer model into static embeddings. Either via a pre-distilled model with `from_model2vec` or with `from_distillation` where you do the distillation yourself. It'll only take 5 seconds on GPU & 2 minutes on CPU, no dataset needed.
2️⃣ Random initialization. This requires finetuning, but finetuning is extremely quick (e.g. I trained with 3 million pairs in 7 minutes). My final model was 6.6% worse than bge-base-en-v1.5, but 500x faster on CPU.

Full release notes: https://github.com/UKPLab/sentence-transformers/releases/tag/v3.2.0
Documentation on Speeding up Inference: https://sbert.net/docs/sentence_transformer/usage/efficiency.html
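Editor's note: the `backend="onnx"` call above is straight from the release notes; here is a minimal end-to-end sketch assuming sentence-transformers >= 3.2.0 with the ONNX/OpenVINO extras installed. The `backend="openvino"` string mirrors the ONNX usage and is an assumption of the symmetric API; Static Embeddings follow the `from_model2vec` / `from_distillation` helpers mentioned in the post.

```python
from sentence_transformers import SentenceTransformer

sentences = ["ONNX and OpenVINO make CPU inference much faster."]

# ONNX backend: if the repo has no ONNX file yet, one is exported automatically
onnx_model = SentenceTransformer("all-MiniLM-L6-v2", backend="onnx")
print(onnx_model.encode(sentences).shape)

# OpenVINO backend: same API, different runtime (can beat ONNX on Intel CPUs)
ov_model = SentenceTransformer("all-MiniLM-L6-v2", backend="openvino")
print(ov_model.encode(sentences).shape)
```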
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6317233cc92fd6fee317e030/cJHSvvimr1kqgQfHOjO5n.png", "fullname": "Tom Aarsen", "name": "tomaarsen", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1060, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6317233cc92fd6fee317e030/7yCQOHRtsylLFshQgWwD_.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6317233cc92fd6fee317e030/d4NzK9ortnclhR_Mu3N56.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6317233cc92fd6fee317e030/3nvhZLPTM7VpnzRwBcD8a.png" } ]
[]
[ { "reaction": "🔥", "users": [ "osanseviero", "YaTharThShaRma999", "philipp-zettl", "Kutches", "DmitryRyumin", "RaulQF", "do-me", "Stopwolf", "mlabonne", "WaveCut", "Joseph717171", "cstr", "codito", "gabrielmbmb", "Tom-Neverwinter", "richardlian", "AaronBrown", "abdullahalzubaer", "shtefcs", "louisbrulenaudet", "aklepikov", "tuantm" ], "count": 22 }, { "reaction": "❤️", "users": [ "Svngoku", "dadachen", "Tom-Neverwinter", "syedia", "AaronBrown", "abdullah", "abdullahalzubaer", "shtefcs", "thomas-mayne", "louisbrulenaudet", "huggingface0", "Manel" ], "count": 12 }, { "reaction": "🚀", "users": [ "osanseviero", "YaTharThShaRma999", "John6666", "Joseph717171", "Tom-Neverwinter", "Siddish", "abdullahalzubaer", "shtefcs", "NickyNicky" ], "count": 9 } ]
2024-10-10T18:30:03.000Z
2024-10-11T23:11:48.122Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/TwR65k1JgO_t3l4pM1UjA.png", "fullname": "Stefan Smiljkovic", "name": "shtefcs", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false } ]
/posts/tomaarsen/745802404696558
6,354
1
109226691752296
[ { "type": "text", "value": "The Hugging Face Hub hosts over 1.5M Model, Dataset, and Space repositories. To scale to 10M+, the XetHub team (", "raw": "The Hugging Face Hub hosts over 1.5M Model, Dataset, and Space repositories. To scale to 10M+, the XetHub team (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/xet-team", "href": "https://huggingface.co/xet-team", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ") is replacing Git LFS with a new technology that improves storage and transfer capabilities with some future developer experience benefits to boot.", "raw": ") is replacing Git LFS with a new technology that improves storage and transfer capabilities with some future developer experience benefits to boot.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thanks to ", "raw": "Thanks to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@yuchenglow", "href": null, "resource": null, "url": null, "code": null, "user": "yuchenglow", "label": null, "lang": null }, { "type": "text", "value": " and ", "raw": " and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@port8080", "href": null, "resource": null, "url": null, "code": null, "user": "port8080", "label": null, "lang": null }, { "type": "text", "value": " (for their analysis covering LFS usage from March 2022–Sept 2024), we now have insights into what we’re storing. Check out the Gradio app to explore:", "raw": " (for their analysis covering LFS usage from March 2022–Sept 2024), we now have insights into what we’re storing. 
Check out the Gradio app to explore:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Storage growth over time", "raw": "- Storage growth over time", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- File types over all repositories", "raw": "- File types over all repositories", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Some simple optimizations we're investigating", "raw": "- Some simple optimizations we're investigating", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/xet-team/lfs-analysis", "href": null, "resource": { "type": "space", "id": "xet-team/lfs-analysis", "discussionNum": null }, "url": "https://huggingface.co/spaces/xet-team/lfs-analysis", "code": null, "user": null, "label": null, "lang": null } ]
The Hugging Face Hub hosts over 1.5M Model, Dataset, and Space repositories. To scale to 10M+, the XetHub team (https://huggingface.co/xet-team) is replacing Git LFS with a new technology that improves storage and transfer capabilities with some future developer experience benefits to boot. Thanks to @yuchenglow and @port8080 (for their analysis covering LFS usage from March 2022–Sept 2024), we now have insights into what we’re storing. Check out the Gradio app to explore: - Storage growth over time - File types over all repositories - Some simple optimizations we're investigating https://huggingface.co/spaces/xet-team/lfs-analysis
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d50e9ef9cbfa798c590004/FlVe8chafigMfrPpMeJRL.jpeg", "fullname": "Jared Sulzdorf", "name": "jsulz", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 47, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65e77dcc714ce98ddd82568e/KhIkyM1Hc00t3zAqIaDoH.jpeg", "fullname": "Banerjee", "name": "port8080", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 11 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66ac094a8fc00b5c160d7da4/1-DnsQ0zlyTA-18bncHbt.jpeg", "fullname": "yuchenglow", "name": "yuchenglow", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 26 } ]
[ { "reaction": "🔥", "users": [ "erinys", "John6666", "Nymbo" ], "count": 3 }, { "reaction": "🤗", "users": [ "Aurelien-Morgan" ], "count": 1 } ]
2024-10-10T18:12:21.000Z
2024-10-10T18:12:21.229Z
[]
/posts/jsulz/109226691752296
1,650
0
809715889668727
[ { "type": "text", "value": "NEW - Inference Playground", "raw": "NEW - Inference Playground", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Maybe like me you have always wanted a super easy way to compare llama3.2-1B vs. llama3.2-3B? or the same model with different temperatures?", "raw": "Maybe like me you have always wanted a super easy way to compare llama3.2-1B vs. llama3.2-3B? or the same model with different temperatures?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Trying and comparing warm Inference API models has never been easier!", "raw": "Trying and comparing warm Inference API models has never been easier!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Just go to ", "raw": "Just go to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://hf.co/playground", "href": "https://hf.co/playground", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ", set your token and you're ready to go.", "raw": ", set your token and you're ready to go.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We'll keep improving, feedback welcome 😊", "raw": "We'll keep improving, feedback welcome 😊", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
NEW - Inference Playground Maybe like me you have always wanted a super easy way to compare llama3.2-1B vs. llama3.2-3B? or the same model with different temperatures? Trying and comparing warm Inference API models has never been easier! Just go to https://hf.co/playground, set your token and you're ready to go. We'll keep improving, feedback welcome 😊
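A minimal script-level sketch of the same kind of comparison, assuming the `huggingface_hub` client and an `HF_TOKEN` environment variable; the model ids and sampling settings below are illustrative, not prescribed by the post (the Playground itself is a UI and needs no code):

```python
# Minimal sketch: compare two warm Inference API models on the same prompt.
# Assumes `huggingface_hub` is installed and HF_TOKEN is set in the environment;
# model ids, max_tokens and temperature are illustrative choices.
import os
from huggingface_hub import InferenceClient

client = InferenceClient(token=os.environ["HF_TOKEN"])
prompt = [{"role": "user", "content": "Explain KV caching in two sentences."}]

for model in ("meta-llama/Llama-3.2-1B-Instruct", "meta-llama/Llama-3.2-3B-Instruct"):
    out = client.chat_completion(
        messages=prompt, model=model, max_tokens=128, temperature=0.7
    )
    print(f"--- {model} ---")
    print(out.choices[0].message.content)
```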
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2607, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f17f0a0925b9863e28ad517/JYpXQyk7kl1j17U9-Ry5c.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5f17f0a0925b9863e28ad517/RXIlAUQMJswSSh-0i8etk.png" } ]
[]
[ { "reaction": "🤗", "users": [ "adamelliotfields", "ZeroXClem", "KingNish", "cfahlgren1", "John6666", "Kquant03", "WE52", "jeffboudier", "not-lain", "AtAndDev", "alielfilali01", "louisbrulenaudet", "Nymbo" ], "count": 13 }, { "reaction": "🔥", "users": [ "ZeroXClem", "cfahlgren1", "John6666", "Kquant03", "WE52", "jeffboudier", "not-lain", "AtAndDev", "alielfilali01" ], "count": 9 }, { "reaction": "❤️", "users": [ "John6666", "WE52", "jeffboudier", "not-lain", "AtAndDev", "alielfilali01" ], "count": 6 }, { "reaction": "🚀", "users": [ "John6666", "WE52", "jeffboudier", "not-lain", "AtAndDev", "alielfilali01" ], "count": 6 }, { "reaction": "➕", "users": [ "John6666", "WE52", "not-lain", "AtAndDev", "alielfilali01" ], "count": 5 }, { "reaction": "👍", "users": [ "John6666", "WE52", "not-lain", "AtAndDev" ], "count": 4 }, { "reaction": "🤝", "users": [ "WE52", "not-lain", "AtAndDev" ], "count": 3 } ]
2024-10-10T16:43:08.000Z
2024-10-15T17:45:14.764Z
[ { "avatarUrl": "/avatars/5e9554ba5afb386c170ff66ab9c8c363.svg", "fullname": "Andrea Altomani", "name": "andreaaltomani", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2607, "isFollowing": false } ]
/posts/victor/809715889668727
2,658
2
158022746603972
[ { "type": "text", "value": "Rhymes AI drops Aria: small Multimodal MoE that beats GPT-4o and Gemini-1.5-Flash ⚡️", "raw": "Rhymes AI drops Aria: small Multimodal MoE that beats GPT-4o and Gemini-1.5-Flash ⚡️", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "New player entered the game! Rhymes AI has just been announced, and unveiled Aria – a multimodal powerhouse that's punching above its weight.", "raw": "New player entered the game! Rhymes AI has just been announced, and unveiled Aria – a multimodal powerhouse that's punching above its weight.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key insights:", "raw": "Key insights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🧠 Mixture-of-Experts architecture: 25.3B total params, but only 3.9B active.", "raw": "🧠 Mixture-of-Experts architecture: 25.3B total params, but only 3.9B active.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🌈 Multimodal: text/image/video → text.", "raw": "🌈 Multimodal: text/image/video → text.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📚 Novel training approach: “multimodal-native” where multimodal training starts directly during pre-training, not just tacked on later", "raw": "📚 Novel training approach: “multimodal-native” where multimodal training starts directly during pre-training, not just tacked on later", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": 
"new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📏 Long 64K token context window", "raw": "📏 Long 64K token context window", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔓 Apache 2.0 license, with weights, code, and demos all open", "raw": "🔓 Apache 2.0 license, with weights, code, and demos all open", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚡️ On the benchmark side, Aria leaves some big names in the dust.", "raw": "⚡️ On the benchmark side, Aria leaves some big names in the dust.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- It beats Pixtral 12B or Llama-3.2-12B on several vision benchmarks like MMMU or MathVista.", "raw": "- It beats Pixtral 12B or Llama-3.2-12B on several vision benchmarks like MMMU or MathVista.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- It even overcomes the much bigger GPT-4o on long video tasks and even outshines Gemini 1.5 Flash when it comes to parsing lengthy documents.", "raw": "- It even overcomes the much bigger GPT-4o on long video tasks and even outshines Gemini 1.5 Flash when it comes to parsing lengthy documents.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "But Rhymes AI isn't just showing off benchmarks. They've already got Aria powering a real-world augmented search app called “Beago”. It’s handling even recent events with great accuracy!", "raw": "But Rhymes AI isn't just showing off benchmarks. They've already got Aria powering a real-world augmented search app called “Beago”. 
It’s handling even recent events with great accuracy!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And they partnered with AMD to make it much faster than competitors like Perplexity or Gemini search.", "raw": "And they partnered with AMD to make it much faster than competitors like Perplexity or Gemini search.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read their paper for Aria 👉 ", "raw": "Read their paper for Aria 👉 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2410.05993", "href": null, "resource": { "type": "paper", "id": "2410.05993", "discussionNum": null }, "url": "https://huggingface.co/papers/2410.05993", "code": null, "user": null, "label": "Aria: An Open Multimodal Native Mixture-of-Experts Model (2410.05993)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Try BeaGo 🐶 👉 ", "raw": "Try BeaGo 🐶 👉 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://rhymes.ai/blog-details/introducing-beago-your-smarter-faster-ai-search", "href": "https://rhymes.ai/blog-details/introducing-beago-your-smarter-faster-ai-search", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Rhymes AI drops Aria: small Multimodal MoE that beats GPT-4o and Gemini-1.5-Flash ⚡️ New player entered the game! Rhymes AI has just been announced, and unveiled Aria – a multimodal powerhouse that's punching above its weight. Key insights: 🧠 Mixture-of-Experts architecture: 25.3B total params, but only 3.9B active. 🌈 Multimodal: text/image/video → text. 📚 Novel training approach: “multimodal-native” where multimodal training starts directly during pre-training, not just tacked on later 📏 Long 64K token context window 🔓 Apache 2.0 license, with weights, code, and demos all open ⚡️ On the benchmark side, Aria leaves some big names in the dust. - It beats Pixtral 12B or Llama-3.2-12B on several vision benchmarks like MMMU or MathVista. - It even overcomes the much bigger GPT-4o on long video tasks and even outshines Gemini 1.5 Flash when it comes to parsing lengthy documents. But Rhymes AI isn't just showing off benchmarks. They've already got Aria powering a real-world augmented search app called “Beago”. It’s handling even recent events with great accuracy! And they partnered with AMD to make it much faster than competitors like Perplexity or Gemini search. Read their paper for Aria 👉 https://huggingface.co/papers/2410.05993 Try BeaGo 🐶 👉 https://rhymes.ai/blog-details/introducing-beago-your-smarter-faster-ai-search
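A minimal loading sketch with transformers, assuming the weights are published as `rhymes-ai/Aria` and that you accept the repo's custom modeling code; the full multimodal chat-template usage is on the model card and may differ by version:

```python
# Minimal sketch: load the Aria checkpoint with transformers.
# "rhymes-ai/Aria" is an assumed repo id; the model has 25.3B total params,
# so expect multi-GPU or heavy offloading despite only 3.9B active params.
import torch
from transformers import AutoModelForCausalLM, AutoProcessor

model_id = "rhymes-ai/Aria"  # assumed repo id; check the model card
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,  # Aria ships custom modeling code
)
```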
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/1RiEQ_wA4b_YI4HweDGiQ.png" } ]
[]
[ { "reaction": "🔥", "users": [ "YaTharThShaRma999", "dlflannery", "jordivcb", "reach-vb", "iojvsuynv", "KingNish", "victor", "dishank002", "IMFDEtienne", "nina-summer", "teowu" ], "count": 11 }, { "reaction": "👀", "users": [ "John6666", "DataSoul", "reach-vb", "C0casio45", "iojvsuynv", "victor", "v000000", "louisbrulenaudet" ], "count": 8 }, { "reaction": "🤝", "users": [ "nina-summer" ], "count": 1 } ]
2024-10-10T09:51:31.000Z
2024-10-10T19:26:36.766Z
[ { "avatarUrl": "/avatars/4b3f3b8fe4ab980cbfaab52afe52dfc9.svg", "fullname": "Aleks", "name": "aleksfinn23", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/m-ric/158022746603972
2,916
1
917454968346786
[ { "type": "text", "value": "NEW: Open Source Text/ Image to video model is out - MIT licensed - Rivals Gen-3, Pika & Kling 🔥", "raw": "NEW: Open Source Text/ Image to video model is out - MIT licensed - Rivals Gen-3, Pika & Kling 🔥", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> Pyramid Flow: Training-efficient Autoregressive Video Generation method", "raw": "> Pyramid Flow: Training-efficient Autoregressive Video Generation method", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> Utilizes Flow Matching", "raw": "> Utilizes Flow Matching", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> Trains on open-source datasets", "raw": "> Trains on open-source datasets", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> Generates high-quality 10-second videos", "raw": "> Generates high-quality 10-second videos", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> Video resolution: 768p", "raw": "> Video resolution: 768p", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> Frame rate: 24 FPS", "raw": "> Frame rate: 24 FPS", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> Supports image-to-video generation", "raw": "> Supports image-to-video generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "> Model checkpoints available on the hub 🤗: ", "raw": "> Model checkpoints available on the hub 🤗: ", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/rain1011/pyramid-flow-sd3", "href": null, "resource": { "type": "model", "id": "rain1011/pyramid-flow-sd3", "discussionNum": null }, "url": "https://huggingface.co/rain1011/pyramid-flow-sd3", "code": null, "user": null, "label": null, "lang": null } ]
NEW: Open Source Text/Image to video model is out - MIT licensed - Rivals Gen-3, Pika & Kling 🔥 > Pyramid Flow: Training-efficient Autoregressive Video Generation method > Utilizes Flow Matching > Trains on open-source datasets > Generates high-quality 10-second videos > Video resolution: 768p > Frame rate: 24 FPS > Supports image-to-video generation > Model checkpoints available on the hub 🤗: https://huggingface.co/rain1011/pyramid-flow-sd3
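A minimal download sketch with `huggingface_hub`, assuming only the checkpoint repo id given above; the generation pipeline itself lives in the Pyramid Flow project code, so this only covers fetching the weights:

```python
# Minimal sketch: pull the Pyramid Flow checkpoint locally.
# The local_dir path is illustrative; pick any writable location.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="rain1011/pyramid-flow-sd3",
    local_dir="./pyramid-flow-sd3",
)
print("Checkpoint downloaded to:", local_dir)
```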
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1655385361868-61b85ce86eb1f2c5e6233736.jpeg", "fullname": "Vaibhav Srivastav", "name": "reach-vb", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 460, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/61b85ce86eb1f2c5e6233736/7fTuWxBKAlkaPIDJgtPJ5.mp4" } ]
[]
[ { "reaction": "👍", "users": [ "lab212", "YaTharThShaRma999", "roger-temp", "natalie5", "victor", "tonynoce", "MAsad789565", "zhang123123", "den0620", "WaveCut" ], "count": 10 }, { "reaction": "🔥", "users": [ "YaTharThShaRma999", "KingNish", "adamelliotfields", "victor", "jaigurudev", "RalphX1", "tolgacangoz" ], "count": 7 }, { "reaction": "👀", "users": [ "John6666", "YaTharThShaRma999", "jaigurudev" ], "count": 3 } ]
2024-10-10T09:02:01.000Z
2024-10-10T09:02:01.744Z
[]
/posts/reach-vb/917454968346786
3,061
0
653106772597209
[ { "type": "text", "value": "🎓 Introducing Doc4web.ru Documents Dataset - ", "raw": "🎓 Introducing Doc4web.ru Documents Dataset - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/nyuuzyou/doc4web", "href": null, "resource": { "type": "dataset", "id": "nyuuzyou/doc4web", "discussionNum": null }, "url": "https://huggingface.co/datasets/nyuuzyou/doc4web", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset highlights:", "raw": "Dataset highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 223,739 documents from doc4web.ru, a document hosting platform for students and teachers", "raw": "- 223,739 documents from doc4web.ru, a document hosting platform for students and teachers", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Primarily in Russian, with some English and potentially other languages", "raw": "- Primarily in Russian, with some English and potentially other languages", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Each entry includes: URL, title, download link, file path, and content (where available)", "raw": "- Each entry includes: URL, title, download link, file path, and content (where available)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Contains original document files in addition to metadata", "raw": "- Contains original document files in addition to metadata", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Data reflects a wide range of educational topics and materials", "raw": "- Data reflects a wide range of educational topics and materials", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Licensed under Creative Commons Zero (CC0) for unrestricted use", "raw": "- 
Licensed under Creative Commons Zero (CC0) for unrestricted use", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The dataset can be used for analyzing educational content in Russian, text classification tasks, and information retrieval systems. It's also valuable for examining trends in educational materials and document sharing practices in the Russian-speaking academic community. The inclusion of original files allows for in-depth analysis of various document formats and structures.", "raw": "The dataset can be used for analyzing educational content in Russian, text classification tasks, and information retrieval systems. It's also valuable for examining trends in educational materials and document sharing practices in the Russian-speaking academic community. The inclusion of original files allows for in-depth analysis of various document formats and structures.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🎓 Introducing Doc4web.ru Documents Dataset - https://huggingface.co/datasets/nyuuzyou/doc4web Dataset highlights: - 223,739 documents from doc4web.ru, a document hosting platform for students and teachers - Primarily in Russian, with some English and potentially other languages - Each entry includes: URL, title, download link, file path, and content (where available) - Contains original document files in addition to metadata - Data reflects a wide range of educational topics and materials - Licensed under Creative Commons Zero (CC0) for unrestricted use The dataset can be used for analyzing educational content in Russian, text classification tasks, and information retrieval systems. It's also valuable for examining trends in educational materials and document sharing practices in the Russian-speaking academic community. The inclusion of original files allows for in-depth analysis of various document formats and structures.
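A minimal streaming sketch with the `datasets` library; the split name and field names are assumptions, so check the dataset card for the actual schema before relying on specific keys:

```python
# Minimal sketch: stream a few doc4web records without downloading everything.
from datasets import load_dataset

ds = load_dataset("nyuuzyou/doc4web", split="train", streaming=True)  # split assumed
for i, row in enumerate(ds):
    # Truncate long values (e.g. document content) so the peek stays readable.
    print({k: str(v)[:80] for k, v in row.items()})
    if i >= 2:
        break
```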
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png", "fullname": "nyuuzyou", "name": "nyuuzyou", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 57, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "osanseviero", "John6666", "victor", "DmitryRyumin", "WaveCut", "DeathGodlike", "den0620", "louisbrulenaudet" ], "count": 8 }, { "reaction": "❤️", "users": [ "d0rj", "aleksfinn23", "IlyaGusev", "DeathGodlike", "den0620" ], "count": 5 } ]
2024-10-10T07:58:42.000Z
2024-10-10T07:58:42.561Z
[]
/posts/nyuuzyou/653106772597209
1,943
0
153233218916477
[ { "type": "text", "value": "This is how AI can be useful in journalism: Just tested DataTalk - a tool that lets you dig through campaign finance data with just your words. ", "raw": "This is how AI can be useful in journalism: Just tested DataTalk - a tool that lets you dig through campaign finance data with just your words. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It's transforming complex FEC filings and OpenSecrets datasets into actionable insights for journalists.", "raw": "It's transforming complex FEC filings and OpenSecrets datasets into actionable insights for journalists.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key features for newsrooms:", "raw": "Key features for newsrooms:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Natural language queries on FEC data", "raw": "- Natural language queries on FEC data", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Rapid insights on donors, spending, special interests", "raw": "- Rapid insights on donors, spending, special interests", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- SQL access for deep dives", "raw": "- SQL access for deep dives", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Tested it out:", "raw": "Tested it out:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Retrieved how much Harris and Trump raised", "raw": "- Retrieved how much Harris and Trump raised", "href": null, "resource": null, "url": 
null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Found top donors instantly (#1 is Timothy Mellon—have you heard about him?)", "raw": "- Found top donors instantly (#1 is Timothy Mellon—have you heard about him?)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Uncovered big self-funders like David Trone ($62M)", "raw": "- Uncovered big self-funders like David Trone ($62M)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Pros:", "raw": "Pros:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Saves hours of data wrangling", "raw": "- Saves hours of data wrangling", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Surfaces story leads quickly", "raw": "- Surfaces story leads quickly", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Transparent AI retrieving steps makes this tool auditable", "raw": "- Transparent AI retrieving steps makes this tool auditable", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Awesome work by Stanford University Open Virtual Assistant Lab, Big Local News, and Columbia University - Graduate School of Journalism. Expert-guided.", "raw": "Awesome work by Stanford University Open Virtual Assistant Lab, Big Local News, and Columbia University - Graduate School of Journalism. 
Expert-guided.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Remember: Always verify. Use for leads, not final copy. But this is gold for finding new leads.", "raw": "Remember: Always verify. Use for leads, not final copy. But this is gold for finding new leads.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "How might this change campaign finance reporting? What other datasets need this treatment?", "raw": "How might this change campaign finance reporting? What other datasets need this treatment?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Try it out: ", "raw": "Try it out: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.datatalk.genie.stanford.edu/", "href": "https://www.datatalk.genie.stanford.edu/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "#AIJournalism #campaignfinance #datajournalism #election2024", "raw": "#AIJournalism #campaignfinance #datajournalism #election2024", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
This is how AI can be useful in journalism: Just tested DataTalk - a tool that lets you dig through campaign finance data with just your words. It's transforming complex FEC filings and OpenSecrets datasets into actionable insights for journalists. Key features for newsrooms: - Natural language queries on FEC data - Rapid insights on donors, spending, special interests - SQL access for deep dives Tested it out: - Retrieved how much Harris and Trump raised - Found top donors instantly (#1 is Timothy Mellon—have you heard about him?) - Uncovered big self-funders like David Trone ($62M) Pros: - Saves hours of data wrangling - Surfaces story leads quickly - Transparent AI retrieving steps makes this tool auditable Awesome work by Stanford University Open Virtual Assistant Lab, Big Local News, and Columbia University - Graduate School of Journalism. Expert-guided. Remember: Always verify. Use for leads, not final copy. But this is gold for finding new leads. How might this change campaign finance reporting? What other datasets need this treatment? Try it out: https://www.datatalk.genie.stanford.edu/ #AIJournalism #campaignfinance #datajournalism #election2024
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/JjGr_uBOoirZcS-FbAuS9.mp4" } ]
[]
[ { "reaction": "🔥", "users": [ "osanseviero", "John6666", "xi0v", "sometimesanotion", "jsulz", "Heresynetwork" ], "count": 6 }, { "reaction": "🚀", "users": [ "Heresynetwork" ], "count": 1 } ]
2024-10-09T17:54:34.000Z
2024-10-10T18:24:12.028Z
[]
/posts/fdaudens/153233218916477
1,971
0
134503456869052
[ { "type": "text", "value": "Microsoft released a method that allows you to vectorize word vectors themselves! It is called VPTQ. You can check out their full paper including the method and all of the math for the algorithm, or you can watch this video where I did all of that for you, then reconstructed their entire method within Python! ", "raw": "Microsoft released a method that allows you to vectorize word vectors themselves! It is called VPTQ. You can check out their full paper including the method and all of the math for the algorithm, or you can watch this video where I did all of that for you, then reconstructed their entire method within Python! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/YwlKzV1y62s", "href": "https://youtu.be/YwlKzV1y62s", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Microsoft released a method that allows you to vectorize word vectors themselves! It is called VPTQ. You can check out their full paper including the method and all of the math for the algorithm, or you can watch this video where I did all of that for you, then reconstructed their entire method within Python! https://youtu.be/YwlKzV1y62s
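A toy sketch of the general idea behind vector-quantizing weight sub-vectors with a learned codebook; this is not the actual VPTQ algorithm (the paper adds its own optimization on top), just the lookup-table compression it builds on, with scikit-learn k-means standing in for the codebook learning:

```python
# Toy illustration: split a weight matrix into short sub-vectors, learn a
# codebook with k-means, and store only the codebook plus one index per
# sub-vector. Dequantization is a simple table lookup.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
W = rng.normal(size=(256, 256)).astype(np.float32)   # stand-in weight matrix

group = 8                                            # sub-vector length
vectors = W.reshape(-1, group)                       # (8192, 8) sub-vectors
kmeans = KMeans(n_clusters=256, n_init=4, random_state=0).fit(vectors)

codebook = kmeans.cluster_centers_                   # 256 x 8 float centroids
indices = kmeans.labels_.astype(np.uint8)            # one byte per sub-vector

W_hat = codebook[indices].reshape(W.shape)           # reconstruct by lookup
print("reconstruction MSE:", float(((W - W_hat) ** 2).mean()))
```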
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "LeroyDyer", "John6666", "C0casio45", "Nothnoth", "SVHawk13", "DmitryRyumin", "elec3647" ], "count": 7 } ]
2024-10-09T16:56:14.000Z
2024-10-12T02:16:57.224Z
[ { "avatarUrl": "/avatars/7e26cfd48dccef52587739988a9114cf.svg", "fullname": "Roberto de Jesús Alfaro López", "name": "Alfarrow", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6316fb937b0ee0136e5f1220/poHBoJ7QAF_s2CCaosdvQ.jpeg", "fullname": "Firstname Lastname", "name": "takeraparterer", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 29, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false } ]
/posts/TuringsSolutions/134503456869052
1,646
10
995907345276240
[ { "type": "text", "value": "AI Agents LlamaIndex in 40 minutes", "raw": "AI Agents LlamaIndex in 40 minutes", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The video covers code and workflow explanations for:", "raw": "The video covers code and workflow explanations for:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Function Calling", "raw": "- Function Calling", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Function Calling Agents + Agent Runner", "raw": "- Function Calling Agents + Agent Runner", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Agentic RAG", "raw": "- Agentic RAG", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- REAcT Agent: Build your own Search Assistant Agent", "raw": "- REAcT Agent: Build your own Search Assistant Agent", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Watch: ", "raw": "Watch: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/bHn4dLJYIqE", "href": "https://youtu.be/bHn4dLJYIqE", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
AI Agents LlamaIndex in 40 minutes The video covers code and workflow explanations for: - Function Calling - Function Calling Agents + Agent Runner - Agentic RAG - REAcT Agent: Build your own Search Assistant Agent Watch: https://youtu.be/bHn4dLJYIqE
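A minimal ReAct-agent sketch in the spirit of the video; the import paths match llama-index ~0.10 and may differ in other versions, and the OpenAI backend plus the `multiply` tool are illustrative assumptions, not the exact code from the tutorial:

```python
# Minimal sketch: a ReAct agent with one function-calling tool in LlamaIndex.
from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI  # any supported LLM backend works

def multiply(a: float, b: float) -> float:
    """Multiply two numbers and return the result."""
    return a * b

tools = [FunctionTool.from_defaults(fn=multiply)]
agent = ReActAgent.from_tools(tools, llm=OpenAI(model="gpt-4o-mini"), verbose=True)

print(agent.chat("What is 21.5 times 4?"))
```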
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/630f3058236215d0b7078806/TRTdqAZpT1bJg_RvGgxlg.jpeg", "fullname": "Tarun Jain", "name": "lucifertrj", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 23, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "osanseviero" ], "count": 2 } ]
2024-10-09T14:28:02.000Z
2024-10-09T14:28:21.207Z
[]
/posts/lucifertrj/995907345276240
1,499
0
757255829741308
[ { "type": "text", "value": "Why nobdoy is talking about the new training corpus released by MBZUAI today.", "raw": "Why nobdoy is talking about the new training corpus released by MBZUAI today.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "TxT360 is +15 Trillion tokens corpus outperforming FineWeb on several metrics. Ablation studies were done up to 1T tokens.", "raw": "TxT360 is +15 Trillion tokens corpus outperforming FineWeb on several metrics. Ablation studies were done up to 1T tokens.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read blog here : ", "raw": "Read blog here : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/LLM360/TxT360", "href": null, "resource": { "type": "space", "id": "LLM360/TxT360", "discussionNum": null }, "url": "https://huggingface.co/spaces/LLM360/TxT360", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset : ", "raw": "Dataset : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/LLM360/TxT360", "href": null, "resource": { "type": "dataset", "id": "LLM360/TxT360", "discussionNum": null }, "url": "https://huggingface.co/datasets/LLM360/TxT360", "code": null, "user": null, "label": null, "lang": null } ]
Why is nobody talking about the new training corpus released by MBZUAI today? TxT360 is a +15 trillion-token corpus outperforming FineWeb on several metrics. Ablation studies were done up to 1T tokens. Read blog here: https://huggingface.co/spaces/LLM360/TxT360 Dataset: https://huggingface.co/datasets/LLM360/TxT360
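A minimal peek at the corpus via streaming, assuming the default config and a `train` split (check the dataset card if either assumption is wrong); with +15T tokens you almost certainly do not want a full download:

```python
# Minimal sketch: inspect one TxT360 record without downloading the corpus.
from datasets import load_dataset

ds = load_dataset("LLM360/TxT360", split="train", streaming=True)  # split assumed
sample = next(iter(ds))
print(list(sample.keys()))      # see which fields a record carries
print(str(sample)[:500])        # preview the first record, truncated
```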
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626237d9bbcbd1c34f1bb231/EJrOjvAL-68qMCYdnvOrq.png", "fullname": "Ali El Filali", "name": "alielfilali01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 186, "isFollowing": false }
[]
[]
[ { "reaction": "🔥", "users": [ "YaTharThShaRma999", "John6666", "osanseviero", "Felladrin", "Joseph717171", "win10" ], "count": 6 }, { "reaction": "🤗", "users": [ "Joseph717171" ], "count": 1 }, { "reaction": "❤️", "users": [ "ZeroWw" ], "count": 1 } ]
2024-10-09T13:18:37.000Z
2024-10-10T01:19:51.490Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626237d9bbcbd1c34f1bb231/EJrOjvAL-68qMCYdnvOrq.png", "fullname": "Ali El Filali", "name": "alielfilali01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 186, "isFollowing": false }, { "avatarUrl": "/avatars/ea4398745974d781ae9dc0e95b12cabe.svg", "fullname": "Joseph", "name": "Joseph717171", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 22, "isFollowing": false } ]
/posts/alielfilali01/757255829741308
1,822
2
762849096257063
[ { "type": "text", "value": "📢 Two weeks ago I got a chance to share the most recent reasoning 🧠 capabilities of Large Language models in Sentiment Analysis NLPSummit-2024.", "raw": "📢 Two weeks ago I got a chance to share the most recent reasoning 🧠 capabilities of Large Language models in Sentiment Analysis NLPSummit-2024.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For those who missed and still wish to find out the advances of GenAI in that field, the recording is now available:", "raw": "For those who missed and still wish to find out the advances of GenAI in that field, the recording is now available:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.youtube.com/watch?v=qawLJsRHzB4", "href": "https://www.youtube.com/watch?v=qawLJsRHzB4", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You will be aware of:", "raw": "You will be aware of:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "☑️ how well LLMs reasoning can be used for reasoning in sentiment analysis as in Zero-shot-Learning,", "raw": "☑️ how well LLMs reasoning can be used for reasoning in sentiment analysis as in Zero-shot-Learning,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "☑️ how to improve reasoning by applying and leaving step-by-step chains (Chain-of-Thought)", "raw": "☑️ how to improve reasoning by applying and leaving step-by-step chains (Chain-of-Thought)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "☑️ how to prepare the most advanced model in sentiment analysis using Chain-of-Thought.", "raw": "☑️ how to prepare the most advanced model in sentiment analysis using Chain-of-Thought.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, 
"code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Links:", "raw": "Links:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📜 Paper: ", "raw": "📜 Paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2404.12342", "href": null, "resource": { "type": "paper", "id": "2404.12342", "discussionNum": null }, "url": "https://huggingface.co/papers/2404.12342", "code": null, "user": null, "label": "Large Language Models in Targeted Sentiment Analysis (2404.12342)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⭐ Code: ", "raw": "⭐ Code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/Reasoning-for-Sentiment-Analysis-Framework", "href": "https://github.com/nicolay-r/Reasoning-for-Sentiment-Analysis-Framework", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
📢 Two weeks ago I got a chance to share the most recent reasoning 🧠 capabilities of Large Language Models in sentiment analysis at NLPSummit-2024. For those who missed it and still wish to learn about the advances of GenAI in that field, the recording is now available: https://www.youtube.com/watch?v=qawLJsRHzB4 You will learn: ☑️ how well LLM reasoning can be used for sentiment analysis in a zero-shot-learning setup, ☑️ how to improve reasoning by applying and leaving step-by-step chains (Chain-of-Thought), ☑️ how to prepare the most advanced model in sentiment analysis using Chain-of-Thought. Links: 📜 Paper: https://huggingface.co/papers/2404.12342 ⭐ Code: https://github.com/nicolay-r/Reasoning-for-Sentiment-Analysis-Framework
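A generic, illustrative chain-of-thought prompt for targeted (entity-level) sentiment, in the spirit of the talk; the actual prompt chains used in the paper and framework differ, so see the linked repository for the real templates:

```python
# Illustrative only: a simple step-by-step prompt for sentiment toward a target.
def cot_sentiment_prompt(text: str, target: str) -> str:
    return (
        f"Text: {text}\n"
        f"Target entity: {target}\n"
        "Step 1: What does the text say about the target?\n"
        "Step 2: Does the author express a positive, negative, or neutral attitude toward it?\n"
        "Step 3: Answer with exactly one label: positive, negative, or neutral."
    )

print(cot_sentiment_prompt("The new policy helped farmers but hurt exporters.", "farmers"))
```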
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/2HGV1yZmAjdHt6ccJJMik.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/CwpFgy2sgk7U6s36ZSLUm.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/LnFG03hCO6BME9Epd02_h.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/-VXH0OWvPj-3kvLCrvK4H.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/rq8xryuanvvmq91bS7sjR.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/In84TAiZ3p9xizI_eV_1_.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/aVU1bEK15AjAhyL9OqbD1.png" } ]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 }, { "reaction": "🧠", "users": [ "lab212" ], "count": 1 }, { "reaction": "👍", "users": [ "sugatoray" ], "count": 1 } ]
2024-10-09T12:31:12.000Z
2024-10-09T12:32:17.008Z
[]
/posts/nicolay-r/762849096257063
1,006
0
688270400940064
[ { "type": "text", "value": "On-device AI framework ecosystem is blooming these days:", "raw": "On-device AI framework ecosystem is blooming these days:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. llama.cpp - All things Whisper, LLMs & VLMs - run across Metal, CUDA and other backends (AMD/ NPU etc)", "raw": "1. llama.cpp - All things Whisper, LLMs & VLMs - run across Metal, CUDA and other backends (AMD/ NPU etc)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/ggerganov/llama.cpp", "href": "https://github.com/ggerganov/llama.cpp", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. MLC - Deploy LLMs across platforms especially WebGPU (fastest WebGPU LLM implementation out there)", "raw": "2. MLC - Deploy LLMs across platforms especially WebGPU (fastest WebGPU LLM implementation out there)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/mlc-ai/web-llm", "href": "https://github.com/mlc-ai/web-llm", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. MLX - Arguably the fastest general purpose framework (Mac only) - Supports all major Image Generation (Flux, SDXL, etc), Transcription (Whisper), LLMs", "raw": "3. 
MLX - Arguably the fastest general purpose framework (Mac only) - Supports all major Image Generation (Flux, SDXL, etc), Transcription (Whisper), LLMs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/ml-explore/mlx-examples", "href": "https://github.com/ml-explore/mlx-examples", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Candle - Cross-platform general purpose framework written in Rust - wide coverage across model categories", "raw": "4. Candle - Cross-platform general purpose framework written in Rust - wide coverage across model categories", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/huggingface/candle", "href": "https://github.com/huggingface/candle", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Honorable mentions:", "raw": "Honorable mentions:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Transformers.js - Javascript (WebGPU) implementation built on top of ONNXruntimeweb", "raw": "1. Transformers.js - Javascript (WebGPU) implementation built on top of ONNXruntimeweb", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/xenova/transformers.js", "href": "https://github.com/xenova/transformers.js", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. 
Mistral rs - Rust implementation for LLMs & VLMs, built on top of Candle", "raw": "2. Mistral rs - Rust implementation for LLMs & VLMs, built on top of Candle", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/EricLBuehler/mistral.rs", "href": "https://github.com/EricLBuehler/mistral.rs", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Ratchet - Cross platform, rust based WebGPU framework built for battle-tested deployments", "raw": "3. Ratchet - Cross platform, rust based WebGPU framework built for battle-tested deployments", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/huggingface/ratchet", "href": "https://github.com/huggingface/ratchet", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Zml - Cross platform, Zig based ML framework", "raw": "4. Zml - Cross platform, Zig based ML framework", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/zml/zml", "href": "https://github.com/zml/zml", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Looking forward to how the ecosystem would look 1 year from now - Quite bullish on the top 4 atm - but open source ecosystem changes quite a bit! 🤗", "raw": "Looking forward to how the ecosystem would look 1 year from now - Quite bullish on the top 4 atm - but open source ecosystem changes quite a bit! 
🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Also, which frameworks did I miss?", "raw": "Also, which frameworks did I miss?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
On-device AI framework ecosystem is blooming these days: 1. llama.cpp - All things Whisper, LLMs & VLMs - run across Metal, CUDA and other backends (AMD/ NPU etc) https://github.com/ggerganov/llama.cpp 2. MLC - Deploy LLMs across platforms especially WebGPU (fastest WebGPU LLM implementation out there) https://github.com/mlc-ai/web-llm 3. MLX - Arguably the fastest general purpose framework (Mac only) - Supports all major Image Generation (Flux, SDXL, etc), Transcription (Whisper), LLMs https://github.com/ml-explore/mlx-examples 4. Candle - Cross-platform general purpose framework written in Rust - wide coverage across model categories https://github.com/huggingface/candle Honorable mentions: 1. Transformers.js - Javascript (WebGPU) implementation built on top of ONNXruntimeweb https://github.com/xenova/transformers.js 2. Mistral rs - Rust implementation for LLMs & VLMs, built on top of Candle https://github.com/EricLBuehler/mistral.rs 3. Ratchet - Cross platform, rust based WebGPU framework built for battle-tested deployments https://github.com/huggingface/ratchet 4. Zml - Cross platform, Zig based ML framework https://github.com/zml/zml Looking forward to how the ecosystem would look 1 year from now - Quite bullish on the top 4 atm - but open source ecosystem changes quite a bit! 🤗 Also, which frameworks did I miss?
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1655385361868-61b85ce86eb1f2c5e6233736.jpeg", "fullname": "Vaibhav Srivastav", "name": "reach-vb", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 460, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "nicolay-r", "Nymbo", "victor", "John6666", "AtAndDev", "osanseviero", "umair894", "adamelliotfields", "cfahlgren1", "atasoglu" ], "count": 10 }, { "reaction": "🔥", "users": [ "osanseviero", "cfahlgren1", "ZeroWw" ], "count": 3 } ]
2024-10-09T11:23:02.000Z
2024-10-09T22:51:52.545Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64b695dcd3df8086e5ed7c89/06Toh65jDEz3WJbIM6ZmZ.jpeg", "fullname": "Adam Fields", "name": "adamelliotfields", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 10, "isFollowing": false } ]
/posts/reach-vb/688270400940064
2,053
1
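The post above lists llama.cpp first among on-device runtimes; as a quick, hedged illustration of what running a model locally with it looks like, here is a minimal sketch using the llama-cpp-python bindings. The GGUF filename, context size, and generation settings are assumptions for illustration, not details from the post.

```python
# Minimal sketch: local inference with llama.cpp via its Python bindings
# (pip install llama-cpp-python). The model path below is an assumption --
# point it at any GGUF checkpoint you have downloaded locally.
from llama_cpp import Llama

llm = Llama(
    model_path="./models/llama-3.1-8b-instruct.Q4_K_M.gguf",  # hypothetical local file
    n_ctx=4096,        # context window
    n_gpu_layers=-1,   # offload all layers to Metal/CUDA when available
)

out = llm("Q: Name three on-device AI frameworks.\nA:", max_tokens=64, stop=["Q:"])
print(out["choices"][0]["text"])
```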
422738087480368
[ { "type": "text", "value": "NSFW Erotic Novel AI Generation", "raw": "NSFW Erotic Novel AI Generation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "-NSFW Text (Data) Generator for Detecting 'NSFW' Text: Multilingual Experience ", "raw": "-NSFW Text (Data) Generator for Detecting 'NSFW' Text: Multilingual Experience ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The multilingual NSFW text (data) auto-generator is a tool designed to automatically generate and analyze adult content in various languages. This service uses AI-based text generation to produce various types of NSFW content, which can then be used as training data to build effective filtering models. It supports multiple languages, including English, and allows users to input the desired language through the system prompt in the on-screen options to generate content in the specified language. Users can create datasets from the generated data, train machine learning models, and improve the accuracy of text analysis systems. Furthermore, content generation can be customized according to user specifications, allowing for the creation of tailored data. This maximizes the performance of NSFW text detection models.", "raw": "The multilingual NSFW text (data) auto-generator is a tool designed to automatically generate and analyze adult content in various languages. This service uses AI-based text generation to produce various types of NSFW content, which can then be used as training data to build effective filtering models. It supports multiple languages, including English, and allows users to input the desired language through the system prompt in the on-screen options to generate content in the specified language. Users can create datasets from the generated data, train machine learning models, and improve the accuracy of text analysis systems. Furthermore, content generation can be customized according to user specifications, allowing for the creation of tailored data. 
This maximizes the performance of NSFW text detection models.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Web: ", "raw": "Web: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://fantaxy-erotica.hf.space", "href": "https://fantaxy-erotica.hf.space", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "API: ", "raw": "API: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://replicate.com/aitechtree/nsfw-novel-generation", "href": "https://replicate.com/aitechtree/nsfw-novel-generation", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Usage Warnings and Notices: This tool is intended for research and development purposes only, and the generated NSFW content must adhere to appropriate legal and ethical guidelines. Proper monitoring is required to prevent the misuse of inappropriate content, and legal responsibility lies with the user. Users must comply with local laws and regulations when using the data, and the service provider is not liable for any issues arising from the misuse of the data.", "raw": "Usage Warnings and Notices: This tool is intended for research and development purposes only, and the generated NSFW content must adhere to appropriate legal and ethical guidelines. Proper monitoring is required to prevent the misuse of inappropriate content, and legal responsibility lies with the user. Users must comply with local laws and regulations when using the data, and the service provider is not liable for any issues arising from the misuse of the data.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
NSFW Erotic Novel AI Generation -NSFW Text (Data) Generator for Detecting 'NSFW' Text: Multilingual Experience The multilingual NSFW text (data) auto-generator is a tool designed to automatically generate and analyze adult content in various languages. This service uses AI-based text generation to produce various types of NSFW content, which can then be used as training data to build effective filtering models. It supports multiple languages, including English, and allows users to input the desired language through the system prompt in the on-screen options to generate content in the specified language. Users can create datasets from the generated data, train machine learning models, and improve the accuracy of text analysis systems. Furthermore, content generation can be customized according to user specifications, allowing for the creation of tailored data. This maximizes the performance of NSFW text detection models. Web: https://fantaxy-erotica.hf.space API: https://replicate.com/aitechtree/nsfw-novel-generation Usage Warnings and Notices: This tool is intended for research and development purposes only, and the generated NSFW content must adhere to appropriate legal and ethical guidelines. Proper monitoring is required to prevent the misuse of inappropriate content, and legal responsibility lies with the user. Users must comply with local laws and regulations when using the data, and the service provider is not liable for any issues arising from the misuse of the data.
{ "avatarUrl": "/avatars/3dac1c2fca69b3886f087f58909f50fd.svg", "fullname": "llm", "name": "fantaxy", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 45, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/66333c7887ce9a8935ff5738/rG13a_9YInC8-eDrQXu8c.png" } ]
[]
[ { "reaction": "🔥", "users": [ "fantaxy", "ginipick", "fantos", "aiqcamp", "openfree", "seawolf2357", "BeingUs", "John6666", "Nymbo", "aming97", "vaskos97", "Lewdiculous", "cindyangelira", "Solaren", "dummydum" ], "count": 15 }, { "reaction": "👀", "users": [ "fantaxy", "ginipick", "fantos", "aiqcamp", "openfree", "seawolf2357", "John6666", "Nymbo", "UCCTeam" ], "count": 9 }, { "reaction": "🚀", "users": [ "fantaxy", "ginipick", "aiqcamp", "openfree", "seawolf2357", "John6666", "toshihikochen", "den0620" ], "count": 8 }, { "reaction": "❤️", "users": [ "fantaxy", "openfree", "seawolf2357", "John6666", "aming97", "Mackya" ], "count": 6 }, { "reaction": "😎", "users": [ "fantaxy", "aiqtech", "openfree", "nicolay-r", "John6666" ], "count": 5 }, { "reaction": "🤗", "users": [ "fantaxy", "openfree", "seawolf2357", "John6666" ], "count": 4 }, { "reaction": "➕", "users": [ "fantaxy", "aiqcamp", "seawolf2357", "John6666" ], "count": 4 }, { "reaction": "🧠", "users": [ "fantaxy", "ginipick", "seawolf2357", "John6666" ], "count": 4 }, { "reaction": "👍", "users": [ "fantaxy", "fantos", "seawolf2357", "John6666" ], "count": 4 }, { "reaction": "🤝", "users": [ "fantaxy", "aiqtech", "seawolf2357", "John6666" ], "count": 4 }, { "reaction": "😔", "users": [ "fantaxy", "aiqtech", "seawolf2357" ], "count": 3 }, { "reaction": "🤯", "users": [ "fantaxy", "seawolf2357" ], "count": 2 } ]
2024-10-09T10:26:47.000Z
2024-10-17T14:18:24.446Z
[ { "avatarUrl": "/avatars/ea05c741f4c2be959f3d1c6119738d5b.svg", "fullname": "Jeremie Hicks", "name": "STDad", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "/avatars/e63ed24b583f258f5b5a443f8d0d5f66.svg", "fullname": "seawolf", "name": "seawolf2357", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 13, "isFollowing": false } ]
/posts/fantaxy/422738087480368
3,490
2
335589186633902
[ { "type": "text", "value": "hi everyone, i was just wondering how much autotrain will cost depending on which model i pick and what gpu i run it on? the documentation is pretty vague so any help would be great! thanks.", "raw": "hi everyone, i was just wondering how much autotrain will cost depending on which model i pick and what gpu i run it on? the documentation is pretty vague so any help would be great! thanks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
hi everyone, i was just wondering how much autotrain will cost depending on which model i pick and what gpu i run it on? the documentation is pretty vague so any help would be great! thanks.
{ "avatarUrl": "/avatars/7be1913712fdd1ffe75967ed19007720.svg", "fullname": "stock mining", "name": "automatedstockminingorg", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 10, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "umangmqs" ], "count": 2 } ]
2024-10-08T17:44:44.000Z
2024-10-09T07:06:30.192Z
[ { "avatarUrl": "/avatars/24536c8d929e8dbc8889dee1ea692c01.svg", "fullname": "seven yevale", "name": "sevenyevale", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/automatedstockminingorg/335589186633902
1,644
1
949700694468889
[ { "type": "text", "value": "Neural Network Chaos Monkey: Randomly shuts off parts of the neural network during training. The Chaos Monkey is super present at Epoch 1, is gone by the final Epoch. My hypothesis was that this would either increase the robustness of the model, or it would make the outputs totally worse. You can 100% reproduce my results, chaos wins again. ", "raw": "Neural Network Chaos Monkey: Randomly shuts off parts of the neural network during training. The Chaos Monkey is super present at Epoch 1, is gone by the final Epoch. My hypothesis was that this would either increase the robustness of the model, or it would make the outputs totally worse. You can 100% reproduce my results, chaos wins again. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/bWA9unotJ7k", "href": "https://youtu.be/bWA9unotJ7k", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Neural Network Chaos Monkey: Randomly shuts off parts of the neural network during training. The Chaos Monkey is super present at Epoch 1, is gone by the final Epoch. My hypothesis was that this would either increase the robustness of the model, or it would make the outputs totally worse. You can 100% reproduce my results, chaos wins again. https://youtu.be/bWA9unotJ7k
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64274b69ba6cef0a6ebb0fd6/kgD6GfgDwEtyLg5ZACzj7.jpeg" } ]
[]
[ { "reaction": "❤️", "users": [ "LeroyDyer", "coyotte508" ], "count": 2 }, { "reaction": "👍", "users": [ "jeremy-london" ], "count": 1 }, { "reaction": "😔", "users": [ "takeraparterer" ], "count": 1 }, { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-08T16:33:57.000Z
2024-10-27T12:48:17.315Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6316fb937b0ee0136e5f1220/poHBoJ7QAF_s2CCaosdvQ.jpeg", "fullname": "Firstname Lastname", "name": "takeraparterer", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 29, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1653051419389-62878fdc70af5d9106e3e892.png", "fullname": "K S", "name": "MultiTrickFox", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 5, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/634262af8d8089ebaefd410e/pr6KcEebXTo5V2XAlpQNw.png", "fullname": "Fizz 🏳️‍⚧️", "name": "Fizzarolli", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg", "fullname": "leroy Samuel Dyer", "name": "LeroyDyer", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 84, "isFollowing": false }, { "avatarUrl": "/avatars/54483699273ac58a4a6fe1fa4aab65fe.svg", "fullname": "Robert Sinclair", "name": "ZeroWw", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 76, "isFollowing": false } ]
/posts/TuringsSolutions/949700694468889
1,824
42
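The "Chaos Monkey" described above amounts to randomly disabling activations with a strength that starts high at epoch 1 and decays to zero by the final epoch. A minimal PyTorch sketch of that schedule follows; the network shape, the 0.5 starting rate, and the linear decay are assumptions made for illustration, not details taken from the video.

```python
# Sketch of the "chaos monkey" idea: randomly zero out a fraction of hidden
# activations during training, strongest at epoch 0 and gone by the last epoch.
# Implemented here as scheduled (inverted) dropout.
import torch
import torch.nn as nn
import torch.nn.functional as F

class ChaosMonkeyMLP(nn.Module):
    def __init__(self, d_in=784, d_hidden=256, d_out=10):
        super().__init__()
        self.fc1 = nn.Linear(d_in, d_hidden)
        self.fc2 = nn.Linear(d_hidden, d_out)
        self.chaos_p = 0.0  # fraction of hidden units to shut off, set per epoch

    def forward(self, x):
        h = F.relu(self.fc1(x))
        if self.training and self.chaos_p > 0:
            # Randomly "shut off" units with probability chaos_p
            mask = (torch.rand_like(h) > self.chaos_p).float()
            h = h * mask / (1.0 - self.chaos_p)  # rescale like standard dropout
        return self.fc2(h)

model = ChaosMonkeyMLP()
n_epochs = 10
for epoch in range(n_epochs):
    # Chaos is strongest at epoch 0 and decays linearly to zero by the final epoch
    model.chaos_p = 0.5 * (1.0 - epoch / (n_epochs - 1))
    # ... run the usual training loop for this epoch ...
```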
126001514773028
[ { "type": "text", "value": "Building a Ranking System to Enhance Prompt Results: The New PageRank for RAG/LLM ", "raw": "Building a Ranking System to Enhance Prompt Results: The New PageRank for RAG/LLM ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read full article at ", "raw": "Read full article at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://mltblog.com/4gT62y9", "href": "https://mltblog.com/4gT62y9", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In this document, you will learn how to build a system that decides, among dozens of candidate paragraphs selected from the corpus to answer a prompt, which ones to show in the results, and in what order. The goal is to maximize relevancy while not overwhelming the user with a long, cluttered answer. Think of it as the new PageRank for RAG/LLM, although the algorithm is radically different, and much simpler. The approach is generic and works for all RAG/LLM systems whether based on neural networks or not. It is implemented in xLLM. ", "raw": "In this document, you will learn how to build a system that decides, among dozens of candidate paragraphs selected from the corpus to answer a prompt, which ones to show in the results, and in what order. The goal is to maximize relevancy while not overwhelming the user with a long, cluttered answer. Think of it as the new PageRank for RAG/LLM, although the algorithm is radically different, and much simpler. The approach is generic and works for all RAG/LLM systems whether based on neural networks or not. It is implemented in xLLM. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The article includes Python code (with links to GitHub) and case study featuring the anonymized augmented corpus of a fortune 100 company, as well as future LLM developments (auto-indexing and LLM for glossary generation).", "raw": "The article includes Python code (with links to GitHub) and case study featuring the anonymized augmented corpus of a fortune 100 company, as well as future LLM developments (auto-indexing and LLM for glossary generation).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Building a Ranking System to Enhance Prompt Results: The New PageRank for RAG/LLM Read full article at https://mltblog.com/4gT62y9 In this document, you will learn how to build a system that decides, among dozens of candidate paragraphs selected from the corpus to answer a prompt, which ones to show in the results, and in what order. The goal is to maximize relevancy while not overwhelming the user with a long, cluttered answer. Think of it as the new PageRank for RAG/LLM, although the algorithm is radically different, and much simpler. The approach is generic and works for all RAG/LLM systems whether based on neural networks or not. It is implemented in xLLM. The article includes Python code (with links to GitHub) and case study featuring the anonymized augmented corpus of a fortune 100 company, as well as future LLM developments (auto-indexing and LLM for glossary generation).
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/669c89e98f2dbc203f9e74ab/higvnXEHeo_Ig2bgTpn47.png", "fullname": "Vincent Granville", "name": "vincentg64", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 17, "isFollowing": false }
[]
[]
[ { "reaction": "🤯", "users": [ "mediiiiii3", "John6666" ], "count": 2 }, { "reaction": "🤝", "users": [ "BeingUs" ], "count": 1 } ]
2024-10-08T16:13:09.000Z
2024-10-08T16:13:09.008Z
[]
/posts/vincentg64/126001514773028
1,280
0
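The article's actual xLLM ranking algorithm is not given in the post, so the sketch below only illustrates the stated goal: score candidate paragraphs against the prompt, order them, and stop before the answer becomes long and cluttered. The token-overlap scoring and the character budget are placeholder assumptions, not the author's method.

```python
# Generic illustration only: rank candidate paragraphs for a prompt and keep
# the best ones until a length budget is hit. Scoring by token overlap and the
# 1200-character budget are assumptions, not the article's algorithm.
def rank_candidates(prompt: str, paragraphs: list[str], max_chars: int = 1200) -> list[str]:
    prompt_tokens = set(prompt.lower().split())

    def score(paragraph: str) -> float:
        tokens = set(paragraph.lower().split())
        overlap = len(tokens & prompt_tokens)
        return overlap / (len(tokens) ** 0.5 + 1)  # mild length normalization

    ranked = sorted(paragraphs, key=score, reverse=True)

    selected, used = [], 0
    for p in ranked:
        if used + len(p) > max_chars:
            break  # stop before overwhelming the user with a cluttered answer
        selected.append(p)
        used += len(p)
    return selected
```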
866581187755410
[ { "type": "text", "value": "💥 𝐋-𝐌𝐮𝐥: 𝐀𝐝𝐝𝐢𝐭𝐢𝐨𝐧-𝐎𝐧𝐥𝐲 𝐌𝐮𝐥𝐭𝐢𝐩𝐥𝐢𝐜𝐚𝐭𝐢𝐨𝐧 𝐜𝐚𝐧 𝐬𝐥𝐚𝐬𝐡 𝐜𝐨𝐦𝐩𝐮𝐭𝐚𝐭𝐢𝐨𝐧𝐚𝐥 𝐜𝐨𝐬𝐭𝐬 𝐛𝐲 𝟖𝟎%!", "raw": "💥 𝐋-𝐌𝐮𝐥: 𝐀𝐝𝐝𝐢𝐭𝐢𝐨𝐧-𝐎𝐧𝐥𝐲 𝐌𝐮𝐥𝐭𝐢𝐩𝐥𝐢𝐜𝐚𝐭𝐢𝐨𝐧 𝐜𝐚𝐧 𝐬𝐥𝐚𝐬𝐡 𝐜𝐨𝐦𝐩𝐮𝐭𝐚𝐭𝐢𝐨𝐧𝐚𝐥 𝐜𝐨𝐬𝐭𝐬 𝐛𝐲 𝟖𝟎%!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Microsoft researchers dropped a groundbreaking technique that could slash the energy use in transformer computations : their novel \"linear-complexity multiplication\" (L-Mul) algorithm approximates floating-point multiplication using energy-efficient integer addition instead of costly multiplications.", "raw": "Microsoft researchers dropped a groundbreaking technique that could slash the energy use in transformer computations : their novel \"linear-complexity multiplication\" (L-Mul) algorithm approximates floating-point multiplication using energy-efficient integer addition instead of costly multiplications.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💡 Quick reminder on how floats are coded on 8 bits (FP8):", "raw": "💡 Quick reminder on how floats are coded on 8 bits (FP8):", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "In the e4m3 FP8 standard, you encode a number as:", "raw": "In the e4m3 FP8 standard, you encode a number as:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Sign (1 bit) | Exponent (4 bits) | Mantissa (3 bits)", "raw": "Sign (1 bit) | Exponent (4 bits) | Mantissa (3 bits)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Example: 0 (positive) | 1000 (8) | 101 (1/2 + 1/8 = 0.625)", "raw": "Example: 0 (positive) | 1000 (8) | 101 (1/2 + 1/8 = 0.625)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Calculation: you add one to the mantissa, and multiply it by 2 power (the exponent - a bias term which is 7 for e4m3):", "raw": "Calculation: you add one to the mantissa, and multiply it by 2 power (the exponent - a bias term 
which is 7 for e4m3):", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "➡️ You get (1 + 0.625) × 2^(8-7) = 3.25", "raw": "➡️ You get (1 + 0.625) × 2^(8-7) = 3.25", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Now back to the paper. 𝗞𝗲𝘆 𝗶𝗻𝘀𝗶𝗴𝗵𝘁𝘀:", "raw": "Now back to the paper. 𝗞𝗲𝘆 𝗶𝗻𝘀𝗶𝗴𝗵𝘁𝘀:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚡️ Multiplication is extremely energy-intensive compared to addition. For 32-bit operations, multiplication (3.7 pJ) uses 37x more energy than addition (0.1 pJ)!", "raw": "⚡️ Multiplication is extremely energy-intensive compared to addition. For 32-bit operations, multiplication (3.7 pJ) uses 37x more energy than addition (0.1 pJ)!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🧮 Traditional floating-point multiplication go like (noting xm the mantissa and xe the exponent): Mul(x,y) = (1 + xm) · 2^xe · (1 + ym) · 2^ye = (1 + xm + ym + xm · ym) · 2^(xe+ye)", "raw": "🧮 Traditional floating-point multiplication go like (noting xm the mantissa and xe the exponent): Mul(x,y) = (1 + xm) · 2^xe · (1 + ym) · 2^ye = (1 + xm + ym + xm · ym) · 2^(xe+ye)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💡 L-Mul cleverly approximates this as: L-Mul(x,y) = (1 + xm + ym + 2^-l(m)) · 2^(xe+ye), eliminating the costly xm · ym term", "raw": "💡 L-Mul cleverly approximates this as: L-Mul(x,y) = (1 + xm + ym + 2^-l(m)) · 2^(xe+ye), eliminating the costly xm · ym term", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": 
null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔧 l(m) term is adaptively set based on mantissa size for optimal accuracy", "raw": "🔧 l(m) term is adaptively set based on mantissa size for optimal accuracy", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📊 Benchmarks on the Llama-3.1-8B-Instruct model show L-Mul preserves precision across various NLP tasks, with performance nearly identical to full BFloat16 precision", "raw": "📊 Benchmarks on the Llama-3.1-8B-Instruct model show L-Mul preserves precision across various NLP tasks, with performance nearly identical to full BFloat16 precision", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💬 Authors claim: \"We can achieve the same model inference performance while reducing the energy cost of attention computations by 80%.\"", "raw": "💬 Authors claim: \"We can achieve the same model inference performance while reducing the energy cost of attention computations by 80%.\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This breakthrough is still theoretical and would need implementation on dedicated hardware to confirm real-world gains, but it’s a really exciting path for more sustainable AI! 🌱", "raw": "This breakthrough is still theoretical and would need implementation on dedicated hardware to confirm real-world gains, but it’s a really exciting path for more sustainable AI! 
🌱", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read the paper here 👉 ", "raw": "Read the paper here 👉 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2410.00907", "href": null, "resource": { "type": "paper", "id": "2410.00907", "discussionNum": null }, "url": "https://huggingface.co/papers/2410.00907", "code": null, "user": null, "label": "Addition is All You Need for Energy-efficient Language Models (2410.00907)", "lang": null } ]
💥 𝐋-𝐌𝐮𝐥: 𝐀𝐝𝐝𝐢𝐭𝐢𝐨𝐧-𝐎𝐧𝐥𝐲 𝐌𝐮𝐥𝐭𝐢𝐩𝐥𝐢𝐜𝐚𝐭𝐢𝐨𝐧 𝐜𝐚𝐧 𝐬𝐥𝐚𝐬𝐡 𝐜𝐨𝐦𝐩𝐮𝐭𝐚𝐭𝐢𝐨𝐧𝐚𝐥 𝐜𝐨𝐬𝐭𝐬 𝐛𝐲 𝟖𝟎%! Microsoft researchers dropped a groundbreaking technique that could slash the energy use in transformer computations : their novel "linear-complexity multiplication" (L-Mul) algorithm approximates floating-point multiplication using energy-efficient integer addition instead of costly multiplications. 💡 Quick reminder on how floats are coded on 8 bits (FP8): In the e4m3 FP8 standard, you encode a number as: Sign (1 bit) | Exponent (4 bits) | Mantissa (3 bits) Example: 0 (positive) | 1000 (8) | 101 (1/2 + 1/8 = 0.625) Calculation: you add one to the mantissa, and multiply it by 2 power (the exponent - a bias term which is 7 for e4m3): ➡️ You get (1 + 0.625) × 2^(8-7) = 3.25 Now back to the paper. 𝗞𝗲𝘆 𝗶𝗻𝘀𝗶𝗴𝗵𝘁𝘀: ⚡️ Multiplication is extremely energy-intensive compared to addition. For 32-bit operations, multiplication (3.7 pJ) uses 37x more energy than addition (0.1 pJ)! 🧮 Traditional floating-point multiplication go like (noting xm the mantissa and xe the exponent): Mul(x,y) = (1 + xm) · 2^xe · (1 + ym) · 2^ye = (1 + xm + ym + xm · ym) · 2^(xe+ye) 💡 L-Mul cleverly approximates this as: L-Mul(x,y) = (1 + xm + ym + 2^-l(m)) · 2^(xe+ye), eliminating the costly xm · ym term 🔧 l(m) term is adaptively set based on mantissa size for optimal accuracy 📊 Benchmarks on the Llama-3.1-8B-Instruct model show L-Mul preserves precision across various NLP tasks, with performance nearly identical to full BFloat16 precision 💬 Authors claim: "We can achieve the same model inference performance while reducing the energy cost of attention computations by 80%." This breakthrough is still theoretical and would need implementation on dedicated hardware to confirm real-world gains, but it’s a really exciting path for more sustainable AI! 🌱 Read the paper here 👉 https://huggingface.co/papers/2410.00907
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/hA7kMUYkFrHU2bWNQ3nEe.png" } ]
[]
[ { "reaction": "❤️", "users": [ "ljhwild", "naxautify", "John6666", "Jason233", "Sakalti", "bunnycore", "AtAndDev", "TahirC", "alielfilali01", "louisbrulenaudet", "energyspace", "gfork" ], "count": 12 }, { "reaction": "➕", "users": [ "areeebbbaaaa", "mediiiiii3", "John6666", "AtAndDev" ], "count": 4 }, { "reaction": "🚀", "users": [ "John6666", "AtAndDev", "tensorkelechi", "mmhamdy" ], "count": 4 }, { "reaction": "🧠", "users": [ "alielfilali01" ], "count": 1 } ]
2024-10-08T15:12:20.000Z
2024-10-08T15:12:20.994Z
[]
/posts/m-ric/866581187755410
2,264
0
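The two calculations in the post above can be checked directly in Python: the e4m3 decoding example and the L-Mul approximation that replaces the mantissa product with a constant offset. The sketch below works on ordinary Python floats via math.frexp rather than real FP8 hardware, and the fixed offset l = 4 is an assumption for illustration (the paper picks l(m) from the mantissa width).

```python
# Sketch of the post's two calculations on ordinary Python floats.
import math

# e4m3 example from the post: sign=0, exponent=1000 (8), mantissa=101 (0.625), bias=7
value = (1 + 0.625) * 2 ** (8 - 7)
print(value)  # 3.25

def l_mul(x: float, y: float, l: int = 4) -> float:
    """Approximate x*y as (1 + xm + ym + 2^-l) * 2^(xe+ye), with no mantissa product."""
    if x == 0 or y == 0:
        return 0.0
    sign = math.copysign(1.0, x) * math.copysign(1.0, y)
    mx, ex = math.frexp(abs(x))   # abs(x) = mx * 2^ex with mx in [0.5, 1)
    my, ey = math.frexp(abs(y))
    xm, xe = 2 * mx - 1, ex - 1   # rewrite as (1 + xm) * 2^xe with xm in [0, 1)
    ym, ye = 2 * my - 1, ey - 1
    return sign * (1 + xm + ym + 2 ** -l) * 2 ** (xe + ye)

print(l_mul(3.25, 1.5), 3.25 * 1.5)  # approximation vs exact product
```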
772067054626598
[ { "type": "text", "value": "Lightweight implementation of newly introduced “Differential Transformer”:", "raw": "Lightweight implementation of newly introduced “Differential Transformer”:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Proposes differential attention mechanism which computes attention scores as a difference between two separate softmax attention maps thereby reducing noise in attention blocks. [[[Differential nanoGPT]]] :)", "raw": "Proposes differential attention mechanism which computes attention scores as a difference between two separate softmax attention maps thereby reducing noise in attention blocks. [[[Differential nanoGPT]]] :)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Code: ", "raw": "Code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/Jaykef/ai-algorithms/blob/main/DIFF_Transformer.ipynb", "href": "https://github.com/Jaykef/ai-algorithms/blob/main/DIFF_Transformer.ipynb", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "YT Video: ", "raw": "YT Video: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/9V4mJA5y7dg", "href": "https://youtu.be/9V4mJA5y7dg", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Lightweight implementation of newly introduced “Differential Transformer”: Proposes differential attention mechanism which computes attention scores as a difference between two separate softmax attention maps thereby reducing noise in attention blocks. [[[Differential nanoGPT]]] :) Code: https://github.com/Jaykef/ai-algorithms/blob/main/DIFF_Transformer.ipynb YT Video: https://youtu.be/9V4mJA5y7dg
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg", "fullname": "Jaward Sesay", "name": "Jaward", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 191, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/w-ClSawIyoJJntG8V0ig_.mp4" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/iAwF7swvb8hWRzmOxcK0U.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/FDlvFCfgQQo_VU_rflnq4.jpeg" } ]
[]
[ { "reaction": "👀", "users": [ "John6666", "nicolay-r", "sepideh1366" ], "count": 3 }, { "reaction": "🔥", "users": [ "OrigamiDream" ], "count": 1 }, { "reaction": "👍", "users": [ "k4d3" ], "count": 1 } ]
2024-10-08T14:35:04.000Z
2024-10-10T01:18:03.632Z
[]
/posts/Jaward/772067054626598
1,136
0
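For readers who want the core idea of differential attention without opening the notebook above, here is a minimal single-head sketch: attention scores are computed as the difference between two softmax maps built from split query/key projections. The fixed lambda and the omission of the paper's per-head normalization are simplifications assumed here; the linked notebook has the full implementation.

```python
# Minimal single-head differential attention:
# attn = softmax(Q1 K1^T / sqrt(d)) - lambda * softmax(Q2 K2^T / sqrt(d))
import torch
import torch.nn as nn
import torch.nn.functional as F

class DiffAttention(nn.Module):
    def __init__(self, d_model: int, lam: float = 0.5):
        super().__init__()
        self.d_head = d_model // 2
        self.q_proj = nn.Linear(d_model, d_model)  # split into two query maps
        self.k_proj = nn.Linear(d_model, d_model)  # split into two key maps
        self.v_proj = nn.Linear(d_model, d_model)
        self.lam = lam  # fixed scalar here; learnable/re-parameterized in the paper

    def forward(self, x):                       # x: (batch, seq, d_model)
        q1, q2 = self.q_proj(x).chunk(2, dim=-1)
        k1, k2 = self.k_proj(x).chunk(2, dim=-1)
        v = self.v_proj(x)
        scale = self.d_head ** -0.5
        a1 = F.softmax(q1 @ k1.transpose(-2, -1) * scale, dim=-1)
        a2 = F.softmax(q2 @ k2.transpose(-2, -1) * scale, dim=-1)
        attn = a1 - self.lam * a2               # difference of the two maps cancels common-mode noise
        return attn @ v

x = torch.randn(2, 16, 64)
print(DiffAttention(64)(x).shape)  # torch.Size([2, 16, 64])
```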
603114482757969
[ { "type": "text", "value": "The Nobel Prize background for Hopfield and Hinton's work on neural networks is pure gold. It's a masterclass in explaining AI basics.", "raw": "The Nobel Prize background for Hopfield and Hinton's work on neural networks is pure gold. It's a masterclass in explaining AI basics.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Key takeaways from the conclusion:", "raw": "Key takeaways from the conclusion:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ML applications are expanding rapidly. We're still figuring out which will stick.", "raw": "- ML applications are expanding rapidly. We're still figuring out which will stick.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Ethical discussions are crucial as the tech develops.", "raw": "- Ethical discussions are crucial as the tech develops.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Physics 🤝 AI: A two-way street of innovation.", "raw": "- Physics 🤝 AI: A two-way street of innovation.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Some mind-blowing AI applications in physics:", "raw": "Some mind-blowing AI applications in physics:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Discovering the Higgs particle", "raw": "- Discovering the Higgs particle", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Cleaning up gravitational wave data", "raw": "- Cleaning up gravitational wave data", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Hunting exoplanets", "raw": "- Hunting exoplanets", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Predicting molecular structures", "raw": "- Predicting molecular structures", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Designing better solar cells", "raw": "- Designing better solar cells", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We're just scratching the surface. The interplay between AI and physics is reshaping both fields.", "raw": "We're just scratching the surface. The interplay between AI and physics is reshaping both fields.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Bonus: The illustrations accompanying the background document are really neat. (Credit: Johan Jarnestad/The Royal Swedish Academy of Sciences)", "raw": "Bonus: The illustrations accompanying the background document are really neat. (Credit: Johan Jarnestad/The Royal Swedish Academy of Sciences)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "#AI #MachineLearning #Physics #Ethics #Innovation", "raw": "#AI #MachineLearning #Physics #Ethics #Innovation", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
The Nobel Prize background for Hopfield and Hinton's work on neural networks is pure gold. It's a masterclass in explaining AI basics. Key takeaways from the conclusion: - ML applications are expanding rapidly. We're still figuring out which will stick. - Ethical discussions are crucial as the tech develops. - Physics 🤝 AI: A two-way street of innovation. Some mind-blowing AI applications in physics: - Discovering the Higgs particle - Cleaning up gravitational wave data - Hunting exoplanets - Predicting molecular structures - Designing better solar cells We're just scratching the surface. The interplay between AI and physics is reshaping both fields. Bonus: The illustrations accompanying the background document are really neat. (Credit: Johan Jarnestad/The Royal Swedish Academy of Sciences) #AI #MachineLearning #Physics #Ethics #Innovation
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/R2HcFidThVXHox4vmv-XC.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/W2-KuvXzOlkefTNqLsZdv.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/u-fqsI8EwTkwc5UV6CLEX.png" } ]
[]
[ { "reaction": "🔥", "users": [ "Jaward", "Svngoku", "rajveer43", "aikongfu", "darkzbaron", "octadion", "nicolay-r", "amaaljuffry", "Salvor", "Cyber", "osanseviero", "elec3647", "shtefcs", "louisbrulenaudet" ], "count": 14 }, { "reaction": "👀", "users": [ "John6666", "aikongfu", "bezir", "osanseviero", "wahyudesu", "shtefcs" ], "count": 6 }, { "reaction": "🧠", "users": [ "aikongfu", "nicolay-r", "osanseviero", "RayNene", "shtefcs" ], "count": 5 }, { "reaction": "🤗", "users": [ "rajveer43", "aikongfu", "osanseviero", "shtefcs" ], "count": 4 }, { "reaction": "🚀", "users": [ "shtefcs" ], "count": 1 } ]
2024-10-08T13:55:32.000Z
2024-10-13T11:39:24.797Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/TwR65k1JgO_t3l4pM1UjA.png", "fullname": "Stefan Smiljkovic", "name": "shtefcs", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false } ]
/posts/fdaudens/603114482757969
3,038
1
329985771630527
[ { "type": "text", "value": "Meta AI vision has been cooking ", "raw": "Meta AI vision has been cooking ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@facebook", "href": null, "resource": null, "url": null, "code": null, "user": "facebook", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "They shipped multiple models and demos for their papers at ", "raw": "They shipped multiple models and demos for their papers at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@ECCV", "href": null, "resource": null, "url": null, "code": null, "user": "ECCV", "label": null, "lang": null }, { "type": "text", "value": "🤗", "raw": "🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here's a compilation of my top picks:", "raw": "Here's a compilation of my top picks:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Sapiens is family of foundation models for human-centric depth estimation, segmentation and more, all models have open weights and demos 👏", "raw": "- Sapiens is family of foundation models for human-centric depth estimation, segmentation and more, all models have open weights and demos 👏", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "All models have their demos and even torchscript checkpoints!", "raw": "All models have their demos and even torchscript checkpoints!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A collection of models and demos: ", "raw": "A collection of models and demos: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/facebook/sapiens-66d22047daa6402d565cb2fc", "href": null, "resource": { "type": "collection", "id": "facebook/sapiens-66d22047daa6402d565cb2fc", "discussionNum": null }, 
"url": "https://huggingface.co/collections/facebook/sapiens-66d22047daa6402d565cb2fc", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- VFusion3D is state-of-the-art consistent 3D generation model from images ", "raw": "- VFusion3D is state-of-the-art consistent 3D generation model from images ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model: ", "raw": "Model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/facebook/vfusion3d", "href": null, "resource": { "type": "model", "id": "facebook/vfusion3d", "discussionNum": null }, "url": "https://huggingface.co/facebook/vfusion3d", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Demo: ", "raw": "Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/facebook/VFusion3D", "href": null, "resource": { "type": "space", "id": "facebook/VFusion3D", "discussionNum": null }, "url": "https://huggingface.co/spaces/facebook/VFusion3D", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- CoTracker is the state-of-the-art point (pixel) tracking model", "raw": "- CoTracker is the state-of-the-art point (pixel) tracking model", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Demo: ", "raw": "Demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/facebook/cotracker", "href": null, "resource": { "type": "space", "id": "facebook/cotracker", "discussionNum": null }, "url": "https://huggingface.co/spaces/facebook/cotracker", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model: ", "raw": "Model: 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/facebook/cotracker", "href": null, "resource": { "type": "model", "id": "facebook/cotracker", "discussionNum": null }, "url": "https://huggingface.co/facebook/cotracker", "code": null, "user": null, "label": null, "lang": null } ]
Meta AI vision has been cooking @facebook They shipped multiple models and demos for their papers at @ECCV 🤗 Here's a compilation of my top picks: - Sapiens is a family of foundation models for human-centric depth estimation, segmentation and more; all models have open weights and demos 👏 All models have their demos and even torchscript checkpoints! A collection of models and demos: https://huggingface.co/collections/facebook/sapiens-66d22047daa6402d565cb2fc - VFusion3D is a state-of-the-art model for consistent 3D generation from images Model: https://huggingface.co/facebook/vfusion3d Demo: https://huggingface.co/spaces/facebook/VFusion3D - CoTracker is the state-of-the-art point (pixel) tracking model Demo: https://huggingface.co/spaces/facebook/cotracker Model: https://huggingface.co/facebook/cotracker
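For anyone who wants to try one of these right away, here is a minimal sketch of running CoTracker through torch.hub. It assumes the `facebookresearch/co-tracker` hub entry point and the `cotracker2` model name from the repo's README; the exact entry-point names may differ by release.

```python
# Minimal CoTracker sketch (assumes the torch.hub entry point from the CoTracker repo).
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the pretrained tracker from torch.hub (downloads weights on first call).
cotracker = torch.hub.load("facebookresearch/co-tracker", "cotracker2").to(device)

# Dummy video: batch x frames x channels x height x width, float tensor.
video = torch.rand(1, 8, 3, 256, 256, device=device) * 255

# Track a regular grid of points across the clip.
pred_tracks, pred_visibility = cotracker(video, grid_size=10)
print(pred_tracks.shape)       # tracked x/y coordinates per point per frame
print(pred_visibility.shape)   # per-point visibility over the clip
```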
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/hxuX0ogQojMpC1gFMFI92.png" } ]
[]
[ { "reaction": "🔥", "users": [ "KingNish", "adamelliotfields", "nicolay-r", "John6666", "Tom-Neverwinter", "rwightman", "DmitryRyumin", "Salvor", "clem", "Yadukrishnan", "dnlserrano", "osanseviero", "sn2234", "Wuayker", "victor", "alielfilali01", "louisbrulenaudet" ], "count": 17 }, { "reaction": "👍", "users": [ "CoolSpot" ], "count": 1 } ]
2024-10-08T11:22:32.000Z
2024-10-08T11:22:32.928Z
[]
/posts/merve/329985771630527
3,744
0
787688758269097
[ { "type": "text", "value": "THANK YOU for bringing Mag Mell to 10,000 downloads across its quantizations!! I'm over the moon with how well it's done, and with everyone's kind feedback.", "raw": "THANK YOU for bringing Mag Mell to 10,000 downloads across its quantizations!! I'm over the moon with how well it's done, and with everyone's kind feedback.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I'm in a team now! Allura are a group of alumni from various reaches of the LLM roleplay scene.", "raw": "I'm in a team now! Allura are a group of alumni from various reaches of the LLM roleplay scene.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/allura-org", "href": "https://huggingface.co/allura-org", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Our first model is an OLMoE roleplay tune called MoE Girl:", "raw": "Our first model is an OLMoE roleplay tune called MoE Girl:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/allura-org/MoE-Girl-1BA-7BT", "href": null, "resource": { "type": "model", "id": "allura-org/MoE-Girl-1BA-7BT", "discussionNum": null }, "url": "https://huggingface.co/allura-org/MoE-Girl-1BA-7BT", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I'd like to make more adventuring and longform models in my current style with them, so keep an eye out for that.", "raw": "I'd like to make more adventuring and longform models in my current style with them, so keep an eye out for that.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Also Mag Mell R2 
soon maybe idk", "raw": "Also Mag Mell R2 soon maybe idk", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
THANK YOU for bringing Mag Mell to 10,000 downloads across its quantizations!! I'm over the moon with how well it's done, and with everyone's kind feedback. I'm in a team now! Allura are a group of alumni from various reaches of the LLM roleplay scene. https://huggingface.co/allura-org Our first model is an OLMoE roleplay tune called MoE Girl: https://huggingface.co/allura-org/MoE-Girl-1BA-7BT I'd like to make more adventuring and longform models in my current style with them, so keep an eye out for that. Also Mag Mell R2 soon maybe idk
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6685d39f64da708c0f553c5d/d9EvSPFssc-jproPdAszF.png", "fullname": "Bot", "name": "inflatebot", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 43, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6685d39f64da708c0f553c5d/LCTmtyaEd0SG2G3tWa6WN.png" } ]
[]
[ { "reaction": "🤗", "users": [ "TravelingMan", "Varkoyote" ], "count": 2 }, { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-08T09:30:19.000Z
2024-10-08T09:32:06.121Z
[]
/posts/inflatebot/787688758269097
1,209
0
723292111283231
[ { "type": "text", "value": "Open-source AI creates healthy competition in a field where natural tendencies lead to extreme concentration of power. Imagine a world where only one or two companies could build software. This is the biggest risk and ethical challenge of them all IMO. Let's fight this!", "raw": "Open-source AI creates healthy competition in a field where natural tendencies lead to extreme concentration of power. Imagine a world where only one or two companies could build software. This is the biggest risk and ethical challenge of them all IMO. Let's fight this!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Open-source AI creates healthy competition in a field where natural tendencies lead to extreme concentration of power. Imagine a world where only one or two companies could build software. This is the biggest risk and ethical challenge of them all IMO. Let's fight this!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem 🤗", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1763, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/5e67bdd61009063689407479/zy907d1qsMyw3PaB93wSo.mp4" } ]
[]
[ { "reaction": "❤️", "users": [ "KingNish", "YaTharThShaRma999", "jsulz", "ayyylol", "pletcher", "4rtemi5", "ajibawa-2023", "AdinaY", "dvilasuero", "ljhwild", "pretzinger", "nickvidal", "alielfilali01", "not-lain", "philipp-zettl" ], "count": 15 }, { "reaction": "😎", "users": [ "ayyylol", "YaTharThShaRma999", "John6666", "AdinaY", "dvilasuero", "not-lain" ], "count": 6 }, { "reaction": "🚀", "users": [ "YaTharThShaRma999", "AdinaY", "fdaudens", "dvilasuero" ], "count": 4 }, { "reaction": "👍", "users": [ "Jaward", "grib0ed0v", "majidml", "dvilasuero" ], "count": 4 } ]
2024-10-07T15:35:02.000Z
2024-10-09T11:21:41.569Z
[ { "avatarUrl": "/avatars/aa8e2e38e07d1fa0d2dc611723bc8f4c.svg", "fullname": "Łael Al-Halawani", "name": "ljhwild", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }, { "avatarUrl": "/avatars/0087f207c06a793c55ed0489ff793e70.svg", "fullname": "nicolo", "name": "nicolollo", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false } ]
/posts/clem/723292111283231
4,150
3
611927614774663
[ { "type": "text", "value": "Huge news for Kohya GUI - Now you can fully Fine Tune / DreamBooth FLUX Dev with as low as 6 GB GPUs without any quality loss compared to 48 GB GPUs - Moreover, Fine Tuning yields better results than any LoRA training could", "raw": "Huge news for Kohya GUI - Now you can fully Fine Tune / DreamBooth FLUX Dev with as low as 6 GB GPUs without any quality loss compared to 48 GB GPUs - Moreover, Fine Tuning yields better results than any LoRA training could", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Config Files", "raw": "Config Files", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I published all configs here : ", "raw": "I published all configs here : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/112099700", "href": "https://www.patreon.com/posts/112099700", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Tutorials", "raw": "Tutorials", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Fine tuning tutorial in production", "raw": "Fine tuning tutorial in production", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Windows FLUX LoRA training (fine tuning is same just config changes) : ", "raw": "Windows FLUX LoRA training (fine tuning is same just config changes) : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/nySGu12Y05k", "href": "https://youtu.be/nySGu12Y05k", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": 
null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Cloud FLUX LoRA training (RunPod and Massed Compute ultra cheap) : ", "raw": "Cloud FLUX LoRA training (RunPod and Massed Compute ultra cheap) : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/-uhL2nW7Ddw", "href": "https://youtu.be/-uhL2nW7Ddw", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "LoRA Extraction", "raw": "LoRA Extraction", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The checkpoint sizes are 23.8 GB but you can extract LoRA with almost no loss quality - I made a research and public article / guide for this as well", "raw": "The checkpoint sizes are 23.8 GB but you can extract LoRA with almost no loss quality - I made a research and public article / guide for this as well", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "LoRA extraction guide from Fine Tuned checkpoint is here : ", "raw": "LoRA extraction guide from Fine Tuned checkpoint is here : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/112335162", "href": "https://www.patreon.com/posts/112335162", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Info", "raw": "Info", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is just mind blowing. The recent improvements Kohya made for block swapping is just amazing.", "raw": "This is just mind blowing. 
The recent improvements Kohya made for block swapping is just amazing.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Speeds are also amazing that you can see in image 2 - of course those values are based on my researched config and tested on RTX A6000 - same speed as almost RTX 3090", "raw": "Speeds are also amazing that you can see in image 2 - of course those values are based on my researched config and tested on RTX A6000 - same speed as almost RTX 3090", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Also all trainings experiments are made at 1024x1024px. If you use lower resolution it will be lesser VRAM + faster speed", "raw": "Also all trainings experiments are made at 1024x1024px. If you use lower resolution it will be lesser VRAM + faster speed", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The VRAM usages would change according to your own configuration - likely speed as well", "raw": "The VRAM usages would change according to your own configuration - likely speed as well", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Moreover, Fine Tuning / DreamBooth yields better results than any LoRA could", "raw": "Moreover, Fine Tuning / DreamBooth yields better results than any LoRA could", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Installers", "raw": "Installers", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": 
"1-Kohya GUI accurate branch and Windows Torch 2.5 Installers and test prompts shared here : ", "raw": "1-Kohya GUI accurate branch and Windows Torch 2.5 Installers and test prompts shared here : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.patreon.com/posts/110879657", "href": "https://www.patreon.com/posts/110879657", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The link of Kohya GUI with accurate branch : ", "raw": "The link of Kohya GUI with accurate branch : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/bmaltais/kohya_ss/tree/sd3-flux.1", "href": "https://github.com/bmaltais/kohya_ss/tree/sd3-flux.1", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Huge news for Kohya GUI - you can now fully Fine Tune / DreamBooth FLUX Dev on GPUs with as little as 6 GB of VRAM, without any quality loss compared to 48 GB GPUs - moreover, Fine Tuning yields better results than any LoRA training could Config Files I published all configs here : https://www.patreon.com/posts/112099700 Tutorials Fine tuning tutorial in production Windows FLUX LoRA training (fine tuning is the same, just config changes) : https://youtu.be/nySGu12Y05k Cloud FLUX LoRA training (RunPod and Massed Compute, ultra cheap) : https://youtu.be/-uhL2nW7Ddw LoRA Extraction The checkpoints are 23.8 GB, but you can extract a LoRA with almost no quality loss - I researched this and published a public article / guide as well LoRA extraction guide from a Fine Tuned checkpoint is here : https://www.patreon.com/posts/112335162 Info This is just mind blowing. The recent improvements Kohya made for block swapping are just amazing. Speeds are also amazing, as you can see in image 2 - of course those values are based on my researched config and tested on an RTX A6000 - almost the same speed as an RTX 3090 Also, all training experiments were done at 1024x1024 px. If you use a lower resolution it will use less VRAM and run faster VRAM usage will change according to your own configuration - likely speed as well Moreover, Fine Tuning / DreamBooth yields better results than any LoRA could Installers 1- Kohya GUI accurate branch, Windows Torch 2.5 installers and test prompts are shared here : https://www.patreon.com/posts/110879657 The link to the Kohya GUI accurate branch : https://github.com/bmaltais/kohya_ss/tree/sd3-flux.1
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png", "fullname": "Furkan Gözükara", "name": "MonsterMMORPG", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 376, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Qd8GrwndcJ8BgkGPJnbWO.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/PHQRKtKhnQoQZYBFTzp6i.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/MUuwnsxRpOYqu3jzUxF-G.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/9AYgpWoG0Zr5p41xHvXrs.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/V7aYSMms1jG6pZdH24DoF.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/ylyyCGTOvptV9FDnQzY44.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Afsi-dDF5vJblQsBVXNTT.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/iFiaX7s0Epna0FpGqDk7b.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/SE7VoIUg-ZrAPGFVJPyBz.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/EeuNdHBG5hVChSve47hrf.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/bIOiLFrsjB9tdFlDqPsHr.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/kvum6CtxncTldgWYbaUqy.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/IDqXTq_x4-lVP_WSdy7aZ.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/AIFl5iHgEdGyWNLjX_oDO.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/OiFRWqKNK5UgN-Ue0uvZv.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/WP7C51NEDq5L9-NxpMYLS.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/tSgYbQIBphf6bGbksGpI-.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/5jhHHyA1Rn6X2GA0g76B4.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/td-330D6hR-o3HqPh7TXI.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/ju-fuzzTVHl28soUWVlwt.png" } ]
[]
[ { "reaction": "🔥", "users": [ "MonsterMMORPG", "proxima8", "Gatozu35", "Clausss", "YaTharThShaRma999", "Nandhagopal02", "ajibawa-2023", "alkeryn", "Nelathan", "gbharti", "Macrossbr", "JarvisLabs", "Rsln" ], "count": 13 }, { "reaction": "👀", "users": [ "MonsterMMORPG", "Clausss", "John6666", "pyhornet" ], "count": 4 }, { "reaction": "❤️", "users": [ "MonsterMMORPG", "dawidplaskowski", "Clausss", "KingNish" ], "count": 4 }, { "reaction": "👍", "users": [ "MonsterMMORPG", "Clausss", "majidml" ], "count": 3 }, { "reaction": "🤯", "users": [ "MonsterMMORPG", "Clausss", "Rsln" ], "count": 3 }, { "reaction": "🚀", "users": [ "MonsterMMORPG", "Clausss" ], "count": 2 }, { "reaction": "🤗", "users": [ "MonsterMMORPG", "Clausss" ], "count": 2 }, { "reaction": "😎", "users": [ "MonsterMMORPG", "Clausss" ], "count": 2 }, { "reaction": "➕", "users": [ "MonsterMMORPG", "Clausss" ], "count": 2 }, { "reaction": "🧠", "users": [ "MonsterMMORPG", "Clausss" ], "count": 2 }, { "reaction": "🤝", "users": [ "MonsterMMORPG", "Clausss" ], "count": 2 } ]
2024-10-07T12:08:16.000Z
2024-10-07T12:08:16.136Z
[]
/posts/MonsterMMORPG/611927614774663
4,065
0
329051851115398
[ { "type": "text", "value": "🔗 Evaluating Long Context #1: Long Range Arena (LRA)", "raw": "🔗 Evaluating Long Context #1: Long Range Arena (LRA)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Accurately evaluating how well language models handle long contexts is crucial, but it's also quite challenging to do well. In this series of posts, we're going to examine the various benchmarks that were proposed to assess long context understanding, starting with Long Range Arens (LRA)", "raw": "Accurately evaluating how well language models handle long contexts is crucial, but it's also quite challenging to do well. In this series of posts, we're going to examine the various benchmarks that were proposed to assess long context understanding, starting with Long Range Arens (LRA)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Introduced in 2020, Long Range Arens (LRA) is one of the earliest benchmarks designed to tackle the challenge of long context evaluation.", "raw": "Introduced in 2020, Long Range Arens (LRA) is one of the earliest benchmarks designed to tackle the challenge of long context evaluation.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📌 Key Features of LRA", "raw": "📌 Key Features of LRA", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1️⃣ Diverse Tasks: The LRA benchmark consists of a suite of tasks designed to evaluate model performance on long sequences ranging from 1,000 to 16,000 tokens. These tasks encompass different data types and modalities: Text, Natural and Synthetic Images, and Mathematical Expressions.", "raw": "1️⃣ Diverse Tasks: The LRA benchmark consists of a suite of tasks designed to evaluate model performance on long sequences ranging from 1,000 to 16,000 tokens. 
These tasks encompass different data types and modalities: Text, Natural and Synthetic Images, and Mathematical Expressions.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2️⃣ Synthetic and Real-world Tasks: LRA is comprised of both synthetic probing tasks and real-world tasks.", "raw": "2️⃣ Synthetic and Real-world Tasks: LRA is comprised of both synthetic probing tasks and real-world tasks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3️⃣ Open-Source and Extensible: Implemented in Python using Jax and Flax, the LRA benchmark code is publicly available, making it easy to extend.", "raw": "3️⃣ Open-Source and Extensible: Implemented in Python using Jax and Flax, the LRA benchmark code is publicly available, making it easy to extend.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📌 Tasks", "raw": "📌 Tasks", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1️⃣ Long ListOps", "raw": "1️⃣ Long ListOps", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2️⃣ Byte-level Text Classification and Document Retrieval", "raw": "2️⃣ Byte-level Text Classification and Document Retrieval", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3️⃣ Image Classification", "raw": "3️⃣ Image 
Classification", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4️⃣ Pathfinder and Pathfinder-X (Long-range spatial dependency)", "raw": "4️⃣ Pathfinder and Pathfinder-X (Long-range spatial dependency)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👨‍💻 Long Range Arena (LRA) Github Repository: ", "raw": "👨‍💻 Long Range Arena (LRA) Github Repository: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/google-research/long-range-arena", "href": "https://github.com/google-research/long-range-arena", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📄 Long Range Arena (LRA) paper: ", "raw": "📄 Long Range Arena (LRA) paper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2011.04006", "href": null, "resource": { "type": "paper", "id": "2011.04006", "discussionNum": null }, "url": "https://huggingface.co/papers/2011.04006", "code": null, "user": null, "label": "Long Range Arena: A Benchmark for Efficient Transformers (2011.04006)", "lang": null } ]
🔗 Evaluating Long Context #1: Long Range Arena (LRA) Accurately evaluating how well language models handle long contexts is crucial, but it's also quite challenging to do well. In this series of posts, we're going to examine the various benchmarks that were proposed to assess long context understanding, starting with Long Range Arena (LRA) Introduced in 2020, Long Range Arena (LRA) is one of the earliest benchmarks designed to tackle the challenge of long context evaluation. 📌 Key Features of LRA 1️⃣ Diverse Tasks: The LRA benchmark consists of a suite of tasks designed to evaluate model performance on long sequences ranging from 1,000 to 16,000 tokens. These tasks encompass different data types and modalities: Text, Natural and Synthetic Images, and Mathematical Expressions. 2️⃣ Synthetic and Real-world Tasks: LRA comprises both synthetic probing tasks and real-world tasks. 3️⃣ Open-Source and Extensible: Implemented in Python using Jax and Flax, the LRA benchmark code is publicly available, making it easy to extend. 📌 Tasks 1️⃣ Long ListOps 2️⃣ Byte-level Text Classification and Document Retrieval 3️⃣ Image Classification 4️⃣ Pathfinder and Pathfinder-X (Long-range spatial dependency) 👨‍💻 Long Range Arena (LRA) Github Repository: https://github.com/google-research/long-range-arena 📄 Long Range Arena (LRA) paper: https://huggingface.co/papers/2011.04006
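To make the Long ListOps task concrete, here is a tiny, self-contained illustration (my own sketch, not code from the LRA repository) of the kind of nested expression a model has to evaluate token by token over very long sequences:

```python
# Illustrative ListOps-style evaluator (not taken from the LRA codebase).
# The benchmark serializes expressions like this into long token sequences
# and asks the model to predict the final single-digit result.

def eval_listops(tokens):
    """Evaluate a bracketed ListOps expression given as a token list."""
    stack = []
    ops = {
        "[MAX": max,
        "[MIN": min,
        "[MED": lambda xs: sorted(xs)[len(xs) // 2],  # median
        "[SM": lambda xs: sum(xs) % 10,               # sum modulo 10
    }
    for tok in tokens:
        if tok in ops or tok.isdigit():
            stack.append(tok)
        elif tok == "]":
            args = []
            while stack and stack[-1] not in ops:
                args.append(int(stack.pop()))
            op = stack.pop()
            stack.append(str(ops[op](args)))
    return int(stack[0])

expr = "[MAX 4 3 [MIN 2 3 ] 1 0 ]".split()
print(eval_listops(expr))  # -> 4
```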
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1650745211725-noauth.png", "fullname": "Mohammed Hamdy", "name": "mmhamdy", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 38, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62645f88c39850dc093d6105/9wlFmY8Lhd1V2BNXmYyGV.png" } ]
[]
[ { "reaction": "👀", "users": [ "John6666", "osanseviero", "nicolay-r", "den0620", "djuna" ], "count": 5 }, { "reaction": "❤️", "users": [ "alielfilali01" ], "count": 1 } ]
2024-10-07T11:32:41.000Z
2024-10-07T11:32:41.619Z
[]
/posts/mmhamdy/329051851115398
1,832
0
132754452184178
[ { "type": "text", "value": "Thursday 10 October 17:00 CEST, I will show a good way to get started with a text classification project on the Hugging Face Hub with Argilla and Setfit.", "raw": "Thursday 10 October 17:00 CEST, I will show a good way to get started with a text classification project on the Hugging Face Hub with Argilla and Setfit.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Signup here: ", "raw": "Signup here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://lu.ma/31mecp34", "href": "https://lu.ma/31mecp34", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
On Thursday 10 October at 17:00 CEST, I will show a good way to get started with a text classification project on the Hugging Face Hub with Argilla and SetFit. Signup here: https://lu.ma/31mecp34
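If you want a head start before the session, here is a minimal few-shot SetFit sketch. The toy dataset and labels are made up, and the trainer API may differ between setfit versions; in practice the labelled examples are what you would curate in Argilla.

```python
# Few-shot text classification with SetFit (illustrative; API may vary by version).
from datasets import Dataset
from setfit import SetFitModel, SetFitTrainer

# Tiny hand-labelled dataset (1 = positive, 0 = negative) standing in for Argilla output.
train_ds = Dataset.from_dict({
    "text": [
        "great product, works as advertised",
        "arrived broken and support ignored me",
        "does the job, nothing special",
        "absolutely love it",
    ],
    "label": [1, 0, 1, 1],
})

model = SetFitModel.from_pretrained("sentence-transformers/paraphrase-mpnet-base-v2")
trainer = SetFitTrainer(model=model, train_dataset=train_ds)
trainer.train()

print(model.predict(["worst purchase I made this year"]))
```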
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg", "fullname": "David Berenstein", "name": "davidberenstein1957", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 167, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "osanseviero", "John6666", "clem" ], "count": 3 } ]
2024-10-07T10:51:41.000Z
2024-10-07T10:51:41.824Z
[]
/posts/davidberenstein1957/132754452184178
1,213
0
845517431425505
[ { "type": "text", "value": "🌐 Subdomain Dataset Update: September 2024 Data Now Available", "raw": "🌐 Subdomain Dataset Update: September 2024 Data Now Available", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I have updated the ", "raw": "I have updated the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/nyuuzyou/subdomains", "href": null, "resource": { "type": "dataset", "id": "nyuuzyou/subdomains", "discussionNum": null }, "url": "https://huggingface.co/datasets/nyuuzyou/subdomains", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " dataset with fresh data for September 2024. This addition further expands this largest collection of subdomain statistics currently available, providing researchers and analysts with even more valuable insights into web infrastructure and domain patterns.", "raw": " dataset with fresh data for September 2024. This addition further expands this largest collection of subdomain statistics currently available, providing researchers and analysts with even more valuable insights into web infrastructure and domain patterns.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Latest Update Highlights:", "raw": "Latest Update Highlights:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- New File: subdomains_2024_09.csv", "raw": "- New File: subdomains_2024_09.csv", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Unique Subdomains: 19,191,867", "raw": "- Unique Subdomains: 19,191,867", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Total Occurrences: 170,792,927", "raw": "- Total Occurrences: 170,792,927", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🌐 Subdomain Dataset Update: September 2024 Data Now Available I have updated the https://huggingface.co/datasets/nyuuzyou/subdomains dataset with fresh data for September 2024. This addition further expands this largest collection of subdomain statistics currently available, providing researchers and analysts with even more valuable insights into web infrastructure and domain patterns. Latest Update Highlights: - New File: subdomains_2024_09.csv - Unique Subdomains: 19,191,867 - Total Occurrences: 170,792,927
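For anyone who wants to pull the new file straight into Python, a rough sketch follows. The column layout is my guess (a subdomain string plus an occurrence count); check the dataset card for the actual schema.

```python
# Load one monthly file from the subdomains dataset (column names are assumed).
from datasets import load_dataset

ds = load_dataset(
    "nyuuzyou/subdomains",
    data_files="subdomains_2024_09.csv",
    split="train",
)
print(ds)      # row count should match the unique-subdomain figure above
print(ds[0])   # a single record, e.g. subdomain plus how often it was seen
```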
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643ac5d2e2b979ae6144d68c/Z7PCNopn4cQeAYnVJDoqG.png", "fullname": "nyuuzyou", "name": "nyuuzyou", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 57, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "osanseviero", "majidml" ], "count": 3 } ]
2024-10-07T09:20:11.000Z
2024-10-07T09:20:20.982Z
[]
/posts/nyuuzyou/845517431425505
1,418
0
403616725325659
[ { "type": "text", "value": "Don't you think we should add a tag \"Evaluation\" for datasets that are meant to be benchmarks and not for training ? ", "raw": "Don't you think we should add a tag \"Evaluation\" for datasets that are meant to be benchmarks and not for training ? ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "At least, when someone is collecting a group of datasets from an organization or let's say the whole hub can filter based on that tag and avoid somehow contaminating their \"training\" data.", "raw": "At least, when someone is collecting a group of datasets from an organization or let's say the whole hub can filter based on that tag and avoid somehow contaminating their \"training\" data.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Don't you think we should add an "Evaluation" tag for datasets that are meant to be benchmarks and not for training? At least then, when someone is collecting a group of datasets from an organization - or, let's say, the whole Hub - they can filter based on that tag and avoid somehow contaminating their "training" data.
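Something like the sketch below is the workflow being proposed. It is hypothetical: the "evaluation" tag does not exist on the Hub today, and "some-org" is a placeholder organization name.

```python
# Hypothetical: drop benchmark datasets when assembling a training mix.
# The "evaluation" tag is the proposal above, not an existing Hub tag.
from huggingface_hub import HfApi

api = HfApi()
all_ds = api.list_datasets(author="some-org", limit=500)  # placeholder org

train_candidates = [
    d.id for d in all_ds
    if "evaluation" not in (d.tags or [])  # the would-be benchmark tag
]
print(len(train_candidates), "datasets left after dropping benchmarks")
```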
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626237d9bbcbd1c34f1bb231/EJrOjvAL-68qMCYdnvOrq.png", "fullname": "Ali El Filali", "name": "alielfilali01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 186, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "nlpguy", "bgowan", "osanseviero", "adamelliotfields", "Stopwolf", "Martins6", "SVHawk13", "gsarti", "DjMel" ], "count": 9 }, { "reaction": "👀", "users": [ "John6666" ], "count": 1 }, { "reaction": "🚀", "users": [ "Martins6" ], "count": 1 } ]
2024-10-07T08:12:16.000Z
2024-10-07T08:12:16.434Z
[]
/posts/alielfilali01/403616725325659
2,560
0
736297219466033
[ { "type": "text", "value": "🚀 Finishing up the prototype of my weekend project called ChessPT 🚀", "raw": "🚀 Finishing up the prototype of my weekend project called ChessPT 🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- The game state is now being rendered. This simplifies coming up with own new moves", "raw": "- The game state is now being rendered. This simplifies coming up with own new moves", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- The model space ", "raw": "- The model space ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/philipp-zettl/ChessPT", "href": null, "resource": { "type": "space", "id": "philipp-zettl/ChessPT", "discussionNum": null }, "url": "https://huggingface.co/spaces/philipp-zettl/ChessPT", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " was updated to provide an interactive mode.", "raw": " was updated to provide an interactive mode.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- The space is currently running v0.4 of ", "raw": "- The space is currently running v0.4 of ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/philipp-zettl/chessPT", "href": null, "resource": { "type": "model", "id": "philipp-zettl/chessPT", "discussionNum": null }, "url": "https://huggingface.co/philipp-zettl/chessPT", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- New updates will come this week.", "raw": "- New updates will come this week.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Training runs will be logged under ", "raw": "- Training runs will be logged under ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://wandb.ai/philipp-zettl/chessPT/", "href": "https://wandb.ai/philipp-zettl/chessPT/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": 
null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "**Note**: The model is still not performing on a level that I want it to. It predicts too frequently invalid moves (according to the game state). In addition to that the post-processing step is a little faulty, so it might be possible that you end up in a state where the model didn't provide a next move.", "raw": "**Note**: The model is still not performing on a level that I want it to. It predicts too frequently invalid moves (according to the game state). In addition to that the post-processing step is a little faulty, so it might be possible that you end up in a state where the model didn't provide a next move.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🚀 Finishing up the prototype of my weekend project called ChessPT 🚀 - The game state is now being rendered. This simplifies coming up with your own new moves - The model space https://huggingface.co/spaces/philipp-zettl/ChessPT was updated to provide an interactive mode. - The space is currently running v0.4 of https://huggingface.co/philipp-zettl/chessPT - New updates will come this week. - Training runs will be logged under https://wandb.ai/philipp-zettl/chessPT/ **Note**: The model is still not performing at the level that I want it to. It predicts invalid moves (according to the game state) too frequently. In addition, the post-processing step is a little faulty, so you might end up in a state where the model didn't provide a next move.
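On the invalid-move problem: one way to filter predictions (an idea sketched here, not necessarily what the Space currently does) is a legality check with python-chess before accepting a generated move. The predicted move string below is a stand-in for the model's output.

```python
# Reject illegal predicted moves with python-chess (illustrative post-processing idea).
import chess

board = chess.Board()
board.push_san("e4")      # moves played so far
board.push_san("e5")

predicted_san = "Nf3"     # stand-in for a move generated by chessPT

try:
    move = board.parse_san(predicted_san)  # raises ValueError if unparsable/illegal
except ValueError:
    move = None

if move is not None and move in board.legal_moves:
    board.push(move)
    print("accepted:", predicted_san)
else:
    print("rejected:", predicted_san, "- resample or fall back")
```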
{ "avatarUrl": "/avatars/67b2e111ee8541e8033dab5ee1ca0eb6.svg", "fullname": "PZ", "name": "philipp-zettl", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "osanseviero" ], "count": 2 } ]
2024-10-07T08:00:02.000Z
2024-10-07T08:00:02.488Z
[]
/posts/philipp-zettl/736297219466033
1,402
0
917827083735478
[ { "type": "text", "value": "When huggingface patches this, I'm going to be really sad, but in the meantime, here you go:", "raw": "When huggingface patches this, I'm going to be really sad, but in the meantime, here you go:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "When AutoTrain creates a new space to train your model, it does so via the huggingface API. If you modify the code so that it includes a premade README.md file, you can add these two lines:", "raw": "When AutoTrain creates a new space to train your model, it does so via the huggingface API. If you modify the code so that it includes a premade README.md file, you can add these two lines:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```md\n---\napp_port: 8080 # or any integer besides 7860 that's greater than 2 ** 10\nstartup_duration_timeout: 350m\n---\n```", "href": null, "resource": null, "url": null, "code": "---\napp_port: 8080 # or any integer besides 7860 that's greater than 2 ** 10\nstartup_duration_timeout: 350m\n---", "user": null, "label": null, "lang": "md" }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This will tell huggingface to listen for the iframe on your port, instead of the one autotrain is actually hosting on, and because startup time isn't charged, you get the product for free. (you can take this even further by switching compute type to A100 or something)", "raw": "This will tell huggingface to listen for the iframe on your port, instead of the one autotrain is actually hosting on, and because startup time isn't charged, you get the product for free. (you can take this even further by switching compute type to A100 or something)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
When huggingface patches this, I'm going to be really sad, but in the meantime, here you go: When AutoTrain creates a new space to train your model, it does so via the huggingface API. If you modify the code so that it includes a premade README.md file, you can add these two lines: ```md --- app_port: 8080 # or any integer besides 7860 that's greater than 2 ** 10 startup_duration_timeout: 350m --- ``` This will tell huggingface to listen for the iframe on your port, instead of the one autotrain is actually hosting on, and because startup time isn't charged, you get the product for free. (you can take this even further by switching compute type to A100 or something)
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png", "fullname": "Noa Roggendorff", "name": "nroggendorff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 141, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "pepper13", "John6666", "RaushanTurganbay", "leonardlin", "AtAndDev", "Nymbo", "KvrParaskevi" ], "count": 7 }, { "reaction": "🔥", "users": [ "pepper13", "John6666", "AtAndDev", "archit11" ], "count": 4 }, { "reaction": "🚀", "users": [ "pepper13", "John6666", "AtAndDev", "nroggendorff" ], "count": 4 }, { "reaction": "😔", "users": [ "pepper13", "Norod78", "AtAndDev" ], "count": 3 }, { "reaction": "🧠", "users": [ "pepper13", "AtAndDev" ], "count": 2 }, { "reaction": "👍", "users": [ "John6666", "AtAndDev" ], "count": 2 }, { "reaction": "❤️", "users": [ "John6666", "AtAndDev" ], "count": 2 } ]
2024-10-07T05:21:00.000Z
2024-11-02T21:32:35.480Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png", "fullname": "Noa Roggendorff", "name": "nroggendorff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 141, "isFollowing": false } ]
/posts/nroggendorff/917827083735478
2,645
1
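For context on the mechanics in the post above: keys such as `app_port` and `startup_duration_timeout` live in a Space's README front matter. The snippet below is only a generic illustration of pushing such a README to a Space you own with the `huggingface_hub` client; it is not the AutoTrain code path the post talks about, and the repo id is a placeholder.

```python
# Illustration only: push a README with custom front matter to an existing Space
# via huggingface_hub. This is NOT the AutoTrain internals from the post above,
# and the repo_id below is a placeholder.
from huggingface_hub import HfApi

readme = """---
app_port: 8080
startup_duration_timeout: 350m
---
"""

api = HfApi()  # assumes you are already authenticated (e.g. `huggingface-cli login`)
api.upload_file(
    path_or_fileobj=readme.encode("utf-8"),
    path_in_repo="README.md",
    repo_id="your-username/your-space",  # placeholder
    repo_type="space",
)
```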
280106652374834
[ { "type": "text", "value": "Seems that someone is spoofing your servers by replicating profiles on his and possibly gleaning peoples logins and passwords..", "raw": "Seems that someone is spoofing your servers by replicating profiles on his and possibly gleaning peoples logins and passwords..", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://git.lnyan.com/spaces/Lubub-Cruzeiro", "href": "https://git.lnyan.com/spaces/Lubub-Cruzeiro", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I was there to use ", "raw": "I was there to use ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://git.lnyan.com/spaces/Lubub-Cruzeiro/train-dreambooth-lora-sdxl", "href": "https://git.lnyan.com/spaces/Lubub-Cruzeiro/train-dreambooth-lora-sdxl", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "But it said I didn't have login access.. Then I noticed it wasn't the hugginface.co server domain.. ", "raw": "But it said I didn't have login access.. Then I noticed it wasn't the hugginface.co server domain.. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Seems that someone is spoofing your servers by replicating profiles on his own and possibly gleaning people's logins and passwords.. https://git.lnyan.com/spaces/Lubub-Cruzeiro I was there to use https://git.lnyan.com/spaces/Lubub-Cruzeiro/train-dreambooth-lora-sdxl But it said I didn't have login access.. Then I noticed it wasn't the huggingface.co server domain..
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/-E0zRlMzhq2vUoTKuOkE1.jpeg", "fullname": "Kiernan Holland", "name": "batmandelbrot", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "vvidovic" ], "count": 2 } ]
2024-10-07T02:37:02.000Z
2024-10-07T02:37:02.043Z
[]
/posts/batmandelbrot/280106652374834
624
0
241213019380297
[ { "type": "text", "value": "📢 This year I made decent amout of experiments on LLM reasoning capabilities in author opinion extraction. ", "raw": "📢 This year I made decent amout of experiments on LLM reasoning capabilities in author opinion extraction. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "However, they did not go further with:", "raw": "However, they did not go further with:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "↗️ annoation of other sources of opinion causes: entities, out-of-context object (None).", "raw": "↗️ annoation of other sources of opinion causes: entities, out-of-context object (None).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📏 evaluation of factual statements that support the extracted sentiment.", "raw": "📏 evaluation of factual statements that support the extracted sentiment.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "To address these limitations, so far we launch 🚀 RuOpinionNE-2024 competition on the Codalab platform: ", "raw": "To address these limitations, so far we launch 🚀 RuOpinionNE-2024 competition on the Codalab platform: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📊 ", "raw": "📊 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://codalab.lisn.upsaclay.fr/competitions/20244", "href": "https://codalab.lisn.upsaclay.fr/competitions/20244", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " The competition is aimed at extraction of opinion tuples (see attached images) from texts written in Russian.", "raw": " The competition is aimed at extraction of opinion tuples (see attached images) from texts written in Russian.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": 
"new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It proceeds the past RuSentNE-2023 codalab competition findings:", "raw": "It proceeds the past RuSentNE-2023 codalab competition findings:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔎 Past year competition: ", "raw": "🔎 Past year competition: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.dialog-21.ru/media/5896/golubevaplusetal118.pdf", "href": "https://www.dialog-21.ru/media/5896/golubevaplusetal118.pdf", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔎 LLM reasoning 🧠: ", "raw": "🔎 LLM reasoning 🧠: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2404.12342", "href": "https://arxiv.org/abs/2404.12342", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For those who interested to adopt Generative AI, the complete information about competition is below:", "raw": "For those who interested to adopt Generative AI, the complete information about competition is below:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📊 RuOpinionNE-2024: ", "raw": "📊 RuOpinionNE-2024: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://codalab.lisn.upsaclay.fr/competitions/20244", "href": "https://codalab.lisn.upsaclay.fr/competitions/20244", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🗒 Task description: ", "raw": "🗒 Task description: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://codalab.lisn.upsaclay.fr/competitions/20244#learn_the_details-overview", "href": "https://codalab.lisn.upsaclay.fr/competitions/20244#learn_the_details-overview", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": 
null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔔 To follow updates: ", "raw": "🔔 To follow updates: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://t.me/RuOpinionNE2024", "href": "https://t.me/RuOpinionNE2024", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⏰ Stages Deadlines (might be extended)", "raw": "⏰ Stages Deadlines (might be extended)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📦 Submission details (bottom of the competition page)", "raw": "📦 Submission details (bottom of the competition page)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🙋 For questions you can contact ", "raw": "🙋 For questions you can contact ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@nicolay-r", "href": null, "resource": null, "url": null, "code": null, "user": "nicolay-r", "label": null, "lang": null }, { "type": "text", "value": ": ", "raw": ": ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://nicolay-r.github.io/", "href": "https://nicolay-r.github.io/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🧪 Most recent findings on LLM application: ", "raw": "🧪 Most recent findings on LLM application: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/RuSentNE-LLM-Benchmark", "href": "https://github.com/nicolay-r/RuSentNE-LLM-Benchmark", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
📢 This year I ran a decent number of experiments on LLM reasoning capabilities in author opinion extraction. However, they did not go further with: ↗️ annotation of other sources of opinion causes: entities, out-of-context object (None). 📏 evaluation of factual statements that support the extracted sentiment. To address these limitations, we are now launching the 🚀 RuOpinionNE-2024 competition on the Codalab platform: 📊 https://codalab.lisn.upsaclay.fr/competitions/20244 The competition is aimed at the extraction of opinion tuples (see attached images) from texts written in Russian. It follows on from the findings of the past RuSentNE-2023 Codalab competition: 🔎 Past year competition: https://www.dialog-21.ru/media/5896/golubevaplusetal118.pdf 🔎 LLM reasoning 🧠: https://arxiv.org/abs/2404.12342 For those who are interested in adopting Generative AI, the complete information about the competition is below: 📊 RuOpinionNE-2024: https://codalab.lisn.upsaclay.fr/competitions/20244 🗒 Task description: https://codalab.lisn.upsaclay.fr/competitions/20244#learn_the_details-overview 🔔 To follow updates: https://t.me/RuOpinionNE2024 ⏰ Stage deadlines (might be extended) 📦 Submission details (bottom of the competition page) 🙋 For questions you can contact @nicolay-r: https://nicolay-r.github.io/ 🧪 Most recent findings on LLM application: https://github.com/nicolay-r/RuSentNE-LLM-Benchmark
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/OGubjruTcNA2pBN37K0e9.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/hMn0Z8ibINYciv8kT4Mlx.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/lEvUtWVMnuyFniZphkQwi.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/pBwEX75nPdEImnc2d7TGB.jpeg" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49 } ]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-06T22:13:59.000Z
2024-10-07T10:13:16.857Z
[]
/posts/nicolay-r/241213019380297
644
0
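For readers new to the task announced above: an "opinion tuple" in this line of work typically links an opinion source (an entity or the author) to a target, a polarity, and the expression that carries it. The official RuOpinionNE-2024 schema is defined on the competition page and in the attached images; the fields below are only a generic, hypothetical illustration, not the official format.

```python
# Generic, hypothetical illustration of an opinion tuple; NOT the official
# RuOpinionNE-2024 annotation schema (see the competition page for that).
from dataclasses import dataclass


@dataclass
class OpinionTuple:
    source: str      # who expresses the opinion (an entity mention or the author)
    target: str      # whom or what the opinion is about
    polarity: str    # e.g. "positive" or "negative"
    expression: str  # the text span that conveys the sentiment


example = OpinionTuple(
    source="author",
    target="the new policy",
    polarity="negative",
    expression="strongly criticised",
)
print(example)
```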
468222985408486
[ { "type": "text", "value": "🙋🏻‍♂️hey there folks ,", "raw": "🙋🏻‍♂️hey there folks ,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "did you know that ", "raw": "did you know that ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/lmms-lab", "href": "https://huggingface.co/lmms-lab", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " released a new version of 🌋🌋Llava on thursday ? Now it has 🎥video understanding !", "raw": " released a new version of 🌋🌋Llava on thursday ? Now it has 🎥video understanding !", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "check it out 👇🏻", "raw": "check it out 👇🏻", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "collection : ", "raw": "collection : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/lmms-lab/llava-video-661e86f5e8dabc3ff793c944", "href": null, "resource": { "type": "collection", "id": "lmms-lab/llava-video-661e86f5e8dabc3ff793c944", "discussionNum": null }, "url": "https://huggingface.co/collections/lmms-lab/llava-video-661e86f5e8dabc3ff793c944", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "demo : ", "raw": "demo : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Tonic/Llava-Video", "href": null, "resource": { "type": "space", "id": "Tonic/Llava-Video", "discussionNum": null }, "url": "https://huggingface.co/spaces/Tonic/Llava-Video", "code": null, "user": null, "label": null, "lang": null } ]
🙋🏻‍♂️hey there folks , did you know that https://huggingface.co/lmms-lab released a new version of 🌋🌋Llava on Thursday? Now it has 🎥video understanding! check it out 👇🏻 collection : https://huggingface.co/collections/lmms-lab/llava-video-661e86f5e8dabc3ff793c944 demo : https://huggingface.co/spaces/Tonic/Llava-Video
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "qnguyen3", "den0620", "nicolay-r", "KingNish", "Chunte", "geekyrakshit" ], "count": 7 } ]
2024-10-06T11:36:42.000Z
2024-10-06T19:13:48.405Z
[]
/posts/Tonic/468222985408486
2,731
0
598308135735240
[ { "type": "mention", "value": null, "raw": "@jbilcke-hf", "href": null, "resource": null, "url": null, "code": null, "user": "jbilcke-hf", "label": null, "lang": null }, { "type": "text", "value": " hi", "raw": " hi", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
@jbilcke-hf hi
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/OMEqajG_I9VemRa-NndDs.png", "fullname": "Michael bollox", "name": "MichaelBoll", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/2RK8J_YSNAK2ob8XZH7w2.jpeg", "fullname": "Julian Bilcke", "name": "jbilcke-hf", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1312 } ]
[]
2024-10-05T14:06:16.000Z
2024-10-05T14:06:16.495Z
[]
/posts/MichaelBoll/598308135735240
962
0
876213701368367
[ { "type": "text", "value": "Hello guys, ", "raw": "Hello guys, ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I am trying to use the space \"The Stringer\" by Ilaria RVS. ", "raw": "I am trying to use the space \"The Stringer\" by Ilaria RVS. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "When I all used my credits, the say to sign up on hugging face. So i'v signed and paid the pro version. ", "raw": "When I all used my credits, the say to sign up on hugging face. So i'v signed and paid the pro version. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "But I stay enable to generate some new voices...", "raw": "But I stay enable to generate some new voices...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "What I did wrong ??", "raw": "What I did wrong ??", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hello guys, I am trying to use the space "The Stringer" by Ilaria RVS. When I had used up all my credits, it said to sign up on Hugging Face. So I've signed up and paid for the Pro version. But I'm still unable to generate new voices... What did I do wrong??
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/28JVHIXJRFoB090kZjmlE.png", "fullname": "roche", "name": "loanroche12", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-05T10:19:59.000Z
2024-10-05T14:23:06.263Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false } ]
/posts/loanroche12/876213701368367
993
1
285356800575320
[ { "type": "text", "value": "pretty much all of the values in the llama training post are placeholders, so if you dont get a desireable result tweak and tweak and tweak. it took months to get smallama to do anything", "raw": "pretty much all of the values in the llama training post are placeholders, so if you dont get a desireable result tweak and tweak and tweak. it took months to get smallama to do anything", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
pretty much all of the values in the llama training post are placeholders, so if you don't get a desirable result, tweak and tweak and tweak. it took months to get smallama to do anything
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png", "fullname": "Noa Roggendorff", "name": "nroggendorff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 141, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "nicolay-r", "DataSoul" ], "count": 3 } ]
2024-10-05T00:00:45.000Z
2024-10-05T00:00:45.150Z
[]
/posts/nroggendorff/285356800575320
1,518
0
602317635020262
[ { "type": "text", "value": "What are we thinking about MovieGen from Meta? Are the researchers on Hugging Face to be able to ask them questions? ", "raw": "What are we thinking about MovieGen from Meta? Are the researchers on Hugging Face to be able to ask them questions? ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The paper is here: ", "raw": "The paper is here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://ai.meta.com/static-resource/movie-gen-research-paper", "href": "https://ai.meta.com/static-resource/movie-gen-research-paper", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
What are we thinking about MovieGen from Meta? Are the researchers on Hugging Face, so we can ask them questions? The paper is here: https://ai.meta.com/static-resource/movie-gen-research-paper
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem 🤗", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1763, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "nicolay-r", "osanseviero", "ImamaS" ], "count": 4 }, { "reaction": "🤝", "users": [ "Tonic" ], "count": 1 } ]
2024-10-04T22:26:23.000Z
2024-10-04T22:26:23.364Z
[]
/posts/clem/602317635020262
2,057
0
813814838033228
[ { "type": "text", "value": "📢 The fast application of named entity recognition (NER) model towards vast amout of texts usually serves two major pitfalls:", "raw": "📢 The fast application of named entity recognition (NER) model towards vast amout of texts usually serves two major pitfalls:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔴 Limitation of the input window size", "raw": "🔴 Limitation of the input window size", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔴 Drastically slows down the downstream pipeline of the whole application", "raw": "🔴 Drastically slows down the downstream pipeline of the whole application", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⭐ ", "raw": "⭐ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/bulk-ner", "href": "https://github.com/nicolay-r/bulk-ner", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "To address these problems, bulk-ner represent a no-string framework with the handy wrapping over any dynamically linked NER-ml model by providing:", "raw": "To address these problems, bulk-ner represent a no-string framework with the handy wrapping over any dynamically linked NER-ml model by providing:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "☑️ Native long-input contexts handling.", "raw": "☑️ Native long-input contexts handling.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "☑️ Native support of batching (assuming that ML-model engine has the related support too)", "raw": "☑️ Native support of batching (assuming that ML-model engine has the related support too)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", 
"href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "To quick start, sharing the wrapper over DeepPavlov NER models.", "raw": "To quick start, sharing the wrapper over DeepPavlov NER models.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "With the application of such models you can play and bulk your data here:", "raw": "With the application of such models you can play and bulk your data here:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📙 ", "raw": "📙 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://colab.research.google.com/github/nicolay-r/ner-service/blob/main/NER_annotation_service.ipynb", "href": "https://colab.research.google.com/github/nicolay-r/ner-service/blob/main/NER_annotation_service.ipynb", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "(You have to have your data in CSV / JSONL format)", "raw": "(You have to have your data in CSV / JSONL format)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Lastly, it is powered by AREkit pipelines, and therefore could be a part of the relation extraction and complex information retrieval systems:", "raw": "Lastly, it is powered by AREkit pipelines, and therefore could be a part of the relation extraction and complex information retrieval systems:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💻 ", "raw": "💻 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/AREkit", "href": "https://github.com/nicolay-r/AREkit", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📄 ", "raw": "📄 ", "href": null, "resource": null, "url": null, "code": null, "user": 
null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://openreview.net/forum?id=nRybAsJMUt", "href": "https://openreview.net/forum?id=nRybAsJMUt", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
📢 The fast application of a named entity recognition (NER) model to vast amounts of text usually faces two major pitfalls: 🔴 Limitation of the input window size 🔴 It drastically slows down the downstream pipeline of the whole application ⭐ https://github.com/nicolay-r/bulk-ner To address these problems, bulk-ner represents a no-strings framework with a handy wrapper over any dynamically linked NER ML model, providing: ☑️ Native long-input context handling. ☑️ Native support for batching (assuming the ML-model engine has the related support too) To get you started quickly, I'm sharing the wrapper over DeepPavlov NER models. With such models you can play and bulk-process your data here: 📙 https://colab.research.google.com/github/nicolay-r/ner-service/blob/main/NER_annotation_service.ipynb (You have to have your data in CSV / JSONL format) Lastly, it is powered by AREkit pipelines, and therefore could be a part of relation extraction and complex information retrieval systems: 💻 https://github.com/nicolay-r/AREkit 📄 https://openreview.net/forum?id=nRybAsJMUt
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/zR9c6r5BmuDvid4fbFmks.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/_B3ATqUlwF7mNLfd5gPxT.png" } ]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-04T20:03:07.000Z
2024-10-04T20:05:28.511Z
[]
/posts/nicolay-r/813814838033228
739
0
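The long-input pitfall described in the post above is easy to picture with a plain transformers NER pipeline, which can only tag text that fits in the model window. The sketch below is a generic, naive chunking workaround, not bulk-ner's API (see the linked repository for that); the checkpoint name is just a common example and the window sizes are arbitrary.

```python
# Generic illustration of the long-input problem bulk-ner addresses, NOT its API:
# split a long document into overlapping character windows, tag each window with a
# standard transformers NER pipeline, and shift the offsets back.
from transformers import pipeline

ner = pipeline("ner", model="dslim/bert-base-NER", aggregation_strategy="simple")


def ner_long_text(text: str, window: int = 1000, overlap: int = 100):
    entities = []
    step = window - overlap
    for start in range(0, len(text), step):
        chunk = text[start:start + window]
        for ent in ner(chunk):
            ent["start"] += start  # map offsets back to the full document
            ent["end"] += start
            entities.append(ent)
    # Naive: entities inside the overlap may appear twice, and entities cut by a
    # window boundary may be missed; a real framework has to handle both.
    return entities
```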
154493300046779
[ { "type": "text", "value": "Very few people realize that most of the successful AI startups got successful because they were focused on open science and open-source for at least their first few years. To name but a few, OpenAI (GPT, GPT2 was open-source), Runway & Stability (stable diffusion), Cohere, Mistral and of course Hugging Face!", "raw": "Very few people realize that most of the successful AI startups got successful because they were focused on open science and open-source for at least their first few years. To name but a few, OpenAI (GPT, GPT2 was open-source), Runway & Stability (stable diffusion), Cohere, Mistral and of course Hugging Face!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The reasons are not just altruistic, it's also because sharing your science and your models pushes you to build AI faster (which is key in a fast-moving domain like AI), attracts the best scientists & engineers and generates much more visibility, usage and community contributions than if you were 100% closed-source. The same applies to big tech companies as we're seeing with Meta and Google!", "raw": "The reasons are not just altruistic, it's also because sharing your science and your models pushes you to build AI faster (which is key in a fast-moving domain like AI), attracts the best scientists & engineers and generates much more visibility, usage and community contributions than if you were 100% closed-source. The same applies to big tech companies as we're seeing with Meta and Google!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "More startups and companies should release research & open-source AI, it's not just good for the world but also increases their probability of success!", "raw": "More startups and companies should release research & open-source AI, it's not just good for the world but also increases their probability of success!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Very few people realize that most of the successful AI startups got successful because they were focused on open science and open-source for at least their first few years. To name but a few, OpenAI (GPT and GPT-2 were open-source), Runway & Stability (Stable Diffusion), Cohere, Mistral and of course Hugging Face! The reasons are not just altruistic, it's also because sharing your science and your models pushes you to build AI faster (which is key in a fast-moving domain like AI), attracts the best scientists & engineers and generates much more visibility, usage and community contributions than if you were 100% closed-source. The same applies to big tech companies, as we're seeing with Meta and Google! More startups and companies should release research & open-source AI, it's not just good for the world but also increases their probability of success!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem 🤗", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1763, "isFollowing": false }
[]
[]
[ { "reaction": "👍", "users": [ "Jaward", "RalFinger", "prithivMLmods", "ChuckMcSneed", "Chunte", "Presidentlin", "Joseph717171", "Tonic", "Dragunflie-420", "Just823", "jairo", "n0w0f", "harshal01111" ], "count": 13 }, { "reaction": "❤️", "users": [ "AnthonyOlatunji", "louisbrulenaudet", "ChuckMcSneed", "Chunte", "Joseph717171", "Abhiverse01", "Tonic", "KingNish", "mmx31", "lu774067819" ], "count": 10 }, { "reaction": "🚀", "users": [ "monsoon-nlp", "Joseph717171", "tuanlda78202", "den0620", "Tonic", "KingNish" ], "count": 6 }, { "reaction": "👀", "users": [ "John6666", "win10", "Joseph717171", "Tonic" ], "count": 4 } ]
2024-10-04T19:40:28.000Z
2024-10-16T09:53:09.554Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6700fc3a55012f023b488ae9/x6Aif8AR4NweaXkVMcU-A.png", "fullname": "Anthony Olatunji", "name": "AnthonyOlatunji", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg", "fullname": "leroy Samuel Dyer", "name": "LeroyDyer", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 84, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6684ff7ad4e3eff8e1c69917/gtqu8YXjWQ_9ymxGUfYDs.png", "fullname": "Nikki Russell", "name": "Dragunflie-420", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }, { "avatarUrl": "/avatars/744eddaa7dfc34a57df9ce32a78059a0.svg", "fullname": "Tyrone Pierce", "name": "piercyy", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false } ]
/posts/clem/154493300046779
3,700
4
723796456127965
[ { "type": "text", "value": "Version 0.2a of ChessPT is currently training.", "raw": "Version 0.2a of ChessPT is currently training.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I decided to wait with the actual v1.0 until I have a better understanding where I want to go and successfully trained the first fine tune.", "raw": "I decided to wait with the actual v1.0 until I have a better understanding where I want to go and successfully trained the first fine tune.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I'm playing around with a loss that is highly influenced by the idea of reinforcement.", "raw": "I'm playing around with a loss that is highly influenced by the idea of reinforcement.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Basically I'm punishing the model for generating invalid PGN strings.", "raw": "Basically I'm punishing the model for generating invalid PGN strings.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The current approach sets on simplicity", "raw": "The current approach sets on simplicity", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\n-2: wrong characters in output\n-1: invalid PGN string, but valid charset\n0: valid PGN string, incl. valid moves\n```", "href": null, "resource": null, "url": null, "code": "-2: wrong characters in output\n-1: invalid PGN string, but valid charset\n0: valid PGN string, incl. 
valid moves", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "GPT-4o helped me with the implementation. I'm expecting some errors in the implementation.", "raw": "GPT-4o helped me with the implementation. I'm expecting some errors in the implementation.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The training should finish in somewhat 14h, I will upload the new weights then. ", "raw": "The training should finish in somewhat 14h, I will upload the new weights then. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "But I still need to run extensive tests on this loss before I can happily call it v0.2 ✌️", "raw": "But I still need to run extensive tests on this loss before I can happily call it v0.2 ✌️", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "BTW, I'm also building a space for the model which will be published tonight after adding descriptions and a nice interface. ♟️", "raw": "BTW, I'm also building a space for the model which will be published tonight after adding descriptions and a nice interface. 
♟️", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/philipp-zettl/chessPT", "href": null, "resource": { "type": "model", "id": "philipp-zettl/chessPT", "discussionNum": null }, "url": "https://huggingface.co/philipp-zettl/chessPT", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/philipp-zettl/ChessPT", "href": null, "resource": { "type": "space", "id": "philipp-zettl/ChessPT", "discussionNum": null }, "url": "https://huggingface.co/spaces/philipp-zettl/ChessPT", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Version 0.2a of ChessPT is currently training. I decided to wait with the actual v1.0 until I have a better understanding of where I want to go and have successfully trained the first fine-tune. I'm playing around with a loss that is highly influenced by the idea of reinforcement. Basically I'm punishing the model for generating invalid PGN strings. The current approach focuses on simplicity: ``` -2: wrong characters in output -1: invalid PGN string, but valid charset 0: valid PGN string, incl. valid moves ``` GPT-4o helped me with the implementation, so I'm expecting some errors in it. The training should finish in about 14 hours; I will upload the new weights then. But I still need to run extensive tests on this loss before I can happily call it v0.2 ✌️ BTW, I'm also building a space for the model which will be published tonight after adding descriptions and a nice interface. ♟️ https://huggingface.co/philipp-zettl/chessPT https://huggingface.co/spaces/philipp-zettl/ChessPT
{ "avatarUrl": "/avatars/67b2e111ee8541e8033dab5ee1ca0eb6.svg", "fullname": "PZ", "name": "philipp-zettl", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-04T17:43:16.000Z
2024-10-04T17:43:16.577Z
[]
/posts/philipp-zettl/723796456127965
593
0
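The -2 / -1 / 0 scheme quoted in the post above can be pictured as a scoring function over a generated move string: first check the character set, then check whether the moves parse as a legal game. The sketch below is one possible reading of that scheme, assuming python-chess for validation; it is not the author's actual loss code (which also has to feed the penalty back into training), and the character set is a rough assumption.

```python
# One possible reading of the -2 / -1 / 0 penalty from the post above, using
# python-chess for validation. NOT the author's actual training loss.
import chess

PGN_CHARS = set("KQRBNabcdefgh12345678xO-+=# .")  # rough SAN/PGN charset (assumption)


def pgn_penalty(generated: str) -> int:
    if any(ch not in PGN_CHARS for ch in generated):
        return -2  # wrong characters in output
    board = chess.Board()
    for token in generated.split():
        if token[0].isdigit() and token.endswith("."):
            continue  # skip move numbers such as "1."
        try:
            board.push_san(token)
        except ValueError:
            return -1  # valid charset, but not a legal move sequence
    return 0  # valid PGN-style move string with legal moves


print(pgn_penalty("1. e4 e5 2. Nf3 Nc6"))  # 0
print(pgn_penalty("1. e4 e9"))             # -1 ("e9" is not a legal move)
print(pgn_penalty("1. e4 z9"))             # -2 (character outside the assumed charset)
```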
608895857001383
[ { "type": "text", "value": "New hobby: creating AI research paper arts lol, using pymupdf to extract text and add background then animate with runway:) code coming soon…", "raw": "New hobby: creating AI research paper arts lol, using pymupdf to extract text and add background then animate with runway:) code coming soon…", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
New hobby: creating AI research paper arts lol, using pymupdf to extract text and add background then animate with runway:) code coming soon…
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg", "fullname": "Jaward Sesay", "name": "Jaward", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 191, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/HcB9nNg5lPJimjIhgMvQL.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/Q7PrdQg1gsp_leZOCV10q.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/gVOZHG9t76a_NdoiF7nmd.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/b7ieDw6O2owt0WgaYXvAu.mp4" } ]
[]
[ { "reaction": "🔥", "users": [ "FelipeMahlow", "AIAJAY", "John6666", "todeschini", "dinesh-001", "Jour", "tezuesh", "nslaughter", "AtAndDev", "IzzyR", "kaanhho", "jairo", "PriyankSisodia" ], "count": 13 }, { "reaction": "🚀", "users": [ "prithivMLmods", "AtAndDev" ], "count": 2 } ]
2024-10-04T16:02:51.000Z
2024-10-04T16:04:55.920Z
[]
/posts/Jaward/608895857001383
2,571
0
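Since the post above says the code is coming soon, the snippet here is only a guess at the first step it mentions: extracting page text from a paper PDF with PyMuPDF (imported as `fitz`). The file name is a placeholder, and the background and Runway animation steps are not shown.

```python
# Guess at the extraction step only (the post's own code isn't published yet):
# pull the text of each page from a research-paper PDF with PyMuPDF.
import fitz  # PyMuPDF

doc = fitz.open("paper.pdf")  # placeholder file name
pages_text = [page.get_text() for page in doc]
print(pages_text[0][:500])  # first 500 characters of the first page
```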
401127525324824
[ { "type": "text", "value": "still sending all your info to a black-box api when using an llm? you know it's bad. try this instead: you can run dozens of models right in your browser from Google, Microsoft, Mistral, Meta, Qwen, and Smollm. private and secure. you'll thank me later.", "raw": "still sending all your info to a black-box api when using an llm? you know it's bad. try this instead: you can run dozens of models right in your browser from Google, Microsoft, Mistral, Meta, Qwen, and Smollm. private and secure. you'll thank me later.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/cfahlgren1/webllm-playground", "href": null, "resource": { "type": "space", "id": "cfahlgren1/webllm-playground", "discussionNum": null }, "url": "https://huggingface.co/spaces/cfahlgren1/webllm-playground", "code": null, "user": null, "label": null, "lang": null } ]
still sending all your info to a black-box api when using an llm? you know it's bad. try this instead: you can run dozens of models right in your browser from Google, Microsoft, Mistral, Meta, Qwen, and Smollm. private and secure. you'll thank me later. https://huggingface.co/spaces/cfahlgren1/webllm-playground
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/5rwVdtMs0PrBsf4A7xk_e.gif" } ]
[]
[ { "reaction": "👀", "users": [ "John6666", "louisbrulenaudet" ], "count": 2 } ]
2024-10-04T13:57:58.000Z
2024-10-17T07:48:43.863Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6316fb937b0ee0136e5f1220/poHBoJ7QAF_s2CCaosdvQ.jpeg", "fullname": "Firstname Lastname", "name": "takeraparterer", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 29, "isFollowing": false } ]
/posts/fdaudens/401127525324824
626
2
818276151583099
[ { "type": "text", "value": "🙋🏻‍♂️ Hey there folks ,", "raw": "🙋🏻‍♂️ Hey there folks ,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🦎Salamandra release by ", "raw": "🦎Salamandra release by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@mvillegas", "href": null, "resource": null, "url": null, "code": null, "user": "mvillegas", "label": null, "lang": null }, { "type": "text", "value": " and team", "raw": " and team", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "@BSC_CNS ", "raw": "@BSC_CNS ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/BSC-LT", "href": "https://huggingface.co/BSC-LT", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " is absolutely impressive so far !", "raw": " is absolutely impressive so far !", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "perhaps the largest single training dataset of high quality text to date of 7.8 trillion tokens in 35 European languages and code.", "raw": "perhaps the largest single training dataset of high quality text to date of 7.8 trillion tokens in 35 European languages and code.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "the best part : the data was correctly licenced so it's actually future-proof! ", "raw": "the best part : the data was correctly licenced so it's actually future-proof! 
", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "the completions model is really creative and instruct fine tuned version is very good also.", "raw": "the completions model is really creative and instruct fine tuned version is very good also.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "now you can use such models for multi-lingual enterprise applications with further finetunes , long response generation, structured outputs (coding) also works.", "raw": "now you can use such models for multi-lingual enterprise applications with further finetunes , long response generation, structured outputs (coding) also works.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "check out 👇🏻", "raw": "check out 👇🏻", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "the collection : ", "raw": "the collection : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/BSC-LT/salamandra-66fc171485944df79469043a", "href": null, "resource": { "type": "collection", "id": "BSC-LT/salamandra-66fc171485944df79469043a", "discussionNum": null }, "url": "https://huggingface.co/collections/BSC-LT/salamandra-66fc171485944df79469043a", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "the repo : ", "raw": "the repo : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/langtech-bsc/salamandra", "href": "https://github.com/langtech-bsc/salamandra", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "7B-Instruct demo : ", "raw": "7B-Instruct demo : ", "href": null, "resource": null, "url": 
null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/Tonic/Salamandra-7B", "href": null, "resource": { "type": "space", "id": "Tonic/Salamandra-7B", "discussionNum": null }, "url": "https://huggingface.co/spaces/Tonic/Salamandra-7B", "code": null, "user": null, "label": null, "lang": null } ]
🙋🏻‍♂️ Hey there folks, 🦎 The Salamandra release by @mvillegas and team @BSC_CNS https://huggingface.co/BSC-LT is absolutely impressive so far! It comes with perhaps the largest single training dataset of high-quality text to date: 7.8 trillion tokens across 35 European languages and code. The best part: the data was correctly licensed, so it's actually future-proof! The completions model is really creative and the instruct fine-tuned version is also very good. You can now use such models for multilingual enterprise applications with further finetunes; long response generation and structured outputs (coding) also work. Check out 👇🏻 the collection: https://huggingface.co/collections/BSC-LT/salamandra-66fc171485944df79469043a the repo: https://github.com/langtech-bsc/salamandra the 7B-Instruct demo: https://huggingface.co/spaces/Tonic/Salamandra-7B
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }
[]
[ { "avatarUrl": "/avatars/7a350649ab7261dcbcfb3df40b54d3ab.svg", "fullname": "Marta Villegas", "name": "mvillegas", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 6 } ]
[ { "reaction": "👀", "users": [ "John6666", "monsoon-nlp", "malteos", "D1rtyB1rd", "jayebaku", "LuisVasquezBSC", "ibaucells" ], "count": 7 } ]
2024-10-04T11:29:49.000Z
2024-10-04T11:29:49.304Z
[]
/posts/Tonic/818276151583099
1,848
0
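For anyone who wants to try the release discussed above locally rather than in the demo Space, here is a minimal sketch using transformers. The instruct checkpoint name and the chat-template call are assumptions based on standard Hugging Face conventions; generation settings are illustrative, not the team's recommendations.

```python
# Minimal sketch of running a Salamandra instruct model with transformers.
# The exact repo id below is an assumption; check the BSC-LT collection.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "BSC-LT/salamandra-7b-instruct"  # assumed checkpoint name
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [{"role": "user", "content": "Explica breument què és una salamandra."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(inputs, max_new_tokens=200)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))
```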
957178001915012
[ { "type": "text", "value": "📜 𝐎𝐥𝐝-𝐬𝐜𝐡𝐨𝐨𝐥 𝐑𝐍𝐍𝐬 𝐜𝐚𝐧 𝐚𝐜𝐭𝐮𝐚𝐥𝐥𝐲 𝐫𝐢𝐯𝐚𝐥 𝐟𝐚𝐧𝐜𝐲 𝐭𝐫𝐚𝐧𝐬𝐟𝐨𝐫𝐦𝐞𝐫𝐬!", "raw": "📜 𝐎𝐥𝐝-𝐬𝐜𝐡𝐨𝐨𝐥 𝐑𝐍𝐍𝐬 𝐜𝐚𝐧 𝐚𝐜𝐭𝐮𝐚𝐥𝐥𝐲 𝐫𝐢𝐯𝐚𝐥 𝐟𝐚𝐧𝐜𝐲 𝐭𝐫𝐚𝐧𝐬𝐟𝐨𝐫𝐦𝐞𝐫𝐬!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Researchers from Mila and Borealis AI just have shown that simplified versions of good old Recurrent Neural Networks (RNNs) can match the performance of today's transformers.", "raw": "Researchers from Mila and Borealis AI just have shown that simplified versions of good old Recurrent Neural Networks (RNNs) can match the performance of today's transformers.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "They took a fresh look at LSTMs (from 1997!) and GRUs (from 2014). They stripped these models down to their bare essentials, creating \"minLSTM\" and \"minGRU\". The key changes:", "raw": "They took a fresh look at LSTMs (from 1997!) and GRUs (from 2014). They stripped these models down to their bare essentials, creating \"minLSTM\" and \"minGRU\". The key changes:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "❶ Removed dependencies on previous hidden states in the gates", "raw": "❶ Removed dependencies on previous hidden states in the gates", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "❷ Dropped the tanh that had been added to restrict output range in order to avoid vanishing gradients", "raw": "❷ Dropped the tanh that had been added to restrict output range in order to avoid vanishing gradients", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "❸ Ensured outputs are time-independent in scale (not sure I understood that well either, don't worry)", "raw": "❸ Ensured outputs are time-independent in scale (not sure I understood that well either, don't worry)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": 
null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚡️ As a result, you can use a “parallel scan” algorithm to train these new, minimal RNNs, in parallel, taking 88% more memory but also making them 200x faster than their traditional counterparts for long sequences", "raw": "⚡️ As a result, you can use a “parallel scan” algorithm to train these new, minimal RNNs, in parallel, taking 88% more memory but also making them 200x faster than their traditional counterparts for long sequences", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔥 The results are mind-blowing! Performance-wise, they go toe-to-toe with Transformers or Mamba.", "raw": "🔥 The results are mind-blowing! Performance-wise, they go toe-to-toe with Transformers or Mamba.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And for Language Modeling, they need 2.5x fewer training steps than Transformers to reach the same performance! 🚀", "raw": "And for Language Modeling, they need 2.5x fewer training steps than Transformers to reach the same performance! 
🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🤔 Why does this matter?", "raw": "🤔 Why does this matter?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "By showing there are simpler models with similar performance to transformers, this challenges the narrative that we need advanced architectures for better performance!", "raw": "By showing there are simpler models with similar performance to transformers, this challenges the narrative that we need advanced architectures for better performance!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💬 François Chollet wrote in a tweet about this paper:", "raw": "💬 François Chollet wrote in a tweet about this paper:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "“The fact that there are many recent architectures coming from different directions that roughly match Transformers is proof that architectures aren't fundamentally important in the curve-fitting paradigm (aka deep learning)”", "raw": "“The fact that there are many recent architectures coming from different directions that roughly match Transformers is proof that architectures aren't fundamentally important in the curve-fitting paradigm (aka deep learning)”", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "“Curve-fitting is about embedding a dataset on a curve. The critical factor is the dataset, not the specific hard-coded bells and whistles that constrain the curve's shape.”", "raw": "“Curve-fitting is about embedding a dataset on a curve. 
The critical factor is the dataset, not the specific hard-coded bells and whistles that constrain the curve's shape.”", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It’s the Bitter lesson by Rich Sutton striking again: don’t need fancy thinking architectures, just scale up your model and data!", "raw": "It’s the Bitter lesson by Rich Sutton striking again: don’t need fancy thinking architectures, just scale up your model and data!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read the paper 👉 ", "raw": "Read the paper 👉 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2410.01201", "href": null, "resource": { "type": "paper", "id": "2410.01201", "discussionNum": null }, "url": "https://huggingface.co/papers/2410.01201", "code": null, "user": null, "label": "Were RNNs All We Needed? (2410.01201)", "lang": null } ]
📜 𝐎𝐥𝐝-𝐬𝐜𝐡𝐨𝐨𝐥 𝐑𝐍𝐍𝐬 𝐜𝐚𝐧 𝐚𝐜𝐭𝐮𝐚𝐥𝐥𝐲 𝐫𝐢𝐯𝐚𝐥 𝐟𝐚𝐧𝐜𝐲 𝐭𝐫𝐚𝐧𝐬𝐟𝐨𝐫𝐦𝐞𝐫𝐬! Researchers from Mila and Borealis AI have just shown that simplified versions of good old Recurrent Neural Networks (RNNs) can match the performance of today's transformers. They took a fresh look at LSTMs (from 1997!) and GRUs (from 2014). They stripped these models down to their bare essentials, creating "minLSTM" and "minGRU". The key changes: ❶ Removed dependencies on previous hidden states in the gates ❷ Dropped the tanh that had been added to restrict output range in order to avoid vanishing gradients ❸ Ensured outputs are time-independent in scale (not sure I understood that well either, don't worry) ⚡️ As a result, you can use a “parallel scan” algorithm to train these new, minimal RNNs in parallel, taking 88% more memory but also making them 200x faster than their traditional counterparts for long sequences. 🔥 The results are mind-blowing! Performance-wise, they go toe-to-toe with Transformers or Mamba. And for Language Modeling, they need 2.5x fewer training steps than Transformers to reach the same performance! 🚀 🤔 Why does this matter? By showing there are simpler models with similar performance to transformers, this challenges the narrative that we need advanced architectures for better performance! 💬 François Chollet wrote in a tweet about this paper: “The fact that there are many recent architectures coming from different directions that roughly match Transformers is proof that architectures aren't fundamentally important in the curve-fitting paradigm (aka deep learning)” “Curve-fitting is about embedding a dataset on a curve. The critical factor is the dataset, not the specific hard-coded bells and whistles that constrain the curve's shape.” It’s the Bitter Lesson by Rich Sutton striking again: you don’t need fancy architectures, just scale up your model and data! Read the paper 👉 https://huggingface.co/papers/2410.01201
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/fwEjTXIP9JNL9V84bVqUF.png" } ]
[]
[ { "reaction": "👍", "users": [ "ajibawa-2023", "YaTharThShaRma999", "Q-bert", "deusilence", "ethancl1", "den0620", "antiven0m", "nnikolovskii", "Joseph717171", "kotyKD", "DmitryRyumin", "slai1988", "fblgit", "Ryukijano", "phi0112358", "privategeek24", "ntnq" ], "count": 17 }, { "reaction": "👀", "users": [ "YaTharThShaRma999", "John6666", "Joseph717171", "connorads", "louisbrulenaudet" ], "count": 5 } ]
2024-10-04T09:55:23.000Z
2024-10-05T16:33:51.635Z
[ { "avatarUrl": "/avatars/ea4398745974d781ae9dc0e95b12cabe.svg", "fullname": "Joseph", "name": "Joseph717171", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 22, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6401c8c9f98fbc64bcd7dca1/MOSgc_mPbfUZ-354osy1v.png", "fullname": "FBL", "name": "fblgit", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 228, "isFollowing": false } ]
/posts/m-ric/957178001915012
3,035
2
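Since the post above compresses the paper quite a bit, here is a tiny sketch of the minGRU recurrence it describes: the gate and candidate depend only on the current input (change ❶) and the tanh is gone (change ❷). It is written as a plain sequential loop for readability; the paper trains the same recurrence with a parallel scan instead, and the layer sizes here are illustrative.

```python
# Toy minGRU as summarized in the post: z_t = sigmoid(W_z x_t),
# h~_t = W_h x_t, and h_t = (1 - z_t) * h_{t-1} + z_t * h~_t.
# Sequential loop for clarity; the paper uses a parallel scan for training.
import torch
import torch.nn as nn

class MinGRU(nn.Module):
    def __init__(self, d_in, d_hidden):
        super().__init__()
        self.to_z = nn.Linear(d_in, d_hidden)        # gate from input only
        self.to_h_tilde = nn.Linear(d_in, d_hidden)  # candidate, no tanh

    def forward(self, x):                  # x: (batch, seq_len, d_in)
        b, t, _ = x.shape
        h = torch.zeros(b, self.to_z.out_features, device=x.device)
        outs = []
        for step in range(t):
            z = torch.sigmoid(self.to_z(x[:, step]))
            h_tilde = self.to_h_tilde(x[:, step])
            h = (1 - z) * h + z * h_tilde
            outs.append(h)
        return torch.stack(outs, dim=1)

layer = MinGRU(d_in=32, d_hidden=64)
print(layer(torch.randn(2, 10, 32)).shape)  # torch.Size([2, 10, 64])
```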
309725119758265
[ { "type": "text", "value": "🚀 New Model Release: zamal/Molmo-7B-GPTQ-4bit 🚀", "raw": "🚀 New Model Release: zamal/Molmo-7B-GPTQ-4bit 🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Hello lovely community,", "raw": "Hello lovely community,", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/zamal/Molmo-7B-GPTQ-4bit", "href": null, "resource": { "type": "model", "id": "zamal/Molmo-7B-GPTQ-4bit", "discussionNum": null }, "url": "https://huggingface.co/zamal/Molmo-7B-GPTQ-4bit", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " model is now available for all! This model has been highly quantized, reducing its size by almost six times. It now occupies significantly less space and vRAM, making it perfect for deployment on resource-constrained devices without compromising performance.", "raw": " model is now available for all! This model has been highly quantized, reducing its size by almost six times. It now occupies significantly less space and vRAM, making it perfect for deployment on resource-constrained devices without compromising performance.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Now we get:", "raw": "Now we get:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Efficient Performance: Maintains high accuracy while being highly quantized.", "raw": "Efficient Performance: Maintains high accuracy while being highly quantized.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Reduced Size: The model size is reduced by nearly six times, optimizing storage and memory usage.", "raw": "Reduced Size: The model size is reduced by nearly six times, optimizing storage and memory usage.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": 
null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Versatile Application: Ideal for integrating a powerful visual language model into various projects particularly multi rag chains.", "raw": "Versatile Application: Ideal for integrating a powerful visual language model into various projects particularly multi rag chains.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Check it out!", "raw": "Check it out!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🚀 New Model Release: zamal/Molmo-7B-GPTQ-4bit 🚀 Hello lovely community, the https://huggingface.co/zamal/Molmo-7B-GPTQ-4bit model is now available for all! This model has been heavily quantized, reducing its size by almost six times. It now occupies significantly less disk space and vRAM, making it perfect for deployment on resource-constrained devices without compromising performance. What you get: Efficient Performance: maintains high accuracy while being heavily quantized. Reduced Size: the model size is reduced by nearly six times, optimizing storage and memory usage. Versatile Application: ideal for integrating a powerful visual language model into various projects, particularly multi-RAG chains. Check it out!
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6404403bad54665351d42ee2/TCC5Na8ojtSL1MJAzTn3b.png", "fullname": "zamal_", "name": "zamal", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 23, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "mediiiiii3", "Eric7060", "iamrobotbear" ], "count": 4 }, { "reaction": "🤗", "users": [ "nroggendorff", "nickandbro", "ijohn07" ], "count": 3 }, { "reaction": "🚀", "users": [ "lunarflu", "pcuenq" ], "count": 2 }, { "reaction": "🤯", "users": [ "mediiiiii3" ], "count": 1 }, { "reaction": "🔥", "users": [ "nroggendorff" ], "count": 1 } ]
2024-10-03T21:24:50.000Z
2024-10-04T09:54:45.077Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64ad8fdae4178b45e83996dd/DqKEgmUCa_UN0UaLubJja.jpeg", "fullname": "%0t", "name": "mediiiiii3", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false } ]
/posts/zamal/309725119758265
1,938
1
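The post above focuses on the size and vRAM savings, so here is a minimal sketch of loading the 4-bit checkpoint and checking its footprint. Molmo ships custom modeling code, so `trust_remote_code` is needed, and GPTQ loading relies on the optimum / GPTQ backends being installed; treat the exact call pattern and repo layout as assumptions rather than the author's documented usage.

```python
# Minimal sketch: load the GPTQ 4-bit Molmo checkpoint and inspect its size.
# Assumes the optimum + GPTQ kernels are installed for quantized inference.
import torch
from transformers import AutoModelForCausalLM, AutoProcessor

repo = "zamal/Molmo-7B-GPTQ-4bit"
processor = AutoProcessor.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    trust_remote_code=True,
    device_map="auto",
    torch_dtype=torch.float16,
)

# Rough check of the claimed ~6x reduction versus an fp16 Molmo-7B.
print(f"~{model.get_memory_footprint() / 1e9:.1f} GB in memory")
```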
527665072738819
[ { "type": "text", "value": "Hyperdimensional Computing + Neural Network, tell your friends. To my knowledge, this is a completely novel implementation of HDC+Neural Networks. It would be a direct competitor to Transformers. It is off the charts more computationally efficient than Transformers could ever hope to be (which is why I tested it in the first place). It is far more similar to biological processes. My testing so far shows that it works surprisingly well. One surprise so far from my testing, adding an Attention Mechanism to the model does nothing at all. Weirdest thing. Like 1% performance increase. I guess Attention Is Not All You Need?", "raw": "Hyperdimensional Computing + Neural Network, tell your friends. To my knowledge, this is a completely novel implementation of HDC+Neural Networks. It would be a direct competitor to Transformers. It is off the charts more computationally efficient than Transformers could ever hope to be (which is why I tested it in the first place). It is far more similar to biological processes. My testing so far shows that it works surprisingly well. One surprise so far from my testing, adding an Attention Mechanism to the model does nothing at all. Weirdest thing. Like 1% performance increase. I guess Attention Is Not All You Need?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I made a Github repository for my Hyperdimensional Computing Neural Network: ", "raw": "I made a Github repository for my Hyperdimensional Computing Neural Network: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/RichardAragon/HyperDimensionalComputingNeuralNetwork", "href": "https://github.com/RichardAragon/HyperDimensionalComputingNeuralNetwork", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I made a YouTube video showcasing the model and some of my experiments with it: ", "raw": "I made a YouTube video showcasing the model and some of my experiments with it: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://youtu.be/Eg51o519zVM", "href": "https://youtu.be/Eg51o519zVM", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hyperdimensional Computing + Neural Network, tell your friends. To my knowledge, this is a completely novel implementation of HDC+Neural Networks. It would be a direct competitor to Transformers. It is off the charts more computationally efficient than Transformers could ever hope to be (which is why I tested it in the first place). It is far more similar to biological processes. My testing so far shows that it works surprisingly well. One surprise so far from my testing, adding an Attention Mechanism to the model does nothing at all. Weirdest thing. Like 1% performance increase. I guess Attention Is Not All You Need? I made a Github repository for my Hyperdimensional Computing Neural Network: https://github.com/RichardAragon/HyperDimensionalComputingNeuralNetwork I made a YouTube video showcasing the model and some of my experiments with it: https://youtu.be/Eg51o519zVM
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "Joseph717171", "vedprakash9867", "flflow", "Pajke" ], "count": 5 }, { "reaction": "🚀", "users": [ "Joseph717171", "London12345", "Tanvir1337", "den0620" ], "count": 4 }, { "reaction": "😎", "users": [ "LeroyDyer", "Joseph717171" ], "count": 2 }, { "reaction": "🤗", "users": [ "Joseph717171" ], "count": 1 } ]
2024-10-03T18:36:00.000Z
2024-10-04T03:44:30.527Z
[ { "avatarUrl": "/avatars/ea4398745974d781ae9dc0e95b12cabe.svg", "fullname": "Joseph", "name": "Joseph717171", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 22, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/642cc1c253e76b4c2286c58e/fGtQ_QeTjUgBhIT89dpUt.jpeg", "fullname": "rombo dawg", "name": "rombodawg", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 184, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png", "fullname": "Richard A Aragon", "name": "TuringsSolutions", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 146, "isFollowing": false } ]
/posts/TuringsSolutions/527665072738819
1,887
4
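The post above stays high-level, so here is a toy illustration of the hyperdimensional-computing primitives such a hybrid builds on, as they are classically defined: random bipolar hypervectors, binding by elementwise multiplication, and bundling by a majority sign. This is the textbook formulation, not the author's exact implementation (that lives in his linked GitHub repo).

```python
# Classic HDC primitives: random bipolar hypervectors, bind (*), bundle (sign-sum).
import numpy as np

rng = np.random.default_rng(0)
D = 10_000  # hypervector dimensionality

def random_hv():
    return rng.choice([-1, 1], size=D)

def bind(a, b):        # associate two hypervectors; result ~orthogonal to both
    return a * b

def bundle(*hvs):      # superpose several hypervectors into one
    return np.sign(np.sum(hvs, axis=0))

def similarity(a, b):  # cosine similarity
    return float(a @ b) / (np.linalg.norm(a) * np.linalg.norm(b))

color, shape, red, circle = (random_hv() for _ in range(4))
record = bundle(bind(color, red), bind(shape, circle))

# Unbinding with the 'color' key recovers something close to 'red' ...
print(similarity(bind(record, color), red))     # ~0.7 with two bundled pairs
# ... and stays near-orthogonal to unrelated vectors.
print(similarity(bind(record, color), circle))  # ~0.0
```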
519292490434064
[ { "type": "text", "value": "NEW releases for today:", "raw": "NEW releases for today:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- We've brought our new Esper 2 model to Llama 3.2! The DevOps-first Esper finetunes use our newest open source datasets. Get the new Esper: ", "raw": "- We've brought our new Esper 2 model to Llama 3.2! The DevOps-first Esper finetunes use our newest open source datasets. Get the new Esper: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/ValiantLabs/Llama3.2-3B-Esper2", "href": null, "resource": { "type": "model", "id": "ValiantLabs/Llama3.2-3B-Esper2", "discussionNum": null }, "url": "https://huggingface.co/ValiantLabs/Llama3.2-3B-Esper2", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Some new merged models, combining Shining Valiant 2 with the other Build Tools:", "raw": "- Some new merged models, combining Shining Valiant 2 with the other Build Tools:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - ", "raw": " - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/sequelbox/Llama3.1-8B-PlumCode", "href": null, "resource": { "type": "model", "id": "sequelbox/Llama3.1-8B-PlumCode", "discussionNum": null }, "url": "https://huggingface.co/sequelbox/Llama3.1-8B-PlumCode", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - ", "raw": " - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/sequelbox/Llama3.1-8B-PlumChat", "href": null, "resource": { "type": "model", "id": "sequelbox/Llama3.1-8B-PlumChat", "discussionNum": null }, "url": "https://huggingface.co/sequelbox/Llama3.1-8B-PlumChat", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - ", "raw": " - ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/sequelbox/Llama3.1-8B-PlumMath", "href": null, "resource": { "type": "model", "id": "sequelbox/Llama3.1-8B-PlumMath", "discussionNum": null }, "url": 
"https://huggingface.co/sequelbox/Llama3.1-8B-PlumMath", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "more to come soon :) ", "raw": "more to come soon :) ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
NEW releases for today: - We've brought our new Esper 2 model to Llama 3.2! The DevOps-first Esper finetunes use our newest open source datasets. Get the new Esper: https://huggingface.co/ValiantLabs/Llama3.2-3B-Esper2 - Some new merged models, combining Shining Valiant 2 with the other Build Tools: - https://huggingface.co/sequelbox/Llama3.1-8B-PlumCode - https://huggingface.co/sequelbox/Llama3.1-8B-PlumChat - https://huggingface.co/sequelbox/Llama3.1-8B-PlumMath more to come soon :)
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63444f2687964b331809eb55/WvZivsvKsM_t0tBtakovK.png", "fullname": "t.d.a.g.", "name": "sequelbox", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 51, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "a9i" ], "count": 2 }, { "reaction": "🔥", "users": [ "lunarflu", "zoeywin" ], "count": 2 } ]
2024-10-03T18:14:39.000Z
2024-10-04T08:17:03.884Z
[ { "avatarUrl": "/avatars/9e0fd122ee69f1af406d9b8b3b9b48bf.svg", "fullname": "jean-baptiste debard", "name": "jbdebard", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/sequelbox/519292490434064
1,237
1
770757592384559
[ { "type": "text", "value": "My biggest release of the year: a series of 7 specialized embedding models for information retrieval within tax documents, is now available for free on Hugging Face 🤗", "raw": "My biggest release of the year: a series of 7 specialized embedding models for information retrieval within tax documents, is now available for free on Hugging Face 🤗", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "These new models aim to offer an open source alternative for in-domain semantic search from large text corpora and will improve RAG systems and context addition for large language models.", "raw": "These new models aim to offer an open source alternative for in-domain semantic search from large text corpora and will improve RAG systems and context addition for large language models.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Trained on more than 43 million tax tokens derived from semi-synthetic and raw-synthetic data, enriched by various methods (in particular MSFT's evol-instruct by ", "raw": "Trained on more than 43 million tax tokens derived from semi-synthetic and raw-synthetic data, enriched by various methods (in particular MSFT's evol-instruct by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@intfloat", "href": null, "resource": null, "url": null, "code": null, "user": "intfloat", "label": null, "lang": null }, { "type": "text", "value": "), and corrected by humans, this project is the fruit of hundreds of hours of work and is the culmination of a global effort to open up legal technologies that has only just begun.", "raw": "), and corrected by humans, this project is the fruit of hundreds of hours of work and is the culmination of a global effort to open up legal technologies that has only just begun.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "A big thank you to Microsoft for Startups for giving me access to state-of-the-art infrastructure to train these models, and to ", "raw": "A big thank you to Microsoft for Startups for giving me access to state-of-the-art infrastructure to train these models, and to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@julien-c", "href": null, "resource": 
null, "url": null, "code": null, "user": "julien-c", "label": null, "lang": null }, { "type": "text", "value": ", ", "raw": ", ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@clem", "href": null, "resource": null, "url": null, "code": null, "user": "clem", "label": null, "lang": null }, { "type": "text", "value": " 🤗, ", "raw": " 🤗, ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@thomwolf", "href": null, "resource": null, "url": null, "code": null, "user": "thomwolf", "label": null, "lang": null }, { "type": "text", "value": " and the whole HF team for the inference endpoint API and the generous provision of Meta LLama-3.1-70B. Special thanks also to ", "raw": " and the whole HF team for the inference endpoint API and the generous provision of Meta LLama-3.1-70B. Special thanks also to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@tomaarsen", "href": null, "resource": null, "url": null, "code": null, "user": "tomaarsen", "label": null, "lang": null }, { "type": "text", "value": " for his invaluable advice on training embedding models and Loss functions ❤️", "raw": " for his invaluable advice on training embedding models and Loss functions ❤️", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Models are available on my personal HF page, into the Lemone-embed collection: ", "raw": "Models are available on my personal HF page, into the Lemone-embed collection: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/collections/louisbrulenaudet/lemone-embed-66fdc24000df732b395df29b", "href": null, "resource": { "type": "collection", "id": "louisbrulenaudet/lemone-embed-66fdc24000df732b395df29b", "discussionNum": null }, "url": "https://huggingface.co/collections/louisbrulenaudet/lemone-embed-66fdc24000df732b395df29b", "code": null, "user": null, "label": null, "lang": null } ]
My biggest release of the year, a series of 7 specialized embedding models for information retrieval within tax documents, is now available for free on Hugging Face 🤗 These new models aim to offer an open source alternative for in-domain semantic search over large text corpora and will improve RAG systems and context addition for large language models. Trained on more than 43 million tax tokens derived from semi-synthetic and raw-synthetic data, enriched by various methods (in particular MSFT's evol-instruct by @intfloat) and corrected by humans, this project is the fruit of hundreds of hours of work and the culmination of a global effort to open up legal technologies that has only just begun. A big thank you to Microsoft for Startups for giving me access to state-of-the-art infrastructure to train these models, and to @julien-c, @clem 🤗, @thomwolf and the whole HF team for the inference endpoint API and the generous provision of Meta Llama-3.1-70B. Special thanks also to @tomaarsen for his invaluable advice on training embedding models and loss functions ❤️ The models are available on my personal HF page, in the Lemone-embed collection: https://huggingface.co/collections/louisbrulenaudet/lemone-embed-66fdc24000df732b395df29b
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6459fa0f5b3111fbe83286e1/UhCa7JNbtTjC6dgOjZtH0.jpeg", "fullname": "Louis Brulé Naudet", "name": "louisbrulenaudet", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 174, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6459fa0f5b3111fbe83286e1/GR8wlPX5wC2XmqX-TRlP9.jpeg" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem 🤗", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1763 }, { "avatarUrl": "/avatars/5a1ee74c2dbe349a6ec9843a1599d281.svg", "fullname": "Liang Wang", "name": "intfloat", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 263 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg", "fullname": "Julien Chaumond", "name": "julien-c", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1580 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857746553-5df7e9e5da6d0311fd3d53f9.jpeg", "fullname": "Thomas Wolf", "name": "thomwolf", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 704 }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6317233cc92fd6fee317e030/cJHSvvimr1kqgQfHOjO5n.png", "fullname": "Tom Aarsen", "name": "tomaarsen", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1060 } ]
[ { "reaction": "❤️", "users": [ "tomaarsen", "clem", "yashisfine", "sensei-ml", "John6666", "a9i", "Tonic", "julien-c" ], "count": 8 }, { "reaction": "👍", "users": [ "ijohn07" ], "count": 1 } ]
2024-10-03T15:00:46.000Z
2024-10-03T17:48:59.309Z
[ { "avatarUrl": "/avatars/8ec411454fed4bd214fc1676de533a1c.svg", "fullname": "yash negi", "name": "yashisfine", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/louisbrulenaudet/770757592384559
2,083
1
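To make the retrieval use case above concrete, here is a minimal sketch of querying one of the Lemone-embed models with sentence-transformers. The exact checkpoint name is an assumption (pick any model from the linked collection), and the toy French tax corpus is purely illustrative.

```python
# Minimal sketch of in-domain semantic search with a Lemone-embed model.
# The repo id below is an assumption; any model from the collection works.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("louisbrulenaudet/lemone-embed-pro")

corpus = [
    "Les plus-values de cession de valeurs mobilières sont soumises au PFU de 30 %.",
    "Le crédit d'impôt recherche est assis sur les dépenses de R&D éligibles.",
]
query = "Quel est le taux d'imposition des plus-values mobilières ?"

corpus_emb = model.encode(corpus, convert_to_tensor=True)
query_emb = model.encode(query, convert_to_tensor=True)

hits = util.semantic_search(query_emb, corpus_emb, top_k=1)[0]
print(corpus[hits[0]["corpus_id"]], hits[0]["score"])
```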
945593396015382
[ { "type": "text", "value": "🇨🇳⛵️ 出海: Chinese AI is expanding globally", "raw": "🇨🇳⛵️ 出海: Chinese AI is expanding globally", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Fact: Chinese LLMs are heavily underrated, for instance recently the excellent Deepseek-v2.5 or Qwen models. ", "raw": "Fact: Chinese LLMs are heavily underrated, for instance recently the excellent Deepseek-v2.5 or Qwen models. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Luckily for us, ", "raw": "Luckily for us, ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@AdinaY", "href": null, "resource": null, "url": null, "code": null, "user": "AdinaY", "label": null, "lang": null }, { "type": "text", "value": " just wrote an excellent blog post explaining the Chinese AI ecosystem!", "raw": " just wrote an excellent blog post explaining the Chinese AI ecosystem!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "My key takeaways:", "raw": "My key takeaways:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Since Google, OpenAI and Anthropic models are not available in China, local companies are fighting for the market. A really good market - AI has much higher penetration there than in the rest of the world, both with companies and individual users!", "raw": "Since Google, OpenAI and Anthropic models are not available in China, local companies are fighting for the market. 
A really good market - AI has much higher penetration there than in the rest of the world, both with companies and individual users!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💰 But since Deepseek heavily cut prices in May 24, this spiraled into a price war that created a cut-throat environment with unsustainably low prices.", "raw": "💰 But since Deepseek heavily cut prices in May 24, this spiraled into a price war that created a cut-throat environment with unsustainably low prices.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📋 On top of this, the local regulation is stringent: models must undergo licensing from a local censor (the Cyberspace Administration of China), that for instance requires models to refuse answering certain questions on the CCP. Although this is certainly simpler to implement than certain condition of the European AI Act.", "raw": "📋 On top of this, the local regulation is stringent: models must undergo licensing from a local censor (the Cyberspace Administration of China), that for instance requires models to refuse answering certain questions on the CCP. 
Although this is certainly simpler to implement than certain condition of the European AI Act.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💸 If this wasn't enough, VC investment in AI is drying out: By mid-2024, Chinese AI startups raised approximately $4.4 billion, vs $55B for US startups just in Q2 24.", "raw": "💸 If this wasn't enough, VC investment in AI is drying out: By mid-2024, Chinese AI startups raised approximately $4.4 billion, vs $55B for US startups just in Q2 24.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📱 To get profitability companies have shifted from foundational models to model + application, for instance PopAI from [01.AI](", "raw": "📱 To get profitability companies have shifted from foundational models to model + application, for instance PopAI from [01.AI](", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "http://01.ai/", "href": "http://01.ai/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ") with millions of users and high profitability.", "raw": ") with millions of users and high profitability.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⛏️ They also try to drill down specific industries: but these niches are also getting crowded.", "raw": "⛏️ They also try to drill down specific industries: but these niches are also getting crowded.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "➡️ Since their home market is becoming both too crowded and unhospitable, Chinese companies are now going for international market, \"Sailing abroad\" following the expression consacred for Zheng He's legendary journey in 1500.", "raw": "➡️ Since their home market is becoming both too crowded and unhospitable, Chinese companies are now going for international market, \"Sailing abroad\" following the expression consacred for Zheng He's legendary journey in 1500.", 
"href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "There, they'll have to adapt to different infrastructures and regulations, but they have bright prospects for growth!", "raw": "There, they'll have to adapt to different infrastructures and regulations, but they have bright prospects for growth!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read her post 👉 ", "raw": "Read her post 👉 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/AdinaY/chinese-ai-global-expansion", "href": "https://huggingface.co/blog/AdinaY/chinese-ai-global-expansion", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🇨🇳⛵️ 出海: Chinese AI is expanding globally Fact: Chinese LLMs are heavily underrated, for instance the excellent recent Deepseek-v2.5 or Qwen models. Luckily for us, @AdinaY just wrote an excellent blog post explaining the Chinese AI ecosystem! My key takeaways: Since Google, OpenAI and Anthropic models are not available in China, local companies are fighting for the market. A really good market - AI has much higher penetration there than in the rest of the world, both among companies and individual users! 💰 But since Deepseek heavily cut prices in May 24, this has spiraled into a price war that created a cut-throat environment with unsustainably low prices. 📋 On top of this, local regulation is stringent: models must undergo licensing from a local censor (the Cyberspace Administration of China), which for instance requires models to refuse to answer certain questions on the CCP. Although this is certainly simpler to implement than certain conditions of the European AI Act. 💸 If this wasn't enough, VC investment in AI is drying up: by mid-2024, Chinese AI startups had raised approximately $4.4 billion, vs $55B for US startups in Q2 24 alone. 📱 To reach profitability, companies have shifted from foundation models to model + application, for instance PopAI from [01.AI](http://01.ai/), with millions of users and high profitability. ⛏️ They also try to drill down into specific industries: but these niches are getting crowded too. ➡️ Since their home market is becoming both too crowded and inhospitable, Chinese companies are now going for international markets, "Sailing abroad" following the expression consecrated by Zheng He's legendary voyages in the early 1400s. There, they'll have to adapt to different infrastructures and regulations, but they have bright prospects for growth! Read her post 👉 https://huggingface.co/blog/AdinaY/chinese-ai-global-expansion
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/o9qYeyLtZ1W8IhcOuB0x6.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63a369d98c0c89dcae3b8329/6OUJ7Hc9T1jXynYH3FGaf.png", "fullname": "Adina Yakefu", "name": "AdinaY", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 240 } ]
[ { "reaction": "🔥", "users": [ "AdinaY", "DeathGodlike", "ethancl1" ], "count": 3 }, { "reaction": "🚀", "users": [ "TroglodyteDerivations", "ethancl1", "louisbrulenaudet" ], "count": 3 }, { "reaction": "🤗", "users": [ "AdinaY", "John6666" ], "count": 2 } ]
2024-10-03T13:40:07.000Z
2024-10-03T13:53:46.499Z
[]
/posts/m-ric/945593396015382
1,329
0
597151798835464
[ { "type": "text", "value": "ColPali is an exciting new approach to multimodal document retrieval, but some doubt its practical use with existing vector DBs.", "raw": "ColPali is an exciting new approach to multimodal document retrieval, but some doubt its practical use with existing vector DBs.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It turns out it's super easy to use Qdrant to index and search ColPali embeddings efficiently.", "raw": "It turns out it's super easy to use Qdrant to index and search ColPali embeddings efficiently.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Blog post here: ", "raw": "Blog post here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://danielvanstrien.xyz/posts/post-with-code/colpali-qdrant/2024-10-02_using_colpali_with_qdrant.html", "href": "https://danielvanstrien.xyz/posts/post-with-code/colpali-qdrant/2024-10-02_using_colpali_with_qdrant.html", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Very silly demo: ", "raw": "Very silly demo: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/davanstrien/ufo-ColPali-Search", "href": null, "resource": { "type": "space", "id": "davanstrien/ufo-ColPali-Search", "discussionNum": null }, "url": "https://huggingface.co/spaces/davanstrien/ufo-ColPali-Search", "code": null, "user": null, "label": null, "lang": null } ]
ColPali is an exciting new approach to multimodal document retrieval, but some doubt its practical use with existing vector DBs. It turns out it's super easy to use Qdrant to index and search ColPali embeddings efficiently. Blog post here: https://danielvanstrien.xyz/posts/post-with-code/colpali-qdrant/2024-10-02_using_colpali_with_qdrant.html Very silly demo: https://huggingface.co/spaces/davanstrien/ufo-ColPali-Search
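A minimal sketch of what indexing ColPali-style multivector embeddings in Qdrant can look like, assuming qdrant-client >= 1.10 (which added multivector support with MaxSim scoring); the collection name and the `page_embeddings` / `query_embedding` arrays are illustrative placeholders, not the blog post's actual code:

```python
# Sketch: storing ColPali-style multivector page embeddings in Qdrant.
# Assumes qdrant-client >= 1.10 and that `page_embeddings` is a list of
# (num_patches, 128) float arrays produced elsewhere by a ColPali model.
import numpy as np
from qdrant_client import QdrantClient, models

page_embeddings = [np.random.rand(1030, 128) for _ in range(3)]  # placeholder data
query_embedding = np.random.rand(20, 128)                        # placeholder query

client = QdrantClient(":memory:")  # swap for a real Qdrant instance in practice

client.create_collection(
    collection_name="documents",
    vectors_config=models.VectorParams(
        size=128,                          # ColPali embedding dimension
        distance=models.Distance.COSINE,
        multivector_config=models.MultiVectorConfig(
            comparator=models.MultiVectorComparator.MAX_SIM  # late-interaction scoring
        ),
    ),
)

# One multivector per page; the payload keeps a pointer back to the source image.
client.upsert(
    collection_name="documents",
    points=[
        models.PointStruct(id=i, vector=emb.tolist(), payload={"page": i})
        for i, emb in enumerate(page_embeddings)
    ],
)

# Query with the multivector embedding of the text query.
hits = client.query_points(
    collection_name="documents",
    query=query_embedding.tolist(),
    limit=5,
)
print(hits.points)
```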
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg", "fullname": "Daniel van Strien", "name": "davanstrien", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 410, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "YaTharThShaRma999" ], "count": 2 }, { "reaction": "🔥", "users": [ "YaTharThShaRma999" ], "count": 1 } ]
2024-10-03T12:42:54.000Z
2024-10-03T12:42:54.694Z
[]
/posts/davanstrien/597151798835464
1,223
0
871893662265982
[ { "type": "text", "value": "Updated my 📺RTV🖼️ - Real Time Video AI app this morning.", "raw": "Updated my 📺RTV🖼️ - Real Time Video AI app this morning.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "URL: ", "raw": "URL: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/awacke1/stable-video-diffusion", "href": null, "resource": { "type": "space", "id": "awacke1/stable-video-diffusion", "discussionNum": null }, "url": "https://huggingface.co/spaces/awacke1/stable-video-diffusion", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It uses Stable Diffusion to dynamically create videos from images in input directory or uploaded using A10 GPU on Huggingface.", "raw": "It uses Stable Diffusion to dynamically create videos from images in input directory or uploaded using A10 GPU on Huggingface.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Samples below.", "raw": "Samples below.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I may transition this to Zero GPU if I can. During Christmas when I revised this I had my highest billing from HF yet due to GPU usage. It is still the best turn key GPU out and Image2Video is a killer app. Thanks HF for the possibilities!", "raw": "I may transition this to Zero GPU if I can. During Christmas when I revised this I had my highest billing from HF yet due to GPU usage. It is still the best turn key GPU out and Image2Video is a killer app. Thanks HF for the possibilities!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Updated my 📺RTV🖼️ - Real Time Video AI app this morning. URL: https://huggingface.co/spaces/awacke1/stable-video-diffusion It uses Stable Video Diffusion to dynamically create videos from images in the input directory, or from uploads, using an A10 GPU on Hugging Face. Samples below. I may transition this to Zero GPU if I can. During Christmas, when I revised this, I had my highest billing from HF yet due to GPU usage. It is still the best turnkey GPU option out there, and Image2Video is a killer app. Thanks HF for the possibilities!
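The Space's own code isn't shown here, but a minimal image-to-video sketch with diffusers' StableVideoDiffusionPipeline illustrates the kind of pipeline involved; the model id, input path, and parameters below are assumptions for illustration:

```python
# Minimal image-to-video sketch using diffusers' Stable Video Diffusion pipeline.
# Model id and file paths are illustrative; the Space's actual code may differ.
import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16
)
pipe.to("cuda")  # fp16 fits on an A10-class (24 GB) GPU

image = load_image("input_frame.png")                 # any input image
frames = pipe(image, decode_chunk_size=8).frames[0]   # list of PIL frames
export_to_video(frames, "generated.mp4", fps=7)
```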
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1656147940537-620630b603825909dcbeba35.jpeg", "fullname": "Aaron C Wacker", "name": "awacke1", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 185, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/itFP4Kk6PmemqscBb7qQO.png" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/lkycUPGX6xUYDrOojwC2A.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/pNzXsRlYD3-y8Zm3PFT1G.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/6l0583-nCW-1fx-zfEb6v.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/q-wmM9Z3E9rMsIRD89yLe.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/FiJ5b9Wib0SqC9-FQJuNX.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/RTWy0RqToaIPF9eEs3z4j.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/WM7QMh5UZXMrWVeUsSR0D.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/AjB3Rsx9oixLFGOfjzgZs.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/opTIrT7UYVcF5_FZnkrns.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/zF6GRwT1PX5E5CmAih8tX.mp4" }, { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/A6hXBuxizd0I4fHnxiDW3.mp4" } ]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 }, { "reaction": "🔥", "users": [ "KingNish" ], "count": 1 } ]
2024-10-03T12:41:20.000Z
2024-10-03T12:41:20.512Z
[]
/posts/awacke1/871893662265982
989
0
585613287443442
[ { "type": "text", "value": "Triton-accelerated nanoGPT🤕", "raw": "Triton-accelerated nanoGPT🤕", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The WHY behind this ordeal - After practicing triton for about 2 weeks now, I challenged myself into implementing custom triton kernels for Karpathy's nanoGPT and quite an ordeal it was but somehow got something working, not perfect but getting there:), contributions are welcomed.", "raw": "The WHY behind this ordeal - After practicing triton for about 2 weeks now, I challenged myself into implementing custom triton kernels for Karpathy's nanoGPT and quite an ordeal it was but somehow got something working, not perfect but getting there:), contributions are welcomed.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Code: ", "raw": "Code: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/Jaykef/Triton-nanoGPT", "href": "https://github.com/Jaykef/Triton-nanoGPT", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Triton-accelerated nanoGPT🤕 The WHY behind this ordeal - after practicing Triton for about 2 weeks now, I challenged myself to implement custom Triton kernels for Karpathy's nanoGPT. Quite an ordeal it was, but I somehow got something working - not perfect, but getting there :). Contributions are welcome. Code: https://github.com/Jaykef/Triton-nanoGPT
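The repo's kernels are more involved than this, but a tiny illustrative Triton kernel shows the pattern such custom kernels follow (a grid of programs, masked loads and stores, a Python-side launcher); this is a generic example, not code from Triton-nanoGPT:

```python
# Illustrative only - not the repo's kernels. A minimal Triton vector-add kernel.
import torch
import triton
import triton.language as tl

@triton.jit
def add_kernel(x_ptr, y_ptr, out_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(axis=0)                      # which block this program handles
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements                      # guard the tail block
    x = tl.load(x_ptr + offsets, mask=mask)
    y = tl.load(y_ptr + offsets, mask=mask)
    tl.store(out_ptr + offsets, x + y, mask=mask)

def add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    out = torch.empty_like(x)
    n = out.numel()
    grid = (triton.cdiv(n, 1024),)                   # one program per 1024 elements
    add_kernel[grid](x, y, out, n, BLOCK_SIZE=1024)
    return out

x = torch.randn(4096, device="cuda")
y = torch.randn(4096, device="cuda")
assert torch.allclose(add(x, y), x + y)
```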
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg", "fullname": "Jaward Sesay", "name": "Jaward", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 191, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/exKfTtOZLMDTDmZNNLRMW.mp4" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/5Ng54XxDEAj-6EfXtz9Xh.jpeg" } ]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-03T12:02:16.000Z
2024-10-04T04:20:23.535Z
[]
/posts/Jaward/585613287443442
371
0
935333471653456
[ { "type": "text", "value": "#phdone - I defended my PhD yesterday! A key lesson: it is amazing how open science and open source can empower beginners with limited resources:", "raw": "#phdone - I defended my PhD yesterday! A key lesson: it is amazing how open science and open source can empower beginners with limited resources:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I first learned about instruction-based classifiers like BERT-NLI 3-4 years ago, through the ", "raw": "I first learned about instruction-based classifiers like BERT-NLI 3-4 years ago, through the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@HuggingFace", "href": null, "resource": null, "url": null, "code": null, "user": "HuggingFace", "label": null, "lang": null }, { "type": "text", "value": " ZeroShotClassificationPipeline. Digging deeper into this, it was surprisingly easy to find new datasets, newer base models, and reusable fine-tuning scripts on the HF Hub to create my own zeroshot models - although I didn't know much about fine-tuning at the time.", "raw": " ZeroShotClassificationPipeline. Digging deeper into this, it was surprisingly easy to find new datasets, newer base models, and reusable fine-tuning scripts on the HF Hub to create my own zeroshot models - although I didn't know much about fine-tuning at the time.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Thanks to the community effect of the Hub, my models were downloaded hundreds of thousands of times after a few months. Seeing my research being useful for people motivated me to improve and upload newer models. Leaving my contact details in the model cards led to academic cooperation and consulting contracts (and eventually my job at HF).", "raw": "Thanks to the community effect of the Hub, my models were downloaded hundreds of thousands of times after a few months. Seeing my research being useful for people motivated me to improve and upload newer models. 
Leaving my contact details in the model cards led to academic cooperation and consulting contracts (and eventually my job at HF).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "That's the power of open science & open source: learning, sharing, improving, collaborating.", "raw": "That's the power of open science & open source: learning, sharing, improving, collaborating.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I mean every word in my thesis acknowledgments (screenshot). I'm very grateful to my supervisors ", "raw": "I mean every word in my thesis acknowledgments (screenshot). I'm very grateful to my supervisors ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@vanatteveldt", "href": null, "resource": null, "url": null, "code": null, "user": "vanatteveldt", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@CasAndreu", "href": null, "resource": null, "url": null, "code": null, "user": "CasAndreu", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@KasperWelbers", "href": null, "resource": null, "url": null, "code": null, "user": "KasperWelbers", "label": null, "lang": null }, { "type": "text", "value": " for their guidance; to ", "raw": " for their guidance; to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@profAndreaRenda", "href": null, "resource": null, "url": null, "code": null, "user": "profAndreaRenda", "label": null, "lang": null }, { "type": "text", "value": " and @CEPS_thinktank for enabling me to work part-time during the first year; to ", "raw": " and @CEPS_thinktank for enabling me to work part-time during the first year; to ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@huggingface", "href": null, "resource": null, "url": null, "code": null, "user": "huggingface", "label": null, "lang": null }, { "type": "text", "value": " for creating awesome tools and an awesome platform; and to many others who are not active on social media.", "raw": " for creating awesome tools and an awesome platform; and to many others who are not active on social media.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { 
"type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Links to the full thesis and the collection of my most recent models are below.", "raw": "Links to the full thesis and the collection of my most recent models are below.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "PS: If someone happens to speak Latin, let me know if my diploma contains some hidden Illuminati code or something :D", "raw": "PS: If someone happens to speak Latin, let me know if my diploma contains some hidden Illuminati code or something :D", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
#phdone - I defended my PhD yesterday! A key lesson: it is amazing how open science and open source can empower beginners with limited resources: I first learned about instruction-based classifiers like BERT-NLI 3-4 years ago, through the @HuggingFace ZeroShotClassificationPipeline. Digging deeper into this, it was surprisingly easy to find new datasets, newer base models, and reusable fine-tuning scripts on the HF Hub to create my own zeroshot models - although I didn't know much about fine-tuning at the time. Thanks to the community effect of the Hub, my models were downloaded hundreds of thousands of times after a few months. Seeing my research being useful for people motivated me to improve and upload newer models. Leaving my contact details in the model cards led to academic cooperation and consulting contracts (and eventually my job at HF). That's the power of open science & open source: learning, sharing, improving, collaborating. I mean every word in my thesis acknowledgments (screenshot). I'm very grateful to my supervisors @vanatteveldt @CasAndreu @KasperWelbers for their guidance; to @profAndreaRenda and @CEPS_thinktank for enabling me to work part-time during the first year; to @huggingface for creating awesome tools and an awesome platform; and to many others who are not active on social media. Links to the full thesis and the collection of my most recent models are below. PS: If someone happens to speak Latin, let me know if my diploma contains some hidden Illuminati code or something :D
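For readers unfamiliar with the pipeline mentioned above, this is roughly how an NLI-based zero-shot classifier is used via transformers; the model id below is one illustrative example of such a checkpoint, and any NLI/zero-shot model on the Hub can be swapped in:

```python
# Typical usage of the zero-shot classification pipeline in transformers.
# The model id is illustrative; any NLI-based zero-shot checkpoint works.
from transformers import pipeline

classifier = pipeline(
    "zero-shot-classification",
    model="MoritzLaurer/mDeBERTa-v3-base-mnli-xnli",
)

result = classifier(
    "The government announced a new tax on imported steel.",
    candidate_labels=["economy", "sports", "science"],
)
print(result["labels"][0], result["scores"][0])  # highest-scoring label and its score
```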
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1613511937628-5fb15d1e84389b139cf3b508.jpeg", "fullname": "Moritz Laurer", "name": "MoritzLaurer", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 236, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5fb15d1e84389b139cf3b508/QxHbp9tK96btcWfIR5tTz.jpeg" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5fb15d1e84389b139cf3b508/kav8Ze67UzYp59K9RHvYt.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5fb15d1e84389b139cf3b508/QwbekRgOEp2uDkI0e6kQT.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/5fb15d1e84389b139cf3b508/tgQyaiKK_VDF6hM6D8w-6.jpeg" } ]
[ { "avatarUrl": "/avatars/d58b30fea905cfd7ac15e0ab30a43ac4.svg", "fullname": "Kasper Welbers", "name": "KasperWelbers", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63fc9e7df67cec1f9f57b50a/dX-SDTo0mUyjnAZxys7I1.png", "fullname": "Wouter", "name": "vanatteveldt", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null } ]
[ { "reaction": "❤️", "users": [ "tomaarsen", "clboetticher", "m-ric", "celinah", "bwilkinson", "Near32", "layperson99", "clem", "adamelliotfields", "WaveCut", "eepol", "Exquisiteuser", "Stopwolf", "Oshan", "agentlans", "DavidGF", "mlabonne", "Aurelien-Morgan", "ppsingh", "Carlos3D", "CptnPrice", "madoss" ], "count": 22 }, { "reaction": "🤗", "users": [ "m-ric", "fffiloni", "a9i", "Aurelien-Morgan", "madoss" ], "count": 5 }, { "reaction": "👀", "users": [ "John6666" ], "count": 1 }, { "reaction": "🚀", "users": [ "fffiloni" ], "count": 1 } ]
2024-10-03T11:08:12.000Z
2024-10-30T00:28:22.659Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1613511937628-5fb15d1e84389b139cf3b508.jpeg", "fullname": "Moritz Laurer", "name": "MoritzLaurer", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 236, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg", "fullname": "Clem 🤗", "name": "clem", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 1763, "isFollowing": false }, { "avatarUrl": "/avatars/9a825fdb025f6ebeb965017c95727b5c.svg", "fullname": "Antón Fernández Pérez", "name": "CptnPrice", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "/avatars/744eddaa7dfc34a57df9ce32a78059a0.svg", "fullname": "Tyrone Pierce", "name": "piercyy", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 3, "isFollowing": false } ]
/posts/MoritzLaurer/935333471653456
4,073
4
836944689044968
[ { "type": "text", "value": "Just dropped a new blog post 🤗 ", "raw": "Just dropped a new blog post 🤗 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "\"Comparing Open-source and Proprietary LLMs in Medical AI\"", "raw": "\"Comparing Open-source and Proprietary LLMs in Medical AI\"", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "We put the biggest AI brains 🧠 to the test on medical tasks 🏥 using popular benchmarks such as MedQA. ", "raw": "We put the biggest AI brains 🧠 to the test on medical tasks 🏥 using popular benchmarks such as MedQA. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Closed-source vs. open-source, costs decoded, and performance revealed! We need even more comprehensive benchmarks and evaluations of LLMs in medical tasks 🔬 ", "raw": "Closed-source vs. open-source, costs decoded, and performance revealed! We need even more comprehensive benchmarks and evaluations of LLMs in medical tasks 🔬 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read more at: ", "raw": "Read more at: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/mpimentel/comparing-llms-medical-ai", "href": "https://huggingface.co/blog/mpimentel/comparing-llms-medical-ai", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Just dropped a new blog post 🤗 "Comparing Open-source and Proprietary LLMs in Medical AI" We put the biggest AI brains 🧠 to the test on medical tasks 🏥 using popular benchmarks such as MedQA. Closed-source vs. open-source, costs decoded, and performance revealed! We need even more comprehensive benchmarks and evaluations of LLMs in medical tasks 🔬 Read more at: https://huggingface.co/blog/mpimentel/comparing-llms-medical-ai
{ "avatarUrl": "/avatars/355d16e28ca9cf5891368e43bcda6de5.svg", "fullname": "Marco Pimentel", "name": "mpimentel", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 16, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6454faafa13edf669cd74f36/kdNYj2xwkirdso1BaRG1N.png" } ]
[]
[ { "reaction": "🔥", "users": [ "pkanithi", "ronnierajan", "cchristophe", "wadood", "John6666", "clem", "Pomni", "Jeszebel" ], "count": 8 } ]
2024-10-03T10:45:59.000Z
2024-10-03T10:56:23.070Z
[]
/posts/mpimentel/836944689044968
1,400
0
892273134456397
[ { "type": "text", "value": "We propose MGDebugger, a hierarchical bottom-up LLM code debugger 🔥 that can fix bugs from low-level syntax errors to high-level algorithmic flaws.", "raw": "We propose MGDebugger, a hierarchical bottom-up LLM code debugger 🔥 that can fix bugs from low-level syntax errors to high-level algorithmic flaws.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It achieves an ⭐️ 18.9% improvement in accuracy over seed generations in HumanEval and a ⭐️ 97.6% repair success rate in HumanEvalFix.", "raw": "It achieves an ⭐️ 18.9% improvement in accuracy over seed generations in HumanEval and a ⭐️ 97.6% repair success rate in HumanEvalFix.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper available at ", "raw": "Paper available at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://arxiv.org/abs/2410.01215", "href": "https://arxiv.org/abs/2410.01215", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ".", "raw": ".", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Code and demo available at ", "raw": "Code and demo available at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/YerbaPage/MGDebugger", "href": "https://github.com/YerbaPage/MGDebugger", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ".", "raw": ".", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
We propose MGDebugger, a hierarchical bottom-up LLM code debugger 🔥 that can fix bugs from low-level syntax errors to high-level algorithmic flaws. It achieves an ⭐️ 18.9% improvement in accuracy over seed generations in HumanEval and a ⭐️ 97.6% repair success rate in HumanEvalFix. Paper available at https://arxiv.org/abs/2410.01215. Code and demo available at https://github.com/YerbaPage/MGDebugger.
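A purely conceptual sketch of the hierarchical bottom-up idea described above - decompose the buggy program into subfunctions, repair the lowest-level pieces first against generated tests, then recompose - where every helper (`decompose`, `repair`, `recompose`, `make_tests`) is hypothetical and not the paper's actual implementation:

```python
# Conceptual sketch only (not MGDebugger's code): bottom-up hierarchical repair.
def debug_hierarchically(code: str, llm, make_tests) -> str:
    subfunctions = llm.decompose(code)                 # hypothetical: split into a hierarchy
    # Repair deepest (lowest-level) subfunctions first, then work upward.
    for sub in sorted(subfunctions, key=lambda s: s.depth, reverse=True):
        tests = make_tests(sub)                        # hypothetical: generate unit tests
        while not all(t.passes(sub.body) for t in tests):
            failing = [t for t in tests if not t.passes(sub.body)]
            sub.body = llm.repair(sub.body, failing)   # hypothetical: LLM-guided fix
    return llm.recompose(subfunctions)                 # reassemble the repaired program
```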
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/645b0c3ec35da9c7afd95421/vYBrCDagHsXAo6J2p-uG0.jpeg", "fullname": "Yuling", "name": "YerbaPage", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 8, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "YerbaPage", "louisbrulenaudet", "John6666", "Joseph717171", "dingo-actual", "ethancl1" ], "count": 6 }, { "reaction": "👍", "users": [ "Tonic", "amakipaa", "Goekdeniz-Guelmez", "Joseph717171", "ethancl1" ], "count": 5 }, { "reaction": "🤗", "users": [ "YerbaPage", "ajibawa-2023", "Goekdeniz-Guelmez", "Joseph717171" ], "count": 4 }, { "reaction": "🔥", "users": [ "YerbaPage", "Joseph717171" ], "count": 2 }, { "reaction": "🧠", "users": [ "Tonic" ], "count": 1 } ]
2024-10-03T06:41:44.000Z
2024-10-09T07:45:55.577Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg", "fullname": "Feynman Innovations", "name": "ajibawa-2023", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 138, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/645b0c3ec35da9c7afd95421/vYBrCDagHsXAo6J2p-uG0.jpeg", "fullname": "Yuling", "name": "YerbaPage", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 8, "isFollowing": false } ]
/posts/YerbaPage/892273134456397
2,022
2
639926000427051
[ { "type": "text", "value": "Good folks at ", "raw": "Good folks at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@PyTorch", "href": null, "resource": null, "url": null, "code": null, "user": "PyTorch", "label": null, "lang": null }, { "type": "text", "value": " have just released torchao, a game-changing library for native architecture optimization.", "raw": " have just released torchao, a game-changing library for native architecture optimization.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "-- How torchao Works (They threw the kitchen-sink at it...)", "raw": "-- How torchao Works (They threw the kitchen-sink at it...)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "torchao leverages several advanced techniques to optimize PyTorch models, making them faster and more memory-efficient. Here's an overview of its key mechanisms:", "raw": "torchao leverages several advanced techniques to optimize PyTorch models, making them faster and more memory-efficient. 
Here's an overview of its key mechanisms:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Quantization", "raw": "Quantization", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "torchao employs various quantization methods to reduce model size and accelerate inference:", "raw": "torchao employs various quantization methods to reduce model size and accelerate inference:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• Weight-only quantization: Converts model weights to lower precision formats like int4 or int8, significantly reducing memory usage.", "raw": "• Weight-only quantization: Converts model weights to lower precision formats like int4 or int8, significantly reducing memory usage.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• Dynamic activation quantization: Quantizes activations on-the-fly during inference, balancing performance and accuracy.", "raw": "• Dynamic activation quantization: Quantizes activations on-the-fly during inference, balancing performance and accuracy.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• Automatic quantization: The ", "raw": "• Automatic quantization: The ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`autoquant`", "href": null, "resource": null, "url": null, "code": "autoquant", "user": null, "label": null, "lang": null }, { "type": "text", "value": " function intelligently selects the best quantization strategy for each layer in a model.", "raw": " function intelligently selects the best quantization strategy for each layer in a model.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": 
"new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Low-bit Datatypes", "raw": "Low-bit Datatypes", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The library utilizes low-precision datatypes to speed up computations:", "raw": "The library utilizes low-precision datatypes to speed up computations:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• float8: Enables float8 training for linear layers, offering substantial speedups for large models like LLaMA 3 70B.", "raw": "• float8: Enables float8 training for linear layers, offering substantial speedups for large models like LLaMA 3 70B.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• int4 and int8: Provide options for extreme compression of weights and activations.", "raw": "• int4 and int8: Provide options for extreme compression of weights and activations.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Sparsity Techniques", "raw": "Sparsity Techniques", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "torchao implements sparsity methods to reduce model density:", "raw": "torchao implements sparsity methods to reduce model density:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• Semi-sparse weights: Combine quantization with sparsity for compute-bound 
models.", "raw": "• Semi-sparse weights: Combine quantization with sparsity for compute-bound models.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "KV Cache Optimization", "raw": "KV Cache Optimization", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For transformer-based models, torchao offers KV cache quantization, leading to significant VRAM reductions for long context lengths.", "raw": "For transformer-based models, torchao offers KV cache quantization, leading to significant VRAM reductions for long context lengths.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Integration with PyTorch Ecosystem", "raw": "Integration with PyTorch Ecosystem", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "torchao seamlessly integrates with existing PyTorch tools:", "raw": "torchao seamlessly integrates with existing PyTorch tools:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• Compatible with ", "raw": "• Compatible with ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`torch.compile()`", "href": null, "resource": null, "url": null, "code": "torch.compile()", "user": null, "label": null, "lang": null }, { "type": "text", "value": " for additional performance gains.", "raw": " for additional performance gains.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null 
}, { "type": "text", "value": "• Works with FSDP2 for distributed training scenarios.", "raw": "• Works with FSDP2 for distributed training scenarios.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "• Supports most PyTorch models available on Hugging Face out-of-the-box.", "raw": "• Supports most PyTorch models available on Hugging Face out-of-the-box.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "By combining these techniques, torchao enables developers to significantly improve the performance and efficiency of their PyTorch models with minimal code changes and accuracy impact.", "raw": "By combining these techniques, torchao enables developers to significantly improve the performance and efficiency of their PyTorch models with minimal code changes and accuracy impact.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Good folks at @PyTorch have just released torchao, a game-changing library for native architecture optimization. -- How torchao Works (They threw the kitchen-sink at it...) torchao leverages several advanced techniques to optimize PyTorch models, making them faster and more memory-efficient. Here's an overview of its key mechanisms: Quantization torchao employs various quantization methods to reduce model size and accelerate inference: • Weight-only quantization: Converts model weights to lower precision formats like int4 or int8, significantly reducing memory usage. • Dynamic activation quantization: Quantizes activations on-the-fly during inference, balancing performance and accuracy. • Automatic quantization: The `autoquant` function intelligently selects the best quantization strategy for each layer in a model. Low-bit Datatypes The library utilizes low-precision datatypes to speed up computations: • float8: Enables float8 training for linear layers, offering substantial speedups for large models like LLaMA 3 70B. • int4 and int8: Provide options for extreme compression of weights and activations. Sparsity Techniques torchao implements sparsity methods to reduce model density: • Semi-sparse weights: Combine quantization with sparsity for compute-bound models. KV Cache Optimization For transformer-based models, torchao offers KV cache quantization, leading to significant VRAM reductions for long context lengths. Integration with PyTorch Ecosystem torchao seamlessly integrates with existing PyTorch tools: • Compatible with `torch.compile()` for additional performance gains. • Works with FSDP2 for distributed training scenarios. • Supports most PyTorch models available on Hugging Face out-of-the-box. By combining these techniques, torchao enables developers to significantly improve the performance and efficiency of their PyTorch models with minimal code changes and accuracy impact.
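A hedged sketch of what applying torchao's quantization looks like in practice; the import paths and function names match recent torchao releases as far as I can tell, but check the repo if they have moved, and the toy model below is only a placeholder:

```python
# Sketch of torchao's one-line quantization APIs (verify names against the repo).
import torch
import torchao
from torchao.quantization import quantize_, int8_weight_only

# Placeholder model; any torch.nn.Module works.
model = torch.nn.Sequential(
    torch.nn.Linear(1024, 1024), torch.nn.ReLU(), torch.nn.Linear(1024, 1024)
).to("cuda").eval()

# Option 1: pick a scheme explicitly - weight-only int8, applied in place.
quantize_(model, int8_weight_only())

# Option 2 (alternative, starting from an unquantized model): let torchao choose
# per-layer kernels and pair it with torch.compile for extra speed.
# model = torchao.autoquant(torch.compile(model, mode="max-autotune"))

x = torch.randn(8, 1024, device="cuda")
with torch.no_grad():
    y = model(x)
print(y.shape)
```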
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/OQHsxF1cmSPypN0w47ny2.jpeg" } ]
[]
[ { "reaction": "🔥", "users": [ "John6666", "acamilogg88", "louisbrulenaudet", "Joseph717171" ], "count": 4 }, { "reaction": "👀", "users": [ "John6666", "osanseviero", "Joseph717171" ], "count": 3 } ]
2024-10-03T03:28:49.000Z
2024-10-03T21:03:24.216Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false }, { "avatarUrl": "/avatars/ea4398745974d781ae9dc0e95b12cabe.svg", "fullname": "Joseph", "name": "Joseph717171", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 22, "isFollowing": false } ]
/posts/singhsidhukuldeep/639926000427051
1,263
3
243951824255912
[ { "type": "mention", "value": null, "raw": "@mlabonne", "href": null, "resource": null, "url": null, "code": null, "user": "mlabonne", "label": null, "lang": null }, { "type": "text", "value": " hey there 🙋🏻‍♂️ I kinda got obsessed with your great model , and i found the endpoint for it in lambda labs, but basically i got rate limited / banned for trying to make my DPO dataset project, i was wondering if you all had an open ai compatible solution for me to make a great \"thinking\" sft + dpo dataset with all the splits 🙏🏻🙏🏻 kinda desparate , it's true , but was looking forward to a nice write ups 🚀🚀🚀", "raw": " hey there 🙋🏻‍♂️ I kinda got obsessed with your great model , and i found the endpoint for it in lambda labs, but basically i got rate limited / banned for trying to make my DPO dataset project, i was wondering if you all had an open ai compatible solution for me to make a great \"thinking\" sft + dpo dataset with all the splits 🙏🏻🙏🏻 kinda desparate , it's true , but was looking forward to a nice write ups 🚀🚀🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
@mlabonne hey there 🙋🏻‍♂️ I kinda got obsessed with your great model, and I found the endpoint for it on Lambda Labs, but basically I got rate limited / banned while trying to build my DPO dataset project. I was wondering if you all had an OpenAI-compatible solution for me to make a great "thinking" SFT + DPO dataset with all the splits 🙏🏻🙏🏻 kinda desperate, it's true, but I was looking forward to a nice write-up 🚀🚀🚀
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b8e2ba285851687028d395/JtUGAwVh_4cDEsjNcfpye.png", "fullname": "Maxime Labonne", "name": "mlabonne", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 3486 } ]
[ { "reaction": "❤️", "users": [ "mlabonne", "ggamecrazy", "munyadev", "leeloolee" ], "count": 4 }, { "reaction": "👀", "users": [ "John6666", "louisbrulenaudet", "ggamecrazy" ], "count": 3 } ]
2024-10-02T17:22:49.000Z
2024-10-02T19:18:46.432Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b8e2ba285851687028d395/JtUGAwVh_4cDEsjNcfpye.png", "fullname": "Maxime Labonne", "name": "mlabonne", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 3486, "isFollowing": false } ]
/posts/Tonic/243951824255912
1,675
1
956298330215271
[ { "type": "text", "value": "Emu3: Next-token prediction conquers multimodal tasks 🔥", "raw": "Emu3: Next-token prediction conquers multimodal tasks 🔥", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This is the most important research in months: we’re now very close to having a single architecture to handle all modalities. The folks at Beijing Academy of Artificial Intelligence (BAAI) just released Emu3, a single model that handles text, images, and videos all at once.", "raw": "This is the most important research in months: we’re now very close to having a single architecture to handle all modalities. The folks at Beijing Academy of Artificial Intelligence (BAAI) just released Emu3, a single model that handles text, images, and videos all at once.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "𝗪𝗵𝗮𝘁'𝘀 𝘁𝗵𝗲 𝗯𝗶𝗴 𝗱𝗲𝗮𝗹?", "raw": "𝗪𝗵𝗮𝘁'𝘀 𝘁𝗵𝗲 𝗯𝗶𝗴 𝗱𝗲𝗮𝗹?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🌟 Emu3 is the first model to truly unify all these different types of data (text, images, video) using just one simple trick: predicting the next token.", "raw": "🌟 Emu3 is the first model to truly unify all these different types of data (text, images, video) using just one simple trick: predicting the next token.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And it’s only 8B, but really strong:", "raw": "And it’s only 8B, but really strong:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🖼️ For image generation, it's matching the best specialized models out there, like SDXL.", "raw": "🖼️ For image generation, it's matching the best specialized models out there, like SDXL.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👁️ In vision tasks, it's outperforming top models like LLaVA-1.6-7B, which is a big deal for a model that wasn't specifically designed for this.", "raw": "👁️ In 
vision tasks, it's outperforming top models like LLaVA-1.6-7B, which is a big deal for a model that wasn't specifically designed for this.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🎬 It's the first to nail video generation without using complicated diffusion techniques.", "raw": "🎬 It's the first to nail video generation without using complicated diffusion techniques.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "𝗛𝗼𝘄 𝗱𝗼𝗲𝘀 𝗶𝘁 𝘄𝗼𝗿𝗸?", "raw": "𝗛𝗼𝘄 𝗱𝗼𝗲𝘀 𝗶𝘁 𝘄𝗼𝗿𝗸?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🧩 Emu3 uses a special tokenizer (SBER-MoVQGAN) to turn images and video clips into sequences of 4,096 tokens.", "raw": "🧩 Emu3 uses a special tokenizer (SBER-MoVQGAN) to turn images and video clips into sequences of 4,096 tokens.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Then, it treats everything - text, images, and videos - as one long series of tokens to predict.", "raw": "🔗 Then, it treats everything - text, images, and videos - as one long series of tokens to predict.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔮 During training, it just tries to guess the next token, whether that's a word, part of an image, or a video frame.", "raw": "🔮 During training, it just tries to guess the next token, whether that's a word, part of an image, or a video frame.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "𝗖𝗮𝘃𝗲𝗮𝘁𝘀 𝗼𝗻 𝘁𝗵𝗲 𝗿𝗲𝘀𝘂𝗹𝘁𝘀:", "raw": "𝗖𝗮𝘃𝗲𝗮𝘁𝘀 𝗼𝗻 𝘁𝗵𝗲 𝗿𝗲𝘀𝘂𝗹𝘁𝘀:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👉 In image generation, Emu3 beats SDXL, but it’s also much bigger (8B vs 3.5B). 
It would be more difficult to beat the real diffusion GOAT FLUX-dev.", "raw": "👉 In image generation, Emu3 beats SDXL, but it’s also much bigger (8B vs 3.5B). It would be more difficult to beat the real diffusion GOAT FLUX-dev.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👉 In vision, authors also don’t show a comparison against all the current SOTA models like Qwen-VL or Pixtral.", "raw": "👉 In vision, authors also don’t show a comparison against all the current SOTA models like Qwen-VL or Pixtral.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This approach is exciting because it's simple (next token prediction) and scalable(handles all sorts of data)!", "raw": "This approach is exciting because it's simple (next token prediction) and scalable(handles all sorts of data)!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read the paper 👉 ", "raw": "Read the paper 👉 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2409.18869", "href": null, "resource": { "type": "paper", "id": "2409.18869", "discussionNum": null }, "url": "https://huggingface.co/papers/2409.18869", "code": null, "user": null, "label": "Emu3: Next-Token Prediction is All You Need (2409.18869)", "lang": null } ]
Emu3: Next-token prediction conquers multimodal tasks 🔥 This is the most important research in months: we’re now very close to having a single architecture to handle all modalities. The folks at Beijing Academy of Artificial Intelligence (BAAI) just released Emu3, a single model that handles text, images, and videos all at once. 𝗪𝗵𝗮𝘁'𝘀 𝘁𝗵𝗲 𝗯𝗶𝗴 𝗱𝗲𝗮𝗹? 🌟 Emu3 is the first model to truly unify all these different types of data (text, images, video) using just one simple trick: predicting the next token. And it’s only 8B, but really strong: 🖼️ For image generation, it's matching the best specialized models out there, like SDXL. 👁️ In vision tasks, it's outperforming top models like LLaVA-1.6-7B, which is a big deal for a model that wasn't specifically designed for this. 🎬 It's the first to nail video generation without using complicated diffusion techniques. 𝗛𝗼𝘄 𝗱𝗼𝗲𝘀 𝗶𝘁 𝘄𝗼𝗿𝗸? 🧩 Emu3 uses a special tokenizer (SBER-MoVQGAN) to turn images and video clips into sequences of 4,096 tokens. 🔗 Then, it treats everything - text, images, and videos - as one long series of tokens to predict. 🔮 During training, it just tries to guess the next token, whether that's a word, part of an image, or a video frame. 𝗖𝗮𝘃𝗲𝗮𝘁𝘀 𝗼𝗻 𝘁𝗵𝗲 𝗿𝗲𝘀𝘂𝗹𝘁𝘀: 👉 In image generation, Emu3 beats SDXL, but it’s also much bigger (8B vs 3.5B). It would be more difficult to beat the real diffusion GOAT FLUX-dev. 👉 In vision, authors also don’t show a comparison against all the current SOTA models like Qwen-VL or Pixtral. This approach is exciting because it's simple (next token prediction) and scalable(handles all sorts of data)! Read the paper 👉 https://huggingface.co/papers/2409.18869
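For intuition, here is a toy sketch of the unified objective described above: once images and video clips are mapped to discrete codes, everything becomes one token sequence and training is plain next-token cross-entropy. This is purely illustrative — the vocabulary size, the tiny model, and the random "tokens" below are stand-ins, not Emu3's actual tokenizer or 8B architecture.

```python
# Illustrative sketch of "everything is a token" next-token training; not Emu3's code.
import torch
import torch.nn as nn
import torch.nn.functional as F

VOCAB = 65_536               # assumed joint vocab: text tokens + discrete visual codes
D_MODEL, N_LAYERS = 512, 4   # toy sizes, nothing like the real 8B model

class TinyCausalLM(nn.Module):
    def __init__(self):
        super().__init__()
        self.embed = nn.Embedding(VOCAB, D_MODEL)
        layer = nn.TransformerEncoderLayer(D_MODEL, nhead=8, batch_first=True)
        self.blocks = nn.TransformerEncoder(layer, N_LAYERS)
        self.head = nn.Linear(D_MODEL, VOCAB)

    def forward(self, tokens):
        # Causal mask so each position only attends to earlier tokens.
        mask = nn.Transformer.generate_square_subsequent_mask(tokens.size(1))
        h = self.blocks(self.embed(tokens), mask=mask)
        return self.head(h)

def next_token_loss(model, tokens):
    # tokens: (batch, seq) mixing text ids and visual codes in one flat sequence.
    inputs, targets = tokens[:, :-1], tokens[:, 1:]
    logits = model(inputs)
    return F.cross_entropy(logits.reshape(-1, VOCAB), targets.reshape(-1))

# Fake batch: pretend a short text prompt is followed by visual codes for an image.
batch = torch.randint(0, VOCAB, (2, 128))
loss = next_token_loss(TinyCausalLM(), batch)
loss.backward()
```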
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/vMHyHj9jLYvFNnlg-5_rD.png" } ]
[]
[ { "reaction": "🔥", "users": [ "umair894", "acamilogg88", "lamhieu", "Donutanti" ], "count": 4 }, { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-02T16:19:49.000Z
2024-10-02T16:19:49.037Z
[]
/posts/m-ric/956298330215271
1,167
0
611190899120171
[ { "type": "text", "value": "🔊 Great new tool for audio: Voice Restoration with a Transformer-based Model!", "raw": "🔊 Great new tool for audio: Voice Restoration with a Transformer-based Model!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Enable your sound to hear the improvement.", "raw": "Enable your sound to hear the improvement.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Try it out: ", "raw": "Try it out: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/jadechoghari/VoiceRestore", "href": null, "resource": { "type": "space", "id": "jadechoghari/VoiceRestore", "discussionNum": null }, "url": "https://huggingface.co/spaces/jadechoghari/VoiceRestore", "code": null, "user": null, "label": null, "lang": null } ]
🔊 Great new tool for audio: Voice Restoration with a Transformer-based Model! Enable your sound to hear the improvement. Try it out: https://huggingface.co/spaces/jadechoghari/VoiceRestore
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/fdMqkTEotaztd4h9X42Yq.mp4" } ]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-02T16:00:52.000Z
2024-10-02T16:00:52.208Z
[]
/posts/fdaudens/611190899120171
477
0
111739100909292
[ { "type": "text", "value": "ESPER 2 IS HERE!", "raw": "ESPER 2 IS HERE!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- High quality code-instruct and chat with a DevOps focus: AWS, Azure, GCP, Terraform, Dockerfiles, pipelines, shell scripts and more!", "raw": "- High quality code-instruct and chat with a DevOps focus: AWS, Azure, GCP, Terraform, Dockerfiles, pipelines, shell scripts and more!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Powered by our new ", "raw": "- Powered by our new ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/sequelbox/Titanium", "href": null, "resource": { "type": "dataset", "id": "sequelbox/Titanium", "discussionNum": null }, "url": "https://huggingface.co/datasets/sequelbox/Titanium", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " dataset, along with the newest versions of ", "raw": " dataset, along with the newest versions of ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/sequelbox/Tachibana", "href": null, "resource": { "type": "dataset", "id": "sequelbox/Tachibana", "discussionNum": null }, "url": "https://huggingface.co/datasets/sequelbox/Tachibana", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " and ", "raw": " and ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/sequelbox/Supernova", "href": null, "resource": { "type": "dataset", "id": "sequelbox/Supernova", "discussionNum": null }, "url": "https://huggingface.co/datasets/sequelbox/Supernova", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Available now for Llama 3.1 8b, more models to come!", "raw": "- Available now for Llama 3.1 8b, more models to come!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Get the model: ", "raw": "Get the model: ", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/ValiantLabs/Llama3.1-8B-Esper2", "href": null, "resource": { "type": "model", "id": "ValiantLabs/Llama3.1-8B-Esper2", "discussionNum": null }, "url": "https://huggingface.co/ValiantLabs/Llama3.1-8B-Esper2", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "for everyone to use :)", "raw": "for everyone to use :)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "more soon :)", "raw": "more soon :)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
ESPER 2 IS HERE! - High quality code-instruct and chat with a DevOps focus: AWS, Azure, GCP, Terraform, Dockerfiles, pipelines, shell scripts and more! - Powered by our new https://huggingface.co/datasets/sequelbox/Titanium dataset, along with the newest versions of https://huggingface.co/datasets/sequelbox/Tachibana and https://huggingface.co/datasets/sequelbox/Supernova - Available now for Llama 3.1 8b, more models to come! Get the model: https://huggingface.co/ValiantLabs/Llama3.1-8B-Esper2 for everyone to use :) more soon :)
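If you want to try it from Python, the standard transformers loading pattern below should work; the DevOps prompt and the generation settings are just examples, not recommendations from the model card.

```python
# Standard Hugging Face loading pattern for the released checkpoint (example prompt only).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ValiantLabs/Llama3.1-8B-Esper2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [
    {"role": "user", "content": "Write a Dockerfile for a small FastAPI service."}
]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(inputs, max_new_tokens=512)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))
```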
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63444f2687964b331809eb55/WvZivsvKsM_t0tBtakovK.png", "fullname": "t.d.a.g.", "name": "sequelbox", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 51, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-02T15:52:22.000Z
2024-10-02T15:53:40.483Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63444f2687964b331809eb55/WvZivsvKsM_t0tBtakovK.png", "fullname": "t.d.a.g.", "name": "sequelbox", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 51, "isFollowing": false } ]
/posts/sequelbox/111739100909292
378
1
396806572990287
[ { "type": "text", "value": "📢If you're in the emotion and emotion causes extraction domain and wish to bring something new in Generative AI, then the repository ", "raw": "📢If you're in the emotion and emotion causes extraction domain and wish to bring something new in Generative AI, then the repository ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⭐ ", "raw": "⭐ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/declare-lab/conv-emotion", "href": "https://github.com/declare-lab/conv-emotion", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Is worth to check out as a survey of the conventional method approaches.", "raw": "Is worth to check out as a survey of the conventional method approaches.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "💎 It hosts implementation of the most conventional methods that are relevant even today in the era of Generative AI. For example, the variations of LSTM-based encoders are still represent a solid framework for it and according to the SemEval2024 Task 3 where it is possible to see several papers exploit such a conventional concept:", "raw": "💎 It hosts implementation of the most conventional methods that are relevant even today in the era of Generative AI. 
For example, the variations of LSTM-based encoders are still represent a solid framework for it and according to the SemEval2024 Task 3 where it is possible to see several papers exploit such a conventional concept:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://aclanthology.org/volumes/2024.semeval-1/", "href": "https://aclanthology.org/volumes/2024.semeval-1/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "An example of the Top 3 submission:", "raw": "An example of the Top 3 submission:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://aclanthology.org/2024.semeval-1.164/", "href": "https://aclanthology.org/2024.semeval-1.164/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🤔 Similar techniques empowered by transformers such as XLSTM might be a promising step towards even more robust solutions:", "raw": "🤔 Similar techniques empowered by transformers such as XLSTM might be a promising step towards even more robust solutions:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/NX-AI/xlstm", "href": "https://github.com/NX-AI/xlstm", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
📢 If you're in the emotion and emotion-cause extraction domain and wish to bring something new to Generative AI, then the repository ⭐ https://github.com/declare-lab/conv-emotion is worth checking out as a survey of the conventional approaches. 💎 It hosts implementations of the most conventional methods, which are still relevant in the era of Generative AI. For example, variations of LSTM-based encoders still represent a solid framework for this task, as seen in SemEval-2024 Task 3, where several papers exploit exactly this conventional concept: https://aclanthology.org/volumes/2024.semeval-1/ An example of a Top 3 submission: https://aclanthology.org/2024.semeval-1.164/ 🤔 Similar techniques empowered by newer architectures such as xLSTM might be a promising step towards even more robust solutions: https://github.com/NX-AI/xlstm
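For readers new to this line of work, here is a generic sketch of the kind of LSTM-based conversation encoder these methods build on: a BiLSTM runs over per-utterance embeddings and a linear head predicts an emotion per utterance. It is an illustration only, not a re-implementation of any specific model in the repository, and the dimensions are made up.

```python
# Generic BiLSTM utterance-level emotion tagger; an illustrative sketch, not a specific model.
import torch
import torch.nn as nn

class BiLSTMEmotionTagger(nn.Module):
    def __init__(self, utt_dim=768, hidden=256, n_emotions=7):
        super().__init__()
        # Context encoder over the sequence of utterance embeddings in a dialogue.
        self.context = nn.LSTM(utt_dim, hidden, batch_first=True, bidirectional=True)
        self.classifier = nn.Linear(2 * hidden, n_emotions)

    def forward(self, utterance_embs):
        # utterance_embs: (batch, n_utterances, utt_dim), e.g. sentence embeddings.
        ctx, _ = self.context(utterance_embs)
        return self.classifier(ctx)  # per-utterance emotion logits

logits = BiLSTMEmotionTagger()(torch.randn(2, 10, 768))
print(logits.shape)  # torch.Size([2, 10, 7])
```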
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/Dz1jxjQBJCd-EleIZIzfN.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/8UMHh6rjQQnLGaqTDza8y.png" } ]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-02T13:36:34.000Z
2024-10-02T13:36:34.750Z
[]
/posts/nicolay-r/396806572990287
366
0
384914011797895
[ { "type": "text", "value": "Why is argilla/FinePersonas-v0.1 great for synthetic data generation? It can be used to synthesise realistic and diverse data of the customer personas your company is interested in!", "raw": "Why is argilla/FinePersonas-v0.1 great for synthetic data generation? It can be used to synthesise realistic and diverse data of the customer personas your company is interested in!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Dataset: ", "raw": "Dataset: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/argilla/FinePersonas-v0.1", "href": null, "resource": { "type": "dataset", "id": "argilla/FinePersonas-v0.1", "discussionNum": null }, "url": "https://huggingface.co/datasets/argilla/FinePersonas-v0.1", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Example usage: ", "raw": "Example usage: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://distilabel.argilla.io/dev/sections/pipeline_samples/examples/fine_personas_social_network/", "href": "https://distilabel.argilla.io/dev/sections/pipeline_samples/examples/fine_personas_social_network/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Why is argilla/FinePersonas-v0.1 great for synthetic data generation? It can be used to synthesise realistic and diverse data for the customer personas your company is interested in! Dataset: https://huggingface.co/datasets/argilla/FinePersonas-v0.1 Example usage: https://distilabel.argilla.io/dev/sections/pipeline_samples/examples/fine_personas_social_network/
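A quick sketch of what that looks like in practice: stream a handful of personas and turn each into a prompt for generating synthetic customer-support data. The `persona` column name is taken from the dataset card, and the prompt template is just an example.

```python
# Sketch: turn a few FinePersonas rows into synthetic-data prompts (example template only).
from datasets import load_dataset

personas = load_dataset("argilla/FinePersonas-v0.1", split="train", streaming=True)

prompts = []
for i, row in enumerate(personas):
    if i >= 5:
        break
    prompts.append(
        f"You are {row['persona']}\n"
        "Write a realistic support ticket you might send to a SaaS company."
    )

for p in prompts:
    print(p, end="\n\n")
```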
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg", "fullname": "David Berenstein", "name": "davidberenstein1957", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 167, "isFollowing": false }
[]
[]
[ { "reaction": "🤗", "users": [ "davidberenstein1957", "John6666", "osanseviero", "Xdotnet", "win10" ], "count": 5 }, { "reaction": "🔥", "users": [ "nicolay-r", "davidberenstein1957" ], "count": 2 } ]
2024-10-02T12:30:23.000Z
2024-10-02T13:46:30.371Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false } ]
/posts/davidberenstein1957/384914011797895
1,134
1
429237729959569
[ { "type": "text", "value": "Annif 1.2 has been released!", "raw": "Annif 1.2 has been released!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/NatLibFi/Annif/releases/tag/v1.2.0", "href": "https://github.com/NatLibFi/Annif/releases/tag/v1.2.0", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This release introduces language detection capabilities in the REST API and CLI, improves 🤗 Hugging Face Hub integration, and also includes the usual maintenance work and minor bug fixes.", "raw": "This release introduces language detection capabilities in the REST API and CLI, improves 🤗 Hugging Face Hub integration, and also includes the usual maintenance work and minor bug fixes.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The new REST API endpoint ", "raw": "The new REST API endpoint ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`/v1/detect-language`", "href": null, "resource": null, "url": null, "code": "/v1/detect-language", "user": null, "label": null, "lang": null }, { "type": "text", "value": " expects POST requests that contain a JSON object with the text whose language is to be analyzed and a list of candidate languages. Similarly, the CLI has a new command ", "raw": " expects POST requests that contain a JSON object with the text whose language is to be analyzed and a list of candidate languages. Similarly, the CLI has a new command ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`annif detect-language`", "href": null, "resource": null, "url": null, "code": "annif detect-language", "user": null, "label": null, "lang": null }, { "type": "text", "value": ". Annif projects are typically language specific, so a text of a given language needs to be processed with a project intended for that language; the language detection feature can help in this. For details see this [Wiki page](", "raw": ". Annif projects are typically language specific, so a text of a given language needs to be processed with a project intended for that language; the language detection feature can help in this. 
For details see this [Wiki page](", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/NatLibFi/Annif/wiki/Language-detection", "href": "https://github.com/NatLibFi/Annif/wiki/Language-detection", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "). The language detection is performed with the Simplemma library by [@adbar](", "raw": "). The language detection is performed with the Simplemma library by [@adbar](", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/adbar", "href": "https://github.com/adbar", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ") et al.", "raw": ") et al.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The ", "raw": "The ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`annif download`", "href": null, "resource": null, "url": null, "code": "annif download", "user": null, "label": null, "lang": null }, { "type": "text", "value": " command has a new ", "raw": " command has a new ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`--trust-repo`", "href": null, "resource": null, "url": null, "code": "--trust-repo", "user": null, "label": null, "lang": null }, { "type": "text", "value": " option, which needs to be used if the repository to download from has not been used previously (that is if the repository does not appear in the local Hugging Face Hub cache). This option is introduced to raise awareness of the risks of downloading projects from the internet; the project downloads should only be done from trusted sources. For more information see the [Hugging Face Hub documentation](", "raw": " option, which needs to be used if the repository to download from has not been used previously (that is if the repository does not appear in the local Hugging Face Hub cache). This option is introduced to raise awareness of the risks of downloading projects from the internet; the project downloads should only be done from trusted sources. 
For more information see the [Hugging Face Hub documentation](", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/docs/hub/en/security-pickle", "href": "https://huggingface.co/docs/hub/en/security-pickle", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ").", "raw": ").", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "This release also includes automation of downloading the NLTK datapackage used for tokenization to simplify Annif installation. Maintenance tasks include upgrading dependencies, including a new version of Simplemma that allows better control over memory usage. The bug fixes include restoring the ", "raw": "This release also includes automation of downloading the NLTK datapackage used for tokenization to simplify Annif installation. Maintenance tasks include upgrading dependencies, including a new version of Simplemma that allows better control over memory usage. The bug fixes include restoring the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`--host`", "href": null, "resource": null, "url": null, "code": "--host", "user": null, "label": null, "lang": null }, { "type": "text", "value": " option of the ", "raw": " option of the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`annif run`", "href": null, "resource": null, "url": null, "code": "annif run", "user": null, "label": null, "lang": null }, { "type": "text", "value": " command.", "raw": " command.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Python 3.12 is now fully supported (previously NN-ensemble and STWFSA backends were not supported on Python 3.12).", "raw": "Python 3.12 is now fully supported (previously NN-ensemble and STWFSA backends were not supported on Python 3.12).", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/NatLibFi/Annif", "href": null, "resource": { "type": "space", "id": "NatLibFi/Annif", "discussionNum": null }, "url": "https://huggingface.co/spaces/NatLibFi/Annif", "code": null, "user": null, 
"label": null, "lang": null } ]
Annif 1.2 has been released! https://github.com/NatLibFi/Annif/releases/tag/v1.2.0 This release introduces language detection capabilities in the REST API and CLI, improves 🤗 Hugging Face Hub integration, and also includes the usual maintenance work and minor bug fixes. The new REST API endpoint `/v1/detect-language` expects POST requests that contain a JSON object with the text whose language is to be analyzed and a list of candidate languages. Similarly, the CLI has a new command `annif detect-language`. Annif projects are typically language specific, so a text of a given language needs to be processed with a project intended for that language; the language detection feature can help in this. For details see this [Wiki page](https://github.com/NatLibFi/Annif/wiki/Language-detection). The language detection is performed with the Simplemma library by [@adbar](https://github.com/adbar) et al. The `annif download` command has a new `--trust-repo` option, which needs to be used if the repository to download from has not been used previously (that is if the repository does not appear in the local Hugging Face Hub cache). This option is introduced to raise awareness of the risks of downloading projects from the internet; the project downloads should only be done from trusted sources. For more information see the [Hugging Face Hub documentation](https://huggingface.co/docs/hub/en/security-pickle). This release also includes automation of downloading the NLTK datapackage used for tokenization to simplify Annif installation. Maintenance tasks include upgrading dependencies, including a new version of Simplemma that allows better control over memory usage. The bug fixes include restoring the `--host` option of the `annif run` command. Python 3.12 is now fully supported (previously NN-ensemble and STWFSA backends were not supported on Python 3.12). https://huggingface.co/spaces/NatLibFi/Annif
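As a quick illustration of the new endpoint, the snippet below POSTs a detection request to a locally running `annif run` instance. The default port and the exact request/response field names ("text", "languages") are assumptions on my part — see the linked Wiki page for the authoritative schema.

```python
# Rough sketch of calling /v1/detect-language on a local Annif instance.
# Port and field names are assumptions; check the Annif Wiki for the real schema.
import requests

resp = requests.post(
    "http://localhost:5000/v1/detect-language",
    json={
        "text": "Annif on työkalu automaattiseen sisällönkuvailuun.",
        "languages": ["fi", "sv", "en"],
    },
    timeout=10,
)
resp.raise_for_status()
print(resp.json())  # expected: scores for each candidate language
```

The CLI command `annif detect-language` covers the same use case for local scripting.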
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64ef1edb3831f2349154d6bd/L_qGdGF-a_ynqDIPjV6rF.jpeg", "fullname": "Juho Inkinen", "name": "juhoinkinen", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "deepak7mahto" ], "count": 2 }, { "reaction": "🚀", "users": [ "holehan" ], "count": 1 } ]
2024-10-02T11:35:39.000Z
2024-10-02T11:36:16.368Z
[]
/posts/juhoinkinen/429237729959569
399
0
554506807940517
[ { "type": "text", "value": "Instead of calculating errors, LLMs are better at doing self-evaluation!", "raw": "Instead of calculating errors, LLMs are better at doing self-evaluation!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It's easier to assess the quality of a response than to generate one which enables LLM models to evaluate their own performance.", "raw": "It's easier to assess the quality of a response than to generate one which enables LLM models to evaluate their own performance.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It’s like trying to figure out how many ingredients you left out while cooking a recipe but without knowing exactly which ones you missed. LLM models like experienced cooks, can’t always tell you what specific step they skipped but they can guess how close they got to the final dish. For example, if your meal tastes 75%, you know something is off, but you are not sure what exactly.", "raw": "It’s like trying to figure out how many ingredients you left out while cooking a recipe but without knowing exactly which ones you missed. LLM models like experienced cooks, can’t always tell you what specific step they skipped but they can guess how close they got to the final dish. For example, if your meal tastes 75%, you know something is off, but you are not sure what exactly.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Now instead of focusing on identifying every missed ingredient, think about just estimating how well the dish turned out overall. It’s easier to guess if the meal tastes good than to pinpoint each small mistake. LLMs do the same, they estimate how well they performed without knowing every single error, allowing them to self-evaluate! ", "raw": "Now instead of focusing on identifying every missed ingredient, think about just estimating how well the dish turned out overall. It’s easier to guess if the meal tastes good than to pinpoint each small mistake. LLMs do the same, they estimate how well they performed without knowing every single error, allowing them to self-evaluate! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/meta-llama/Llama-3.2-1B", "href": null, "resource": { "type": "model", "id": "meta-llama/Llama-3.2-1B", "discussionNum": null }, "url": "https://huggingface.co/meta-llama/Llama-3.2-1B", "code": null, "user": null, "label": null, "lang": null } ]
Instead of calculating errors, LLMs are better at doing self-evaluation! It's easier to assess the quality of a response than to generate one, which enables LLMs to evaluate their own performance. It’s like trying to figure out how many ingredients you left out while cooking a recipe, without knowing exactly which ones you missed. LLMs, like experienced cooks, can’t always tell you which specific step they skipped, but they can guess how close they got to the final dish. For example, if your meal only tastes 75% right, you know something is off, but you are not sure what exactly. Now, instead of focusing on identifying every missed ingredient, think about just estimating how well the dish turned out overall. It’s easier to guess whether the meal tastes good than to pinpoint each small mistake. LLMs do the same: they estimate how well they performed without knowing every single error, which allows them to self-evaluate! https://huggingface.co/meta-llama/Llama-3.2-1B
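A toy sketch of that idea: ask a model for an answer, then ask the same model to grade its own answer. The linked checkpoint is gated and is a base (non-instruct) model, so any small instruct model can stand in; the 0-100 rubric is just an example, not a standard protocol.

```python
# Toy self-evaluation loop: generate an answer, then have the same model score it.
from transformers import pipeline

generator = pipeline("text-generation", model="meta-llama/Llama-3.2-1B")

question = "Explain in two sentences why the sky is blue."
answer = generator(question, max_new_tokens=96, return_full_text=False)[0]["generated_text"]

critique_prompt = (
    f"Question: {question}\n"
    f"Answer: {answer}\n"
    "On a scale of 0 to 100, how complete and correct is this answer? "
    "Reply with a single number: "
)
score = generator(critique_prompt, max_new_tokens=8, return_full_text=False)[0]["generated_text"]

print(answer)
print("self-assigned score:", score.strip())
```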
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/y1W6Co4jIYB95Cx6Tjrsd.jpeg", "fullname": "Muhammad Imran Zaman", "name": "ImranzamanML", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 32, "isFollowing": false }
[]
[]
[ { "reaction": "🤝", "users": [ "zabdideen", "Oliver723653", "osanseviero", "AtAndDev" ], "count": 4 }, { "reaction": "🧠", "users": [ "zabdideen", "Oliver723653", "kartiksrma", "AtAndDev" ], "count": 4 }, { "reaction": "👀", "users": [ "zabdideen", "John6666", "AtAndDev" ], "count": 3 }, { "reaction": "🚀", "users": [ "zabdideen" ], "count": 1 }, { "reaction": "❤️", "users": [ "zabdideen" ], "count": 1 }, { "reaction": "🤗", "users": [ "zabdideen" ], "count": 1 }, { "reaction": "😎", "users": [ "zabdideen" ], "count": 1 }, { "reaction": "➕", "users": [ "zabdideen" ], "count": 1 } ]
2024-10-02T08:53:53.000Z
2024-10-02T08:53:53.582Z
[]
/posts/ImranzamanML/554506807940517
1,398
0
427430421573127
[ { "type": "text", "value": "maybe", "raw": "maybe", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
maybe
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png", "fullname": "Noa Roggendorff", "name": "nroggendorff", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 141, "isFollowing": false }
[]
[]
[ { "reaction": "🧠", "users": [ "John6666", "Smorty100", "Clausss", "pcalhoun" ], "count": 4 } ]
2024-10-02T05:14:40.000Z
2024-10-02T05:14:40.448Z
[]
/posts/nroggendorff/427430421573127
1,096
0
556754641479004
[ { "type": "text", "value": "We shut down XetHub today after almost 2 years. What we learned from launching our Git-scaled product from scratch:", "raw": "We shut down XetHub today after almost 2 years. What we learned from launching our Git-scaled product from scratch:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Don't make me change my workflow", "raw": "- Don't make me change my workflow", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Data inertia is real", "raw": "- Data inertia is real", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- ML best practices are still evolving", "raw": "- ML best practices are still evolving", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Closing the door on our public product lets us focus on our new goal of scaling HF Hub's storage backend to improve devX for a larger community. We'd love to hear your thoughts on what experiences we can improve!", "raw": "Closing the door on our public product lets us focus on our new goal of scaling HF Hub's storage backend to improve devX for a larger community. We'd love to hear your thoughts on what experiences we can improve!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read the full post: ", "raw": "Read the full post: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://xethub.com/blog/shutting-down-xethub-learnings-and-takeaways", "href": "https://xethub.com/blog/shutting-down-xethub-learnings-and-takeaways", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
We shut down XetHub today after almost 2 years. What we learned from launching our Git-scaled product from scratch: - Don't make me change my workflow - Data inertia is real - ML best practices are still evolving Closing the door on our public product lets us focus on our new goal of scaling HF Hub's storage backend to improve devX for a larger community. We'd love to hear your thoughts on what experiences we can improve! Read the full post: https://xethub.com/blog/shutting-down-xethub-learnings-and-takeaways
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66b05ca6e7c57eac7cafbbc4/nddUkS3xu78cxCS-r7-xB.jpeg", "fullname": "Ann Huang", "name": "erinys", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 27, "isFollowing": false }
[]
[]
[ { "reaction": "❤️", "users": [ "jsulz", "John6666", "Awhildy", "derek-thomas", "louisbrulenaudet", "ImranzamanML", "osanseviero", "Aurelien-Morgan", "AtAndDev" ], "count": 9 }, { "reaction": "👀", "users": [ "John6666", "alielfilali01", "AtAndDev" ], "count": 3 } ]
2024-10-01T19:24:52.000Z
2024-10-03T11:32:46.071Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66b05ca6e7c57eac7cafbbc4/nddUkS3xu78cxCS-r7-xB.jpeg", "fullname": "Ann Huang", "name": "erinys", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 27, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png", "fullname": "Omar Sanseviero", "name": "osanseviero", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 2868, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/4f93ZrYdaKfK3F53IB51x.jpeg", "fullname": "Cyril", "name": "cyrilzakka", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 27, "isFollowing": false } ]
/posts/erinys/556754641479004
1,938
6
428350680107452
[ { "type": "text", "value": "Researchers have introduced OpenDevin, an open-source platform for building powerful AI agents that interact with the world through software interfaces.", "raw": "Researchers have introduced OpenDevin, an open-source platform for building powerful AI agents that interact with the world through software interfaces.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here is a speed-run of features:", "raw": "Here is a speed-run of features:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Flexible agent abstraction, allowing easy implementation of diverse AI agents ", "raw": "- Flexible agent abstraction, allowing easy implementation of diverse AI agents ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Sandboxed Linux environment and web browser for safe code execution and web interaction ", "raw": "- Sandboxed Linux environment and web browser for safe code execution and web interaction ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Core actions including IPythonRunCellAction, CmdRunAction, and BrowserInteractiveAction ", "raw": "- Core actions including IPythonRunCellAction, CmdRunAction, and BrowserInteractiveAction ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- AgentSkills library with reusable tools like file-editing utilities and multi-modal document parsing ", "raw": "- AgentSkills library with reusable tools like file-editing utilities and multi-modal document parsing ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Multi-agent delegation for complex task solving ", "raw": "- Multi-agent delegation for complex task solving ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": 
null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Comprehensive evaluation framework with 15 benchmarks across software engineering and the web ", "raw": "- Comprehensive evaluation framework with 15 benchmarks across software engineering and the web ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here is how you get Devin working:", "raw": "Here is how you get Devin working:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "1. Set up the environment:", "raw": "1. Set up the environment:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Install OpenDevin by following the instructions in the GitHub repository (", "raw": " - Install OpenDevin by following the instructions in the GitHub repository (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/OpenDevin/OpenDevin", "href": "https://github.com/OpenDevin/OpenDevin", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ").", "raw": ").", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Ensure you have the necessary dependencies installed.", "raw": " - Ensure you have the necessary dependencies installed.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "2. Choose an agent:", "raw": "2. 
Choose an agent:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Select an agent from the AgentHub, such as the CodeActAgent or BrowsingAgent.", "raw": " - Select an agent from the AgentHub, such as the CodeActAgent or BrowsingAgent.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Alternatively, create your own agent by implementing the agent abstraction.", "raw": " - Alternatively, create your own agent by implementing the agent abstraction.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "3. Configure the environment:", "raw": "3. Configure the environment:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Set up the sandboxed Linux environment and web browser.", "raw": " - Set up the sandboxed Linux environment and web browser.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Mount any necessary files or directories into the workspace.", "raw": " - Mount any necessary files or directories into the workspace.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "4. Define the task:", "raw": "4. 
Define the task:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Specify the task you want the agent to perform, such as writing code, debugging, or web browsing.", "raw": " - Specify the task you want the agent to perform, such as writing code, debugging, or web browsing.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "5. Initialize the agent:", "raw": "5. Initialize the agent:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Create an instance of your chosen agent.", "raw": " - Create an instance of your chosen agent.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Set any necessary parameters or prompts.", "raw": " - Set any necessary parameters or prompts.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "6. Start the interaction:", "raw": "6. Start the interaction:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " - Begin the agent's execution loop, which typically involves: ", "raw": " - Begin the agent's execution loop, which typically involves: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " a. The agent perceiving the current state ", "raw": " a. The agent perceiving the current state ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " b. Deciding on an action ", "raw": " b. 
Deciding on an action ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " c. Executing the action in the environment ", "raw": " c. Executing the action in the environment ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " d. Observing the results ", "raw": " d. Observing the results ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Continued in comments...", "raw": "Continued in comments...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Researchers have introduced OpenDevin, an open-source platform for building powerful AI agents that interact with the world through software interfaces. Here is a speed-run of features: - Flexible agent abstraction, allowing easy implementation of diverse AI agents - Sandboxed Linux environment and web browser for safe code execution and web interaction - Core actions including IPythonRunCellAction, CmdRunAction, and BrowserInteractiveAction - AgentSkills library with reusable tools like file-editing utilities and multi-modal document parsing - Multi-agent delegation for complex task solving - Comprehensive evaluation framework with 15 benchmarks across software engineering and the web Here is how you get Devin working: 1. Set up the environment: - Install OpenDevin by following the instructions in the GitHub repository (https://github.com/OpenDevin/OpenDevin). - Ensure you have the necessary dependencies installed. 2. Choose an agent: - Select an agent from the AgentHub, such as the CodeActAgent or BrowsingAgent. - Alternatively, create your own agent by implementing the agent abstraction. 3. Configure the environment: - Set up the sandboxed Linux environment and web browser. - Mount any necessary files or directories into the workspace. 4. Define the task: - Specify the task you want the agent to perform, such as writing code, debugging, or web browsing. 5. Initialize the agent: - Create an instance of your chosen agent. - Set any necessary parameters or prompts. 6. Start the interaction: - Begin the agent's execution loop, which typically involves: a. The agent perceiving the current state b. Deciding on an action c. Executing the action in the environment d. Observing the results Continued in comments...
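The post above describes OpenDevin's agent abstraction and its perceive-decide-act-observe loop. The snippet below is a minimal, self-contained sketch of that loop only; it is not OpenDevin's actual API, and every class and method name in it (EchoAgent, Sandbox, the reuse of CmdRunAction as a plain dataclass) is a hypothetical stand-in for the concepts named in the post.

```python
# Illustrative sketch only, NOT OpenDevin's real API: hypothetical stand-ins
# for an agent that perceives state, decides on an action, executes it in a
# sandbox, and observes the result (step 6 of the walkthrough above).
from dataclasses import dataclass


@dataclass
class CmdRunAction:
    """Toy counterpart of the CmdRunAction concept: a single shell command."""
    command: str


class Sandbox:
    """Stand-in for the sandboxed Linux environment; nothing is really executed."""
    def execute(self, action: CmdRunAction) -> str:
        return f"(pretend output of: {action.command})"


class EchoAgent:
    """Toy agent: turns the current state into one command per step."""
    def step(self, state: str) -> CmdRunAction:
        return CmdRunAction(command=f"echo {state!r}")


def run(agent: EchoAgent, sandbox: Sandbox, task: str, max_steps: int = 3) -> list[str]:
    observations = []
    state = task
    for _ in range(max_steps):                  # the execution loop
        action = agent.step(state)              # decide on an action
        observation = sandbox.execute(action)   # execute it in the environment
        observations.append(observation)        # observe the result
        state = observation                     # perceive the new state
    return observations


if __name__ == "__main__":
    print(run(EchoAgent(), Sandbox(), "list the files in the workspace"))
```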
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/G6lEBqg7O4QJT2Fx6dN4Q.jpeg" } ]
[]
[ { "reaction": "👀", "users": [ "John6666", "ImranzamanML", "louisbrulenaudet" ], "count": 3 }, { "reaction": "👍", "users": [ "PetaniHandal", "osanseviero" ], "count": 2 } ]
2024-10-01T18:55:55.000Z
2024-10-02T07:36:44.632Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png", "fullname": "Kuldeep Singh Sidhu", "name": "singhsidhukuldeep", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 219, "isFollowing": false }, { "avatarUrl": "/avatars/5cbfa6cbde933503bbc3577cf713e7b5.svg", "fullname": "Friedrich Marty", "name": "Smorty100", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/singhsidhukuldeep/428350680107452
1,195
2
344173101032427
[ { "type": "text", "value": "If you feel like you missed out for ECCV 2024, there's an app to browse the papers, rank for popularity, filter for open models, datasets and demos 📝 ", "raw": "If you feel like you missed out for ECCV 2024, there's an app to browse the papers, rank for popularity, filter for open models, datasets and demos 📝 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Get started at ", "raw": "Get started at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/ECCV/ECCV2024-papers", "href": null, "resource": { "type": "space", "id": "ECCV/ECCV2024-papers", "discussionNum": null }, "url": "https://huggingface.co/spaces/ECCV/ECCV2024-papers", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ✨ ", "raw": " ✨ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
If you feel like you missed out on ECCV 2024, there's an app to browse the papers, rank them by popularity, filter for open models, datasets and demos 📝 

Get started at https://huggingface.co/spaces/ECCV/ECCV2024-papers ✨
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/tosGnRNl4VZBn9peqMOFC.jpeg" } ]
[]
[ { "reaction": "👍", "users": [ "KingNish", "YaTharThShaRma999", "imexisting", "atasoglu", "John6666", "liangshuxin", "osanseviero", "nicolay-r", "SVHawk13", "ruffy369", "gizemsarsinlar" ], "count": 11 }, { "reaction": "🔥", "users": [ "DmitryRyumin", "YaTharThShaRma999", "erinys", "osanseviero", "SVHawk13", "hitchhiker3010" ], "count": 6 } ]
2024-10-01T14:49:41.000Z
2024-10-01T14:49:41.897Z
[]
/posts/merve/344173101032427
3,984
0
801592231349081
[ { "type": "text", "value": "Big Congrats on the BIG RELEASE by ", "raw": "Big Congrats on the BIG RELEASE by ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@mlabonne", "href": null, "resource": null, "url": null, "code": null, "user": "mlabonne", "label": null, "lang": null }, { "type": "text", "value": " and team at ", "raw": " and team at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/liquidai", "href": "https://huggingface.co/liquidai", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ...", "raw": " ...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "testing it out now to make a dataset , i cant hardly wait... but one question 👇🏻 why / wen ? 😅🚀🚀", "raw": "testing it out now to make a dataset , i cant hardly wait... but one question 👇🏻 why / wen ? 😅🚀🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "check out the blog post : ", "raw": "check out the blog post : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.liquid.ai/liquid-foundation-models", "href": "https://www.liquid.ai/liquid-foundation-models", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Big Congrats on the BIG RELEASE by @mlabonne and team at https://huggingface.co/liquidai ... testing it out now to make a dataset , i cant hardly wait... but one question 👇🏻 why / wen ? 😅🚀🚀 check out the blog post : https://www.liquid.ai/liquid-foundation-models
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg", "fullname": "Joseph [open/acc] Pollack", "name": "Tonic", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 313, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/62a3bb1cd0d8c2c2169f0b88/C_dbtBXA7SzfQKylIh2Xz.png" } ]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b8e2ba285851687028d395/JtUGAwVh_4cDEsjNcfpye.png", "fullname": "Maxime Labonne", "name": "mlabonne", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 3486 } ]
[ { "reaction": "❤️", "users": [ "mlabonne", "pabloce", "AtAndDev", "Yavin5", "louisbrulenaudet", "KingNish", "nicolay-r" ], "count": 7 }, { "reaction": "🔥", "users": [ "YaTharThShaRma999", "AtAndDev", "John6666", "KingNish", "Josephgflowers" ], "count": 5 } ]
2024-10-01T14:26:55.000Z
2024-10-01T14:49:45.904Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b8e2ba285851687028d395/JtUGAwVh_4cDEsjNcfpye.png", "fullname": "Maxime Labonne", "name": "mlabonne", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 3486, "isFollowing": false } ]
/posts/Tonic/801592231349081
2,253
1
424032805322662
[ { "type": "text", "value": "This is my first post, so I need to start with a bang!", "raw": "This is my first post, so I need to start with a bang!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The people over at ", "raw": "The people over at ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/Lichess", "href": "https://huggingface.co/Lichess", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " published some amazing data sets over the past weeks, including a collection of >1M standard chess games (", "raw": " published some amazing data sets over the past weeks, including a collection of >1M standard chess games (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/Lichess/standard-chess-games", "href": null, "resource": { "type": "dataset", "id": "Lichess/standard-chess-games", "discussionNum": null }, "url": "https://huggingface.co/datasets/Lichess/standard-chess-games", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ").", "raw": ").", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Finally it's time to revive my chess buddy project from back in 2021 🎉", "raw": "Finally it's time to revive my chess buddy project from back in 2021 🎉", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "So without any further ado... I'm currently training my first character level LLM, and to be quite frank, I'm pretty astonished with the quality of my testing samples.", "raw": "So without any further ado... 
I'm currently training my first character level LLM, and to be quite frank, I'm pretty astonished with the quality of my testing samples.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I'm using ", "raw": "I'm using ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`e4 g6`", "href": null, "resource": null, "url": null, "code": "e4 g6", "user": null, "label": null, "lang": null }, { "type": "text", "value": ", the Modern Defense (", "raw": ", the Modern Defense (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://en.wikipedia.org/wiki/Modern_Defense", "href": "https://en.wikipedia.org/wiki/Modern_Defense", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ") as a validation sample.", "raw": ") as a validation sample.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "My model currently predicts mostly ", "raw": "My model currently predicts mostly ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`d4 Bg7`", "href": null, "resource": null, "url": null, "code": "d4 Bg7", "user": null, "label": null, "lang": null }, { "type": "text", "value": " which are the strongest next moves for white and black.", "raw": " which are the strongest next moves for white and black.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Now in between I see some results that take lower ranked moves, which makes me very excited.", "raw": "Now in between I see some results that take lower ranked moves, which makes me very excited.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Once the pre-training is done for the base model, I want to run some fine tuning on more specific data sets, which are ", "raw": "Once the pre-training is done for the base model, I want to run some fine tuning on more specific data sets, which are ", "href": null, "resource": 
null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/Lichess/chess-openings", "href": null, "resource": { "type": "dataset", "id": "Lichess/chess-openings", "discussionNum": null }, "url": "https://huggingface.co/datasets/Lichess/chess-openings", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/Lichess/chess-puzzles", "href": null, "resource": { "type": "dataset", "id": "Lichess/chess-puzzles", "discussionNum": null }, "url": "https://huggingface.co/datasets/Lichess/chess-puzzles", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here are some intermediate examples", "raw": "Here are some intermediate examples", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "code_fence", "value": null, "raw": "```\nStep 6000: \n1. e4 g6 13. Rb1 d5 14. Bd3 Nxf3 15. Nxf3 Nxe3+ 16. Rxd3 Rxd3 17. Rxd6 Rhe8 18. Nd6 Rxd4 19. Rxd7+ Kxd7 20. Re7 Rxe7 21. Qxe7 1-0\n\nStep 12000:\n1. e4 g6 22. Be2 Re8 23. Kg2 1-0\n1. d4 d5 2. c4 c6 3. Nf3 e6 4. dxe6 Qe7 5. Bb5+ Be8 6. Bxb7# 1-0\n1. d4 d5 2. dxe5 Bd6 3. Nc3 h6 4. e4 Bf5 5. exf5 Nd7 6. exd5 Nxd5 7. Bxc4 Bxe2 8. f4 d4 9. Ng3 Bb4+ 10. Bxd4 Qxd4 11. Nfxe2 O-O-O 12. Ne6 Qf5 13. fxg4 Nxe5\n\nStep 30000:\n1. e4 g6 2. d4 Bg7 3. Nf3 d6 4. b3 e6 5. Bb2 f5 6. e5 c5 7. dxc5 dxc5 8. Nbd2 Nf6 9. Nce2 O-O 10. Qe2 c4 11. Na4 Bd6 12. f3 Ng4 13. fxg4 1-0\n1. c4 c5 2. a3 Nc6 3. cxd5 Nxd5 4. Bf4 g6 5. Be2 Bg7 6. Nf3 Bg4 7. b4 Nf6 8. h3 Bxf3 9. Bxf3 a6 10. Nc3 O-O 11. Qc2 e\n```", "href": null, "resource": null, "url": null, "code": "Step 6000: \n1. e4 g6 13. Rb1 d5 14. Bd3 Nxf3 15. Nxf3 Nxe3+ 16. Rxd3 Rxd3 17. Rxd6 Rhe8 18. Nd6 Rxd4 19. Rxd7+ Kxd7 20. Re7 Rxe7 21. Qxe7 1-0\n\nStep 12000:\n1. e4 g6 22. Be2 Re8 23. Kg2 1-0\n1. d4 d5 2. c4 c6 3. Nf3 e6 4. dxe6 Qe7 5. Bb5+ Be8 6. Bxb7# 1-0\n1. d4 d5 2. dxe5 Bd6 3. Nc3 h6 4. e4 Bf5 5. exf5 Nd7 6. exd5 Nxd5 7. Bxc4 Bxe2 8. f4 d4 9. Ng3 Bb4+ 10. Bxd4 Qxd4 11. Nfxe2 O-O-O 12. Ne6 Qf5 13. fxg4 Nxe5\n\nStep 30000:\n1. e4 g6 2. d4 Bg7 3. Nf3 d6 4. b3 e6 5. Bb2 f5 6. e5 c5 7. dxc5 dxc5 8. Nbd2 Nf6 9. Nce2 O-O 10. Qe2 c4 11. Na4 Bd6 12. f3 Ng4 13. fxg4 1-0\n1. c4 c5 2. a3 Nc6 3. cxd5 Nxd5 4. Bf4 g6 5. Be2 Bg7 6. Nf3 Bg4 7. b4 Nf6 8. h3 Bxf3 9. Bxf3 a6 10. Nc3 O-O 11. 
Qc2 e", "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "(each line starting with ", "raw": "(each line starting with ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "inline_code", "value": null, "raw": "`1.`", "href": null, "resource": null, "url": null, "code": "1.", "user": null, "label": null, "lang": null }, { "type": "text", "value": " is a set of moves)", "raw": " is a set of moves)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can find a first pre trained version here:", "raw": "You can find a first pre trained version here:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/philipp-zettl/chessPT", "href": null, "resource": { "type": "model", "id": "philipp-zettl/chessPT", "discussionNum": null }, "url": "https://huggingface.co/philipp-zettl/chessPT", "code": null, "user": null, "label": null, "lang": null } ]
This is my first post, so I need to start with a bang! The people over at https://huggingface.co/Lichess published some amazing data sets over the past weeks, including a collection of >1M standard chess games (https://huggingface.co/datasets/Lichess/standard-chess-games). Finally it's time to revive my chess buddy project from back in 2021 🎉 So without any further ado... I'm currently training my first character level LLM, and to be quite frank, I'm pretty astonished with the quality of my testing samples. I'm using `e4 g6`, the Modern Defense (https://en.wikipedia.org/wiki/Modern_Defense) as a validation sample. My model currently predicts mostly `d4 Bg7` which are the strongest next moves for white and black. Now in between I see some results that take lower ranked moves, which makes me very excited. Once the pre-training is done for the base model, I want to run some fine tuning on more specific data sets, which are https://huggingface.co/datasets/Lichess/chess-openings https://huggingface.co/datasets/Lichess/chess-puzzles Here are some intermediate examples ``` Step 6000: 1. e4 g6 13. Rb1 d5 14. Bd3 Nxf3 15. Nxf3 Nxe3+ 16. Rxd3 Rxd3 17. Rxd6 Rhe8 18. Nd6 Rxd4 19. Rxd7+ Kxd7 20. Re7 Rxe7 21. Qxe7 1-0 Step 12000: 1. e4 g6 22. Be2 Re8 23. Kg2 1-0 1. d4 d5 2. c4 c6 3. Nf3 e6 4. dxe6 Qe7 5. Bb5+ Be8 6. Bxb7# 1-0 1. d4 d5 2. dxe5 Bd6 3. Nc3 h6 4. e4 Bf5 5. exf5 Nd7 6. exd5 Nxd5 7. Bxc4 Bxe2 8. f4 d4 9. Ng3 Bb4+ 10. Bxd4 Qxd4 11. Nfxe2 O-O-O 12. Ne6 Qf5 13. fxg4 Nxe5 Step 30000: 1. e4 g6 2. d4 Bg7 3. Nf3 d6 4. b3 e6 5. Bb2 f5 6. e5 c5 7. dxc5 dxc5 8. Nbd2 Nf6 9. Nce2 O-O 10. Qe2 c4 11. Na4 Bd6 12. f3 Ng4 13. fxg4 1-0 1. c4 c5 2. a3 Nc6 3. cxd5 Nxd5 4. Bf4 g6 5. Be2 Bg7 6. Nf3 Bg4 7. b4 Nf6 8. h3 Bxf3 9. Bxf3 a6 10. Nc3 O-O 11. Qc2 e ``` (each line starting with `1.` is a set of moves) You can find a first pre trained version here: https://huggingface.co/philipp-zettl/chessPT
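Since the post walks through training a character-level model on PGN move text, here is a small sketch of the data side: streaming a sample of games from the Lichess dataset and building a character vocabulary with encode/decode helpers. The column name `movetext` is an assumption (check the dataset card); everything else is standard `datasets` usage.

```python
# Hedged sketch: build a character-level vocabulary over PGN move text.
# Assumption: the Lichess dataset exposes the moves in a "movetext" column;
# adjust the column name to whatever the dataset card specifies.
from datasets import load_dataset

ds = load_dataset("Lichess/standard-chess-games", split="train", streaming=True)

# Sample a small number of games to keep the sketch fast.
games = []
for i, game in enumerate(ds):
    games.append(game["movetext"])
    if i >= 999:
        break
sample_text = "\n".join(games)

chars = sorted(set(sample_text))
stoi = {ch: i for i, ch in enumerate(chars)}  # char -> integer id
itos = {i: ch for ch, i in stoi.items()}      # integer id -> char

def encode(s: str) -> list[int]:
    return [stoi[c] for c in s]

def decode(ids: list[int]) -> str:
    return "".join(itos[i] for i in ids)

print(f"vocab size: {len(chars)}")
print(decode(encode("1. e4 g6 2. d4 Bg7")))
```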
{ "avatarUrl": "/avatars/67b2e111ee8541e8033dab5ee1ca0eb6.svg", "fullname": "PZ", "name": "philipp-zettl", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false }
[]
[]
[ { "reaction": "🔥", "users": [ "YaTharThShaRma999", "John6666", "osanseviero", "nicolay-r", "Nelathan", "louisbrulenaudet" ], "count": 6 } ]
2024-10-01T14:21:18.000Z
2024-10-02T06:14:27.211Z
[]
/posts/philipp-zettl/424032805322662
1,038
0
810375974822135
[ { "type": "text", "value": "Realtime Whisper Large v3 Turbo Demo:", "raw": "Realtime Whisper Large v3 Turbo Demo:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "It transcribes audio in about 0.3 seconds.", "raw": "It transcribes audio in about 0.3 seconds.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/KingNish/Realtime-whisper-large-v3-turbo", "href": null, "resource": { "type": "space", "id": "KingNish/Realtime-whisper-large-v3-turbo", "discussionNum": null }, "url": "https://huggingface.co/spaces/KingNish/Realtime-whisper-large-v3-turbo", "code": null, "user": null, "label": null, "lang": null } ]
Realtime Whisper Large v3 Turbo Demo: It transcribes audio in about 0.3 seconds. https://huggingface.co/spaces/KingNish/Realtime-whisper-large-v3-turbo
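For anyone who wants the same model outside the demo Space, a plain (non-realtime) transcription sketch with the `transformers` ASR pipeline is below. It assumes a local audio file at a placeholder path and uses the turbo checkpoint linked in a nearby post; any Whisper large-v3-turbo checkpoint should drop in the same way.

```python
# Hedged sketch: offline transcription with a Whisper large-v3-turbo checkpoint.
# "sample.wav" is a placeholder path; the model id is the repo linked in a
# nearby post and can be swapped for any other turbo checkpoint.
import torch
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="ylacombe/whisper-large-v3-turbo",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device="cuda:0" if torch.cuda.is_available() else "cpu",
)

# chunk_length_s lets the pipeline handle audio longer than Whisper's 30 s window.
result = asr("sample.wav", chunk_length_s=30, return_timestamps=True)
print(result["text"])
```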
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6612aedf09f16e7347dfa7e1/bPYjBXCedY_1fSIPjoBTY.jpeg", "fullname": "Nishith Jain", "name": "KingNish", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1079, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/6612aedf09f16e7347dfa7e1/3ki3wJOyBypp4Hs9SVib3.mp4" } ]
[]
[ { "reaction": "🔥", "users": [ "maggick", "YaTharThShaRma999", "John6666", "osanseviero", "Nephilimzz", "roshanchain" ], "count": 6 } ]
2024-10-01T13:50:03.000Z
2024-11-22T13:39:49.274Z
[]
/posts/KingNish/810375974822135
5,485
1
464439717862996
[ { "type": "text", "value": "🚀 OpenAI's new Whisper \"turbo\": 8x faster, 40% VRAM efficient, minimal accuracy loss. ", "raw": "🚀 OpenAI's new Whisper \"turbo\": 8x faster, 40% VRAM efficient, minimal accuracy loss. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔒 Run it locally in-browser for private transcriptions! Transcribe interviews, audio & video.", "raw": "🔒 Run it locally in-browser for private transcriptions! Transcribe interviews, audio & video.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚡️ 40 tokens/sec on my MacBook", "raw": "⚡️ 40 tokens/sec on my MacBook", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🔗 Try it: ", "raw": "🔗 Try it: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/webml-community/whisper-large-v3-turbo-webgpu?v2=", "href": null, "resource": { "type": "space", "id": "webml-community/whisper-large-v3-turbo-webgpu", "discussionNum": null }, "url": "https://huggingface.co/spaces/webml-community/whisper-large-v3-turbo-webgpu?v2=", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Model: ", "raw": "Model: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/ylacombe/whisper-large-v3-turbo", "href": "https://huggingface.co/ylacombe/whisper-large-v3-turbo", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
🚀 OpenAI's new Whisper "turbo": 8x faster, 40% VRAM efficient, minimal accuracy loss. 🔒 Run it locally in-browser for private transcriptions! Transcribe interviews, audio & video. ⚡️ 40 tokens/sec on my MacBook 🔗 Try it: https://huggingface.co/spaces/webml-community/whisper-large-v3-turbo-webgpu?v2= Model: https://huggingface.co/ylacombe/whisper-large-v3-turbo
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/KDuEw9WdTKW2n2fDXzPhr.mp4" } ]
[]
[ { "reaction": "👍", "users": [ "John6666", "osanseviero", "andersoncliffb", "gregrenard", "tuanlda78202", "den0620", "aidystark", "RayNene" ], "count": 8 }, { "reaction": "🧠", "users": [ "aidystark" ], "count": 1 } ]
2024-10-01T13:42:26.000Z
2024-10-01T13:42:26.489Z
[]
/posts/fdaudens/464439717862996
975
0
795544446957180
[ { "type": "text", "value": "NVIDIA just dropped a gigantic multimodal model called NVLM 72B 🦖 ", "raw": "NVIDIA just dropped a gigantic multimodal model called NVLM 72B 🦖 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/nvidia/NVLM-D-72B", "href": null, "resource": { "type": "model", "id": "nvidia/NVLM-D-72B", "discussionNum": null }, "url": "https://huggingface.co/nvidia/NVLM-D-72B", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Paper page ", "raw": "Paper page ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2409.11402", "href": null, "resource": { "type": "paper", "id": "2409.11402", "discussionNum": null }, "url": "https://huggingface.co/papers/2409.11402", "code": null, "user": null, "label": "NVLM: Open Frontier-Class Multimodal LLMs (2409.11402)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The paper contains many ablation studies on various ways to use the LLM backbone 👇🏻", "raw": "The paper contains many ablation studies on various ways to use the LLM backbone 👇🏻", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🦩 Flamingo-like cross-attention (NVLM-X)", "raw": "🦩 Flamingo-like cross-attention (NVLM-X)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🌋 Llava-like concatenation of image and text embeddings to a decoder-only model (NVLM-D)", "raw": "🌋 Llava-like concatenation of image and text embeddings to a decoder-only model (NVLM-D)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✨ a hybrid architecture (NVLM-H)", "raw": "✨ a hybrid architecture (NVLM-H)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": 
"new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Checking evaluations, NVLM-D and NVLM-H are best or second best compared to other models 👏", "raw": "Checking evaluations, NVLM-D and NVLM-H are best or second best compared to other models 👏", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The released model is NVLM-D based on Qwen-2 Instruct, aligned with InternViT-6B using a huge mixture of different datasets", "raw": "The released model is NVLM-D based on Qwen-2 Instruct, aligned with InternViT-6B using a huge mixture of different datasets", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "You can easily use this model by loading it through transformers' AutoModel 😍", "raw": "You can easily use this model by loading it through transformers' AutoModel 😍", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
NVIDIA just dropped a gigantic multimodal model called NVLM 72B 🦖 https://huggingface.co/nvidia/NVLM-D-72B Paper page https://huggingface.co/papers/2409.11402 The paper contains many ablation studies on various ways to use the LLM backbone 👇🏻 🦩 Flamingo-like cross-attention (NVLM-X) 🌋 Llava-like concatenation of image and text embeddings to a decoder-only model (NVLM-D) ✨ a hybrid architecture (NVLM-H) Checking evaluations, NVLM-D and NVLM-H are best or second best compared to other models 👏 The released model is NVLM-D based on Qwen-2 Instruct, aligned with InternViT-6B using a huge mixture of different datasets You can easily use this model by loading it through transformers' AutoModel 😍
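The AutoModel route mentioned at the end of the post looks roughly like the sketch below. It is an assumption-laden loading snippet, not a full inference example: `trust_remote_code=True` is assumed because the repo ships custom modeling code, `device_map="auto"` assumes `accelerate` is installed and you have enough GPU memory for a 72B model, and the multimodal preprocessing and chat interface follow the model card rather than being reproduced here.

```python
# Hedged sketch of loading NVLM-D-72B via AutoModel, as the post suggests.
# Assumptions: custom modeling code on the Hub (trust_remote_code=True),
# accelerate installed for device_map="auto", and enough GPU memory for 72B.
import torch
from transformers import AutoModel, AutoTokenizer

model_id = "nvidia/NVLM-D-72B"

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModel.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    device_map="auto",
    trust_remote_code=True,
).eval()

# Text-only sanity check; image inputs and generation follow the chat
# interface documented on the model card.
inputs = tokenizer("Describe what NVLM stands for.", return_tensors="pt")
print({k: v.shape for k, v in inputs.items()})
```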
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png", "fullname": "Merve Noyan", "name": "merve", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 5589, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/EbdEkMtTGuwC9-d_0O9qX.png" } ]
[]
[ { "reaction": "🔥", "users": [ "louisbrulenaudet", "iojvsuynv", "KingNish", "DmitryRyumin", "namelessai", "AtAndDev", "John6666", "garrethlee", "osanseviero", "djuna", "gizemsarsinlar" ], "count": 11 } ]
2024-10-01T13:10:03.000Z
2024-10-01T13:10:03.441Z
[]
/posts/merve/795544446957180
2,713
0
354641762457342
[ { "type": "text", "value": "𝗔𝗱𝗱 𝘀𝗼𝘂𝗿𝗰𝗲 𝗵𝗶𝗴𝗵𝗹𝗶𝗴𝗵𝘁𝗶𝗻𝗴 𝘁𝗼 𝘆𝗼𝘂𝗿 𝗥𝗔𝗚 𝘀𝘆𝘀𝘁𝗲𝗺! 📄💡", "raw": "𝗔𝗱𝗱 𝘀𝗼𝘂𝗿𝗰𝗲 𝗵𝗶𝗴𝗵𝗹𝗶𝗴𝗵𝘁𝗶𝗻𝗴 𝘁𝗼 𝘆𝗼𝘂𝗿 𝗥𝗔𝗚 𝘀𝘆𝘀𝘁𝗲𝗺! 📄💡", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "RAG systems are supposed to make your LLM's answer more trustworthy, by inserting in the prompt some supporting documents from a knowledge base : we say that we're \"adding some context\". ", "raw": "RAG systems are supposed to make your LLM's answer more trustworthy, by inserting in the prompt some supporting documents from a knowledge base : we say that we're \"adding some context\". ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👎 But if you don't know which part of the answer has been generated based on which input tokens, it's hard to tell wether it was effectively grounded in the context knowledge or not!", "raw": "👎 But if you don't know which part of the answer has been generated based on which input tokens, it's hard to tell wether it was effectively grounded in the context knowledge or not!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "🤔 I've been working on the question: is it possible to add notes to the answer linking to which part of the context they're generated from?", "raw": "🤔 I've been working on the question: is it possible to add notes to the answer linking to which part of the context they're generated from?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And I've found a great solution: a great technique called Layer-wise Relevance Propagation (LRP), showcased in a paper at ICML `24 by Reduan Achtibat et al allows, allows to precisely score how important each input token was in generating your output! They've made it into a library called LXT.", "raw": "And I've found a great solution: a great technique called Layer-wise Relevance Propagation (LRP), showcased in a paper at ICML `24 by Reduan Achtibat et al allows, allows to precisely score how important each input token was in generating your output! 
They've made it into a library called LXT.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📊 For each generated output token, LXT gives you attribution scores for each input token.", "raw": "📊 For each generated output token, LXT gives you attribution scores for each input token.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⚙️ So I've worked a bit more on aggregating these scores into meaningful spans between successive input and output tokens, and I finally obtained my desired result: RAG with source highlighting!", "raw": "⚙️ So I've worked a bit more on aggregating these scores into meaningful spans between successive input and output tokens, and I finally obtained my desired result: RAG with source highlighting!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Try the demo here 👉 ", "raw": "Try the demo here 👉 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/spaces/m-ric/rag_highlights", "href": null, "resource": { "type": "space", "id": "m-ric/rag_highlights", "discussionNum": null }, "url": "https://huggingface.co/spaces/m-ric/rag_highlights", "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Caveats:", "raw": "Caveats:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- It slows down generation (for now quite a lot, could hopefully be reduced a lot)", "raw": "- It slows down generation (for now quite a lot, could hopefully be reduced a lot)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- 
For now it supports only specific models: Llama models and Mixtral", "raw": "- For now it supports only specific models: Llama models and Mixtral", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If there's enough interest in this solution, I can improve it further and spin it off into a specific library for RAG! 🚀", "raw": "If there's enough interest in this solution, I can improve it further and spin it off into a specific library for RAG! 🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
𝗔𝗱𝗱 𝘀𝗼𝘂𝗿𝗰𝗲 𝗵𝗶𝗴𝗵𝗹𝗶𝗴𝗵𝘁𝗶𝗻𝗴 𝘁𝗼 𝘆𝗼𝘂𝗿 𝗥𝗔𝗚 𝘀𝘆𝘀𝘁𝗲𝗺! 📄💡

RAG systems are supposed to make your LLM's answer more trustworthy, by inserting into the prompt some supporting documents from a knowledge base: we say that we're "adding some context". 

👎 But if you don't know which part of the answer has been generated based on which input tokens, it's hard to tell whether it was effectively grounded in the context knowledge or not!

🤔 I've been working on the question: is it possible to add notes to the answer linking to which part of the context they're generated from?

And I've found a great solution: a technique called Layer-wise Relevance Propagation (LRP), showcased in a paper at ICML '24 by Reduan Achtibat et al., which allows you to precisely score how important each input token was in generating your output! They've made it into a library called LXT.

📊 For each generated output token, LXT gives you attribution scores for each input token.

⚙️ So I've worked a bit more on aggregating these scores into meaningful spans between successive input and output tokens, and I finally obtained my desired result: RAG with source highlighting!

Try the demo here 👉 https://huggingface.co/spaces/m-ric/rag_highlights

Caveats:
- It slows down generation (for now quite a lot, but this could hopefully be reduced)
- For now it supports only specific models: Llama models and Mixtral

If there's enough interest in this solution, I can improve it further and spin it off into a specific library for RAG! 🚀
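The aggregation step described in the post can be illustrated without reproducing LXT's actual API (which is not shown here). The sketch below assumes you already have a relevance matrix R, with one row per generated token and one column per input token, plus character offsets for the input tokens, and it merges high-relevance tokens into contiguous spans to highlight; all function names and the threshold are hypothetical choices.

```python
# Hypothetical sketch of the aggregation step: given relevance scores R[i, j]
# (importance of input token j for output token i, e.g. from LRP via a library
# such as LXT, whose real API is not reproduced here), merge high-relevance
# input tokens into contiguous character spans of the context to highlight.
import numpy as np

def relevant_spans(relevance_row: np.ndarray, offsets: list[tuple[int, int]],
                   threshold: float = 0.5) -> list[tuple[int, int]]:
    """Merge char offsets of input tokens whose normalized relevance exceeds threshold."""
    scores = relevance_row / (np.abs(relevance_row).max() + 1e-9)
    spans: list[tuple[int, int]] = []
    for (start, end), score in zip(offsets, scores):
        if score < threshold:
            continue
        if spans and start <= spans[-1][1] + 1:   # adjacent to previous span: merge
            spans[-1] = (spans[-1][0], end)
        else:
            spans.append((start, end))
    return spans

# Toy example: 3 output tokens, 5 input tokens with char offsets into the context.
R = np.array([[0.1, 0.9, 0.8, 0.0, 0.1],
              [0.0, 0.2, 0.1, 0.9, 0.7],
              [0.6, 0.1, 0.0, 0.2, 0.1]])
offsets = [(0, 4), (5, 11), (12, 18), (19, 27), (28, 33)]
for i, row in enumerate(R):
    print(f"output token {i}: highlight context chars {relevant_spans(row, offsets)}")
```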
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg", "fullname": "Aymeric Roucher", "name": "m-ric", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 494, "isFollowing": false }
[ { "type": "video", "url": "https://cdn-uploads.huggingface.co/production/uploads/63d10d4e8eaa4831005e92b5/qsb1uGrkgioqy9AIZ7NiF.mp4" } ]
[]
[ { "reaction": "🚀", "users": [ "louisbrulenaudet", "KingNish", "brianjking", "John6666", "Xdotnet", "StephenGenusa" ], "count": 6 }, { "reaction": "👍", "users": [ "brianjking", "aitimer", "tiendung", "chadrick", "VicidiLochi" ], "count": 5 }, { "reaction": "😎", "users": [ "tiendung" ], "count": 1 }, { "reaction": "🔥", "users": [ "AIAJAY" ], "count": 1 } ]
2024-10-01T12:40:36.000Z
2024-10-01T12:40:50.581Z
[]
/posts/m-ric/354641762457342
1,276
0
414549951997367
[ { "type": "text", "value": "We've got a number of great community meetups coming up again where we'll be discussing the basics of getting started and using Argilla for TextCat, TokenCat/NER and RAG. We will walk you through common scenario's and everything you might need to know to get your projects started. ", "raw": "We've got a number of great community meetups coming up again where we'll be discussing the basics of getting started and using Argilla for TextCat, TokenCat/NER and RAG. We will walk you through common scenario's and everything you might need to know to get your projects started. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "First meetup that is coming up: Setting up a text classification project using Argilla and SetFit! ", "raw": "First meetup that is coming up: Setting up a text classification project using Argilla and SetFit! ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Deploy Argilla on Spaces", "raw": "Deploy Argilla on Spaces", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Vibe check your dataset", "raw": "Vibe check your dataset", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Configure and create an Argilla dataset", "raw": "Configure and create an Argilla dataset", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Add records", "raw": "Add records", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Add zero-shot suggestions", "raw": "Add zero-shot suggestions", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Evaluate model suggestions in Argilla", "raw": "Evaluate model suggestions in Argilla", "href": null, 
"resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Train a SetFit model", "raw": "Train a SetFit model", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Hope to see all of you guys there and looking forward to your questions and AI use cases. Don't be shy about bringing your own issues and questions to the table. We would love to answer them.", "raw": "Hope to see all of you guys there and looking forward to your questions and AI use cases. Don't be shy about bringing your own issues and questions to the table. We would love to answer them.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Sign up here: ", "raw": "Sign up here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://lu.ma/31mecp34", "href": "https://lu.ma/31mecp34", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
We've got a number of great community meetups coming up again where we'll be discussing the basics of getting started and using Argilla for TextCat, TokenCat/NER and RAG. We will walk you through common scenarios and everything you might need to know to get your projects started. The first meetup coming up: Setting up a text classification project using Argilla and SetFit! Deploy Argilla on Spaces Vibe check your dataset Configure and create an Argilla dataset Add records Add zero-shot suggestions Evaluate model suggestions in Argilla Train a SetFit model Hope to see all of you guys there and looking forward to your questions and AI use cases. Don't be shy about bringing your own issues and questions to the table. We would love to answer them. Sign up here: https://lu.ma/31mecp34
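For the last step of that workflow (training a SetFit model on the annotated records), a minimal sketch looks roughly like this, assuming the setfit library's v1.x Trainer API and a toy in-memory dataset standing in for records exported from Argilla:

```python
from datasets import Dataset
from setfit import SetFitModel, Trainer, TrainingArguments

# Toy labeled data standing in for records annotated in Argilla.
train_dataset = Dataset.from_dict({
    "text": ["great product, works as advertised", "arrived broken, very disappointed"],
    "label": [1, 0],
})

# Few-shot classifier built on top of a sentence-transformers backbone.
model = SetFitModel.from_pretrained("sentence-transformers/paraphrase-mpnet-base-v2")

trainer = Trainer(
    model=model,
    args=TrainingArguments(batch_size=16, num_epochs=1),
    train_dataset=train_dataset,
)
trainer.train()

print(model.predict(["the screen cracked after one day"]))
```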
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg", "fullname": "David Berenstein", "name": "davidberenstein1957", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 167, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "not-lain", "louisbrulenaudet" ], "count": 3 } ]
2024-10-01T07:12:25.000Z
2024-10-01T14:52:21.535Z
[]
/posts/davidberenstein1957/414549951997367
851
1
560805333820715
[ { "type": "text", "value": "What a great milestone to celebrate! The huggingface_hub library is slowly becoming a cornerstone of the Python ML ecosystem when it comes to interacting with the ", "raw": "What a great milestone to celebrate! The huggingface_hub library is slowly becoming a cornerstone of the Python ML ecosystem when it comes to interacting with the ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@huggingface", "href": null, "resource": null, "url": null, "code": null, "user": "huggingface", "label": null, "lang": null }, { "type": "text", "value": " Hub. It wouldn't be there without the hundreds of community contributions and feedback! No matter if you are loading a model, sharing a dataset, running remote inference or starting jobs on our infra, you are for sure using it! And this is only the beginning so give a star if you wanna follow the project 👉 ", "raw": " Hub. It wouldn't be there without the hundreds of community contributions and feedback! No matter if you are loading a model, sharing a dataset, running remote inference or starting jobs on our infra, you are for sure using it! And this is only the beginning so give a star if you wanna follow the project 👉 ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/huggingface/huggingface_hub", "href": "https://github.com/huggingface/huggingface_hub", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
What a great milestone to celebrate! The huggingface_hub library is slowly becoming a cornerstone of the Python ML ecosystem when it comes to interacting with the @huggingface Hub. It wouldn't be there without the hundreds of community contributions and feedback! No matter if you are loading a model, sharing a dataset, running remote inference or starting jobs on our infra, you are for sure using it! And this is only the beginning so give a star if you wanna follow the project 👉 https://github.com/huggingface/huggingface_hub
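As a quick illustration of the everyday calls the post alludes to (repo id, model id and prompt are just placeholders, and serverless inference availability depends on the model):

```python
from huggingface_hub import hf_hub_download, InferenceClient

# Download a single file from a repo on the Hub (cached locally).
config_path = hf_hub_download(repo_id="bert-base-uncased", filename="config.json")
print(config_path)

# Run remote inference without hosting the model yourself.
client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.2")
print(client.text_generation("The huggingface_hub library is", max_new_tokens=20))
```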
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659336880158-6273f303f6d63a28483fde12.png", "fullname": "Lucain Pouget", "name": "Wauplin", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 157, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/6273f303f6d63a28483fde12/p4wJvcjldQ7V0--JxlSvE.gif" } ]
[]
[ { "reaction": "🤗", "users": [ "John6666", "alielfilali01", "louisbrulenaudet", "amyeroberts", "nicolay-r", "philipp-zettl", "jsulz", "dzyla", "MmdMaTriS", "lbourdois", "StephenGenusa", "Namgyu-Youn" ], "count": 12 }, { "reaction": "🔥", "users": [ "philipp-zettl", "rwightman", "John6666", "jsulz", "lbourdois" ], "count": 5 }, { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-10-01T06:44:43.000Z
2024-10-01T09:19:43.768Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false } ]
/posts/Wauplin/560805333820715
2,721
1
230081471483502
[ { "type": "text", "value": "ICML 2024 Tutorial: Physics of Language Models", "raw": "ICML 2024 Tutorial: Physics of Language Models", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://www.youtube.com/watch?v=yBL7J0kgldU", "href": "https://www.youtube.com/watch?v=yBL7J0kgldU", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2309.14316", "href": null, "resource": { "type": "paper", "id": "2309.14316", "discussionNum": null }, "url": "https://huggingface.co/papers/2309.14316", "code": null, "user": null, "label": "Physics of Language Models: Part 3.1, Knowledge Storage and Extraction (2309.14316)", "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Series bài nói về việc hiểu cách LLM hoạt động. Rất thú vị, họ làm thí nghiệm kiểm soát 100% cách huấn luyện model và phát hiện rằng nếu pretrain không chứa dạng dữ liệu extraction (QA instruction, hoặc các dạng dữ liệu mà tác giả gọi là knowledge augmentation) thì mặc dù có qua instruct finetune thì LLM cũng không thể học skill knowledge extraction. => đặt lại câu hỏi liệu cách pretrain rồi mới SFT như hiện tại đã thực sự tốt chưa?", "raw": "Series bài nói về việc hiểu cách LLM hoạt động. Rất thú vị, họ làm thí nghiệm kiểm soát 100% cách huấn luyện model và phát hiện rằng nếu pretrain không chứa dạng dữ liệu extraction (QA instruction, hoặc các dạng dữ liệu mà tác giả gọi là knowledge augmentation) thì mặc dù có qua instruct finetune thì LLM cũng không thể học skill knowledge extraction. => đặt lại câu hỏi liệu cách pretrain rồi mới SFT như hiện tại đã thực sự tốt chưa?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Họ đã thử vài trăm thí nghiệm với các loại kiến trúc mô hình, độ to nhỏ, ... và đều ra kết quả như nhau.", "raw": "Họ đã thử vài trăm thí nghiệm với các loại kiến trúc mô hình, độ to nhỏ, ... 
và đều ra kết quả như nhau.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "KNOWLEDGE AUGMENTATION (data augmentation)", "raw": "KNOWLEDGE AUGMENTATION (data augmentation)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Nếu bạn không mix instruct data với pre-train data (mix training) tốt nhất hãy áp dụng knowledge augmentation. Tức là cùng một câu đó nhưng diễn tả lại bằng nhiều cách khác nhau.", "raw": "Nếu bạn không mix instruct data với pre-train data (mix training) tốt nhất hãy áp dụng knowledge augmentation. Tức là cùng một câu đó nhưng diễn tả lại bằng nhiều cách khác nhau.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "KNOWLEDGE MANIPULATION", "raw": "KNOWLEDGE MANIPULATION", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "ví dụ giả sử đã biết (đc huấn luyện) tiểu sử của A (bao gồm ngày tháng năm sinh) và hỏi A sinh tháng chẵn hay lẻ (50% cơ hội trả lời đúng). Nếu không sử dụng CoT (gợi nhớ lại kiến thức, xem A sinh tháng mấy) thì kết quả là model không làm được. => CoT (gợi nhớ kiến thức đã học) rất quan trọng với knowledge manipulation (phân loại, so sánh, xếp hạng ...)", "raw": "ví dụ giả sử đã biết (đc huấn luyện) tiểu sử của A (bao gồm ngày tháng năm sinh) và hỏi A sinh tháng chẵn hay lẻ (50% cơ hội trả lời đúng). Nếu không sử dụng CoT (gợi nhớ lại kiến thức, xem A sinh tháng mấy) thì kết quả là model không làm được. => CoT (gợi nhớ kiến thức đã học) rất quan trọng với knowledge manipulation (phân loại, so sánh, xếp hạng ...)", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
ICML 2024 Tutorial: Physics of Language Models https://www.youtube.com/watch?v=yBL7J0kgldU https://huggingface.co/papers/2309.14316 A series of talks on understanding how LLMs work. Very interesting: they run experiments with 100% control over how the model is trained, and find that if the pretraining data contains no extraction-style data (QA instructions, or what the authors call knowledge augmentation), then even after instruction finetuning the LLM cannot learn the knowledge-extraction skill. => This raises the question of whether the current pretrain-then-SFT recipe is really the best approach. They ran several hundred experiments across model architectures and sizes, and all gave the same result. KNOWLEDGE AUGMENTATION (data augmentation) If you don't mix instruct data into the pre-train data (mix training), it's best to apply knowledge augmentation, i.e. restate the same fact in many different ways. KNOWLEDGE MANIPULATION For example, suppose the model already knows (was trained on) A's biography (including date of birth) and is asked whether A was born in an even or odd month (a 50% chance of answering correctly by guessing). Without CoT (first recalling the knowledge of which month A was born in), the model simply cannot do it. => CoT (recalling learned knowledge) is very important for knowledge manipulation (classification, comparison, ranking, ...)
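To make the manipulation probe concrete, the two question styles contrasted above look roughly like this (the wording and the synthetic biography name are illustrative, not lifted from the tutorial):

```python
# Direct probe: the model must manipulate a stored fact in a single step.
direct_prompt = "Was Anya Briar Forger born in an even month? Answer Yes or No."

# CoT-style probe: the model first recalls the fact, then manipulates it.
cot_prompt = (
    "Which month was Anya Briar Forger born in? "
    "State the month first, then say whether it is an even month (Yes or No)."
)

# The controlled experiments report near-chance accuracy for the direct probe,
# and much better accuracy once the fact is recalled explicitly first.
```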
{ "avatarUrl": "/avatars/5071c5b861341c0dcfcf6ac86327701f.svg", "fullname": "Tien Dung", "name": "tiendung", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 9, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60c953fa9cacafb192d805fd/Kb1TZhqTUEfWgUxki6MpW.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60c953fa9cacafb192d805fd/WTO_5NctHKxeIw-qlL22p.webp" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/60c953fa9cacafb192d805fd/Dy3ffyEmN6diRkrnn2rte.webp" } ]
[]
[ { "reaction": "👀", "users": [ "John6666", "skiiiw", "tuanio" ], "count": 3 }, { "reaction": "🤝", "users": [ "tuanio" ], "count": 1 } ]
2024-10-01T01:23:11.000Z
2024-10-01T04:21:11.301Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/yN5siPIMNlp8JYmKAzwXZ.jpeg", "fullname": "Czerska", "name": "QueenOfEarth", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false } ]
/posts/tiendung/230081471483502
1,174
1
550576377620991
[ { "type": "text", "value": "Want a full walkthrough on how to convert a vertex-colored mesh to a UV-mapped textured mesh?", "raw": "Want a full walkthrough on how to convert a vertex-colored mesh to a UV-mapped textured mesh?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "New Blog Post ", "raw": "New Blog Post ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://huggingface.co/blog/vertex-colored-to-textured-mesh", "href": "https://huggingface.co/blog/vertex-colored-to-textured-mesh", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Want a full walkthrough on how to convert a vertex-colored mesh to a UV-mapped textured mesh? New Blog Post https://huggingface.co/blog/vertex-colored-to-textured-mesh
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672164046414-624b4a964056e2a6914a05c5.png", "fullname": "Dylan Ebert", "name": "dylanebert", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 1764, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-09-30T20:35:15.000Z
2024-10-01T14:52:21.535Z
[]
/posts/dylanebert/550576377620991
1,244
1
353838049420260
[ { "type": "text", "value": "📢 Having a massive amount of data to bulk the remotely accessed LLM 🤖 with Chain-of-Though (CoT) 🔗 might result in connection loss.", "raw": "📢 Having a massive amount of data to bulk the remotely accessed LLM 🤖 with Chain-of-Though (CoT) 🔗 might result in connection loss.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The latter may lead to Python Exception 💥 and challenges with generated content restoration.", "raw": "The latter may lead to Python Exception 💥 and challenges with generated content restoration.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "To address on this problem, sharing the no-strings / tiny framework that exploits SQLite3 for caching each query. ", "raw": "To address on this problem, sharing the no-strings / tiny framework that exploits SQLite3 for caching each query. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Such caching allows smooth relaunch in the case of any data loss. ☕ ", "raw": "Such caching allows smooth relaunch in the case of any data loss. ☕ ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "With that, happy to share bulk-chain project and more on that within links below:", "raw": "With that, happy to share bulk-chain project and more on that within links below:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "⭐ github: ", "raw": "⭐ github: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/bulk-chain", "href": "https://github.com/nicolay-r/bulk-chain", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "📦 PyPI: ", "raw": "📦 PyPI: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://pypi.org/project/bulk-chain/", "href": "https://pypi.org/project/bulk-chain/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { 
"type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "There are three steps to quickstart ", "raw": "There are three steps to quickstart ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "(see them in attachment 👇):", "raw": "(see them in attachment 👇):", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✅ 1. Install library", "raw": "✅ 1. Install library", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✅ 2. Declare CoT-schema in json file 📄", "raw": "✅ 2. Declare CoT-schema in json file 📄", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "✅ 3. Wrap your transformer or use existed adapters", "raw": "✅ 3. 
Wrap your transformer or use existed adapters", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/bulk-chain/tree/master/ext", "href": "https://github.com/nicolay-r/bulk-chain/tree/master/ext", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "For example, here is the provider for Replicate IO service (", "raw": "For example, here is the provider for Replicate IO service (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://replicate.com/", "href": "https://replicate.com/", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "):", "raw": "):", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://github.com/nicolay-r/bulk-chain/blob/master/ext/replicate.py", "href": "https://github.com/nicolay-r/bulk-chain/blob/master/ext/replicate.py", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "that supports one of the largers publicly available LLaMA-3.1-405B:", "raw": "that supports one of the largers publicly available LLaMA-3.1-405B:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct", "href": null, "resource": { "type": "model", "id": "meta-llama/Llama-3.1-405B-Instruct", "discussionNum": null }, "url": "https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct", "code": null, "user": null, "label": null, "lang": null } ]
📢 Sending a massive amount of data in bulk to a remotely accessed LLM 🤖 with Chain-of-Thought (CoT) 🔗 might result in connection loss. The latter may lead to a Python exception 💥 and challenges with restoring the generated content. To address this problem, I'm sharing a no-strings, tiny framework that uses SQLite3 for caching each query. Such caching allows a smooth relaunch in the case of any data loss. ☕ With that, happy to share the bulk-chain project and more on it within the links below: ⭐ github: https://github.com/nicolay-r/bulk-chain 📦 PyPI: https://pypi.org/project/bulk-chain/ There are three steps to quickstart (see them in the attachment 👇): ✅ 1. Install the library ✅ 2. Declare the CoT schema in a JSON file 📄 ✅ 3. Wrap your transformer or use the existing adapters https://github.com/nicolay-r/bulk-chain/tree/master/ext For example, here is the provider for the Replicate IO service (https://replicate.com/): https://github.com/nicolay-r/bulk-chain/blob/master/ext/replicate.py which supports one of the largest publicly available models, LLaMA-3.1-405B: https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct
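The core caching mechanism is simple enough to sketch with the standard library alone; this illustrates the idea described above, not bulk-chain's actual API:

```python
import sqlite3

# Every prompt/response pair is persisted in SQLite3, so a crashed run can be
# relaunched and will skip everything that was already answered.
conn = sqlite3.connect("llm_cache.db")
conn.execute("CREATE TABLE IF NOT EXISTS cache (prompt TEXT PRIMARY KEY, response TEXT)")

def cached_query(prompt: str, llm_call) -> str:
    row = conn.execute("SELECT response FROM cache WHERE prompt = ?", (prompt,)).fetchone()
    if row is not None:
        return row[0]                # already answered in a previous run
    response = llm_call(prompt)      # remote call that may fail mid-run
    conn.execute("INSERT INTO cache VALUES (?, ?)", (prompt, response))
    conn.commit()                    # persist immediately, before the next query
    return response
```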
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false }
[ { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/SdToS0v1g_btt_moDphtd.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/LisvA3xRE8GvGRDA30HyE.png" }, { "type": "image", "url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/colDSaMkm5tZj_lZGquWg.png" } ]
[]
[ { "reaction": "👀", "users": [ "John6666", "den0620" ], "count": 2 }, { "reaction": "🔥", "users": [ "jharshraj" ], "count": 1 } ]
2024-09-30T20:23:20.000Z
2024-10-01T12:31:20.800Z
[ { "avatarUrl": "/avatars/03206d89acfae19fa85f7e37d030d920.svg", "fullname": "harsh raj", "name": "jharshraj", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg", "fullname": "Nicolay Rusnachenko", "name": "nicolay-r", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 49, "isFollowing": false } ]
/posts/nicolay-r/353838049420260
1,037
4
731490421648414
[ { "type": "text", "value": "I worked on \"LML: Language Model Learning a Dataset for Data-Augmented Prediction\".", "raw": "I worked on \"LML: Language Model Learning a Dataset for Data-Augmented Prediction\".", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Why LML & DAP:", "raw": "Why LML & DAP:", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- This project introduces a new approach to using Large Language Models (LLMs) for classification tasks in an explainable way.", "raw": "- This project introduces a new approach to using Large Language Models (LLMs) for classification tasks in an explainable way.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Unlike ML models that rely heavily on data cleaning and feature engineering, this method streamlines the process using LLMs.", "raw": "- Unlike ML models that rely heavily on data cleaning and feature engineering, this method streamlines the process using LLMs.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Please upvote here: ", "raw": "Please upvote here: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/papers/2409.18957", "href": null, "resource": { "type": "paper", "id": "2409.18957", "discussionNum": null }, "url": "https://huggingface.co/papers/2409.18957", "code": null, "user": null, "label": "LML: Language Model Learning a Dataset for Data-Augmented Prediction (2409.18957)", "lang": null } ]
I worked on "LML: Language Model Learning a Dataset for Data-Augmented Prediction". Why LML & DAP: - This project introduces a new approach to using Large Language Models (LLMs) for classification tasks in an explainable way. - Unlike ML models that rely heavily on data cleaning and feature engineering, this method streamlines the process using LLMs. Please upvote here: https://huggingface.co/papers/2409.18957
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66123b20ceb89d45d692fc5f/89nz7lbvBOla-CzkHYWlp.png", "fullname": "Praneeth", "name": "prane-eth", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": null, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "nicolay-r" ], "count": 2 } ]
2024-09-30T15:24:04.000Z
2024-09-30T15:24:04.082Z
[]
/posts/prane-eth/731490421648414
902
0
749385335939925
[ { "type": "text", "value": "Visionary Walter Murch (editor for Francis Ford Coppola), in 1999: ", "raw": "Visionary Walter Murch (editor for Francis Ford Coppola), in 1999: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "“ So let's suppose a technical apotheosis some time in the middle of the 21st century, when it somehow becomes possible for one person to make an entire feature film, with virtual actors. Would this be a good thing?", "raw": "“ So let's suppose a technical apotheosis some time in the middle of the 21st century, when it somehow becomes possible for one person to make an entire feature film, with virtual actors. Would this be a good thing?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "If the history of oil painting is any guide, the broadest answer would be yes, with the obvious caution to keep a wary eye on the destabilizing effect of following too intently a hermetically personal vision. One need only look at the unraveling of painting or classical music in the 20th century to see the risks.", "raw": "If the history of oil painting is any guide, the broadest answer would be yes, with the obvious caution to keep a wary eye on the destabilizing effect of following too intently a hermetically personal vision. One need only look at the unraveling of painting or classical music in the 20th century to see the risks.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Let's go even further, and force the issue to its ultimate conclusion by supposing the diabolical invention of a black box that could directly convert a single person's thoughts into a viewable cinematic reality. You would attach a series of electrodes to various points on your skull and simply think the film into existence.", "raw": "Let's go even further, and force the issue to its ultimate conclusion by supposing the diabolical invention of a black box that could directly convert a single person's thoughts into a viewable cinematic reality. 
You would attach a series of electrodes to various points on your skull and simply think the film into existence.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "And since we are time-traveling, let us present this hypothetical invention as a Faustian bargain to the future filmmakers of the 21st century. If this box were offered by some mysterious cloaked figure in exchange for your eternal soul, would you take it?", "raw": "And since we are time-traveling, let us present this hypothetical invention as a Faustian bargain to the future filmmakers of the 21st century. If this box were offered by some mysterious cloaked figure in exchange for your eternal soul, would you take it?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "The kind of filmmakers who would accept, even leap, at the offer are driven by the desire to see their own vision on screen in as pure a form as possible. They accept present levels of collaboration as the evil necessary to achieve this vision. Alfred Hitchcock, I imagine, would be one of them, judging from his description of the creative process: \"The film is already made in my head before we start shooting.\"”", "raw": "The kind of filmmakers who would accept, even leap, at the offer are driven by the desire to see their own vision on screen in as pure a form as possible. They accept present levels of collaboration as the evil necessary to achieve this vision. Alfred Hitchcock, I imagine, would be one of them, judging from his description of the creative process: \"The film is already made in my head before we start shooting.\"”", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "—", "raw": "—", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Read \"A Digital Cinema of the Mind? Could Be\" by Walter Murch: ", "raw": "Read \"A Digital Cinema of the Mind? 
Could Be\" by Walter Murch: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://archive.nytimes.com/www.nytimes.com/library/film/050299future-film.html", "href": "https://archive.nytimes.com/www.nytimes.com/library/film/050299future-film.html", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Visionary Walter Murch (editor for Francis Ford Coppola), in 1999: “ So let's suppose a technical apotheosis some time in the middle of the 21st century, when it somehow becomes possible for one person to make an entire feature film, with virtual actors. Would this be a good thing? If the history of oil painting is any guide, the broadest answer would be yes, with the obvious caution to keep a wary eye on the destabilizing effect of following too intently a hermetically personal vision. One need only look at the unraveling of painting or classical music in the 20th century to see the risks. Let's go even further, and force the issue to its ultimate conclusion by supposing the diabolical invention of a black box that could directly convert a single person's thoughts into a viewable cinematic reality. You would attach a series of electrodes to various points on your skull and simply think the film into existence. And since we are time-traveling, let us present this hypothetical invention as a Faustian bargain to the future filmmakers of the 21st century. If this box were offered by some mysterious cloaked figure in exchange for your eternal soul, would you take it? The kind of filmmakers who would accept, even leap, at the offer are driven by the desire to see their own vision on screen in as pure a form as possible. They accept present levels of collaboration as the evil necessary to achieve this vision. Alfred Hitchcock, I imagine, would be one of them, judging from his description of the creative process: "The film is already made in my head before we start shooting."” — Read "A Digital Cinema of the Mind? Could Be" by Walter Murch: https://archive.nytimes.com/www.nytimes.com/library/film/050299future-film.html
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61868ce808aae0b5499a2a95/F6BA0anbsoY_Z7M1JrwOe.jpeg", "fullname": "Sylvain Filoni", "name": "fffiloni", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 5185, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "louisbrulenaudet" ], "count": 2 }, { "reaction": "🔥", "users": [ "edranrekzo", "darduf" ], "count": 2 }, { "reaction": "🚀", "users": [ "OmbelineM" ], "count": 1 } ]
2024-09-30T14:06:17.000Z
2024-10-17T13:23:46.614Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/6zWl0VsnO1m0O2ynNutkT.jpeg", "fullname": "Esteban Manuel Gudiño Acevedo", "name": "KairosArg", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 1, "isFollowing": false } ]
/posts/fffiloni/749385335939925
10,538
1
208348942789195
[ { "type": "text", "value": "Llama 3.2 3b + code-instruct: get our newest version of Enigma!", "raw": "Llama 3.2 3b + code-instruct: get our newest version of Enigma!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/ValiantLabs/Llama3.2-3B-Enigma", "href": null, "resource": { "type": "model", "id": "ValiantLabs/Llama3.2-3B-Enigma", "discussionNum": null }, "url": "https://huggingface.co/ValiantLabs/Llama3.2-3B-Enigma", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": " is trained on our high quality code-instruct (", "raw": " is trained on our high quality code-instruct (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/sequelbox/Tachibana", "href": null, "resource": { "type": "dataset", "id": "sequelbox/Tachibana", "discussionNum": null }, "url": "https://huggingface.co/datasets/sequelbox/Tachibana", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ") and general chat (", "raw": ") and general chat (", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "resource", "value": null, "raw": "https://huggingface.co/datasets/sequelbox/Supernova", "href": null, "resource": { "type": "dataset", "id": "sequelbox/Supernova", "discussionNum": null }, "url": "https://huggingface.co/datasets/sequelbox/Supernova", "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": ") data.", "raw": ") data.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "try it now :) more models and datasets coming soon!", "raw": "try it now :) more models and datasets coming soon!", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Llama 3.2 3b + code-instruct: get our newest version of Enigma! https://huggingface.co/ValiantLabs/Llama3.2-3B-Enigma is trained on our high quality code-instruct (https://huggingface.co/datasets/sequelbox/Tachibana) and general chat (https://huggingface.co/datasets/sequelbox/Supernova) data. try it now :) more models and datasets coming soon!
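If you want to try it from Python, a minimal sketch using the standard transformers pipeline (recent versions apply the chat template automatically; the prompt and generation settings here are placeholders, not an official recommendation):

```python
from transformers import pipeline

# device_map="auto" requires the `accelerate` package.
pipe = pipeline("text-generation", model="ValiantLabs/Llama3.2-3B-Enigma", device_map="auto")

messages = [{"role": "user", "content": "Write a Python function that checks if a string is a palindrome."}]
result = pipe(messages, max_new_tokens=256)

# In recent transformers versions the returned conversation includes the
# assistant reply as the last message.
print(result[0]["generated_text"])
```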
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63444f2687964b331809eb55/WvZivsvKsM_t0tBtakovK.png", "fullname": "t.d.a.g.", "name": "sequelbox", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 51, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "louisbrulenaudet", "osanseviero", "DiamanteAmarelo" ], "count": 4 }, { "reaction": "🤗", "users": [ "joselito50", "DiamanteAmarelo" ], "count": 2 }, { "reaction": "🔥", "users": [ "zoeywin", "DiamanteAmarelo" ], "count": 2 } ]
2024-09-30T13:38:43.000Z
2024-09-30T13:38:43.007Z
[]
/posts/sequelbox/208348942789195
1,496
0
505213713906991
[ { "type": "text", "value": "Hey Guys. Signed up last year and just upgraded to PRO to have access to private zero Spaces.", "raw": "Hey Guys. Signed up last year and just upgraded to PRO to have access to private zero Spaces.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Umm I have a slight problem. I was trying to deploy flux.1 [dev] on my own private space and it keeps giving me a runtime error. The [schnell] model installed without any problems. Is there a way to resolve this?", "raw": "Umm I have a slight problem. I was trying to deploy flux.1 [dev] on my own private space and it keeps giving me a runtime error. The [schnell] model installed without any problems. Is there a way to resolve this?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Hey Guys. Signed up last year and just upgraded to PRO to have access to private zero Spaces. Umm I have a slight problem. I was trying to deploy flux.1 [dev] on my own private space and it keeps giving me a runtime error. The [schnell] model installed without any problems. Is there a way to resolve this?
{ "avatarUrl": "/avatars/a4a6bcd5bbf3f839f505324a002a1b24.svg", "fullname": "Lavar Storr", "name": "Desgait", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666" ], "count": 1 } ]
2024-09-30T10:11:44.000Z
2024-09-30T15:56:52.005Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false }, { "avatarUrl": "/avatars/a4a6bcd5bbf3f839f505324a002a1b24.svg", "fullname": "Lavar Storr", "name": "Desgait", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 2, "isFollowing": false } ]
/posts/Desgait/505213713906991
505
4
373264657117646
[ { "type": "text", "value": "We need a fork feature for models and datasets similar to \"Duplicate this space\" in spaces ! Don't you think ?", "raw": "We need a fork feature for models and datasets similar to \"Duplicate this space\" in spaces ! Don't you think ?", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Sometimes you just want to save something in your profile privately and work on it later without the hassle of \"load_.../push_to_hub\" in a code file.", "raw": "Sometimes you just want to save something in your profile privately and work on it later without the hassle of \"load_.../push_to_hub\" in a code file.", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "I know this is super lazy 😅 But it is what it is ...", "raw": "I know this is super lazy 😅 But it is what it is ...", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "tag : ", "raw": "tag : ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "mention", "value": null, "raw": "@victor", "href": null, "resource": null, "url": null, "code": null, "user": "victor", "label": null, "lang": null }, { "type": "text", "value": " ", "raw": " ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
We need a fork feature for models and datasets similar to "Duplicate this space" in spaces ! Don't you think ? Sometimes you just want to save something in your profile privately and work on it later without the hassle of "load_.../push_to_hub" in a code file. I know this is super lazy 😅 But it is what it is ... tag : @victor
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626237d9bbcbd1c34f1bb231/EJrOjvAL-68qMCYdnvOrq.png", "fullname": "Ali El Filali", "name": "alielfilali01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 186, "isFollowing": false }
[]
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f17f0a0925b9863e28ad517/X7QKoiXbUtEZSG9jyvfk3.jpeg", "fullname": "Victor Mustar", "name": "victor", "type": "user", "isPro": true, "isHf": true, "isMod": false, "followerCount": 2607 } ]
[ { "reaction": "👀", "users": [ "John6666", "nicolay-r" ], "count": 2 }, { "reaction": "🤗", "users": [ "FrankBr" ], "count": 1 } ]
2024-09-30T09:44:58.000Z
2024-09-30T16:39:26.408Z
[ { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626237d9bbcbd1c34f1bb231/EJrOjvAL-68qMCYdnvOrq.png", "fullname": "Ali El Filali", "name": "alielfilali01", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 186, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png", "fullname": "John Smith", "name": "John6666", "type": "user", "isPro": true, "isHf": false, "isMod": false, "followerCount": 398, "isFollowing": false }, { "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6527e89a8808d80ccff88b7a/CuGNmF1Et8KMQ0mCd1NEJ.jpeg", "fullname": "Lain", "name": "not-lain", "type": "user", "isPro": false, "isHf": false, "isMod": false, "followerCount": 941, "isFollowing": false } ]
/posts/alielfilali01/373264657117646
863
5
307253092855432
[ { "type": "text", "value": "Want to supercharge your journalism with AI but don't know where to start? I've got you covered. 🚀", "raw": "Want to supercharge your journalism with AI but don't know where to start? I've got you covered. 🚀", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Ran two workshops at Media Party w/ Brown Institute for Media Innovation at Columbia University this week-end, packed with open-source AI tools for journalists. Thought you might find 'em useful too, so I'm open-sourcing my slides 😉", "raw": "Ran two workshops at Media Party w/ Brown Institute for Media Innovation at Columbia University this week-end, packed with open-source AI tools for journalists. Thought you might find 'em useful too, so I'm open-sourcing my slides 😉", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Here's a taste of what's in the beginner's deck (no-code tools focus):", "raw": "Here's a taste of what's in the beginner's deck (no-code tools focus):", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Scrape websites without coding", "raw": "- Scrape websites without coding", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Analyze bias in AI image generators", "raw": "- Analyze bias in AI image generators", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Transcribe audio/video on your device", "raw": "- Transcribe audio/video on your device", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Edit images with words", "raw": "- Edit images with words", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Extract info from docs, websites, PDFs", "raw": "- 
Extract info from docs, websites, PDFs", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Analyze images, handwriting, videos", "raw": "- Analyze images, handwriting, videos", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "- Create custom AI assistants", "raw": "- Create custom AI assistants", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "Tons more in the deck. ", "raw": "Tons more in the deck. ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "👉 Full presentation link: ", "raw": "👉 Full presentation link: ", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "link", "value": null, "raw": "https://docs.google.com/presentation/d/1Q887BhrcrDDgfi0O-Mbx2GI1De2H3GkIiXmd9MvmxYE/edit?usp=sharing", "href": "https://docs.google.com/presentation/d/1Q887BhrcrDDgfi0O-Mbx2GI1De2H3GkIiXmd9MvmxYE/edit?usp=sharing", "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "new_line", "value": null, "raw": "\n", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null }, { "type": "text", "value": "#AIinJournalism #MediaInnovation #OpenSourceAI #MediaParty", "raw": "#AIinJournalism #MediaInnovation #OpenSourceAI #MediaParty", "href": null, "resource": null, "url": null, "code": null, "user": null, "label": null, "lang": null } ]
Want to supercharge your journalism with AI but don't know where to start? I've got you covered. 🚀 Ran two workshops at Media Party w/ Brown Institute for Media Innovation at Columbia University this week-end, packed with open-source AI tools for journalists. Thought you might find 'em useful too, so I'm open-sourcing my slides 😉 Here's a taste of what's in the beginner's deck (no-code tools focus): - Scrape websites without coding - Analyze bias in AI image generators - Transcribe audio/video on your device - Edit images with words - Extract info from docs, websites, PDFs - Analyze images, handwriting, videos - Create custom AI assistants Tons more in the deck. 👉 Full presentation link: https://docs.google.com/presentation/d/1Q887BhrcrDDgfi0O-Mbx2GI1De2H3GkIiXmd9MvmxYE/edit?usp=sharing #AIinJournalism #MediaInnovation #OpenSourceAI #MediaParty
{ "avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg", "fullname": "Florent Daudens", "name": "fdaudens", "type": "user", "isPro": false, "isHf": true, "isMod": false, "followerCount": 384, "isFollowing": false }
[]
[]
[ { "reaction": "👀", "users": [ "John6666", "louisbrulenaudet", "quantumcomputer" ], "count": 3 }, { "reaction": "🔥", "users": [ "hl0737" ], "count": 1 } ]
2024-09-29T23:47:45.000Z
2024-09-29T23:47:45.808Z
[]
/posts/fdaudens/307253092855432
2,094
0